diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 678143b7f31e..43ddc7d33ac7 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -9,3 +9,4 @@ /java-vertexai/ @googleapis/vertexai-team @googleapis/cloud-sdk-java-team /java-bigquerystorage/ @googleapis/bigquery-team @googleapis/cloud-sdk-java-team /java-bigquery/ @googleapis/bigquery-team @googleapis/cloud-sdk-java-team +/java-storage/ @googleapis/gcs-team @googleapis/cloud-sdk-java-team diff --git a/.gitignore b/.gitignore index 8bd08dcd00d9..94bd4eba71c7 100644 --- a/.gitignore +++ b/.gitignore @@ -73,3 +73,5 @@ monorepo *.tfstate.backup *.tfstate.*.backup *.tfstate.lock.info + +.jqwik-database \ No newline at end of file diff --git a/.kokoro/common.sh b/.kokoro/common.sh index c2db30296c22..1cda3ee53845 100644 --- a/.kokoro/common.sh +++ b/.kokoro/common.sh @@ -23,6 +23,7 @@ excluded_modules=( 'java-bigquerystorage' 'java-datastore' 'java-logging-logback' + 'java-storage' ) function retry_with_backoff { diff --git a/.kokoro/presubmit/logging-graalvm-native-presubmit.cfg b/.kokoro/presubmit/logging-graalvm-native-presubmit.cfg index d16598bb817f..71ca12b5da0a 100644 --- a/.kokoro/presubmit/logging-graalvm-native-presubmit.cfg +++ b/.kokoro/presubmit/logging-graalvm-native-presubmit.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. 
env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.56.1" + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.57.0" # {x-version-update:google-cloud-shared-dependencies:current} } env_vars: { @@ -14,25 +14,29 @@ env_vars: { # TODO: remove this after we've migrated all tests and scripts env_vars: { key: "GCLOUD_PROJECT" - value: "cloud-java-ci-test" + value: "gcloud-devel" } env_vars: { key: "GOOGLE_CLOUD_PROJECT" - value: "cloud-java-ci-test" + value: "gcloud-devel" } env_vars: { key: "GOOGLE_APPLICATION_CREDENTIALS" - value: "secret_manager/cloud-java-ci-it-service-account" + value: "secret_manager/java-it-service-account" } env_vars: { key: "SECRET_MANAGER_KEYS" - value: "cloud-java-ci-it-service-account, java-bigqueryconnection-samples-secrets" + value: "java-it-service-account" } +env_vars: { + key: "IT_SERVICE_ACCOUNT_EMAIL" + value: "it-service-account@gcloud-devel.iam.gserviceaccount.com" +} env_vars: { key: "BUILD_SUBDIR" - value: "java-logging" + value: "java-storage" } diff --git a/.kokoro/presubmit/logging-integration.cfg b/.kokoro/presubmit/logging-integration.cfg index 30143885f2b4..d5b05fb25c92 100644 --- a/.kokoro/presubmit/logging-integration.cfg +++ b/.kokoro/presubmit/logging-integration.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. 
env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/java11" + value: "gcr.io/cloud-devrel-kokoro-resources/java8" } env_vars: { @@ -14,25 +14,31 @@ env_vars: { # TODO: remove this after we've migrated all tests and scripts env_vars: { key: "GCLOUD_PROJECT" - value: "cloud-java-ci-test" + value: "gcloud-devel" } env_vars: { key: "GOOGLE_CLOUD_PROJECT" - value: "cloud-java-ci-test" + value: "gcloud-devel" } env_vars: { key: "GOOGLE_APPLICATION_CREDENTIALS" - value: "secret_manager/cloud-java-ci-it-service-account" + value: "secret_manager/java-it-service-account" } env_vars: { key: "SECRET_MANAGER_KEYS" - value: "cloud-java-ci-it-service-account, java-bigqueryconnection-samples-secrets" + value: "java-it-service-account,client-library-test-universe-domain-credential" } +env_vars: { + key: "IT_SERVICE_ACCOUNT_EMAIL" + value: "it-service-account@gcloud-devel.iam.gserviceaccount.com" +} + + env_vars: { key: "BUILD_SUBDIR" - value: "java-logging" + value: "java-storage" } diff --git a/.kokoro/presubmit/storage-graalvm-native-presubmit.cfg b/.kokoro/presubmit/storage-graalvm-native-presubmit.cfg new file mode 100644 index 000000000000..71ca12b5da0a --- /dev/null +++ b/.kokoro/presubmit/storage-graalvm-native-presubmit.cfg @@ -0,0 +1,42 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.57.0" # {x-version-update:google-cloud-shared-dependencies:current} +} + +env_vars: { + key: "JOB_TYPE" + value: "graalvm-single" +} + +# TODO: remove this after we've migrated all tests and scripts +env_vars: { + key: "GCLOUD_PROJECT" + value: "gcloud-devel" +} + +env_vars: { + key: "GOOGLE_CLOUD_PROJECT" + value: "gcloud-devel" +} + +env_vars: { + key: "GOOGLE_APPLICATION_CREDENTIALS" + value: "secret_manager/java-it-service-account" +} + +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "java-it-service-account" +} + +env_vars: { + key: "IT_SERVICE_ACCOUNT_EMAIL" + value: "it-service-account@gcloud-devel.iam.gserviceaccount.com" +} +env_vars: { + key: "BUILD_SUBDIR" + value: "java-storage" +} diff --git a/.kokoro/presubmit/storage-integration.cfg b/.kokoro/presubmit/storage-integration.cfg new file mode 100644 index 000000000000..d5b05fb25c92 --- /dev/null +++ b/.kokoro/presubmit/storage-integration.cfg @@ -0,0 +1,44 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/java8" +} + +env_vars: { + key: "JOB_TYPE" + value: "integration-single" +} + +# TODO: remove this after we've migrated all tests and scripts +env_vars: { + key: "GCLOUD_PROJECT" + value: "gcloud-devel" +} + +env_vars: { + key: "GOOGLE_CLOUD_PROJECT" + value: "gcloud-devel" +} + +env_vars: { + key: "GOOGLE_APPLICATION_CREDENTIALS" + value: "secret_manager/java-it-service-account" +} + +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "java-it-service-account,client-library-test-universe-domain-credential" +} + +env_vars: { + key: "IT_SERVICE_ACCOUNT_EMAIL" + value: "it-service-account@gcloud-devel.iam.gserviceaccount.com" +} + + +env_vars: { + key: "BUILD_SUBDIR" + value: "java-storage" +} diff --git a/gapic-libraries-bom/pom.xml b/gapic-libraries-bom/pom.xml index 70790ff3c588..a28f738a5512 100644 --- a/gapic-libraries-bom/pom.xml +++ b/gapic-libraries-bom/pom.xml @@ -1256,6 +1256,13 @@ pom import + + com.google.cloud + google-cloud-storage-bom + 2.64.1-SNAPSHOT + pom + import + com.google.cloud google-cloud-storage-transfer-bom diff --git a/generation/check_non_release_please_versions.sh b/generation/check_non_release_please_versions.sh index 14393d905915..8af9636bf531 100755 --- a/generation/check_non_release_please_versions.sh +++ b/generation/check_non_release_please_versions.sh @@ -12,6 +12,7 @@ for pomFile in $(find . -mindepth 2 -name pom.xml | sort ); do [[ "${pomFile}" =~ .*java-datastore.* ]] || \ [[ "${pomFile}" =~ .*java-logging-logback.* ]] || \ [[ "${pomFile}" =~ .*java-bigquery.* ]] || \ + [[ "${pomFile}" =~ .*java-storage.* ]] || \ [[ "${pomFile}" =~ .*.github*. 
]]; then continue fi diff --git a/generation_config.yaml b/generation_config.yaml index 0909dc1bc824..99d66cff0519 100644 --- a/generation_config.yaml +++ b/generation_config.yaml @@ -2495,6 +2495,28 @@ libraries: - proto_path: google/cloud/speech/v1 - proto_path: google/cloud/speech/v1p1beta1 - proto_path: google/cloud/speech/v2 +- api_shortname: storage + name_pretty: Cloud Storage + product_documentation: https://cloud.google.com/storage + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-storage/latest/history + api_description: 'is a durable and highly available object storage service. Google + Cloud Storage is almost infinitely scalable and guarantees consistency: when a + write succeeds, the latest copy of the object will be returned to any GET, globally.' + issue_tracker: https://issuetracker.google.com/savedsearches/559782 + release_level: stable + language: java + distribution_name: com.google.cloud:google-cloud-storage + codeowner_team: '@googleapis/gcs-team' + api_id: storage.googleapis.com + requires_billing: true + library_type: GAPIC_COMBO + extra_versioned_modules: gapic-google-cloud-storage-v2 + excluded_poms: google-cloud-storage-bom,google-cloud-storage + recommended_package: com.google.cloud.storage + transport: rest + GAPICs: + - proto_path: google/storage/v2 + - proto_path: google/storage/control/v2 - api_shortname: storagetransfer name_pretty: Storage Transfer Service product_documentation: https://cloud.google.com/storage-transfer-service diff --git a/java-storage/.OwlBot-hermetic.yaml b/java-storage/.OwlBot-hermetic.yaml new file mode 100644 index 000000000000..3a281f188c5f --- /dev/null +++ b/java-storage/.OwlBot-hermetic.yaml @@ -0,0 +1,38 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +deep-preserve-regex: +- /java-storage/google-.*/src/test/java/com/google/storage/control/v2/ITFoldersTest.java +deep-remove-regex: +- /java-storage/grpc-google-.*/src +- /java-storage/proto-google-.*/src +- /java-storage/gapic-google-.*/src +- /java-storage/google-cloud-storage-control/src +deep-copy-regex: +- source: /google/storage/v2/.*-java/proto-google-.*/src + dest: /owl-bot-staging/java-storage/v2/proto-google-cloud-storage-v2/src +- source: /google/storage/v2/.*-java/grpc-google-.*/src + dest: /owl-bot-staging/java-storage/v2/grpc-google-cloud-storage-v2/src +- source: /google/storage/v2/.*-java/gapic-google-.*/src + dest: /owl-bot-staging/java-storage/v2/gapic-google-cloud-storage-v2/src +- source: /google/storage/v2/.*-java/gapic-google-.*/src/main/java/com/google/storage/v2/gapic_metadata.json + dest: /owl-bot-staging/java-storage/v2/gapic-google-cloud-storage-v2/src/main/resources/com/google/storage/v2/gapic_metadata.json +- source: /google/storage/control/v2/.*-java/proto-google-.*/src + dest: /owl-bot-staging/java-storage/v2/proto-google-cloud-storage-control-v2/src +- source: /google/storage/control/v2/.*-java/grpc-google-.*/src + dest: /owl-bot-staging/java-storage/v2/grpc-google-cloud-storage-control-v2/src +- source: /google/storage/control/v2/.*-java/gapic-google-.*/src + dest: /owl-bot-staging/java-storage/v2/google-cloud-storage-control/src +- source: /google/storage/control/v2/.*-java/gapic-google-.*/src/main/java/com/google/storage/control/v2/gapic_metadata.json + dest: 
/owl-bot-staging/java-storage/v2/google-cloud-storage-control/src/main/resources/com/google/storage/control/v2/gapic_metadata.json diff --git a/java-storage/.cloudbuild/samples_build.yaml b/java-storage/.cloudbuild/samples_build.yaml new file mode 100644 index 000000000000..c22f133517d0 --- /dev/null +++ b/java-storage/.cloudbuild/samples_build.yaml @@ -0,0 +1,35 @@ +steps: +- name: gcr.io/cloud-devrel-public-resources/java8 + entrypoint: ls + args: [ + '-alt', + ] +- name: gcr.io/cloud-devrel-public-resources/java8 + entrypoint: curl + args: [ + '--header', + 'Metadata-Flavor: Google', + 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email' + ] +- name: gcr.io/cloud-devrel-public-resources/java8 + entrypoint: pwd +- name: gcr.io/cloud-devrel-public-resources/java8 + entrypoint: bash + args: [ + '.kokoro/build.sh' + ] + env: + - 'JOB_TYPE=samples' + - 'GOOGLE_CLOUD_PROJECT=cloud-java-ci-sample' + - 'GOOGLE_CLOUD_PROJECT_NUMBER=615621127317' + - 'IT_SERVICE_ACCOUNT_EMAIL=samples@cloud-java-ci-sample.iam.gserviceaccount.com' +- name: gcr.io/cloud-devrel-public-resources/java8 + entrypoint: echo + args: [ + 'Sample job succeeded', + ] +timeout: 3600s +options: + defaultLogsBucketBehavior: REGIONAL_USER_OWNED_BUCKET + + diff --git a/java-storage/.gemini/config.yaml b/java-storage/.gemini/config.yaml new file mode 100644 index 000000000000..8afb84853471 --- /dev/null +++ b/java-storage/.gemini/config.yaml @@ -0,0 +1,10 @@ +# https://developers.google.com/gemini-code-assist/docs/customize-gemini-behavior-github#custom-configuration +have_fun: false +code_review: + disable: false + comment_severity_threshold: HIGH + max_review_comments: -1 + pull_request_opened: + help: false + summary: false + code_review: false diff --git a/java-storage/.readme-partials.yaml b/java-storage/.readme-partials.yaml new file mode 100644 index 000000000000..78d4134690b8 --- /dev/null +++ b/java-storage/.readme-partials.yaml @@ -0,0 +1,117 @@ 
+custom_content: | + #### Creating an authorized service object + + To make authenticated requests to Google Cloud Storage, you must create a service object with credentials. You can + then make API calls by calling methods on the Storage service object. The simplest way to authenticate is to use + [Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials). + These credentials are automatically inferred from your environment, so you only need the following code to create your + service object: + + ```java + import com.google.cloud.storage.Storage; + import com.google.cloud.storage.StorageOptions; + + Storage storage = StorageOptions.getDefaultInstance().getService(); + ``` + + For other authentication options, see the [Authentication](https://github.com/googleapis/google-cloud-java#authentication) page in Google Cloud Java. + + #### Storing data + Stored objects are called "blobs" in `google-cloud` and are organized into containers called "buckets". `Blob`, a + subclass of `BlobInfo`, adds a layer of service-related functionality over `BlobInfo`. Similarly, `Bucket` adds a + layer of service-related functionality over `BucketInfo`. In this code snippet, we will create a new bucket and + upload a blob to that bucket. + + Add the following imports at the top of your file: + + ```java + import static java.nio.charset.StandardCharsets.UTF_8; + + import com.google.cloud.storage.Blob; + import com.google.cloud.storage.Bucket; + import com.google.cloud.storage.BucketInfo; + ``` + + Then add the following code to create a bucket and upload a simple blob. + + *Important: Bucket names have to be globally unique (among all users of Cloud Storage). If you choose a bucket name + that already exists, you'll get a helpful error message telling you to choose another name. In the code below, replace + "my_unique_bucket" with a unique bucket name. 
See more about naming rules + [here](https://cloud.google.com/storage/docs/bucket-naming?hl=en#requirements).* + + ```java + // Create a bucket + String bucketName = "my_unique_bucket"; // Change this to something unique + Bucket bucket = storage.create(BucketInfo.of(bucketName)); + + // Upload a blob to the newly created bucket + BlobId blobId = BlobId.of(bucketName, "my_blob_name"); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build(); + Blob blob = storage.create(blobInfo, "a simple blob".getBytes(UTF_8)); + ``` + + A complete example for creating a blob can be found at + [UploadObject.java](https://github.com/googleapis/java-storage/blob/main/samples/snippets/src/main/java/com/example/storage/object/UploadObject.java). + + At this point, you will be able to see your newly created bucket and blob on the Google Developers Console. + + #### Retrieving data + Now that we have content uploaded to the server, we can see how to read data from the server. Add the following line + to your program to get back the blob we uploaded. + + ```java + BlobId blobId = BlobId.of(bucketName, "my_blob_name"); + byte[] content = storage.readAllBytes(blobId); + String contentString = new String(content, UTF_8); + ``` + + A complete example for accessing blobs can be found at + [DownloadObject.java](https://github.com/googleapis/java-storage/blob/main/samples/snippets/src/main/java/com/example/storage/object/DownloadObject.java). + + #### Updating data + Another thing we may want to do is update a blob. The following snippet shows how to update a Storage blob if it exists. 
+ + ``` java + BlobId blobId = BlobId.of(bucketName, "my_blob_name"); + Blob blob = storage.get(blobId); + if (blob != null) { + byte[] prevContent = blob.getContent(); + System.out.println(new String(prevContent, UTF_8)); + WritableByteChannel channel = blob.writer(); + channel.write(ByteBuffer.wrap("Updated content".getBytes(UTF_8))); + channel.close(); + } + ``` + + #### Listing buckets and contents of buckets + Suppose that you've added more buckets and blobs, and now you want to see the names of your buckets and the contents + of each one. Add the following code to list all your buckets and all the blobs inside each bucket. + + ```java + // List all your buckets + System.out.println("My buckets:"); + for (Bucket bucket : storage.list().iterateAll()) { + System.out.println(bucket); + + // List all blobs in the bucket + System.out.println("Blobs in the bucket:"); + for (Blob blob : bucket.list().iterateAll()) { + System.out.println(blob); + } + } + ``` + + #### Complete source code + + See [ListObjects.java](https://github.com/googleapis/java-storage/blob/main/samples/snippets/src/main/java/com/example/storage/object/ListObjects.java) for a complete example. + + ### Example Applications + + - [`Bookshelf`](https://github.com/GoogleCloudPlatform/getting-started-java/tree/main/bookshelf) - An App Engine application that manages a virtual bookshelf. + - This app uses `google-cloud` to interface with Cloud Datastore and Cloud Storage. It also uses Cloud SQL, another Google Cloud Platform service. + - [`Flexible Environment/Storage example`](https://github.com/GoogleCloudPlatform/java-docs-samples/tree/main/flexible/cloudstorage) - An app that uploads files to a public Cloud Storage bucket on the App Engine Flexible Environment runtime. 
+ +versioning: | + This library follows [Semantic Versioning](http://semver.org/), but does update [Storage interface](src/main/java/com.google.cloud.storage/Storage.java) + to introduce new methods which can break your implementations if you implement this interface for testing purposes. + diff --git a/java-storage/.repo-metadata.json b/java-storage/.repo-metadata.json new file mode 100644 index 000000000000..0b0e5709e90d --- /dev/null +++ b/java-storage/.repo-metadata.json @@ -0,0 +1,21 @@ +{ + "api_shortname": "storage", + "name_pretty": "Cloud Storage", + "product_documentation": "https://cloud.google.com/storage", + "api_description": "is a durable and highly available object storage service. Google Cloud Storage is almost infinitely scalable and guarantees consistency: when a write succeeds, the latest copy of the object will be returned to any GET, globally.", + "client_documentation": "https://cloud.google.com/java/docs/reference/google-cloud-storage/latest/history", + "release_level": "stable", + "transport": "http", + "language": "java", + "repo": "googleapis/google-cloud-java", + "repo_short": "java-storage", + "distribution_name": "com.google.cloud:google-cloud-storage", + "api_id": "storage.googleapis.com", + "library_type": "GAPIC_COMBO", + "requires_billing": true, + "codeowner_team": "@googleapis/gcs-team", + "excluded_poms": "google-cloud-storage-bom,google-cloud-storage", + "issue_tracker": "https://issuetracker.google.com/savedsearches/559782", + "extra_versioned_modules": "gapic-google-cloud-storage-v2", + "recommended_package": "com.google.cloud.storage" +} \ No newline at end of file diff --git a/java-storage/CHANGELOG.md b/java-storage/CHANGELOG.md new file mode 100644 index 000000000000..da103c69b515 --- /dev/null +++ b/java-storage/CHANGELOG.md @@ -0,0 +1,2826 @@ +# Changelog + +## [2.64.0](https://github.com/googleapis/java-storage/compare/v2.63.0...v2.64.0) (2026-02-27) + + +### Features + +* Adding Otel for MPU 
([#3387](https://github.com/googleapis/java-storage/issues/3387)) ([2fef010](https://github.com/googleapis/java-storage/commit/2fef01016bede7892340b3434e1c229ddfa83904)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.67.0 ([ce2e03a](https://github.com/googleapis/java-storage/commit/ce2e03a80ea4308ac1365697306071e3abd92a88)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.57.0 ([#3500](https://github.com/googleapis/java-storage/issues/3500)) ([1617b39](https://github.com/googleapis/java-storage/commit/1617b39bdca6bedb8a5e909794acb00eebcdbbcd)) +* Update dependency node to v24 ([#3492](https://github.com/googleapis/java-storage/issues/3492)) ([0ae3315](https://github.com/googleapis/java-storage/commit/0ae3315c1f06547de3308704c772a72645c9ab88)) +* Update googleapis/sdk-platform-java action to v2.67.0 ([#3501](https://github.com/googleapis/java-storage/issues/3501)) ([f1ff0c9](https://github.com/googleapis/java-storage/commit/f1ff0c9e061e38d2c0414bc6a82b53123e59c106)) + +## [2.63.0](https://github.com/googleapis/java-storage/compare/v2.62.1...v2.63.0) (2026-02-12) + + +### Features + +* Add a DeleteFolderRecursive API definition ([87642bd](https://github.com/googleapis/java-storage/commit/87642bd58759a61f5fdf04b1765b77297ea443ea)) +* Added a new field `ComposeObjectRequest.delete_source_objects` field ([87642bd](https://github.com/googleapis/java-storage/commit/87642bd58759a61f5fdf04b1765b77297ea443ea)) +* Next release from main branch is 2.63.0 ([#3486](https://github.com/googleapis/java-storage/issues/3486)) ([412b5fb](https://github.com/googleapis/java-storage/commit/412b5fbe7e724e1220561f04a59251746c8a99a2)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.66.1 ([87642bd](https://github.com/googleapis/java-storage/commit/87642bd58759a61f5fdf04b1765b77297ea443ea)) +* Validate blob paths to prevent directory traversal in 
TransferManager downloads ([#3455](https://github.com/googleapis/java-storage/issues/3455)) ([49abf75](https://github.com/googleapis/java-storage/commit/49abf7559028d890e1c4848a5a356f266f6788be)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.56.1 ([#3484](https://github.com/googleapis/java-storage/issues/3484)) ([3a5deee](https://github.com/googleapis/java-storage/commit/3a5deee364175fc8c879cb22f176354a62117d22)) +* Update dependency node to v24 ([#3368](https://github.com/googleapis/java-storage/issues/3368)) ([ed2ddb7](https://github.com/googleapis/java-storage/commit/ed2ddb79c215fb7a9400e4168d10a7516ca2e664)) + +## [2.62.1](https://github.com/googleapis/java-storage/compare/v2.62.0...v2.62.1) (2026-01-28) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.66.0 ([557be35](https://github.com/googleapis/java-storage/commit/557be35b220bdb8b732fb593e5abf869a0c2bb53)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.56.0 ([#3468](https://github.com/googleapis/java-storage/issues/3468)) ([d2a1a3a](https://github.com/googleapis/java-storage/commit/d2a1a3a661f42f1327a53dd14295ccaa4cd19e4e)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.60.0 ([#3466](https://github.com/googleapis/java-storage/issues/3466)) ([2b860e3](https://github.com/googleapis/java-storage/commit/2b860e356fba0037d40ba0eaa1320fb3e4a9e0ae)) +* Update googleapis/sdk-platform-java action to v2.66.0 ([#3469](https://github.com/googleapis/java-storage/issues/3469)) ([bd2f0c6](https://github.com/googleapis/java-storage/commit/bd2f0c6c881db6b1cf41c85c5bd4bb7df11bc7a6)) + +## [2.62.0](https://github.com/googleapis/java-storage/compare/v2.61.0...v2.62.0) (2026-01-15) + + +### Features + +* Add default e2e checksum validation in the final call of resumable uploads ([#3450](https://github.com/googleapis/java-storage/issues/3450)) 
([37d2bba](https://github.com/googleapis/java-storage/commit/37d2bba93dfbdc9a81195aff9325a66883b6795f)) + + +### Bug Fixes + +* Update appendable upload retry logic to be able to more gracefully handle slow uploads ([#3438](https://github.com/googleapis/java-storage/issues/3438)) ([e660e5a](https://github.com/googleapis/java-storage/commit/e660e5a24f35798c6fcd6c9b30cf0a8324e5f5f8)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.55.1 ([#3461](https://github.com/googleapis/java-storage/issues/3461)) ([7e45b9e](https://github.com/googleapis/java-storage/commit/7e45b9e6f03ddea3e0fce31b95f5b54c37799be4)) +* Update googleapis/sdk-platform-java action to v2.65.1 ([#3460](https://github.com/googleapis/java-storage/issues/3460)) ([2a42131](https://github.com/googleapis/java-storage/commit/2a42131d3850e661a53a93dd36b87fd58acd2a4f)) + +## [2.61.0](https://github.com/googleapis/java-storage/compare/v2.60.0...v2.61.0) (2025-12-15) + + +### Features + +* Add support for partial success in ListBuckets for json ([#3415](https://github.com/googleapis/java-storage/issues/3415)) ([37ef7f3](https://github.com/googleapis/java-storage/commit/37ef7f3894a867257d5366ab36129043b9a884f8)) +* Modifying getters and setters to be more inline with s3 interface. ([0a8bbea](https://github.com/googleapis/java-storage/commit/0a8bbeaf8babd296fce0690421d2e480b361ca7e)) +* **mpu:** Breaking change modifying getters and setters in MPU to be more inline with s3 interface. 
([66d54e2](https://github.com/googleapis/java-storage/commit/66d54e2dc36d90853bae4d529a620f45a7001487)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.64.1 ([511ff51](https://github.com/googleapis/java-storage/commit/511ff514410e55425e968ab900ac1b8825fe507a)) + + +### Dependencies + +* Update actions/checkout action to v6 ([d934ad9](https://github.com/googleapis/java-storage/commit/d934ad91aa1bf50e04870b1af747f1947fb2cbea)) +* Update actions/checkout action to v6 ([d99dd53](https://github.com/googleapis/java-storage/commit/d99dd53a0401f5460a63be7a6b927a27cdad0997)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20251118-2.0.0 ([f48fa3f](https://github.com/googleapis/java-storage/commit/f48fa3f1dc5feabd89be063b138804b83f722c0c)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20251118-2.0.0 ([#3427](https://github.com/googleapis/java-storage/issues/3427)) ([4612e72](https://github.com/googleapis/java-storage/commit/4612e72051d35b22135811a8723a65646a90d45e)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.54.2 ([90a71be](https://github.com/googleapis/java-storage/commit/90a71be0899d06995b515d875c65cdc5f852c7b0)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.54.2 ([#3424](https://github.com/googleapis/java-storage/issues/3424)) ([c989dc3](https://github.com/googleapis/java-storage/commit/c989dc3e65386e55bc25d2f6f63e499deda3fc69)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.59.0 ([f5d8337](https://github.com/googleapis/java-storage/commit/f5d8337ad556770a55cc260975e44e85e8e831b0)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.59.0 ([5480ce3](https://github.com/googleapis/java-storage/commit/5480ce347493ec425b675f462a42f45c22b06c28)) +* Update googleapis/sdk-platform-java action to v2.64.2 
([23494d7](https://github.com/googleapis/java-storage/commit/23494d79d3a2785c30c75f50fd3b0a03598c3599)) +* Update googleapis/sdk-platform-java action to v2.64.2 ([#3425](https://github.com/googleapis/java-storage/issues/3425)) ([fad2d7a](https://github.com/googleapis/java-storage/commit/fad2d7a780a084acd8ccdf2bb2f97a335c438594)) + +## [2.60.0](https://github.com/googleapis/java-storage/compare/v2.59.0...v2.60.0) (2025-11-07) + + +### Features + +* Add preview MultipartUploadClient#abortMultipartUpload https://github.com/googleapis/java-storage/pull/3361 ([160fa9a](https://github.com/googleapis/java-storage/commit/160fa9af7aa492373a9d9b40f65a6c56d7cab5ef)) +* Add preview MultipartUploadClient#completeMultipartUpload https://github.com/googleapis/java-storage/pull/3372 ([160fa9a](https://github.com/googleapis/java-storage/commit/160fa9af7aa492373a9d9b40f65a6c56d7cab5ef)) +* Add preview MultipartUploadClient#createMultipartUpload https://github.com/googleapis/java-storage/pull/3356 ([160fa9a](https://github.com/googleapis/java-storage/commit/160fa9af7aa492373a9d9b40f65a6c56d7cab5ef)) +* Add preview MultipartUploadClient#listParts https://github.com/googleapis/java-storage/pull/3359 ([160fa9a](https://github.com/googleapis/java-storage/commit/160fa9af7aa492373a9d9b40f65a6c56d7cab5ef)) +* Add preview MultipartUploadClient#uploadPart https://github.com/googleapis/java-storage/pull/3375 ([160fa9a](https://github.com/googleapis/java-storage/commit/160fa9af7aa492373a9d9b40f65a6c56d7cab5ef)) +* Add preview MultipartUploadSettings ([160fa9a](https://github.com/googleapis/java-storage/commit/160fa9af7aa492373a9d9b40f65a6c56d7cab5ef)) + + +### Bug Fixes + +* Add new system property (com.google.cloud.storage.grpc.bound_token) to allow disabling bound token use with grpc ([#3365](https://github.com/googleapis/java-storage/issues/3365)) ([ebf5e6d](https://github.com/googleapis/java-storage/commit/ebf5e6d30d8dc197ab388a70cc0d465c0f740496)) +* Call response.disconnect() after 
resolving resumable upload url ([#3385](https://github.com/googleapis/java-storage/issues/3385)) ([ac3be4b](https://github.com/googleapis/java-storage/commit/ac3be4b7e82d9340ede7d527a26ffe3e2ba58909)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.63.0 ([c1a8968](https://github.com/googleapis/java-storage/commit/c1a8968799c1cf5a970fe9f303adccdad0a117c8)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.54.1 ([#3381](https://github.com/googleapis/java-storage/issues/3381)) ([e3d3700](https://github.com/googleapis/java-storage/commit/e3d3700e06de2b0113e1cb01e99ef4aeed3c62c9)) + +## [2.59.0](https://github.com/googleapis/java-storage/compare/v2.58.1...v2.59.0) (2025-10-21) + + +### Features + +* Add per-message checksum validation for gRPC ReadObject operations ([#3336](https://github.com/googleapis/java-storage/issues/3336)) ([6eef1b0](https://github.com/googleapis/java-storage/commit/6eef1b0f587b9f32041ac4bcef1a16b1b0bc4bb3)) + + +### Bug Fixes + +* Add case insensitive check for X-Goog-Content-SHA256 in SignatureInfo ([#3337](https://github.com/googleapis/java-storage/issues/3337)) ([54bc2c1](https://github.com/googleapis/java-storage/commit/54bc2c12f2d0e8c164e4ddcaa1a61d2de3911131)) +* Migrate away from GoogleCredentials.fromStream() usages ([#3339](https://github.com/googleapis/java-storage/issues/3339)) ([7e42c2f](https://github.com/googleapis/java-storage/commit/7e42c2fbca53ca6b1266f784e58cee00cfed7d62)) +* Update BlobReadSession channels to not implicitly close once EOF is observed ([#3344](https://github.com/googleapis/java-storage/issues/3344)) ([9f0a93e](https://github.com/googleapis/java-storage/commit/9f0a93eb4c6bb8aab13915ca1cb40ba9e229a2f9)) +* Update grpc single-shot uploads to attach the callers stracktrace as suppressed exception if an error happens in the background ([#3330](https://github.com/googleapis/java-storage/issues/3330)) 
([64e2b2e](https://github.com/googleapis/java-storage/commit/64e2b2ef839e69da0605b9e53989c1f5a2b09e66)) +* Update retry logic for grpc start resumable upload to properly handle client side deadline_exceeded ([#3354](https://github.com/googleapis/java-storage/issues/3354)) ([6eb3331](https://github.com/googleapis/java-storage/commit/6eb33311d8dd7344e30ddcb92334fd52c7c63b4d)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.53.0 ([#3351](https://github.com/googleapis/java-storage/issues/3351)) ([e64565a](https://github.com/googleapis/java-storage/commit/e64565ab674f586ea4850408a3f30544997f4b1b)) + +## [2.58.1](https://github.com/googleapis/java-storage/compare/v2.58.0...v2.58.1) (2025-10-06) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.62.3 ([ba84793](https://github.com/googleapis/java-storage/commit/ba847937e553f6a47aa459f634f63ed42310762d)) +* Update BlobReadSession ScatteringByteChannel projection to use less CPU ([#3324](https://github.com/googleapis/java-storage/issues/3324)) ([678fecc](https://github.com/googleapis/java-storage/commit/678feccc972e557380e9ba5fcd52be099440197d)) +* Update DefaultRetryContext to trap and forward RejectedExceptionException to onFailure ([#3327](https://github.com/googleapis/java-storage/issues/3327)) ([1be31bd](https://github.com/googleapis/java-storage/commit/1be31bdfbc0283733e86b049d3be1911db50fb96)) +* Update PCU request building logic to properly clear crc32c and md5 ([#3323](https://github.com/googleapis/java-storage/issues/3323)) ([4da9f31](https://github.com/googleapis/java-storage/commit/4da9f3108d27f5c2ed3cc39eec161651f421e4db)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20250925-2.0.0 ([#3313](https://github.com/googleapis/java-storage/issues/3313)) ([ab310eb](https://github.com/googleapis/java-storage/commit/ab310eb5af51ed332329abd6c3441d18f9965571)) +* Update dependency 
com.google.cloud:sdk-platform-java-config to v3.52.3 ([#3325](https://github.com/googleapis/java-storage/issues/3325)) ([4d3e3be](https://github.com/googleapis/java-storage/commit/4d3e3be27811ad92becc93321048c4268cec2fcf)) +* Update googleapis/sdk-platform-java action to v2.62.3 ([#3322](https://github.com/googleapis/java-storage/issues/3322)) ([a5808ea](https://github.com/googleapis/java-storage/commit/a5808ea168a81f07040276c1a05da67108fda37f)) + +## [2.58.0](https://github.com/googleapis/java-storage/compare/v2.57.0...v2.58.0) (2025-09-23) + + +### Features + +* **storagecontrol:** Add GetIamPolicy, SetIamPolicy, and TestIamPermissions RPCs ([c884551](https://github.com/googleapis/java-storage/commit/c884551048a323f2a3fd7aaf4fce469d4d4f543e)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.62.2 ([984f8ca](https://github.com/googleapis/java-storage/commit/984f8ca23a38c7a892a2256a694b72431e44aa27)) +* Fix appendable upload finalization race condition ([#3295](https://github.com/googleapis/java-storage/issues/3295)) ([485be18](https://github.com/googleapis/java-storage/commit/485be184c08c7b857d8c9a9443f32903df879b23)) +* Fix IllegalMonitorStateException thrown from BlobAppendableUpload.isOpen() ([#3302](https://github.com/googleapis/java-storage/issues/3302)) ([aa90468](https://github.com/googleapis/java-storage/commit/aa904688b784d7427454318196ef88628e415246)) +* Update object context diff logic to be shallow rather than deep ([#3287](https://github.com/googleapis/java-storage/issues/3287)) ([2fd15f6](https://github.com/googleapis/java-storage/commit/2fd15f69e93a3df2b8dbbd4f08edd07c087e957c)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.2 ([#3298](https://github.com/googleapis/java-storage/issues/3298)) ([1489f3a](https://github.com/googleapis/java-storage/commit/1489f3a74c8a27f0888c40600c83adedcfd9a9ec)) +* Update googleapis/sdk-platform-java action to v2.62.2 
([#3299](https://github.com/googleapis/java-storage/issues/3299)) ([c3b05ac](https://github.com/googleapis/java-storage/commit/c3b05ac8798140f9ddcab098948a3a2f3638dc6b)) + +## [2.57.0](https://github.com/googleapis/java-storage/compare/v2.56.0...v2.57.0) (2025-09-09) + + +### Features + +* Add BlobInfo.ObjectContexts ([#3259](https://github.com/googleapis/java-storage/issues/3259)) ([485aefd](https://github.com/googleapis/java-storage/commit/485aefd3047c52c98d8bd913033c8aee1473e988)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.62.1 ([0e348db](https://github.com/googleapis/java-storage/commit/0e348dbee247e1e65713d0155e1aa29ae5c5e0e4)) +* Update BlobAppendableUpload implementation to periodically flush for large writes ([#3278](https://github.com/googleapis/java-storage/issues/3278)) ([d0ffe18](https://github.com/googleapis/java-storage/commit/d0ffe18084b32936c889bb280005294c7ae7064d)) +* Update otel integration to properly activate span context for lazy RPCs such as reads & writes pt.2 ([#3277](https://github.com/googleapis/java-storage/issues/3277)) ([3240f67](https://github.com/googleapis/java-storage/commit/3240f67c192a855c92256526aeb2fa689ea15445)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.1 ([#3280](https://github.com/googleapis/java-storage/issues/3280)) ([d046ea3](https://github.com/googleapis/java-storage/commit/d046ea3da19288b64c48300bdd4f94a0ebf35458)) +* Update googleapis/sdk-platform-java action to v2.62.1 ([#3281](https://github.com/googleapis/java-storage/issues/3281)) ([c9078bb](https://github.com/googleapis/java-storage/commit/c9078bb98e3999234f95ab2e4c842c9dd7191c3d)) + +## [2.56.0](https://github.com/googleapis/java-storage/compare/v2.55.0...v2.56.0) (2025-08-25) + + +### Features + +* *breaking behavior* rewrite Storage.blobAppendableUpload to be non-blocking and have improved throughput 
([#3231](https://github.com/googleapis/java-storage/issues/3231)) ([7bd73d3](https://github.com/googleapis/java-storage/commit/7bd73d3104f5c47299f5a9c8d68dec82933eeda5)) +* Add AppendableUploadWriteableByteChannel#flush() ([#3261](https://github.com/googleapis/java-storage/issues/3261)) ([950c56f](https://github.com/googleapis/java-storage/commit/950c56f0e622d75faff51257d5cbc9f3ddc7e1ce)) +* Add MinFlushSizeFlushPolicy#withMaxPendingBytes(long) ([#3231](https://github.com/googleapis/java-storage/issues/3231)) ([7bd73d3](https://github.com/googleapis/java-storage/commit/7bd73d3104f5c47299f5a9c8d68dec82933eeda5)) +* Add StorageChannelUtils to provide helper methods to perform blocking read/write to/from non-blocking channels ([#3231](https://github.com/googleapis/java-storage/issues/3231)) ([7bd73d3](https://github.com/googleapis/java-storage/commit/7bd73d3104f5c47299f5a9c8d68dec82933eeda5)) + + +### Bug Fixes + +* Make FlushPolicy${Min,Max}FlushSizeFlushPolicy constructors private ([#3217](https://github.com/googleapis/java-storage/issues/3217)) ([7bd73d3](https://github.com/googleapis/java-storage/commit/7bd73d3104f5c47299f5a9c8d68dec82933eeda5)) +* Update BlobAppendableUploadConfig and FlushPolicy.MinFlushSizeFlushPolicy to default to 4MiB minFlushSize and 16MiB maxPendingBytes ([#3249](https://github.com/googleapis/java-storage/issues/3249)) ([7bd73d3](https://github.com/googleapis/java-storage/commit/7bd73d3104f5c47299f5a9c8d68dec82933eeda5)) +* Update otel integration to properly activate span context for lazy RPCs such as reads & writes ([#3255](https://github.com/googleapis/java-storage/issues/3255)) ([d6587f4](https://github.com/googleapis/java-storage/commit/d6587f42b65a586a2e3f30e0559975801726a812)) + + +### Dependencies + +* Update actions/checkout action to v5 ([#3239](https://github.com/googleapis/java-storage/issues/3239)) ([33f024b](https://github.com/googleapis/java-storage/commit/33f024b1ae094bf3e3605e1a835cb55eb5c9e750)) +* Update dependency 
com.google.apis:google-api-services-storage to v1-rev20250815-2.0.0 ([#3245](https://github.com/googleapis/java-storage/issues/3245)) ([87afe1a](https://github.com/googleapis/java-storage/commit/87afe1ac5f500053e4c0639d5b824304d03796f4)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.0 ([#3250](https://github.com/googleapis/java-storage/issues/3250)) ([0782e62](https://github.com/googleapis/java-storage/commit/0782e62fc9534e3cecfaaa4d78b58904ecf699d6)) + +## [2.55.0](https://github.com/googleapis/java-storage/compare/v2.54.0...v2.55.0) (2025-08-05) + + +### Features + +* Add new preview Bucket encryption policy configuration ([#3204](https://github.com/googleapis/java-storage/issues/3204)) ([7b250dd](https://github.com/googleapis/java-storage/commit/7b250dd53cfa29bbb6a0a4cb4a345aeb2dab5c86)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.61.0 ([f98b686](https://github.com/googleapis/java-storage/commit/f98b686ef940879458acb1e56339adf869400b94)) +* Enable ALTS bound token (for DirectPath) in the grpc channel provider ([#2919](https://github.com/googleapis/java-storage/issues/2919)) ([38d248d](https://github.com/googleapis/java-storage/commit/38d248d9511e808e88c1bac0b6bb2ba54897830d)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.51.0 ([#3213](https://github.com/googleapis/java-storage/issues/3213)) ([86ff697](https://github.com/googleapis/java-storage/commit/86ff69788b30d8f82b6b95d010df507093852889)) + +## [2.54.0](https://github.com/googleapis/java-storage/compare/v2.53.3...v2.54.0) (2025-07-24) + + +### Features + +* Add BucketInfo.IpFilter ([#3177](https://github.com/googleapis/java-storage/issues/3177)) ([14a91ec](https://github.com/googleapis/java-storage/commit/14a91ec208067e6afc55923cffda5f69aa33d8b4)) +* Add default end-to-end checksumming for JournalingBlobWriteSessionConfig [#3180](https://github.com/googleapis/java-storage/issues/3180) 
([fa0f6a0](https://github.com/googleapis/java-storage/commit/fa0f6a03380af78e239bd0079267649ba4138f38)) +* Add default end-to-end crc32c checksumming for several upload methods via grpc transport [#3176](https://github.com/googleapis/java-storage/issues/3176) ([fa0f6a0](https://github.com/googleapis/java-storage/commit/fa0f6a03380af78e239bd0079267649ba4138f38)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.60.2 ([bd1f199](https://github.com/googleapis/java-storage/commit/bd1f199cf57c2b8039c303586d5beac64aeca0ba)) +* Give user provided checksum precondition priority for Storage#create methods that accept byte[] [#3182](https://github.com/googleapis/java-storage/issues/3182) ([fa0f6a0](https://github.com/googleapis/java-storage/commit/fa0f6a03380af78e239bd0079267649ba4138f38)) +* Move crc32c computation before writing to disk for BufferToDiskThenUpload BlobWriteSession config [#3187](https://github.com/googleapis/java-storage/issues/3187) ([fa0f6a0](https://github.com/googleapis/java-storage/commit/fa0f6a03380af78e239bd0079267649ba4138f38)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20250718-2.0.0 ([#3203](https://github.com/googleapis/java-storage/issues/3203)) ([18978e4](https://github.com/googleapis/java-storage/commit/18978e4ec54790df2939490ef76fc19b9f72eb04)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.50.2 ([#3201](https://github.com/googleapis/java-storage/issues/3201)) ([782c3c4](https://github.com/googleapis/java-storage/commit/782c3c416583704a196b17f23e9c12c33659f67d)) +* Update googleapis/sdk-platform-java action to v2.60.1 ([#3196](https://github.com/googleapis/java-storage/issues/3196)) ([6ba56e5](https://github.com/googleapis/java-storage/commit/6ba56e5a4b86a75a9f48beccf79ff6d5fdd3e19f)) + +## [2.53.3](https://github.com/googleapis/java-storage/compare/v2.53.2...v2.53.3) (2025-07-09) + + +### Bug Fixes + +* Fix 
DefaultBlobWriteSessionConfig init to work when grpc classes are excluded ([#3147](https://github.com/googleapis/java-storage/issues/3147)) ([8571ba8](https://github.com/googleapis/java-storage/commit/8571ba8eee82d055cdeb5f0b6970d5b814eaa24e)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20250629-2.0.0 ([#3185](https://github.com/googleapis/java-storage/issues/3185)) ([4ce8281](https://github.com/googleapis/java-storage/commit/4ce8281246cbe84ed068205532cac4a03853c331)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.50.1 ([#3189](https://github.com/googleapis/java-storage/issues/3189)) ([7fbfb01](https://github.com/googleapis/java-storage/commit/7fbfb013a8cfb72d49e3d752ad25e73b6ccaab4f)) + +## [2.53.2](https://github.com/googleapis/java-storage/compare/v2.53.1...v2.53.2) (2025-06-25) + + +### Bug Fixes + +* Fix Journaling BlobWriteSessionConfig to properly handle multiple consecutive retries ([#3166](https://github.com/googleapis/java-storage/issues/3166)) ([895bfbd](https://github.com/googleapis/java-storage/commit/895bfbda902a77d16a33fe5238349a6b3d397c10)) + + +### Dependencies + +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.36.0 ([#3162](https://github.com/googleapis/java-storage/issues/3162)) ([41a1030](https://github.com/googleapis/java-storage/commit/41a1030a2e77036cf961a16d472068b07e624192)) +* Update sdk-platform-java dependencies ([#3164](https://github.com/googleapis/java-storage/issues/3164)) ([c22a131](https://github.com/googleapis/java-storage/commit/c22a1319d8e2d92beeb03abac6bf2af8d09d49ee)) + +## [2.53.1](https://github.com/googleapis/java-storage/compare/v2.53.0...v2.53.1) (2025-06-18) + + +### Bug Fixes + +* Cancel the future in RemoteStorageHelper#forceDelete when TimeoutException happens ([#3136](https://github.com/googleapis/java-storage/issues/3136)) 
([e6007d5](https://github.com/googleapis/java-storage/commit/e6007d56e8801be65209cb5761f62749369425c9)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.59.0 ([7dba9f0](https://github.com/googleapis/java-storage/commit/7dba9f09f100062cc8c04e5a2735a4349d8e7ed1)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20250605-2.0.0 ([#3143](https://github.com/googleapis/java-storage/issues/3143)) ([17a80d8](https://github.com/googleapis/java-storage/commit/17a80d8b49fef65557215b310895b0f08ee25235)) +* Update sdk-platform-java dependencies ([#3152](https://github.com/googleapis/java-storage/issues/3152)) ([2f78192](https://github.com/googleapis/java-storage/commit/2f78192d97e9d3ca29c97a52a66a074777dce196)) + +## [2.53.0](https://github.com/googleapis/java-storage/compare/v2.52.3...v2.53.0) (2025-06-04) + + +### Features + +* Expose BucketInfo.getProject as a BigInteger ([#3119](https://github.com/googleapis/java-storage/issues/3119)) ([64bbb60](https://github.com/googleapis/java-storage/commit/64bbb608033f757cb6e31e75a78740d8ed1dccab)), closes [#3023](https://github.com/googleapis/java-storage/issues/3023) +* **storagecontrol:** Add Anywhere cache control APIs ([06572b7](https://github.com/googleapis/java-storage/commit/06572b7ced2829cdc00bf648521c024a52d93b3a)) +* **storagecontrol:** Add Client Libraries Storage IntelligenceConfig ([06572b7](https://github.com/googleapis/java-storage/commit/06572b7ced2829cdc00bf648521c024a52d93b3a)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.58.0 ([06572b7](https://github.com/googleapis/java-storage/commit/06572b7ced2829cdc00bf648521c024a52d93b3a)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20250521-2.0.0 ([#3118](https://github.com/googleapis/java-storage/issues/3118)) 
([e1be49e](https://github.com/googleapis/java-storage/commit/e1be49e6c987daccf9542c15c6ba418c007d2fb7)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20250524-2.0.0 ([#3127](https://github.com/googleapis/java-storage/issues/3127)) ([2a4499d](https://github.com/googleapis/java-storage/commit/2a4499d1686e93e8495f29b5198488d166caaa06)) +* Update sdk-platform-java dependencies ([#3129](https://github.com/googleapis/java-storage/issues/3129)) ([31cd058](https://github.com/googleapis/java-storage/commit/31cd058dcaf5a891ecb7a955602b09634d912560)) + + +### Documentation + +* Add explicit Optional annotations to fields that have always been treated as optional ([53b6927](https://github.com/googleapis/java-storage/commit/53b6927de9e5b948e1192e6cf716b88cc872c632)) +* Add note that Bucket.project output format is always project number format ([53b6927](https://github.com/googleapis/java-storage/commit/53b6927de9e5b948e1192e6cf716b88cc872c632)) +* Add note that managedFolders are supported for GetIamPolicy and SetIamPolicy ([53b6927](https://github.com/googleapis/java-storage/commit/53b6927de9e5b948e1192e6cf716b88cc872c632)) + +## [2.52.3](https://github.com/googleapis/java-storage/compare/v2.52.2...v2.52.3) (2025-05-19) + + +### Bug Fixes + +* Update grpc client side metrics detection to be graceful when not running on gcp ([#3097](https://github.com/googleapis/java-storage/issues/3097)) ([10cd32d](https://github.com/googleapis/java-storage/commit/10cd32d51aa061304b5b4d0d632a2eed694cd1d6)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20250509-2.0.0 ([#3103](https://github.com/googleapis/java-storage/issues/3103)) ([1fd1090](https://github.com/googleapis/java-storage/commit/1fd109094814bfb6270e10a3e4fd5ec4d20e7fba)) +* Update sdk-platform-java dependencies ([#3102](https://github.com/googleapis/java-storage/issues/3102)) 
([3b53b94](https://github.com/googleapis/java-storage/commit/3b53b942f31805c5291ca109b4f3e501fbc6fc0d)) + +## [2.52.2](https://github.com/googleapis/java-storage/compare/v2.52.1...v2.52.2) (2025-05-06) + + +### Bug Fixes + +* Fix a possible NPE that could happen when shutting down a grpc Storage instance ([#3089](https://github.com/googleapis/java-storage/issues/3089)) ([56f5d0a](https://github.com/googleapis/java-storage/commit/56f5d0ae8826d3d03c436fd0b91630cd09c09a3d)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20250424-2.0.0 ([#3084](https://github.com/googleapis/java-storage/issues/3084)) ([c7afbde](https://github.com/googleapis/java-storage/commit/c7afbde3b1a78a56c017283850f90938d66e44fd)) +* Update sdk-platform-java dependencies ([#3087](https://github.com/googleapis/java-storage/issues/3087)) ([762ca13](https://github.com/googleapis/java-storage/commit/762ca1374a8e738fbafc49d17fd38375962d026f)) + +## [2.52.1](https://github.com/googleapis/java-storage/compare/v2.52.0...v2.52.1) (2025-05-01) + + +### Bug Fixes + +* Fix grpc ReadObject memory leak introduced in 2.51.0 ([#3080](https://github.com/googleapis/java-storage/issues/3080)) ([7057629](https://github.com/googleapis/java-storage/commit/705762947fad452462ad0e55ec5898e6389a0ac3)) + +## [2.52.0](https://github.com/googleapis/java-storage/compare/v2.51.0...v2.52.0) (2025-04-28) + +> [!IMPORTANT] +> This release has a direct memory leak when using gRPC transport for downloads. +> +> Please update to [2.52.1](https://github.com/googleapis/java-storage/releases/tag/v2.52.1) or later which contains the fix. 
+ +### Features + +* Add Storage.BlobListOption#includeTrailingDelimiter ([#3038](https://github.com/googleapis/java-storage/issues/3038)) ([0b7a0df](https://github.com/googleapis/java-storage/commit/0b7a0dff91af0e22e84ba3ca08862141292b3b30)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.56.2 ([74c46dd](https://github.com/googleapis/java-storage/commit/74c46ddcd91553bcb8145c536389bbe0ca32886c)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20250416-2.0.0 ([#3063](https://github.com/googleapis/java-storage/issues/3063)) ([d496d5b](https://github.com/googleapis/java-storage/commit/d496d5bcc1f6ee922bb8f02bfc91d4aeffb8f9a7)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20250420-2.0.0 ([#3070](https://github.com/googleapis/java-storage/issues/3070)) ([1ef50f2](https://github.com/googleapis/java-storage/commit/1ef50f2936e62602db689cffbe166c9494ac58f6)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.46.2 ([#3061](https://github.com/googleapis/java-storage/issues/3061)) ([cb43a6c](https://github.com/googleapis/java-storage/commit/cb43a6c368367e15dfd7c2205689df04547d1a9c)) +* Update googleapis/sdk-platform-java action to v2.56.2 ([#3055](https://github.com/googleapis/java-storage/issues/3055)) ([7025ad7](https://github.com/googleapis/java-storage/commit/7025ad7db438741ab6d74d5e3f768d0a2bd4da5c)) + +## [2.51.0](https://github.com/googleapis/java-storage/compare/v2.50.0...v2.51.0) (2025-04-23) + +> [!IMPORTANT] +> This release has a direct memory leak when using gRPC transport for downloads. +> +> Please update to [2.52.1](https://github.com/googleapis/java-storage/releases/tag/v2.52.1) or later which contains the fix. 
+ +### Features + +* Add @BetaApi Storage#blobAppendableUpload for gRPC Transport ([#3020](https://github.com/googleapis/java-storage/pull/3020)) ([62b6248](https://github.com/googleapis/java-storage/commit/62b62482c10d79c9f7d1b7c6cedd9e7d422a58ad)) +* Add @BetaApi Storage#blobReadSession for gRPC Transport ([#3020](https://github.com/googleapis/java-storage/pull/3020)) ([62b6248](https://github.com/googleapis/java-storage/commit/62b62482c10d79c9f7d1b7c6cedd9e7d422a58ad)) +* Implement improved retry context information ([#3020](https://github.com/googleapis/java-storage/pull/3020)) ([62b6248](https://github.com/googleapis/java-storage/commit/62b62482c10d79c9f7d1b7c6cedd9e7d422a58ad)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.56.0 ([8f9f5ec](https://github.com/googleapis/java-storage/commit/8f9f5ec4506bde58fbf2351c99f0d67cdcfcd88e)) +* Ensure object generation is sent for Storage#update(BlobInfo) using HTTP Transport ([#3006](https://github.com/googleapis/java-storage/issues/3006)) ([2a3e0e7](https://github.com/googleapis/java-storage/commit/2a3e0e7453c5e3e45bc06eec1ba6d2bc193143e6)), closes [#2980](https://github.com/googleapis/java-storage/issues/2980) +* Update 416 handling for ReadChannel ([#3018](https://github.com/googleapis/java-storage/issues/3018)) ([4a9c3e4](https://github.com/googleapis/java-storage/commit/4a9c3e46e8d4fa64813869cadf247cf77f1844d5)) +* Update gRPC Bidi resumable upload to have more robust error message generation ([#2998](https://github.com/googleapis/java-storage/issues/2998)) ([79b5d85](https://github.com/googleapis/java-storage/commit/79b5d8559b2e655178db2ba75116ddba5a581a7b)) +* Update gRPC implementation for storage.buckets.get to translate NOT_FOUND to null ([#3005](https://github.com/googleapis/java-storage/issues/3005)) ([704af65](https://github.com/googleapis/java-storage/commit/704af65b25fe38d146b960775a69644cd80f2e78)) + + +### Dependencies + +* Remove explicit version 
declarations for packages that are in shared-dependencies ([#3014](https://github.com/googleapis/java-storage/issues/3014)) ([61cdb30](https://github.com/googleapis/java-storage/commit/61cdb30f250d2fdaaf79e0d060eb573197c7a90e)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20250312-2.0.0 ([#3000](https://github.com/googleapis/java-storage/issues/3000)) ([78fc076](https://github.com/googleapis/java-storage/commit/78fc0763c89fb0e603d75b20c9c67eabc2b9f729)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.34.0 ([#2938](https://github.com/googleapis/java-storage/issues/2938)) ([ff6f696](https://github.com/googleapis/java-storage/commit/ff6f696e8c4a539b5e6755fbd550096ee4688ecc)) +* Update sdk-platform-java dependencies ([#3046](https://github.com/googleapis/java-storage/issues/3046)) ([861f958](https://github.com/googleapis/java-storage/commit/861f9586e041f65061fb3da7f88955c4214d450c)) +* Update sdk-platform-java dependencies ([#3053](https://github.com/googleapis/java-storage/issues/3053)) ([921d1ba](https://github.com/googleapis/java-storage/commit/921d1ba0a547242c70cbb7dfb2cb190fa761398f)) + +## [2.50.0](https://github.com/googleapis/java-storage/compare/v2.49.0...v2.50.0) (2025-03-14) + + +### Features + +* Next release from main branch is 2.50.0 ([#2968](https://github.com/googleapis/java-storage/issues/2968)) ([4a69fcc](https://github.com/googleapis/java-storage/commit/4a69fcc16787904675e5ba15f42bcab67553a7aa)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.54.0 ([22e7e3d](https://github.com/googleapis/java-storage/commit/22e7e3d4e8c56184f9c91b1e0bf52d5c4aa4cf9f)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.55.1 ([81c8c61](https://github.com/googleapis/java-storage/commit/81c8c611ab51a706d63670dff86db5bfd04ef544)) +* Improve 503 handling for json resumable uploads ([#2987](https://github.com/googleapis/java-storage/issues/2987)) 
([9bc2b14](https://github.com/googleapis/java-storage/commit/9bc2b14a0058cded0321e5afa3ea3fc59bf3421b)) +* Update usages of String.format to explicitly pass Locale.US ([#2974](https://github.com/googleapis/java-storage/issues/2974)) ([8bcb2de](https://github.com/googleapis/java-storage/commit/8bcb2de22ea39cfc0b4dda07daba78ea192b5e98)), closes [#2972](https://github.com/googleapis/java-storage/issues/2972) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20250224-2.0.0 ([#2969](https://github.com/googleapis/java-storage/issues/2969)) ([80a40c4](https://github.com/googleapis/java-storage/commit/80a40c41527a5fb83a41ac4754d5be22f5cd5c2b)) +* Update googleapis/sdk-platform-java action to v2.55.1 ([#2985](https://github.com/googleapis/java-storage/issues/2985)) ([e22a2de](https://github.com/googleapis/java-storage/commit/e22a2de0fe1dc66e6e5b6d311f4812c290177203)) +* Update sdk-platform-java dependencies ([#2983](https://github.com/googleapis/java-storage/issues/2983)) ([9eeb82a](https://github.com/googleapis/java-storage/commit/9eeb82af81ad8095c21542808a8eaa5098dee074)) +* Update sdk-platform-java dependencies ([#2986](https://github.com/googleapis/java-storage/issues/2986)) ([10b922a](https://github.com/googleapis/java-storage/commit/10b922a8ae831fcd0107abf54091566442ae5fde)) + +## [2.49.0](https://github.com/googleapis/java-storage/compare/v2.48.2...v2.49.0) (2025-02-26) + + +### Features + +* Add new Options to allow per method header values ([#2941](https://github.com/googleapis/java-storage/issues/2941)) ([297802d](https://github.com/googleapis/java-storage/commit/297802d1715e3289dd720fba851c563004b8c5f2)) +* **transfer-manager:** Add ParallelUploadConfig.Builder#setUploadBlobInfoFactory ([#2936](https://github.com/googleapis/java-storage/issues/2936)) ([86e9ae8](https://github.com/googleapis/java-storage/commit/86e9ae80772aa202d0b6563b8dd37722d8b5e0e0)), closes 
[#2638](https://github.com/googleapis/java-storage/issues/2638) + + +### Bug Fixes + +* Categorize a WatchdogTimeoutException as retriable for grpc ReadObject ([#2954](https://github.com/googleapis/java-storage/issues/2954)) ([b53bd53](https://github.com/googleapis/java-storage/commit/b53bd53c26984b3e850355ced608b511688b74d1)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.53.0 ([9946d6b](https://github.com/googleapis/java-storage/commit/9946d6bdc7ec8398bf1bd1df63f272df1351539e)) +* Update grpc based Storage to defer project id validation ([#2930](https://github.com/googleapis/java-storage/issues/2930)) ([cc03784](https://github.com/googleapis/java-storage/commit/cc037848be7d21cb827c97d7f71618f1bfae941d)) +* Update kms key handling when opening a resumable upload to clear the value in the json to be null rather than empty string ([#2939](https://github.com/googleapis/java-storage/issues/2939)) ([43553de](https://github.com/googleapis/java-storage/commit/43553dedce33093e751143fadb372024d975706c)) + + +### Dependencies + +* Update sdk-platform-java dependencies ([#2957](https://github.com/googleapis/java-storage/issues/2957)) ([40cfda6](https://github.com/googleapis/java-storage/commit/40cfda62d4bff72a857d0269eaa7c0225f216650)) + + +### Documentation + +* Add note about HNS support to moveBlob ([#2929](https://github.com/googleapis/java-storage/issues/2929)) ([c461546](https://github.com/googleapis/java-storage/commit/c461546bfd016b21eb99f8e25604ee3fd001c9c6)) + +## [2.48.2](https://github.com/googleapis/java-storage/compare/v2.48.1...v2.48.2) (2025-02-11) + + +### Dependencies + +* Update sdk-platform-java dependencies ([#2921](https://github.com/googleapis/java-storage/issues/2921)) ([fa9b0a8](https://github.com/googleapis/java-storage/commit/fa9b0a8d6e2b185c0588d824329b409718bef302)) + + +### Documentation + +* Update storage_copy_file to include MegabytesCopiedPerChunk ([#2910](https://github.com/googleapis/java-storage/issues/2910)) 
([971ca5d](https://github.com/googleapis/java-storage/commit/971ca5d107967960e6db6ca76a55e44b9aefc605)) + +## [2.48.1](https://github.com/googleapis/java-storage/compare/v2.48.0...v2.48.1) (2025-02-03) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.52.0 ([00754bc](https://github.com/googleapis/java-storage/commit/00754bc0b30da4ebc13e75f55525d9bd885b0572)) +* Update batch handling to ensure each operation has its own unique idempotency-token ([#2905](https://github.com/googleapis/java-storage/issues/2905)) ([8d79b8d](https://github.com/googleapis/java-storage/commit/8d79b8d9cea30c6bba0d2550fa397b8c8b7acc3c)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.42.0 ([#2895](https://github.com/googleapis/java-storage/issues/2895)) ([145afb0](https://github.com/googleapis/java-storage/commit/145afb0d3a783fe73a388a7be174b598a195a2c6)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.33.0 ([#2904](https://github.com/googleapis/java-storage/issues/2904)) ([2a5242e](https://github.com/googleapis/java-storage/commit/2a5242e580185a5952181f922c4eca25790bcec6)) + + +### Documentation + +* Create OpenTelemetry Quickstart Sample ([#2861](https://github.com/googleapis/java-storage/issues/2861)) ([31df9b7](https://github.com/googleapis/java-storage/commit/31df9b7a6350714ff354934ccbd27c5dd68762f5)) + +## [2.48.0](https://github.com/googleapis/java-storage/compare/v2.47.0...v2.48.0) (2025-01-27) + + +### Features + +* Add new Storage#moveBlob method to atomically rename an object ([#2882](https://github.com/googleapis/java-storage/issues/2882)) ([c49fd08](https://github.com/googleapis/java-storage/commit/c49fd08582c7235919270c1dd4eb2ece6933d302)) +* Next release from main branch is 2.48.0 ([#2885](https://github.com/googleapis/java-storage/issues/2885)) ([34e5903](https://github.com/googleapis/java-storage/commit/34e5903df9b7221ba9ebabc07046d7b819f9f209)) + + +### Bug Fixes 
+ +* **deps:** Update the Java code generator (gapic-generator-java) to 2.51.1 ([09ed029](https://github.com/googleapis/java-storage/commit/09ed02995f4688db69c0e0db1c3d72cadce0c395)) +* Update Signed URL default scheme to resolve from storage options host ([#2880](https://github.com/googleapis/java-storage/issues/2880)) ([7ae7e39](https://github.com/googleapis/java-storage/commit/7ae7e3998930c1bec72ff7c06ebc2b66343852ca)), closes [#2870](https://github.com/googleapis/java-storage/issues/2870) +* Update StorageException translation of an ApiException to include error details ([#2872](https://github.com/googleapis/java-storage/issues/2872)) ([8ad5010](https://github.com/googleapis/java-storage/commit/8ad501012fab0dfd8d0f0dce49d7c681540022a9)) + + +### Dependencies + +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.33.0 ([#2873](https://github.com/googleapis/java-storage/issues/2873)) ([39509d5](https://github.com/googleapis/java-storage/commit/39509d53ee283694526efbec58daa24c3d8ca080)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.52.0 ([#2883](https://github.com/googleapis/java-storage/issues/2883)) ([a64a3d5](https://github.com/googleapis/java-storage/commit/a64a3d58410dd2e9db20a36cd7169037f71eaeeb)) + +## [2.47.0](https://github.com/googleapis/java-storage/compare/v2.46.0...v2.47.0) (2025-01-08) + + +### Features + +* Add MoveObject RPC ([34b8ac4](https://github.com/googleapis/java-storage/commit/34b8ac4239bab67b53c73050d2341615254a3ae0)) +* Introductory beta level support for OpenTelemetry tracing on c.g.c.storage.Storage methods ([#2837](https://github.com/googleapis/java-storage/issues/2837)) ([dd889ea](https://github.com/googleapis/java-storage/commit/dd889ea0d0a57490ef106ab92ba557f26d414406)) + + +### Bug Fixes + +* De-beta storage-v2 artifacts ([#2852](https://github.com/googleapis/java-storage/issues/2852)) 
([77a2e8a](https://github.com/googleapis/java-storage/commit/77a2e8af341528a4ff3c34a880a7983f828b8cfd)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.51.0 ([34b8ac4](https://github.com/googleapis/java-storage/commit/34b8ac4239bab67b53c73050d2341615254a3ae0)) +* Fix interrupt spiral in grpc ReadObject drainQueue ([#2850](https://github.com/googleapis/java-storage/issues/2850)) ([c1dac83](https://github.com/googleapis/java-storage/commit/c1dac837387ffc40f00344c8fb0e86e09d009358)) +* Update request handling of gRPC based CopyWriter ([#2858](https://github.com/googleapis/java-storage/issues/2858)) ([093cb87](https://github.com/googleapis/java-storage/commit/093cb8759d5cfaafa6fd9df43de1bb91c1285f35)) + + +### Dependencies + +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.51.0 ([#2860](https://github.com/googleapis/java-storage/issues/2860)) ([980ac4e](https://github.com/googleapis/java-storage/commit/980ac4ebe09636d0de255e89f747bde8cc9ce041)) +* Update googleapis/sdk-platform-java action to v2.51.1 ([#2864](https://github.com/googleapis/java-storage/issues/2864)) ([b731c06](https://github.com/googleapis/java-storage/commit/b731c06b11186e9695fb89f32abf35988d558bc9)) +* Update sdk-platform-java dependencies ([#2866](https://github.com/googleapis/java-storage/issues/2866)) ([562df7f](https://github.com/googleapis/java-storage/commit/562df7f087b34db0f3d49c6e0b87643a606ef9c6)) + +## [2.46.0](https://github.com/googleapis/java-storage/compare/v2.45.0...v2.46.0) (2024-12-13) + + +### Features + +* Introduce `java.time` methods and variables ([#2826](https://github.com/googleapis/java-storage/issues/2826)) ([baf30ee](https://github.com/googleapis/java-storage/commit/baf30ee91febbcda7d0f64b0083b789c4384a3c0)) + + +### Bug Fixes + +* Update retry lifecycle when attempting to decompress a gzip object ([#2840](https://github.com/googleapis/java-storage/issues/2840)) 
([7dba13c](https://github.com/googleapis/java-storage/commit/7dba13cbbfd38d3c2147b25264934f33c5dc78e3)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20241113-2.0.0 ([#2823](https://github.com/googleapis/java-storage/issues/2823)) ([503e518](https://github.com/googleapis/java-storage/commit/503e518550b67870c3e856957fdadbc012eff1ed)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20241206-2.0.0 ([#2839](https://github.com/googleapis/java-storage/issues/2839)) ([8f3cdd3](https://github.com/googleapis/java-storage/commit/8f3cdd39dbbbbd63c560c3cb44d1032c0bd41749)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.50.0 ([#2825](https://github.com/googleapis/java-storage/issues/2825)) ([9aa68a6](https://github.com/googleapis/java-storage/commit/9aa68a67d931b19175e23f883a109f22b99411ca)) +* Update sdk-platform-java dependencies ([#2841](https://github.com/googleapis/java-storage/issues/2841)) ([2a70481](https://github.com/googleapis/java-storage/commit/2a704815ee60b8ec7ff2c1af557fee4366ea2e19)) + + +### Documentation + +* Add samples for soft delete (objects) ([#2754](https://github.com/googleapis/java-storage/issues/2754)) ([41bc807](https://github.com/googleapis/java-storage/commit/41bc807e94f1c0eebd41b4a7baa12301858cc16f)) + +## [2.45.0](https://github.com/googleapis/java-storage/compare/v2.44.1...v2.45.0) (2024-11-18) + + +### Features + +* Adds support for restore token ([aef367d](https://github.com/googleapis/java-storage/commit/aef367dac146147bfbbb46b64aa91a7aebe05b6c)) +* Adds support for restore token ([#2768](https://github.com/googleapis/java-storage/issues/2768)) ([0394354](https://github.com/googleapis/java-storage/commit/0394354c81b35b66d46d84b9afb090d891ee6a12)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.49.0 
([aef367d](https://github.com/googleapis/java-storage/commit/aef367dac146147bfbbb46b64aa91a7aebe05b6c)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.50.0 ([281cccb](https://github.com/googleapis/java-storage/commit/281cccbafd2ef17c56ab4095e1f14aab2d11427e)) +* Set default values for monitored resource ([#2809](https://github.com/googleapis/java-storage/issues/2809)) ([27829a4](https://github.com/googleapis/java-storage/commit/27829a48813a2cd89d4ddf78cf8f925b64388cd8)) + + +### Dependencies + +* Update sdk-platform-java dependencies ([#2817](https://github.com/googleapis/java-storage/issues/2817)) ([9e961c4](https://github.com/googleapis/java-storage/commit/9e961c4a5058234f04bf71eb45185f5641c0b9a4)) + +## [2.44.1](https://github.com/googleapis/java-storage/compare/v2.44.0...v2.44.1) (2024-10-25) + + +### Dependencies + +* Update sdk-platform-java dependencies ([#2795](https://github.com/googleapis/java-storage/issues/2795)) ([aeb86e5](https://github.com/googleapis/java-storage/commit/aeb86e5467269d55f9019638fe8b05331b423e55)) + +## [2.44.0](https://github.com/googleapis/java-storage/compare/v2.43.2...v2.44.0) (2024-10-23) + + +### Features + +* Promote gRPC transport to GA ([#2766](https://github.com/googleapis/java-storage/issues/2766)) ([41fd72a](https://github.com/googleapis/java-storage/commit/41fd72a57e3577217e98f7a179d8958c14c5676a)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.47.0 ([c517798](https://github.com/googleapis/java-storage/commit/c5177983a48b1bf26e8abb5d4d27355bfcd62d3b)) +* Fix createFrom resumable upload retry offset calculation ([#2771](https://github.com/googleapis/java-storage/issues/2771)) ([1126cdc](https://github.com/googleapis/java-storage/commit/1126cdcde07fed3c57d99dfa2b2505b06d5cd25a)), closes [#2770](https://github.com/googleapis/java-storage/issues/2770) +* Update gRPC ReadObject retry to avoid double retry 
([#2765](https://github.com/googleapis/java-storage/issues/2765)) ([1fc57b9](https://github.com/googleapis/java-storage/commit/1fc57b9b4fe2ec2d08333fb62be66f272294deca)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20241008-2.0.0 ([#2776](https://github.com/googleapis/java-storage/issues/2776)) ([0545b5e](https://github.com/googleapis/java-storage/commit/0545b5e7d7e292da38fb42a9160fb1938628684b)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.38.0 ([#2787](https://github.com/googleapis/java-storage/issues/2787)) ([a470e88](https://github.com/googleapis/java-storage/commit/a470e880f348a4950102546c7ee844761e1c5b57)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.48.0 ([#2781](https://github.com/googleapis/java-storage/issues/2781)) ([8fa013e](https://github.com/googleapis/java-storage/commit/8fa013e09e3c02c6deeb6d49911c051b940ef79c)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.49.0 ([#2782](https://github.com/googleapis/java-storage/issues/2782)) ([a7baffb](https://github.com/googleapis/java-storage/commit/a7baffb9579f865a20fe42d189f8ba9d8b4ee716)) +* Update googleapis/sdk-platform-java action to v2.48.0 ([#2786](https://github.com/googleapis/java-storage/issues/2786)) ([2893e61](https://github.com/googleapis/java-storage/commit/2893e617b865717959da34a47d2a972ad90cde72)) + +## [2.43.2](https://github.com/googleapis/java-storage/compare/v2.43.1...v2.43.2) (2024-10-08) + + +### Bug Fixes + +* Plumb list blobs match glob option for grpc transport ([#2759](https://github.com/googleapis/java-storage/issues/2759)) ([207abd1](https://github.com/googleapis/java-storage/commit/207abd110ac2e1854804332025e6ea7806df1785)) + + +### Dependencies + +* Update dependency com.google.api:gapic-generator-java to v2.47.0 ([#2750](https://github.com/googleapis/java-storage/issues/2750)) 
([9041f24](https://github.com/googleapis/java-storage/commit/9041f242c185a8a96361a087d42c5be7ba06a583)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20240924-2.0.0 ([#2719](https://github.com/googleapis/java-storage/issues/2719)) ([7b19831](https://github.com/googleapis/java-storage/commit/7b19831b48153bb7f6744f5d7b8559c244d483d7)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.37.0 ([#2751](https://github.com/googleapis/java-storage/issues/2751)) ([003d6fa](https://github.com/googleapis/java-storage/commit/003d6faaa3d64310cf91a1e304247e2f44a5c9c7)) +* Update googleapis/sdk-platform-java action to v2.47.0 ([#2749](https://github.com/googleapis/java-storage/issues/2749)) ([befa415](https://github.com/googleapis/java-storage/commit/befa415d69743faf079930315c5ebd8afaa52d7f)) + +## [2.43.1](https://github.com/googleapis/java-storage/compare/v2.43.0...v2.43.1) (2024-09-26) + + +### Bug Fixes + +* Add managed folder to testIamPermissions method ([556dd95](https://github.com/googleapis/java-storage/commit/556dd95c2cf7180cb14c12a2f7ecc288c0c49ea9)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.46.0 ([556dd95](https://github.com/googleapis/java-storage/commit/556dd95c2cf7180cb14c12a2f7ecc288c0c49ea9)) +* Remove server unimplemented GrpcStorageImpl#{get,list,create,delete,Update}HmacKey ([#2717](https://github.com/googleapis/java-storage/issues/2717)) ([06f7292](https://github.com/googleapis/java-storage/commit/06f7292036c39ca7f10119a1b906c604a0c34344)) +* Remove server unimplemented GrpcStorageImpl#{get,list,create,delete}Notification ([#2710](https://github.com/googleapis/java-storage/issues/2710)) ([310c9b2](https://github.com/googleapis/java-storage/commit/310c9b285024573a7e58dfb07f41b482a8a3372f)) +* Remove server unimplemented GrpcStorageImpl#getServiceAccount ([#2718](https://github.com/googleapis/java-storage/issues/2718)) 
([51076a8](https://github.com/googleapis/java-storage/commit/51076a87eae57ec6763bdbfa20dcc390b688fc82)) +* Update grpc based ReadObject rpcs to remove race condition between cancellation and message handling ([#2708](https://github.com/googleapis/java-storage/issues/2708)) ([2c7f088](https://github.com/googleapis/java-storage/commit/2c7f08868df8adba623178d679a100f19d10f070)) +* Update grpc upload error diagnostics to be tolerant of receiving an error if no request has been sent ([#2732](https://github.com/googleapis/java-storage/issues/2732)) ([fff72d5](https://github.com/googleapis/java-storage/commit/fff72d5cfeca9ad3348e7c9b72e2740024dcbbb6)) +* Update GrpcStorageOptions to attempt direct path by default ([#2715](https://github.com/googleapis/java-storage/issues/2715)) ([9de9a92](https://github.com/googleapis/java-storage/commit/9de9a92be05f36bbe6afabeffcc597470a92b5a3)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.36.0 ([#2721](https://github.com/googleapis/java-storage/issues/2721)) ([11f09fe](https://github.com/googleapis/java-storage/commit/11f09febb76a50fc0e19ec096c9bbad485764c8b)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.36.1 ([#2738](https://github.com/googleapis/java-storage/issues/2738)) ([eb320e1](https://github.com/googleapis/java-storage/commit/eb320e104bca9e5a7c192d58147045bf7201ffc7)) +* Update googleapis/sdk-platform-java action to v2.46.1 ([#2736](https://github.com/googleapis/java-storage/issues/2736)) ([795f2c3](https://github.com/googleapis/java-storage/commit/795f2c34c1bc389179c2ca2bafba0803fff5e38e)) + +## [2.43.0](https://github.com/googleapis/java-storage/compare/v2.42.0...v2.43.0) (2024-09-13) + + +### Features + +* Allow specifying an expected object size for resumable operations. 
([#2661](https://github.com/googleapis/java-storage/issues/2661)) ([3405611](https://github.com/googleapis/java-storage/commit/3405611f20153246691910aa33fed800ab989669)), closes [#2511](https://github.com/googleapis/java-storage/issues/2511) + + +### Bug Fixes + +* Close pending zero-copy responses when Storage#close is called ([#2696](https://github.com/googleapis/java-storage/issues/2696)) ([1855308](https://github.com/googleapis/java-storage/commit/185530823a7d2378add1f95589f191326c9ae173)) +* Github workflow vulnerable to script injection ([#2663](https://github.com/googleapis/java-storage/issues/2663)) ([9151ac2](https://github.com/googleapis/java-storage/commit/9151ac27638e4491628d5bbb51643abc6bcd5f54)) +* Make ParallelCompositeUploadBlobWriteSessionConfig.ExecutorSupplier#cachedPool a singleton ([#2691](https://github.com/googleapis/java-storage/issues/2691)) ([1494809](https://github.com/googleapis/java-storage/commit/1494809af5624d7076b70087da8f81f31c6f61e7)) + + +### Dependencies + +* Promote storage-v2 artifacts to beta ([9d22597](https://github.com/googleapis/java-storage/commit/9d225978639cdf49601759d6f7c65a420be71c7a)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20240819-2.0.0 ([#2665](https://github.com/googleapis/java-storage/issues/2665)) ([3df1000](https://github.com/googleapis/java-storage/commit/3df1000f137d54ef42c9b25e90a301c512644e2b)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.35.0 ([#2698](https://github.com/googleapis/java-storage/issues/2698)) ([1dd51c3](https://github.com/googleapis/java-storage/commit/1dd51c32948cae2cadbc6e3be1b23ab0bd6063a3)) + +## [2.42.0](https://github.com/googleapis/java-storage/compare/v2.41.0...v2.42.0) (2024-08-19) + + +### Features + +* Enable grpc.lb.locality label for client-side metrics ([#2659](https://github.com/googleapis/java-storage/issues/2659)) ([b681ee0](https://github.com/googleapis/java-storage/commit/b681ee0d7c535db3dc4ede0c2b67bf2306aaf9f0)) 
+ + +### Bug Fixes + +* Update modified field handling for blob and bucket with json transport to properly clear fields ([#2664](https://github.com/googleapis/java-storage/issues/2664)) ([e2f5537](https://github.com/googleapis/java-storage/commit/e2f553788eb3f3685056728de75c358893887604)), closes [#2662](https://github.com/googleapis/java-storage/issues/2662) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.34.0 ([#2673](https://github.com/googleapis/java-storage/issues/2673)) ([453c29a](https://github.com/googleapis/java-storage/commit/453c29abb8e970a1a639a39af3c0e3e8516fdda5)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.45.0 ([#2667](https://github.com/googleapis/java-storage/issues/2667)) ([1f66dff](https://github.com/googleapis/java-storage/commit/1f66dfff67dcab49596f25aa43651c7f098f12ad)) + +## [2.41.0](https://github.com/googleapis/java-storage/compare/v2.40.1...v2.41.0) (2024-07-31) + + +### Features + +* Enable gRPC client open telemetry metrics reporting ([#2590](https://github.com/googleapis/java-storage/issues/2590)) ([d153228](https://github.com/googleapis/java-storage/commit/d153228a301007b5952de9722f370dda0784473a)) + + +### Bug Fixes + +* Add UnknownHostException to set of retriable exception ([#2651](https://github.com/googleapis/java-storage/issues/2651)) ([18de9fc](https://github.com/googleapis/java-storage/commit/18de9fcdb831132336eca4112dfe0515174bba7b)) +* Update grpc resumable upload error categorization to be more tolerant ([#2644](https://github.com/googleapis/java-storage/issues/2644)) ([95697dd](https://github.com/googleapis/java-storage/commit/95697dd3d744351058c13793c6ae576820f6b638)) +* Update Storage#readAllBytes to respect shouldReturnRawInputStream option ([#2635](https://github.com/googleapis/java-storage/issues/2635)) ([dc883cc](https://github.com/googleapis/java-storage/commit/dc883cce5f547def7cfb34c4f8a2d409493e4cb9)) +* Update TransferManager 
downloads to reduce in memory buffering ([#2630](https://github.com/googleapis/java-storage/issues/2630)) ([fc2fd75](https://github.com/googleapis/java-storage/commit/fc2fd750ed60b840e6285a4b1f4ecce739df4c09)) +* Use fast calculation for totalRemaining number of bytes from multiple ByteBuffers ([#2633](https://github.com/googleapis/java-storage/issues/2633)) ([758b3dd](https://github.com/googleapis/java-storage/commit/758b3dd3cc4f6dfc2dfc12c3a77472d97c31c5d5)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20240625-2.0.0 ([#2616](https://github.com/googleapis/java-storage/issues/2616)) ([b22babb](https://github.com/googleapis/java-storage/commit/b22babbe26572d8c4289a65a0b125b2a60e8ef79)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20240706-2.0.0 ([#2634](https://github.com/googleapis/java-storage/issues/2634)) ([1ccaa0c](https://github.com/googleapis/java-storage/commit/1ccaa0c64887a0661438957e9427237ee005ccf1)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.33.0 ([#2647](https://github.com/googleapis/java-storage/issues/2647)) ([8196259](https://github.com/googleapis/java-storage/commit/8196259927330ecfe3e604c24d248f7935e7fe0d)) +* Update dependency net.jqwik:jqwik to v1.9.0 ([#2608](https://github.com/googleapis/java-storage/issues/2608)) ([a20eb66](https://github.com/googleapis/java-storage/commit/a20eb660ddfa4b68d79ce04496064f3025676d5a)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.10.3 ([#2604](https://github.com/googleapis/java-storage/issues/2604)) ([8c79f39](https://github.com/googleapis/java-storage/commit/8c79f39ad78d100065c189bcf8e18644b29ff9ed)) +* Update junit-platform.version to v5.10.3 ([#2605](https://github.com/googleapis/java-storage/issues/2605)) ([a532ee4](https://github.com/googleapis/java-storage/commit/a532ee49e2ff5972ea8a2aabbab2dcf6fe0df774)) + +## 
[2.40.1](https://github.com/googleapis/java-storage/compare/v2.40.0...v2.40.1) (2024-06-26) + + +### Bug Fixes + +* Add a workaround to make sure grpc clients' hosts always match their universe domain ([#2588](https://github.com/googleapis/java-storage/issues/2588)) ([87bf737](https://github.com/googleapis/java-storage/commit/87bf7371b6c4300b0f306ca36d1918d52adf721b)) +* Include x-goog-user-project on resumable upload puts for grpc transport ([#2586](https://github.com/googleapis/java-storage/issues/2586)) ([6f2f504](https://github.com/googleapis/java-storage/commit/6f2f5045bb7c1dabdd9b1c19ce7d2b02163c0eb8)) +* Update grpc bidi resumable uploads to validate ack'd object size ([#2570](https://github.com/googleapis/java-storage/issues/2570)) ([5c9cecf](https://github.com/googleapis/java-storage/commit/5c9cecf04ceb3858d58b4e2e487ffd1dddf933ab)) +* Update grpc finalize on close resumable uploads to validate ack'd object size ([#2572](https://github.com/googleapis/java-storage/issues/2572)) ([55a6d15](https://github.com/googleapis/java-storage/commit/55a6d155e4c6a4c33f22ec87ff0b9f6ccfab7a83)) +* Update grpc single-shot uploads to validate ack'd object size ([#2567](https://github.com/googleapis/java-storage/issues/2567)) ([65c8808](https://github.com/googleapis/java-storage/commit/65c8808da9094365171f165dcf0654f56cf51207)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20240524-2.0.0 ([#2565](https://github.com/googleapis/java-storage/issues/2565)) ([d193243](https://github.com/googleapis/java-storage/commit/d193243e1bcc41d09d46f9aa521ed5dd1b374b52)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20240621-2.0.0 ([#2596](https://github.com/googleapis/java-storage/issues/2596)) ([73b8753](https://github.com/googleapis/java-storage/commit/73b8753c244ca8ac9605c1430251b6aebaf82905)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.32.0 
([#2597](https://github.com/googleapis/java-storage/issues/2597)) ([25940a4](https://github.com/googleapis/java-storage/commit/25940a4e8c8f178dcfb35ef7c77748650d7b1639)) + + +### Documentation + +* Add Hierarchical Namespace Bucket and Folders samples ([#2583](https://github.com/googleapis/java-storage/issues/2583)) ([3030081](https://github.com/googleapis/java-storage/commit/30300815f2faeef5780877dd74e2f8381b4a8caa)), closes [#2569](https://github.com/googleapis/java-storage/issues/2569) +* Remove allowlist note from Folders RPCs ([#2593](https://github.com/googleapis/java-storage/issues/2593)) ([82161de](https://github.com/googleapis/java-storage/commit/82161dedfb1962f39f5186ac6d8443046d6b1e88)) +* Update DeleteObject Sample to be clearer on object versioning behavior ([#2595](https://github.com/googleapis/java-storage/issues/2595)) ([79b7cf0](https://github.com/googleapis/java-storage/commit/79b7cf05326ea135c552cbeee1b97e7ff115189f)) + +## [2.40.0](https://github.com/googleapis/java-storage/compare/v2.39.0...v2.40.0) (2024-06-06) + + +### Features + +* Promote google-cloud-storage-control to GA ([#2575](https://github.com/googleapis/java-storage/issues/2575)) ([129f188](https://github.com/googleapis/java-storage/commit/129f188a9877b3bd71db3b00ba3d8d6e4095fb67)) + + +### Bug Fixes + +* Reduce Java 21 Virtual Thread Pinning in IO operations ([#2553](https://github.com/googleapis/java-storage/issues/2553)) ([498fd0b](https://github.com/googleapis/java-storage/commit/498fd0b391c9d1d01c453fe31abea7890c0b498f)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.31.0 ([#2571](https://github.com/googleapis/java-storage/issues/2571)) ([67ce3d6](https://github.com/googleapis/java-storage/commit/67ce3d6505e1e48f496ab52884f5f5c2852aa445)) +* Update dependency net.jqwik:jqwik to v1.8.5 ([#2563](https://github.com/googleapis/java-storage/issues/2563)) 
([88f7d86](https://github.com/googleapis/java-storage/commit/88f7d86d960aa6986cf41165d80d03a551502ec2)) + + +### Documentation + +* Managed Folders samples ([#2562](https://github.com/googleapis/java-storage/issues/2562)) ([5ffc1f2](https://github.com/googleapis/java-storage/commit/5ffc1f2fa75ac2e1c0022301e02b6ea6627faf03)) +* Update javadoc for createFrom ([#2522](https://github.com/googleapis/java-storage/issues/2522)) ([dc31e95](https://github.com/googleapis/java-storage/commit/dc31e95158e66d0eb4c85122edd29431a610a204)) + +## [2.39.0](https://github.com/googleapis/java-storage/compare/v2.38.0...v2.39.0) (2024-05-22) + + +### Features + +* Plumb PartNamingStrategy for Parallel Composite Uploads in Transfer Manager ([#2547](https://github.com/googleapis/java-storage/issues/2547)) ([79d721d](https://github.com/googleapis/java-storage/commit/79d721dc44d1c5f4b2df3697f62b958a3b9b999c)) + + +### Bug Fixes + +* Update GapicUnbufferedChunkedResumableWritableByteChannel to be tolerant of non-quantum writes ([#2537](https://github.com/googleapis/java-storage/issues/2537)) ([1701fde](https://github.com/googleapis/java-storage/commit/1701fde04a80aa8ce97c5e762158406024456782)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.30.1 ([#2550](https://github.com/googleapis/java-storage/issues/2550)) ([e9807ec](https://github.com/googleapis/java-storage/commit/e9807ecda46615c8d1108bcea7a25ecba8154b6c)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.2 ([#2552](https://github.com/googleapis/java-storage/issues/2552)) ([a207829](https://github.com/googleapis/java-storage/commit/a207829040c176909141de100e4273f5aa32e78f)) + +## [2.38.0](https://github.com/googleapis/java-storage/compare/v2.37.0...v2.38.0) (2024-05-09) + + +### Features + +* Promoted google-cloud-storage-control to beta ([#2531](https://github.com/googleapis/java-storage/issues/2531)) 
([09f7191](https://github.com/googleapis/java-storage/commit/09f719194fd9296ecb21758096b8e65eb208dfc2)) + + +### Bug Fixes + +* Add strict client side response validation for gRPC chunked resumable uploads ([#2527](https://github.com/googleapis/java-storage/issues/2527)) ([c1d1f4a](https://github.com/googleapis/java-storage/commit/c1d1f4a5c88d27296f69df0a832659e2b1eb9ca0)) +* An existing resource pattern value `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder=**}` to resource definition `storage.googleapis.com/ManagedFolder` is removed ([#2524](https://github.com/googleapis/java-storage/issues/2524)) ([7d7f526](https://github.com/googleapis/java-storage/commit/7d7f5263f7e7f5357c5970c68c4d6ff117fc9e93)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.39.0 ([#2501](https://github.com/googleapis/java-storage/issues/2501)) ([518d4be](https://github.com/googleapis/java-storage/commit/518d4beefdc3738fd19168baccd4016dd002d4fa)) +* ParallelCompositeUpload in Transfer Manager hangs when encountering OOM ([#2526](https://github.com/googleapis/java-storage/issues/2526)) ([67a7c6b](https://github.com/googleapis/java-storage/commit/67a7c6b6582b5afab9272b32e01897d009e7c0bf)) +* Update grpc WriteObject response handling to provide context when a failure happens ([#2532](https://github.com/googleapis/java-storage/issues/2532)) ([170a3f5](https://github.com/googleapis/java-storage/commit/170a3f54c8ccb6af108c35992e652c07ebb1109c)) +* Update GzipReadableByteChannel to be tolerant of one byte reads ([#2512](https://github.com/googleapis/java-storage/issues/2512)) ([87b63f4](https://github.com/googleapis/java-storage/commit/87b63f4995f7b9654f40a9585996ec4a73235858)) +* Update StorageOptions to carry forward fields that aren't part of ServiceOptions ([#2521](https://github.com/googleapis/java-storage/issues/2521)) ([b84654e](https://github.com/googleapis/java-storage/commit/b84654e79ee53087badc0315ea3fb2d1b0b9ca57)) + + +### Dependencies + +* 
Update dependency com.google.cloud:sdk-platform-java-config to v3.30.0 ([#2523](https://github.com/googleapis/java-storage/issues/2523)) ([3e573f7](https://github.com/googleapis/java-storage/commit/3e573f79b3935e423e200537549507ab1f252ada)) +* Update dependency info.picocli:picocli to v4.7.6 ([#2535](https://github.com/googleapis/java-storage/issues/2535)) ([f26888a](https://github.com/googleapis/java-storage/commit/f26888ab1467d6bbcad9bfced9360daa4abd2824)) + + +### Documentation + +* Add in Transfer Manager chunked upload/download samples ([#2518](https://github.com/googleapis/java-storage/issues/2518)) ([d1f6bcc](https://github.com/googleapis/java-storage/commit/d1f6bccc2c9b5da5c4ff196711c5cd410649d60f)) +* Update readme to include gradle instructions for storage control ([#2503](https://github.com/googleapis/java-storage/issues/2503)) ([50ac93b](https://github.com/googleapis/java-storage/commit/50ac93b6b61806911737e389253739436dfb515c)) +* Update TransportCompatibility annotation for Storage#blobWriteSession ([#2520](https://github.com/googleapis/java-storage/issues/2520)) ([b7d673c](https://github.com/googleapis/java-storage/commit/b7d673cbac52407d470eace9d08e1cea2584da30)) + +## [2.37.0](https://github.com/googleapis/java-storage/compare/v2.36.1...v2.37.0) (2024-04-19) + + +### Features + +* Adds a ZeroCopy response marshaller for grpc ReadObject handling ([#2489](https://github.com/googleapis/java-storage/issues/2489)) ([8c7404d](https://github.com/googleapis/java-storage/commit/8c7404dea8ef16a7b61accad4fe614e033a5ad2c)) +* Port BufferToDiskThenUpload to work with HttpStorageOptions ([#2473](https://github.com/googleapis/java-storage/issues/2473)) ([d84e255](https://github.com/googleapis/java-storage/commit/d84e25559afecc27026c7f4fe7aeaf0d0ce705b4)) +* Port DefaultBlobWriteSessionConfig to work with HttpStorageOptions ([#2472](https://github.com/googleapis/java-storage/issues/2472)) 
([e5772a4](https://github.com/googleapis/java-storage/commit/e5772a4f4ec5d747fcd403477a35ceeb94492280)) +* Port ParallelCompositeUploadBlobWriteSessionConfig to work with HttpStorageOptions ([#2474](https://github.com/googleapis/java-storage/issues/2474)) ([3bf6026](https://github.com/googleapis/java-storage/commit/3bf60264f47aad8101bb4b4cff9cc0449cf1c4f3)) +* Transfer Manager ParallelCompositeUploads ([#2494](https://github.com/googleapis/java-storage/issues/2494)) ([8b54549](https://github.com/googleapis/java-storage/commit/8b5454910c74563c9598f9d5cc23fa509ec749d9)) + + +### Bug Fixes + +* Ensure all BlobWriteSession types conform to the semantics specified in BlobWriteSession ([#2482](https://github.com/googleapis/java-storage/issues/2482)) ([d47afcf](https://github.com/googleapis/java-storage/commit/d47afcfe2bcb2e7e266a5197456b6f9661e91004)) +* Fix BidiBlobWriteSessionConfigs to respect preconditions ([#2481](https://github.com/googleapis/java-storage/issues/2481)) ([955d78a](https://github.com/googleapis/java-storage/commit/955d78af0fecd4b65506df4bfd44fc683e23c2ec)) +* Update ApiaryUnbufferedWritableByteChannel to be graceful of non-quantum aligned write calls ([#2493](https://github.com/googleapis/java-storage/issues/2493)) ([f548335](https://github.com/googleapis/java-storage/commit/f5483356a3ba6327a712ef2f5534b61a649174df)) +* Update BidiBlobWriteSessionConfig to respect a provided bufferSize ([#2471](https://github.com/googleapis/java-storage/issues/2471)) ([e1fb857](https://github.com/googleapis/java-storage/commit/e1fb8577e6b7a36981fb78937fe8c5dd606a4c2a)) +* Update grpc handling of IAM Policy etag to account for base64 encoding ([#2499](https://github.com/googleapis/java-storage/issues/2499)) ([032f2f2](https://github.com/googleapis/java-storage/commit/032f2f2693c6c75cc4ae0339be805c0bb94fa064)) +* Update Grpc Retry Conformance after new additions to testbench ([#2309](https://github.com/googleapis/java-storage/issues/2309)) 
([09043c5](https://github.com/googleapis/java-storage/commit/09043c5587ae9e6adeb425f5c2f7c4f65ec5e20e)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20240319-2.0.0 ([#2460](https://github.com/googleapis/java-storage/issues/2460)) ([9c2ee90](https://github.com/googleapis/java-storage/commit/9c2ee909d02fef5afc201589f5ced4062e2f14e8)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.28.1 ([#2467](https://github.com/googleapis/java-storage/issues/2467)) ([c12f329](https://github.com/googleapis/java-storage/commit/c12f3290a029c25cdac458cda44835c0a43c33e7)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.29.0 ([#2502](https://github.com/googleapis/java-storage/issues/2502)) ([7ed8446](https://github.com/googleapis/java-storage/commit/7ed8446dcbff165408331df97e2d15a1cd70b7be)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.43.0 ([#2459](https://github.com/googleapis/java-storage/issues/2459)) ([2dc4748](https://github.com/googleapis/java-storage/commit/2dc4748353be4fe06ec7df2e413ed5752d350f8b)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.44.0 ([#2497](https://github.com/googleapis/java-storage/issues/2497)) ([9b0253c](https://github.com/googleapis/java-storage/commit/9b0253ca1a03d6a90a1008794b00c6825d26ff0c)) + + +### Documentation + +* Add summary and reference docs for Storage Control API to readme ([#2485](https://github.com/googleapis/java-storage/issues/2485)) ([70fd088](https://github.com/googleapis/java-storage/commit/70fd088975280ed9a650ca09825ccb28297d89f7)) +* Create Samples for transfer manager ([#2492](https://github.com/googleapis/java-storage/issues/2492)) ([e2030b2](https://github.com/googleapis/java-storage/commit/e2030b281d45b69840962e0a80c4b6b85e300b20)) + +## [2.36.1](https://github.com/googleapis/java-storage/compare/v2.36.0...v2.36.1) (2024-03-20) + + +### Dependencies + +* Update 
dependency com.google.apis:google-api-services-storage to v1-rev20240311-2.0.0 ([#2446](https://github.com/googleapis/java-storage/issues/2446)) ([27b4780](https://github.com/googleapis/java-storage/commit/27b4780ff64ce068555887c1cc4f88489dde0a83)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.28.1 ([#2450](https://github.com/googleapis/java-storage/issues/2450)) ([bf35a9a](https://github.com/googleapis/java-storage/commit/bf35a9a041f012b56e4a858d48b018f1270f319d)) + +## [2.36.0](https://github.com/googleapis/java-storage/compare/v2.35.0...v2.36.0) (2024-03-15) + + +### Features + +* Add Custom Part Metadata Decorator to ParallelCompositeUploadConfig ([#2434](https://github.com/googleapis/java-storage/issues/2434)) ([43b8006](https://github.com/googleapis/java-storage/commit/43b800645ba3622e5de635825e1d082c6d26c2eb)) +* Add hierarchical namespace and folders features ([#2445](https://github.com/googleapis/java-storage/issues/2445)) ([8074fff](https://github.com/googleapis/java-storage/commit/8074fffed5208a8578e5afe694fdd3d8df627b8c)) +* Add soft delete feature ([#2403](https://github.com/googleapis/java-storage/issues/2403)) ([989f36f](https://github.com/googleapis/java-storage/commit/989f36fbb206832a6a3584c77546d3d560ac0df8)) + + +### Bug Fixes + +* Fix name digest for noprefix ([#2448](https://github.com/googleapis/java-storage/issues/2448)) ([12c9db8](https://github.com/googleapis/java-storage/commit/12c9db8935f25a5f9e4633af85ea96acaa914d23)) +* Missing serialVersionUID of serializable classes ([#2344](https://github.com/googleapis/java-storage/issues/2344)) ([736865b](https://github.com/googleapis/java-storage/commit/736865b4b97aa3940e1eab6a582d0ef38db31bba)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20240307-2.0.0 ([#2442](https://github.com/googleapis/java-storage/issues/2442)) ([1352203](https://github.com/googleapis/java-storage/commit/1352203859c3798423ef78823ed10577b93eebef)) 
+* Update dependency net.jqwik:jqwik to v1.8.4 ([#2447](https://github.com/googleapis/java-storage/issues/2447)) ([110b80c](https://github.com/googleapis/java-storage/commit/110b80cdde24da4868e46c3909020db7b02d5491)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.42.0 ([#2441](https://github.com/googleapis/java-storage/issues/2441)) ([80745d4](https://github.com/googleapis/java-storage/commit/80745d415810353b3466a3eda12f6ca50a2c71b0)) + +## [2.35.0](https://github.com/googleapis/java-storage/compare/v2.34.0...v2.35.0) (2024-03-04) + + +### Features + +* Add Bidi write feature ([#2343](https://github.com/googleapis/java-storage/issues/2343)) ([47fde85](https://github.com/googleapis/java-storage/commit/47fde853c17d16689a732d5d8eadc70a45efea49)) +* Add soft_delete_time and hard_delete_time object metadata fields ([#2415](https://github.com/googleapis/java-storage/issues/2415)) ([0b67e2d](https://github.com/googleapis/java-storage/commit/0b67e2d2a176e03bcac876bc0b081a69e981ca82)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.27.0 ([#2432](https://github.com/googleapis/java-storage/issues/2432)) ([334119c](https://github.com/googleapis/java-storage/commit/334119ca7e63858b9d1534e0b0b72214de0ad0d1)) +* Update dependency net.jqwik:jqwik to v1.8.3 ([#2425](https://github.com/googleapis/java-storage/issues/2425)) ([17f366c](https://github.com/googleapis/java-storage/commit/17f366c05c2b299ee17e3a936aa470c96c09e5a7)) +* Update dependency net.jqwik:jqwik to v1.8.3 ([#2428](https://github.com/googleapis/java-storage/issues/2428)) ([ba485b3](https://github.com/googleapis/java-storage/commit/ba485b3347f84f35465dcbe63443fdff9c43c7cf)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.1 ([#2421](https://github.com/googleapis/java-storage/issues/2421)) ([fff0295](https://github.com/googleapis/java-storage/commit/fff0295f2c397f60eae1dc9215b2ba3cbba42f33)) +* Update dependency 
org.graalvm.buildtools:native-maven-plugin to v0.10.1 ([#2427](https://github.com/googleapis/java-storage/issues/2427)) ([d031c46](https://github.com/googleapis/java-storage/commit/d031c465064c86844293b0f0395bf8734bbcd91e)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.41.0 ([#2419](https://github.com/googleapis/java-storage/issues/2419)) ([4b05639](https://github.com/googleapis/java-storage/commit/4b05639756eb19ca197c882581cd6ad1a1729683)) + + +### Documentation + +* Add samples for object retention ([#2417](https://github.com/googleapis/java-storage/issues/2417)) ([45837d3](https://github.com/googleapis/java-storage/commit/45837d33a88deb017a97e403b1437ab15c2c8c67)) + +## [2.34.0](https://github.com/googleapis/java-storage/compare/v2.33.0...v2.34.0) (2024-02-14) + + +### Features + +* Enable automatic retries for create RenameFolder LRO in Storage Control API ([#2410](https://github.com/googleapis/java-storage/issues/2410)) ([b0450b8](https://github.com/googleapis/java-storage/commit/b0450b8eca26cd018f81a7b7236a1901d8b35b9b)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20240205-2.0.0 ([#2400](https://github.com/googleapis/java-storage/issues/2400)) ([70d8c8b](https://github.com/googleapis/java-storage/commit/70d8c8b83b58c47ebb43c7ec15734c1237dc41ac)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20240209-2.0.0 ([#2407](https://github.com/googleapis/java-storage/issues/2407)) ([6a25dbb](https://github.com/googleapis/java-storage/commit/6a25dbbd8d8e817fb13ac2b9018a07343733af94)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.25.0 ([#2411](https://github.com/googleapis/java-storage/issues/2411)) ([cba3415](https://github.com/googleapis/java-storage/commit/cba341551cb449bf74654ee33693d55c5c2aed4e)) + +## [2.33.0](https://github.com/googleapis/java-storage/compare/v2.32.1...v2.33.0) (2024-02-07) + + +### Features + +* Adds 
TPC support ([#2362](https://github.com/googleapis/java-storage/issues/2362)) ([8b636db](https://github.com/googleapis/java-storage/commit/8b636dbe8a16b9b81afbb51b8d0032bcbdbef635)) + + +### Bug Fixes + +* Temporarily restoring storage_grpc_service_config.json to fix broken test ([#2365](https://github.com/googleapis/java-storage/issues/2365)) ([50b6610](https://github.com/googleapis/java-storage/commit/50b6610de94d44815d80381649e7d3a86c38f894)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20240202-2.0.0 ([#2393](https://github.com/googleapis/java-storage/issues/2393)) ([3391e1d](https://github.com/googleapis/java-storage/commit/3391e1d3d925604fe57a69c40c823e863ce7593e)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.24.0 ([#2389](https://github.com/googleapis/java-storage/issues/2389)) ([9e4e00d](https://github.com/googleapis/java-storage/commit/9e4e00db40e8235b513f7f798674aba31631ddf4)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.0 ([#2390](https://github.com/googleapis/java-storage/issues/2390)) ([59ac502](https://github.com/googleapis/java-storage/commit/59ac502ee4c063ff46aa9dd0409df2e6fda7bc99)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.10.2 ([#2394](https://github.com/googleapis/java-storage/issues/2394)) ([8dbd4bf](https://github.com/googleapis/java-storage/commit/8dbd4bfb4f76634f97b2e9cd3487128f4c98f58a)) +* Update junit-platform.version to v5.10.2 ([#2395](https://github.com/googleapis/java-storage/issues/2395)) ([0c5aef3](https://github.com/googleapis/java-storage/commit/0c5aef34682ee56786f2006156f4cd645c22d8a8)) + +## [2.32.1](https://github.com/googleapis/java-storage/compare/v2.32.0...v2.32.1) (2024-01-24) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.23.0 ([#2378](https://github.com/googleapis/java-storage/issues/2378)) 
([798aadf](https://github.com/googleapis/java-storage/commit/798aadfd585cc584c57cf702f4c7ba341a0b6718)) + +## [2.32.0](https://github.com/googleapis/java-storage/compare/v2.31.0...v2.32.0) (2024-01-22) + + +### Features + +* Add ability to create a PCU Prefix at the object level ([#2345](https://github.com/googleapis/java-storage/issues/2345)) ([d39a4e4](https://github.com/googleapis/java-storage/commit/d39a4e46eb429b77ee75d822521ed31567ed5ec4)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.22.0 ([#2363](https://github.com/googleapis/java-storage/issues/2363)) ([e2e2d7e](https://github.com/googleapis/java-storage/commit/e2e2d7ef5e2bc353357053df7bc26adeb0fe074a)) + +## [2.31.0](https://github.com/googleapis/java-storage/compare/v2.30.1...v2.31.0) (2024-01-09) + + +### Features + +* Next release from main branch is 2.31.0 ([#2346](https://github.com/googleapis/java-storage/issues/2346)) ([2855f11](https://github.com/googleapis/java-storage/commit/2855f11e21b6aca1ea15fad45130ea689fbf4e23)) + + +### Bug Fixes + +* Add an exception to zero byte uploads on CreateFrom ([#2342](https://github.com/googleapis/java-storage/issues/2342)) ([2b5b27e](https://github.com/googleapis/java-storage/commit/2b5b27eac3279db815b36b252830d0905ade0665)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.31.0 ([#2349](https://github.com/googleapis/java-storage/issues/2349)) ([578d5d0](https://github.com/googleapis/java-storage/commit/578d5d01f4469409b6d8731bbdc965ab224956ad)) + + +### Dependencies + +* Update actions/setup-java action to v4 ([#2321](https://github.com/googleapis/java-storage/issues/2321)) ([d7e5b20](https://github.com/googleapis/java-storage/commit/d7e5b20b2e675025e73ee1586014f52f76ed36e6)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20231202-2.0.0 ([#2327](https://github.com/googleapis/java-storage/issues/2327)) 
([8983c39](https://github.com/googleapis/java-storage/commit/8983c393127a36cce9a359a912b24710b347e5e9)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20240105-2.0.0 ([#2351](https://github.com/googleapis/java-storage/issues/2351)) ([e9608c6](https://github.com/googleapis/java-storage/commit/e9608c6e8d59deae714b503792f995f157fd2aaf)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.21.0 ([#2348](https://github.com/googleapis/java-storage/issues/2348)) ([321adb6](https://github.com/googleapis/java-storage/commit/321adb65ae06578e2b2654f1f36ad73b31d20fbd)) + +## [2.30.1](https://github.com/googleapis/java-storage/compare/v2.30.0...v2.30.1) (2023-12-06) + + +### Bug Fixes + +* Revert ReadAllBytes fix ([#2331](https://github.com/googleapis/java-storage/issues/2331)) ([4b8458f](https://github.com/googleapis/java-storage/commit/4b8458f01f4375b18f1c447761e8419d43d0cbc9)) + +## [2.30.0](https://github.com/googleapis/java-storage/compare/v2.29.1...v2.30.0) (2023-12-01) + + +### Features + +* Add object retention feature ([#2277](https://github.com/googleapis/java-storage/issues/2277)) ([3deb29b](https://github.com/googleapis/java-storage/commit/3deb29b2b78310a46b268e20d88f383b9a818e0b)) + + +### Bug Fixes + +* Fix JSON read handling when socket broken resulting in partial bytes copied ([#2303](https://github.com/googleapis/java-storage/issues/2303)) ([d4bfcf0](https://github.com/googleapis/java-storage/commit/d4bfcf0d4d5155a78c9230bc85be739f99391b1a)), closes [#2301](https://github.com/googleapis/java-storage/issues/2301) +* Fix Storage#readAllBytes to allow reading compressed bytes ([#2304](https://github.com/googleapis/java-storage/issues/2304)) ([68b96a9](https://github.com/googleapis/java-storage/commit/68b96a97352925a6142e677949395044ddc6c605)) +* Update implementation of readAllBytes and downloadTo to be more robust to retryable errors ([#2305](https://github.com/googleapis/java-storage/issues/2305)) 
([21821da](https://github.com/googleapis/java-storage/commit/21821da1483570b0b913f6bfcbc7c5637a637bca)) + + +### Dependencies + +* Update actions/github-script action to v7 ([#2300](https://github.com/googleapis/java-storage/issues/2300)) ([062df62](https://github.com/googleapis/java-storage/commit/062df62948c5bd4fc11090f42495043fc697c38a)) +* Update actions/github-script action to v7 ([#2302](https://github.com/googleapis/java-storage/issues/2302)) ([c46c24f](https://github.com/googleapis/java-storage/commit/c46c24f64ab0164cffc30b68ed070d854bb4b156)) +* Update actions/github-script action to v7 ([#2318](https://github.com/googleapis/java-storage/issues/2318)) ([44c1b00](https://github.com/googleapis/java-storage/commit/44c1b0022fe892177ae4ae45acaf52e8e9971ec2)) +* Update actions/setup-java action to v4 ([#2315](https://github.com/googleapis/java-storage/issues/2315)) ([5791470](https://github.com/googleapis/java-storage/commit/57914706d2ef8de9895c0c80e6b89755b378c89f)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20231117-2.0.0 ([#2320](https://github.com/googleapis/java-storage/issues/2320)) ([b5a34d8](https://github.com/googleapis/java-storage/commit/b5a34d8ce920b0d0a766e4dc9d488c5e4e5e21ba)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.20.0 ([#2316](https://github.com/googleapis/java-storage/issues/2316)) ([49cdc20](https://github.com/googleapis/java-storage/commit/49cdc2085adbba5024b27fda5b1add328de82684)) +* Update dependency net.jqwik:jqwik to v1.8.2 ([#2312](https://github.com/googleapis/java-storage/issues/2312)) ([70d1d1c](https://github.com/googleapis/java-storage/commit/70d1d1c91c28e9a1fa5f9f938301ff5346a56a7b)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.10.1 ([#2292](https://github.com/googleapis/java-storage/issues/2292)) ([7471a7c](https://github.com/googleapis/java-storage/commit/7471a7ced7a3d53c67c08aeb9a56bb7b2b23b2d0)) +* Update 
gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.40.0 ([#2306](https://github.com/googleapis/java-storage/issues/2306)) ([29f4ea6](https://github.com/googleapis/java-storage/commit/29f4ea60891914eb87db9227a76b1c51b818a492)) +* Update junit-platform.version to v5.10.1 ([#2293](https://github.com/googleapis/java-storage/issues/2293)) ([6e36cc4](https://github.com/googleapis/java-storage/commit/6e36cc4feba180247a87b095e4f9c145f724d9e8)) + + +### Documentation + +* Add @TransportCompatibility to StorageBatch ([#2276](https://github.com/googleapis/java-storage/issues/2276)) ([d3b7bb3](https://github.com/googleapis/java-storage/commit/d3b7bb38537117c508527fd843bc9450ebbfe1ee)) + +## [2.29.1](https://github.com/googleapis/java-storage/compare/v2.29.0...v2.29.1) (2023-11-02) + + +### Bug Fixes + +* Improve 503 handling for json resumable uploads ([#2289](https://github.com/googleapis/java-storage/issues/2289)) ([9b4bb82](https://github.com/googleapis/java-storage/commit/9b4bb8221294bcd94037b69281a37f33b364b174)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20231028-2.0.0 ([#2281](https://github.com/googleapis/java-storage/issues/2281)) ([94b8dd6](https://github.com/googleapis/java-storage/commit/94b8dd601d33c25edcff05885b0fadf0decbb86e)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.19.0 ([#2288](https://github.com/googleapis/java-storage/issues/2288)) ([cc65fd0](https://github.com/googleapis/java-storage/commit/cc65fd0897a064d7efc7b50f33acecee9ffa2c4b)) + +## [2.29.0](https://github.com/googleapis/java-storage/compare/v2.28.0...v2.29.0) (2023-10-23) + + +### Features + +* Add Autoclass v2.1 support ([#2258](https://github.com/googleapis/java-storage/issues/2258)) ([a134994](https://github.com/googleapis/java-storage/commit/a13499453932189d3b4efdbb8d774e6d15a96cc1)) + + +### Bug Fixes + +* Remove PCU internals which should be hidden 
([#2263](https://github.com/googleapis/java-storage/issues/2263)) ([eff00fb](https://github.com/googleapis/java-storage/commit/eff00fbb9b36f7225291cf0be0b5d7aab3ea2114)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20231012-2.0.0 ([#2257](https://github.com/googleapis/java-storage/issues/2257)) ([e75d8bf](https://github.com/googleapis/java-storage/commit/e75d8bf85db50427a4b89089cca9d88fe4d0b6d9)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.18.0 ([#2265](https://github.com/googleapis/java-storage/issues/2265)) ([40bf665](https://github.com/googleapis/java-storage/commit/40bf6654301175ee5113af9be29be2fb80224af1)) +* Update dependency net.jqwik:jqwik to v1.8.1 ([#2267](https://github.com/googleapis/java-storage/issues/2267)) ([c518c75](https://github.com/googleapis/java-storage/commit/c518c7512e71537cd9468bb0be6ceaa7515cf651)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.28 ([#2266](https://github.com/googleapis/java-storage/issues/2266)) ([b0b1b57](https://github.com/googleapis/java-storage/commit/b0b1b57162e014994b432e94a532df4f0003c628)) + +## [2.28.0](https://github.com/googleapis/java-storage/compare/v2.27.1...v2.28.0) (2023-10-10) + + +### Features + +* Add @BetaApi BlobWriteSession#parallelCompositeUpload ([#2239](https://github.com/googleapis/java-storage/issues/2239)) ([f8f4e22](https://github.com/googleapis/java-storage/commit/f8f4e221ae81cf69387827014c9f98a466bb7f19)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.17.0 ([#2250](https://github.com/googleapis/java-storage/issues/2250)) ([df687f2](https://github.com/googleapis/java-storage/commit/df687f2604be214f05d2bebc94e4d81421003577)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.39.0 ([#2249](https://github.com/googleapis/java-storage/issues/2249)) 
([fe85df9](https://github.com/googleapis/java-storage/commit/fe85df9bf46d76f067819ce3ed94d874818a739e)) + + +### Documentation + +* Reorder columns in table of BlobWriteSessionConfigs.java ([#2242](https://github.com/googleapis/java-storage/issues/2242)) ([98e694f](https://github.com/googleapis/java-storage/commit/98e694f3182b194f5e6fbde4fb0a924f17a06c40)) + +## [2.27.1](https://github.com/googleapis/java-storage/compare/v2.27.0...v2.27.1) (2023-09-25) + + +### Bug Fixes + +* Consider Storage#delete(BlobId) idempotent when id has generation ([#2222](https://github.com/googleapis/java-storage/issues/2222)) ([453dd63](https://github.com/googleapis/java-storage/commit/453dd63e63d899197b25b58452c08fa0b41f75b0)) +* Update RecoveryFileManager to allow distinct files for multiple invocations of equivalent info ([#2207](https://github.com/googleapis/java-storage/issues/2207)) ([44e9dd5](https://github.com/googleapis/java-storage/commit/44e9dd558bb979f0c7607ad24f07e6d03a641c66)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20230914-2.0.0 ([#2213](https://github.com/googleapis/java-storage/issues/2213)) ([0af39d6](https://github.com/googleapis/java-storage/commit/0af39d6be333dd6993785491f14e1bd8942573ea)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20230922-2.0.0 ([#2223](https://github.com/googleapis/java-storage/issues/2223)) ([844fe80](https://github.com/googleapis/java-storage/commit/844fe803af925c7501910a5ea89f9ddadc18b5b9)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.16.0 ([#2219](https://github.com/googleapis/java-storage/issues/2219)) ([7401f21](https://github.com/googleapis/java-storage/commit/7401f2173f60f34506d5d65396b3baadd52de5cc)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.16.1 ([#2225](https://github.com/googleapis/java-storage/issues/2225)) 
([f092c4e](https://github.com/googleapis/java-storage/commit/f092c4ebbbbf378511c6e1d663c4656dc03a0724)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.27 ([#2210](https://github.com/googleapis/java-storage/issues/2210)) ([3c77530](https://github.com/googleapis/java-storage/commit/3c77530392dac2eca04585506e839d3215d44f0c)) +* Update gapic-generator-java to 2.26.0 ([#2224](https://github.com/googleapis/java-storage/issues/2224)) ([a105736](https://github.com/googleapis/java-storage/commit/a105736535429418740e8e8d72e92e5c705575f1)) + +## [2.27.0](https://github.com/googleapis/java-storage/compare/v2.26.1...v2.27.0) (2023-09-12) + + +### Features + +* Add new JournalingBlobWriteSessionConfig usable with gRPC transport ([#2194](https://github.com/googleapis/java-storage/issues/2194)) ([8880d94](https://github.com/googleapis/java-storage/commit/8880d94c3d1a737dd4492cf66a16ba5e08633a70)) +* Follow-up CLI Improvements ([#2184](https://github.com/googleapis/java-storage/issues/2184)) ([d985976](https://github.com/googleapis/java-storage/commit/d9859768081ea6f872097851d3e318b5bad384d9)) +* Initial CLI for SSB integration and Workload 1 ([#2166](https://github.com/googleapis/java-storage/issues/2166)) ([a349735](https://github.com/googleapis/java-storage/commit/a349735e7fe108e623a330afec0c8cd608ebeef9)) + + +### Bug Fixes + +* A resumable session without a Range header should be interpreted as 0 length ([#2182](https://github.com/googleapis/java-storage/issues/2182)) ([5302201](https://github.com/googleapis/java-storage/commit/53022011d83e6a8515a5ba008fc45fc2dae39cea)) +* Update User-Agent handling for resumable uploads ([#2168](https://github.com/googleapis/java-storage/issues/2168)) ([665b714](https://github.com/googleapis/java-storage/commit/665b714f421d3c13b557d0ff71460c328c010856)) +* Update version resolution logic to be more resilient ([#2169](https://github.com/googleapis/java-storage/issues/2169)) 
([c89d275](https://github.com/googleapis/java-storage/commit/c89d27508039a014ea5a6dd8d4889f63d07db73f)) + + +### Dependencies + +* Update actions/checkout action to v4 ([#2188](https://github.com/googleapis/java-storage/issues/2188)) ([c10267e](https://github.com/googleapis/java-storage/commit/c10267e176bda21cd5755dfb0e96d0504fbc1d54)) +* Update actions/checkout action to v4 ([#2189](https://github.com/googleapis/java-storage/issues/2189)) ([5c048c4](https://github.com/googleapis/java-storage/commit/5c048c499eef224dade8f4409dfae732cb5a7017)) +* Update actions/checkout action to v4 ([#2190](https://github.com/googleapis/java-storage/issues/2190)) ([45e66e8](https://github.com/googleapis/java-storage/commit/45e66e89373ef016eff9b7deb30dbdfa818770d2)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20230710-2.0.0 ([#2162](https://github.com/googleapis/java-storage/issues/2162)) ([73a9f75](https://github.com/googleapis/java-storage/commit/73a9f75d000d2a59cd680fd383a9f9e1b91570cf)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20230907-2.0.0 ([#2200](https://github.com/googleapis/java-storage/issues/2200)) ([1fa49db](https://github.com/googleapis/java-storage/commit/1fa49db2810f6ffbd46755b4eb1f5efdcf980edb)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.15.0 ([#2197](https://github.com/googleapis/java-storage/issues/2197)) ([26552f4](https://github.com/googleapis/java-storage/commit/26552f4b78f77d90df4e3dfb829c3f9c092fc817)) +* Update dependency info.picocli:picocli to v4.7.4 ([#2177](https://github.com/googleapis/java-storage/issues/2177)) ([0c90814](https://github.com/googleapis/java-storage/commit/0c908147375fe58ac280179f5fba10bdd3886003)) +* Update dependency info.picocli:picocli to v4.7.5 ([#2183](https://github.com/googleapis/java-storage/issues/2183)) ([f244861](https://github.com/googleapis/java-storage/commit/f2448615ded6d9f43344bf1b9cda7ae3b191223b)) +* Update dependency 
net.jqwik:jqwik to v1.8.0 ([#2187](https://github.com/googleapis/java-storage/issues/2187)) ([aedbd6a](https://github.com/googleapis/java-storage/commit/aedbd6a811c4fcfedff68d7d46bb68e93bf9eeee)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.26 ([#2196](https://github.com/googleapis/java-storage/issues/2196)) ([4f8bb65](https://github.com/googleapis/java-storage/commit/4f8bb658e9ff3cba5e745acae13ec4094a1a48d5)) + +## [2.26.1](https://github.com/googleapis/java-storage/compare/v2.26.0...v2.26.1) (2023-08-14) + + +### Bug Fixes + +* Make use of ImmutableMap.Builder#buildOrThrow graceful ([#2159](https://github.com/googleapis/java-storage/issues/2159)) ([e9746f8](https://github.com/googleapis/java-storage/commit/e9746f856e9204c1c0ec62f19e6f71ff8a0b9750)) +* Update gRPC writeAndClose to only set finish_write on the last message ([#2163](https://github.com/googleapis/java-storage/issues/2163)) ([95df758](https://github.com/googleapis/java-storage/commit/95df758d6753005226556177e68a3e9c630c789b)) + + +### Dependencies + +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.24 ([#2158](https://github.com/googleapis/java-storage/issues/2158)) ([4f5682a](https://github.com/googleapis/java-storage/commit/4f5682a4f6d6d5372a2d382ae3e47dace490ca0d)) + +## [2.26.0](https://github.com/googleapis/java-storage/compare/v2.25.0...v2.26.0) (2023-08-03) + + +### Features + +* Implement BufferToDiskThenUpload BlobWriteSessionConfig ([#2139](https://github.com/googleapis/java-storage/issues/2139)) ([4dad2d5](https://github.com/googleapis/java-storage/commit/4dad2d5c3a81eda7190ad4f95316471e7fa30f66)) +* Introduce new BlobWriteSession ([#2123](https://github.com/googleapis/java-storage/issues/2123)) ([e0191b5](https://github.com/googleapis/java-storage/commit/e0191b518e50a49fae0691894b50f0c5f33fc6af)) + + +### Bug Fixes + +* **grpc:** Return error if credentials are detected to be null 
([#2142](https://github.com/googleapis/java-storage/issues/2142)) ([b61a976](https://github.com/googleapis/java-storage/commit/b61a9764a9d953d2b214edb2b543b8df42fbfa06)) +* Possible NPE when HttpStorageOptions deserialized ([#2153](https://github.com/googleapis/java-storage/issues/2153)) ([68ad8e7](https://github.com/googleapis/java-storage/commit/68ad8e7357097e3dd161c2ab5f7a42a060a3702c)) +* Update grpc default metadata projection to include acl same as json ([#2150](https://github.com/googleapis/java-storage/issues/2150)) ([330e795](https://github.com/googleapis/java-storage/commit/330e795040592e5df22d44fb5216ad7cf2448e81)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.14.0 ([#2151](https://github.com/googleapis/java-storage/issues/2151)) ([eba8b6a](https://github.com/googleapis/java-storage/commit/eba8b6a235919a27d1f6dadf770140c7d143aa1a)) + +## [2.25.0](https://github.com/googleapis/java-storage/compare/v2.24.0...v2.25.0) (2023-07-24) + + +### Features + +* BlobWriteChannelV2 - same throughput less GC ([#2110](https://github.com/googleapis/java-storage/issues/2110)) ([1b52a10](https://github.com/googleapis/java-storage/commit/1b52a1053130620011515060787bada10c324c0b)) +* Update Storage.createFrom(BlobInfo, Path) to have 150% higher throughput ([#2059](https://github.com/googleapis/java-storage/issues/2059)) ([4c2f44e](https://github.com/googleapis/java-storage/commit/4c2f44e28a1ff19ffb2a02e3cefc062a1dd98fdc)) + + +### Bug Fixes + +* Update BlobWriteChannelV2 to properly carry forward offset after incremental flush ([#2125](https://github.com/googleapis/java-storage/issues/2125)) ([c099a2f](https://github.com/googleapis/java-storage/commit/c099a2f4f8ea9afa6953270876653916b021fd9f)) +* Update GrpcStorageImpl.createFrom(BlobInfo, Path) to use RewindableContent ([#2112](https://github.com/googleapis/java-storage/issues/2112)) 
([c805051](https://github.com/googleapis/java-storage/commit/c80505129baa831e492a5514e937875407211595)) + + +### Documentation + +* Fix broken link for TESTING.md ([#2126](https://github.com/googleapis/java-storage/issues/2126)) ([fe9662d](https://github.com/googleapis/java-storage/commit/fe9662d7e552aabfc9012e582ae634f46af1f255)) + + +### Dependencies + +* **test:** Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.37.0 ([#2130](https://github.com/googleapis/java-storage/issues/2130)) ([9e8b6d3](https://github.com/googleapis/java-storage/commit/9e8b6d324bfef84e2c2ee93c424b2e7fcb601945)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.13.1 ([#2129](https://github.com/googleapis/java-storage/issues/2129)) ([a7e854e](https://github.com/googleapis/java-storage/commit/a7e854ecb4d7fa9508a8d0844fc08d9eeab6f653)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.10.0 ([#2132](https://github.com/googleapis/java-storage/issues/2132)) ([f5477e4](https://github.com/googleapis/java-storage/commit/f5477e4eb168dfd3151d0e2c0462e5b35eb3de37)) +* Update junit-platform.version to v5.10.0 ([#2133](https://github.com/googleapis/java-storage/issues/2133)) ([3d22686](https://github.com/googleapis/java-storage/commit/3d2268642dcecea8ca55e78479ad4e4390075814)) + +## [2.24.0](https://github.com/googleapis/java-storage/compare/v2.23.0...v2.24.0) (2023-07-11) + + +### Features + +* Add validation around bytes received vs bytes expected ([#2078](https://github.com/googleapis/java-storage/issues/2078)) ([45d142a](https://github.com/googleapis/java-storage/commit/45d142a7ba45a80fca05fc9d8c8d7ce376d3f39c)) +* Initial Preview of Transfer Manager [#2105](https://github.com/googleapis/java-storage/issues/2105) ([8b17574](https://github.com/googleapis/java-storage/commit/8b175745d306e3da58ec3c9307442f14a34fd4ef)) + + +### Bug Fixes + +* Update grpc upload logic to follow hashing behavior of json 
([#2107](https://github.com/googleapis/java-storage/issues/2107)) ([ed05232](https://github.com/googleapis/java-storage/commit/ed05232889c0442f43eacde5dbc80f44f09ebe83)) +* Update UploadCallable to use createFrom to avoid NPE trying to resolve resulting object ([#2086](https://github.com/googleapis/java-storage/issues/2086)) ([6769a2b](https://github.com/googleapis/java-storage/commit/6769a2b3b06fb297ebce819fbdfba5ee377be147)) + + +### Documentation + +* Javadocs for remainder of Transfer Manager ([#2097](https://github.com/googleapis/java-storage/issues/2097)) ([0362e80](https://github.com/googleapis/java-storage/commit/0362e80386d8055811082204536f596ba948a963)) +* Javadocs for TransferManager interface and ParallelUploadConfig ([#2094](https://github.com/googleapis/java-storage/issues/2094)) ([63d8ed3](https://github.com/googleapis/java-storage/commit/63d8ed37a80304e57b3f48aae614af89a9543ffa)) +* **samples:** Add storage_grpc_quickstart samples ([#2041](https://github.com/googleapis/java-storage/issues/2041)) ([5f916fb](https://github.com/googleapis/java-storage/commit/5f916fbba789edd33c2b4195db885d2b0191f860)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.12.0 ([#2088](https://github.com/googleapis/java-storage/issues/2088)) ([18adb5a](https://github.com/googleapis/java-storage/commit/18adb5a18fc37eb3bb3553fe20648d68a53f987a)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.13.0 ([#2106](https://github.com/googleapis/java-storage/issues/2106)) ([734dee0](https://github.com/googleapis/java-storage/commit/734dee0402fc16a3cc8d842eb135cc5743586e35)) +* Update dependency io.grpc:grpc-bom to v1.56.1 ([#2101](https://github.com/googleapis/java-storage/issues/2101)) ([3102941](https://github.com/googleapis/java-storage/commit/31029414d8e758c01faaab398a701afebf5e934d)) +* Update dependency net.jqwik:jqwik to v1.7.4 ([#2103](https://github.com/googleapis/java-storage/issues/2103)) 
([c681ccf](https://github.com/googleapis/java-storage/commit/c681ccfffe6b861c19b92e496363146adc7d862b)) +* Update gcr.io/cloud-devrel-public-resources/storage-testbench docker tag to v0.36.0 ([#2080](https://github.com/googleapis/java-storage/issues/2080)) ([ba5c11a](https://github.com/googleapis/java-storage/commit/ba5c11a1589d1c7ffefd15e921da914289bc219e)) + +## [2.23.0](https://github.com/googleapis/java-storage/compare/v2.22.4...v2.23.0) (2023-06-21) + + +### Features + +* Add new dedup utility method to Option classes ([#2063](https://github.com/googleapis/java-storage/issues/2063)) ([2ad196c](https://github.com/googleapis/java-storage/commit/2ad196c063e67c7efdac344792b67de3479d789d)) + + +### Dependencies + +* Update dependencies io.grpc:* to v1.56.0 ([#2072](https://github.com/googleapis/java-storage/issues/2072)) ([e10bde2](https://github.com/googleapis/java-storage/commit/e10bde26416bcf17401516e43949e12246f4831c)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20230617-2.0.0 ([#2077](https://github.com/googleapis/java-storage/issues/2077)) ([9f618cd](https://github.com/googleapis/java-storage/commit/9f618cddbeb471f7bd0f2332c70e501afbaccc36)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.23 ([#2074](https://github.com/googleapis/java-storage/issues/2074)) ([427f330](https://github.com/googleapis/java-storage/commit/427f330793a20b0c3da4cbe5e85984a0df508c79)) + +## [2.22.4](https://github.com/googleapis/java-storage/compare/v2.22.3...v2.22.4) (2023-06-07) + + +### Bug Fixes + +* Include invocation-id for resumable PUTs ([#2047](https://github.com/googleapis/java-storage/issues/2047)) ([a590ae3](https://github.com/googleapis/java-storage/commit/a590ae3856fe8cccde17def5ca6bef8cfeed5073)) + + +### Documentation + +* Add documentation about quantum write alignment, use of ObjectChecksums, Bucket.name format, and Object.delete_time ([#2032](https://github.com/googleapis/java-storage/issues/2032)) 
([c39ec13](https://github.com/googleapis/java-storage/commit/c39ec13469342ed947b12dd7ab3d301ca1d4d01b)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.11.0 ([#2050](https://github.com/googleapis/java-storage/issues/2050)) ([88fffbd](https://github.com/googleapis/java-storage/commit/88fffbd56746ff212441fafd1e213177feac0cac)) + +## [2.22.3](https://github.com/googleapis/java-storage/compare/v2.22.2...v2.22.3) (2023-05-30) + + +### Bug Fixes + +* Add retries for auth service errors which are tagged Retryable ([#2026](https://github.com/googleapis/java-storage/issues/2026)) ([3675514](https://github.com/googleapis/java-storage/commit/367551439faab6e5e4ad516356ab146f10c96734)) +* Make GrpcBlobWriteChannel open upon construction ([#2022](https://github.com/googleapis/java-storage/issues/2022)) ([6549736](https://github.com/googleapis/java-storage/commit/65497365072a350be2587995e70fd4c9c75620a1)) +* Plumb GrpcInterceptorProvider to constructed InstantiatingGrpcChannelProvider ([#2031](https://github.com/googleapis/java-storage/issues/2031)) ([bfe0415](https://github.com/googleapis/java-storage/commit/bfe0415882e241c60147018abeacda3daabd6653)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.10.0 ([#2033](https://github.com/googleapis/java-storage/issues/2033)) ([54d6332](https://github.com/googleapis/java-storage/commit/54d633235c2bdb791cce15f119a7fbeb7c9c56f6)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.10.1 ([#2035](https://github.com/googleapis/java-storage/issues/2035)) ([0bdb5ec](https://github.com/googleapis/java-storage/commit/0bdb5ec111e25765550ffb73bba28f7ee580845d)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.22 ([#2016](https://github.com/googleapis/java-storage/issues/2016)) ([b4c4655](https://github.com/googleapis/java-storage/commit/b4c46558e9b443f65de3fb9461e81141a8364bb7)) + +## 
[2.22.2](https://github.com/googleapis/java-storage/compare/v2.22.1...v2.22.2) (2023-05-09) + + +### Bug Fixes + +* Make Blob and Bucket update diff aware ([#1994](https://github.com/googleapis/java-storage/issues/1994)) ([0adeb14](https://github.com/googleapis/java-storage/commit/0adeb14479fc9aef35e32d286bcd9ae414eda25a)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.9.0 ([#2010](https://github.com/googleapis/java-storage/issues/2010)) ([27b9014](https://github.com/googleapis/java-storage/commit/27b9014a9dd67ee209c2ef59b0347a1a8beab257)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.9.3 ([#1996](https://github.com/googleapis/java-storage/issues/1996)) ([7839768](https://github.com/googleapis/java-storage/commit/7839768123d7e8a5a28076b6cc32dc0cd983d540)) +* Update junit-platform.version to v5.9.3 ([#1997](https://github.com/googleapis/java-storage/issues/1997)) ([5683340](https://github.com/googleapis/java-storage/commit/56833407f12f7efba423c0b7779a66c6f956777c)) + +## [2.22.1](https://github.com/googleapis/java-storage/compare/v2.22.0...v2.22.1) (2023-04-26) + + +### Bug Fixes + +* Introduce new BlobId#toGsUtilUriWithGeneration ([#1998](https://github.com/googleapis/java-storage/issues/1998)) ([68de5c7](https://github.com/googleapis/java-storage/commit/68de5c7fae6df2f3df5c8c413b206bd7c7e230ea)) +* Update grpc x-goog-user-project handling gracefulness ([#1983](https://github.com/googleapis/java-storage/issues/1983)) ([f274739](https://github.com/googleapis/java-storage/commit/f2747391b6daadd6a12a2dcb54714cce237eb1ac)) + + +### Documentation + +* Update routing annotations for CancelResumableWriteRequest and QueryWriteStatusRequest ([#1992](https://github.com/googleapis/java-storage/issues/1992)) ([e36f8f1](https://github.com/googleapis/java-storage/commit/e36f8f1822d1290b984a8ae57efedd0276ca39f5)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies 
to v3.8.0 ([#1995](https://github.com/googleapis/java-storage/issues/1995)) ([b7fb037](https://github.com/googleapis/java-storage/commit/b7fb0373955cf9d60ce46a3827152041c19e943e)) + +## [2.22.0](https://github.com/googleapis/java-storage/compare/v2.21.0...v2.22.0) (2023-04-12) + + +### Features + +* Add matchGlob parameter to BlobListOption ([#1965](https://github.com/googleapis/java-storage/issues/1965)) ([93be97a](https://github.com/googleapis/java-storage/commit/93be97a9ae061241f510cbeb566f9f839063faa0)) +* Handle generation numbers in BlobId#{to,from}GsUtilUri ([#1929](https://github.com/googleapis/java-storage/issues/1929)) ([0a033b3](https://github.com/googleapis/java-storage/commit/0a033b36a2be7e1d92c8b2f7323f2851005034cf)) +* Implement GrpcStorageImpl#{get,list,create,delete}Notification ([#1958](https://github.com/googleapis/java-storage/issues/1958)) ([830052b](https://github.com/googleapis/java-storage/commit/830052b3f26337f50770436784f0ea9f3366dbca)) +* Make it possible to disable the buffer of ReadChannels returned from Storage.reader ([#1974](https://github.com/googleapis/java-storage/issues/1974)) ([702ab2b](https://github.com/googleapis/java-storage/commit/702ab2bb1ceb9f428296591adc6e09023b4a8484)) + + +### Bug Fixes + +* Update GrpcStorageOptions to set allowNonDefaultServiceAccount = true ([#1977](https://github.com/googleapis/java-storage/issues/1977)) ([f1b9493](https://github.com/googleapis/java-storage/commit/f1b94930509c87ecbdbe6627fec6118305331780)) + + +### Documentation + +* Update spec for WriteObjectRequest.object_size to clarify that it applies to both one-shot and resumable writes ([#1976](https://github.com/googleapis/java-storage/issues/1976)) ([7354e19](https://github.com/googleapis/java-storage/commit/7354e198521384cd2b8583f073217a5a6cb74fe0)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.7.0 ([#1978](https://github.com/googleapis/java-storage/issues/1978)) 
([1f7e9c9](https://github.com/googleapis/java-storage/commit/1f7e9c9130a9516acfcdfda4b23e642fbe799146)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.21 ([#1979](https://github.com/googleapis/java-storage/issues/1979)) ([d6accdf](https://github.com/googleapis/java-storage/commit/d6accdf9c81419728c39a61e348a61683dc117ad)) + +## [2.21.0](https://github.com/googleapis/java-storage/compare/v2.20.2...v2.21.0) (2023-03-31) + + +### Features + +* Remove Bucket.retention_period, now that we've migrated to retention_duration ([#1955](https://github.com/googleapis/java-storage/issues/1955)) ([c0ffe7c](https://github.com/googleapis/java-storage/commit/c0ffe7c7cb50181cc375a945b9e79f97da9aa290)) + + +### Bug Fixes + +* Update bucket creation to set project as `bucket.project` ([#1912](https://github.com/googleapis/java-storage/issues/1912)) ([65993c0](https://github.com/googleapis/java-storage/commit/65993c0614987b96962ff28404de2aaea4ee679d)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.6.0 ([#1957](https://github.com/googleapis/java-storage/issues/1957)) ([af90f16](https://github.com/googleapis/java-storage/commit/af90f162ac038479a0f09fb45289b1aa258d1aec)) +* Update dependency net.jqwik:jqwik to v1.7.3 ([#1944](https://github.com/googleapis/java-storage/issues/1944)) ([a8b96c9](https://github.com/googleapis/java-storage/commit/a8b96c9a2da7fbdf9de5e15e833058117570c24e)) + + +### Documentation + +* Revert ChecksummedData message definition not to specify ctype=CORD, ([#1960](https://github.com/googleapis/java-storage/issues/1960)) ([3a2a643](https://github.com/googleapis/java-storage/commit/3a2a643ff389741efaad244bbabf494dcd9a799b)) +* Updated ChecksummedData message definition to specify ctype=CORD, and removed incorrect earlier attempt that set that annotation in the ReadObjectResponse message definition ([#1959](https://github.com/googleapis/java-storage/issues/1959)) 
([3d97bdd](https://github.com/googleapis/java-storage/commit/3d97bddc668deb2059261260125f94c6e32aac76)) + +## [2.20.2](https://github.com/googleapis/java-storage/compare/v2.20.1...v2.20.2) (2023-03-22) + + +### Bug Fixes + +* Add service_yaml_parameters to `java_gapic_library` targets ([#1933](https://github.com/googleapis/java-storage/issues/1933)) ([ceaeb3f](https://github.com/googleapis/java-storage/commit/ceaeb3fa25cdc10f82c870bf2e9cc27b6a91affd)) + + +### Documentation + +* Add clarification on non-editable metadata ([#1939](https://github.com/googleapis/java-storage/issues/1939)) ([df57705](https://github.com/googleapis/java-storage/commit/df57705bf37e430d66ff6c7be4ee4a6246d8616e)) + + +### Dependencies + +* Update dependency com.google.apis:google-api-services-storage to v1-rev20230301-2.0.0 ([#1935](https://github.com/googleapis/java-storage/issues/1935)) ([dbc7534](https://github.com/googleapis/java-storage/commit/dbc7534b86c0629d31d322356675e18494b7175b)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.5.0 ([#1940](https://github.com/googleapis/java-storage/issues/1940)) ([fc55e41](https://github.com/googleapis/java-storage/commit/fc55e41dc6b5802fca46c03cb8443528670eb47d)) + +## [2.20.1](https://github.com/googleapis/java-storage/compare/v2.20.0...v2.20.1) (2023-03-07) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.4.0 ([#1919](https://github.com/googleapis/java-storage/issues/1919)) ([fb42336](https://github.com/googleapis/java-storage/commit/fb42336145d19b0a0c73eddda4318b3c88015ad6)) + +## [2.20.0](https://github.com/googleapis/java-storage/compare/v2.19.0...v2.20.0) (2023-03-01) + + +### Features + +* Allow specifying a negative offset to ReadChannel ([#1916](https://github.com/googleapis/java-storage/issues/1916)) ([6df5469](https://github.com/googleapis/java-storage/commit/6df546945211a767f73190773ab2754bf7842e75)) + +## 
[2.19.0](https://github.com/googleapis/java-storage/compare/v2.18.0...v2.19.0) (2023-02-21) + + +### Features + +* Update routing annotation for CreateBucketRequest ([#1892](https://github.com/googleapis/java-storage/issues/1892)) ([581120f](https://github.com/googleapis/java-storage/commit/581120f00b9adc2d8d45ac038772685d7e134e61)) + + +### Bug Fixes + +* Update Default RetryStrategy to retry SSLException caused by SocketException ([#1900](https://github.com/googleapis/java-storage/issues/1900)) ([be2aba0](https://github.com/googleapis/java-storage/commit/be2aba0704bcdd55fdfeb233802dea532e6c7882)) +* Update GrpcStorageImpl handling to be aware of quotaProjectId ([#1877](https://github.com/googleapis/java-storage/issues/1877)) ([ca8510e](https://github.com/googleapis/java-storage/commit/ca8510e3c16a5cc2fac1499b1888af612aa8e5b2)), closes [#1736](https://github.com/googleapis/java-storage/issues/1736) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.3.0 ([#1907](https://github.com/googleapis/java-storage/issues/1907)) ([85e22d7](https://github.com/googleapis/java-storage/commit/85e22d785b3abef152b2b4df4d394a7477555b08)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.20 ([#1904](https://github.com/googleapis/java-storage/issues/1904)) ([95b9aef](https://github.com/googleapis/java-storage/commit/95b9aef5fd390a1497bfe6baed3a7200e44f13cf)) + +## [2.18.0](https://github.com/googleapis/java-storage/compare/v2.17.2...v2.18.0) (2023-02-06) + + +### Features + +* Add Storage.BlobWriteOption.{meta,}generation{Not,}Match(long) methods to allow literal value construction ([#1875](https://github.com/googleapis/java-storage/issues/1875)) ([a9fab09](https://github.com/googleapis/java-storage/commit/a9fab098e23c93887f0aae8a5de6e3477d5f0f0c)) +* Setting up 2.15.x ([#1880](https://github.com/googleapis/java-storage/issues/1880)) 
([1c88e83](https://github.com/googleapis/java-storage/commit/1c88e83974798d39a63ea4dd85d8f5bb9cb8f88c)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.2.0 ([#1886](https://github.com/googleapis/java-storage/issues/1886)) ([6928556](https://github.com/googleapis/java-storage/commit/6928556f0a0a61e8c68d9bf98403ce5b27a3bee2)) + +## [2.17.2](https://github.com/googleapis/java-storage/compare/v2.17.1...v2.17.2) (2023-01-23) + + +### Bug Fixes + +* Update BlobReadChannelV2 handling to correctly restart for decompressed object ([#1867](https://github.com/googleapis/java-storage/issues/1867)) ([93e8ed4](https://github.com/googleapis/java-storage/commit/93e8ed484224477cd1aeba24b3d84ed4003e9ee6)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.1.2 ([#1865](https://github.com/googleapis/java-storage/issues/1865)) ([ceb9a61](https://github.com/googleapis/java-storage/commit/ceb9a61921105f6c5dff80f4a1a41a0d3b51a06d)) + +## [2.17.1](https://github.com/googleapis/java-storage/compare/v2.17.0...v2.17.1) (2023-01-13) + + +### Bug Fixes + +* Update BaseStorageReadChannel to be left open unless explicitly closed ([#1853](https://github.com/googleapis/java-storage/issues/1853)) ([1425dd9](https://github.com/googleapis/java-storage/commit/1425dd97cb7d4a58f0bbededeca543f1a89c7d5d)) + +## [2.17.0](https://github.com/googleapis/java-storage/compare/v2.16.0...v2.17.0) (2023-01-12) + + +### Features + +* Implement GrpcStorageImpl BucketAccessControl operations ([#1816](https://github.com/googleapis/java-storage/issues/1816)) ([5c52079](https://github.com/googleapis/java-storage/commit/5c52079fb5f52caf39a49ccb96df6251a9c728d3)) +* Implement GrpcStorageImpl ObjectAccessControl operations ([#1818](https://github.com/googleapis/java-storage/issues/1818)) ([2eec791](https://github.com/googleapis/java-storage/commit/2eec791122bb1bb28a1ffb14beb7ce8776c5b5ec)) +* Implement 
GrpcStorageImpl#createDefaultAcl & GrpcStorageImpl#updateDefaultAcl ([#1806](https://github.com/googleapis/java-storage/issues/1806)) ([0f24a11](https://github.com/googleapis/java-storage/commit/0f24a11c5289a4c07f27d8a3c29fab34520b036f)) +* Implement GrpcStorageImpl#deleteDefaultAcl ([#1807](https://github.com/googleapis/java-storage/issues/1807)) ([c783277](https://github.com/googleapis/java-storage/commit/c78327717a7936492161ddcc64c86374db72c48c)) +* Implement GrpcStorageImpl#getDefaultAcl ([#1802](https://github.com/googleapis/java-storage/issues/1802)) ([b9b7c49](https://github.com/googleapis/java-storage/commit/b9b7c49fcfcab285da156b34b186a007150e876f)) +* Implement GrpcStorageImpl#listDefaultAcl ([#1805](https://github.com/googleapis/java-storage/issues/1805)) ([03c2e66](https://github.com/googleapis/java-storage/commit/03c2e6660721b4a8bfc09b241ef44f3e4e08865b)) +* Improve throughput of http based storage#reader between 100 MiB/s and 200 MiB/s ([#1799](https://github.com/googleapis/java-storage/issues/1799)) ([94cd288](https://github.com/googleapis/java-storage/commit/94cd2887f22f6d1bb82f9929b388c27c63353d77)) +* Update GrpcBlobReadChannel to allow seek/limit after read ([#1834](https://github.com/googleapis/java-storage/issues/1834)) ([45dc983](https://github.com/googleapis/java-storage/commit/45dc983a4af8e7feb937263ce611bd34eda37e03)) + + +### Bug Fixes + +* Add missing preconditions and update samples ([#1753](https://github.com/googleapis/java-storage/issues/1753)) ([96beca2](https://github.com/googleapis/java-storage/commit/96beca2465158fb4633d58fe09a9776a4b171811)) +* **grpc:** Fix bucket logging conversion to allow clearing ([#1822](https://github.com/googleapis/java-storage/issues/1822)) ([30e19dc](https://github.com/googleapis/java-storage/commit/30e19dc55c61917c3a73055091e9e6ca0744f172)) +* Update gRPC object list implementation to include synthetic directories ([#1824](https://github.com/googleapis/java-storage/issues/1824)) 
([0665c24](https://github.com/googleapis/java-storage/commit/0665c2473b5b1a18061d1e58382320ae55295520)) +* Update Grpc Write implementation to allow specifying expected md5 ([#1815](https://github.com/googleapis/java-storage/issues/1815)) ([4662572](https://github.com/googleapis/java-storage/commit/46625729b6fd62d8f133c3fb2d8ee00eb64ee8e9)) +* Update GrpcConversions to use Bucket.RetentionPolicy.retention_duration ([#1798](https://github.com/googleapis/java-storage/issues/1798)) ([82fb014](https://github.com/googleapis/java-storage/commit/82fb014508178e8ad3fd08e9efc757a8e47564da)) +* Update GrpcStorageImpl#update to support fine-grained update of BucketInfo.labels and BlobInfo.metadata ([#1843](https://github.com/googleapis/java-storage/issues/1843)) ([c8bf3c7](https://github.com/googleapis/java-storage/commit/c8bf3c70cca81ed87a52939fe7da58889c8f55ce)) + + +### Documentation + +* Document differing behavior of {get,list}{,default}Acl between HTTP and gRPC ([#1820](https://github.com/googleapis/java-storage/issues/1820)) ([9511b17](https://github.com/googleapis/java-storage/commit/9511b173e84d2b28ab1a1625b16e3e648c3856fb)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.1.1 ([#1836](https://github.com/googleapis/java-storage/issues/1836)) ([3b71fab](https://github.com/googleapis/java-storage/commit/3b71fab11ac71039c2a9983821ce02ce25ce311d)) +* Update dependency net.jqwik:jqwik to v1.7.2 ([#1833](https://github.com/googleapis/java-storage/issues/1833)) ([83bc261](https://github.com/googleapis/java-storage/commit/83bc261130e89e5994f21e32422054ef6ea2fe8e)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.9.2 ([#1837](https://github.com/googleapis/java-storage/issues/1837)) ([5b38184](https://github.com/googleapis/java-storage/commit/5b381845b4f48a691aa3f0cb96599ddefc7e463f)) +* Update junit-platform.version to v5.9.2 ([#1838](https://github.com/googleapis/java-storage/issues/1838)) 
([372521b](https://github.com/googleapis/java-storage/commit/372521ba80b12e52c74fae5ac766dbe6610ff0b2)) + +## [2.16.0](https://github.com/googleapis/java-storage/compare/v2.15.1...v2.16.0) (2022-12-06) + + +### Features + +* Add {Compose,Rewrite,StartResumableWrite}Request.object_checksums and Bucket.RetentionPolicy.retention_duration ([#1790](https://github.com/googleapis/java-storage/issues/1790)) ([31c1b18](https://github.com/googleapis/java-storage/commit/31c1b18acc3c118e39eb613a82ee292f3e246b8f)) +* Added a new retention_duration field of Duration type ([31c1b18](https://github.com/googleapis/java-storage/commit/31c1b18acc3c118e39eb613a82ee292f3e246b8f)) +* Added object_checksums for compose/rewrite/startResumableWrite request ([31c1b18](https://github.com/googleapis/java-storage/commit/31c1b18acc3c118e39eb613a82ee292f3e246b8f)) + + +### Bug Fixes + +* Removed WriteObject routing annotations ([31c1b18](https://github.com/googleapis/java-storage/commit/31c1b18acc3c118e39eb613a82ee292f3e246b8f)) + + +### Documentation + +* Clarified relative resource names in gRPC IAM RPCs ([31c1b18](https://github.com/googleapis/java-storage/commit/31c1b18acc3c118e39eb613a82ee292f3e246b8f)) +* Clarified the object can be deleted via DeleteObject ([31c1b18](https://github.com/googleapis/java-storage/commit/31c1b18acc3c118e39eb613a82ee292f3e246b8f)) +* Updated the document link for `Naming Guidelines` ([31c1b18](https://github.com/googleapis/java-storage/commit/31c1b18acc3c118e39eb613a82ee292f3e246b8f)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.1.0 ([#1797](https://github.com/googleapis/java-storage/issues/1797)) ([b1d0266](https://github.com/googleapis/java-storage/commit/b1d026608a5e3772e8bf77f25f1daf68b007427a)) +* Update dependency org.apache.httpcomponents:httpclient to v4.5.14 ([#1795](https://github.com/googleapis/java-storage/issues/1795)) 
([cf900f4](https://github.com/googleapis/java-storage/commit/cf900f4139f30f89e3c0784467ddc12cc00cf81c)) +* Update dependency org.apache.httpcomponents:httpcore to v4.4.16 ([#1786](https://github.com/googleapis/java-storage/issues/1786)) ([3bf403e](https://github.com/googleapis/java-storage/commit/3bf403e94c035e6cf936e062a1ced2b5221b3912)) +* Update dependency org.apache.httpcomponents:httpmime to v4.5.14 ([#1796](https://github.com/googleapis/java-storage/issues/1796)) ([c9ee3ca](https://github.com/googleapis/java-storage/commit/c9ee3ca8820531cd709bb8f8a58a736813346861)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.18 ([#1782](https://github.com/googleapis/java-storage/issues/1782)) ([5bc5176](https://github.com/googleapis/java-storage/commit/5bc517623ef04bdb9a71a51666754b9f753f4c69)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.19 ([#1791](https://github.com/googleapis/java-storage/issues/1791)) ([3184d65](https://github.com/googleapis/java-storage/commit/3184d65cce1368c2f39ff85a6ed02cf536902244)) + +## [2.15.1](https://github.com/googleapis/java-storage/compare/v2.15.0...v2.15.1) (2022-11-17) + + +### Bug Fixes + +* Disable REGAPIC transport in storage v2 ([#1762](https://github.com/googleapis/java-storage/issues/1762)) ([13d630e](https://github.com/googleapis/java-storage/commit/13d630e7ce89273c292acca7a7e048218ece4182)) +* Update GrpcStorageImpl#get(BlobId) to return null on 404 ([#1772](https://github.com/googleapis/java-storage/issues/1772)) ([8c59c64](https://github.com/googleapis/java-storage/commit/8c59c64ccf0dd7753467b4c0f0bcf5f4b49c5bf0)) + + +### Documentation + +* Annotate all Option factory methods with their Nullability bounds ([#1775](https://github.com/googleapis/java-storage/issues/1775)) ([3b8d137](https://github.com/googleapis/java-storage/commit/3b8d137a113376d7dac9010b9207d435df2622f7)) + +## [2.15.0](https://github.com/googleapis/java-storage/compare/v2.14.0...v2.15.0) (2022-11-07) + + 
+### Features + +* Add Autoclass support and sample ([#1697](https://github.com/googleapis/java-storage/issues/1697)) ([82aacd7](https://github.com/googleapis/java-storage/commit/82aacd7922573d6f4779f21cdc83de10616d7a08)) +* Update retries for Notifications ([#1734](https://github.com/googleapis/java-storage/issues/1734)) ([0fb2f18](https://github.com/googleapis/java-storage/commit/0fb2f1823f9eff8534f15240321003f120fed3f4)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.6 ([#1761](https://github.com/googleapis/java-storage/issues/1761)) ([803a90b](https://github.com/googleapis/java-storage/commit/803a90b7747b8972f51d1407616c51084d97c589)) +* Update dependency net.jqwik:jqwik to v1.7.1 ([#1758](https://github.com/googleapis/java-storage/issues/1758)) ([140e909](https://github.com/googleapis/java-storage/commit/140e90911229c876de7b674dd1e61b278e8b07fd)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.17 ([#1759](https://github.com/googleapis/java-storage/issues/1759)) ([7e3175a](https://github.com/googleapis/java-storage/commit/7e3175a56a06dac0aa0841f221a486bb69b5c9bf)) + +## [2.14.0](https://github.com/googleapis/java-storage/compare/v2.13.1...v2.14.0) (2022-10-26) + +### Google Cloud Storage gRPC API Preview +The first release of `google-cloud-storage` with support for a subset of the Google Cloud Storage gRPC API which is in private preview. The most common operations have all been implemented and are available for experimentation. + +Given not all public api surface of `google-cloud-storage` classes are supported for gRPC a new annotation `@TransportCompatibility` has been added to various classes, methods and fields/enum values to signal where that thing can be expected to work. As we implement more of the operations these annotations will be updated. 
+ +All new gRPC related APIs are annotated with `@BetaApi` to denote they are in preview and the possibility of breaking change is present. At this time, opting to use any of the gRPC transport modes means you are okay with the possibility of a breaking change happening. When the APIs are out of preview, we will remove the `@BetaApi` annotation to signal they are now considered stable and will not break outside a major version. + +**_NOTICE_**: Using the gRPC transport is exclusive. Any operations which have not yet been implemented for gRPC will result in a runtime error. For those operations which are not yet implemented, please continue to use the existing HTTP transport. + +Special thanks (in alphabetical order) to @BenWhitehead, @frankyn, @jesselovelace and @sydney-munro for their hard work on this effort. + +#### Notable Improvements +1. For all gRPC media related operations (upload/download) we are now more resource courteous than the corresponding HTTP counterpart. Buffers are fixed to their specified size (can't arbitrarily grow without bounds), are allocated lazily and only if necessary. + 1. Investigation into the possibility of backporting these improvements to the HTTP counterparts is ongoing + +2. Preview support for Accessing GCS via gRPC + 1. Set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true`, then run your program. + 2. When configuring your `StorageOptions` mimic the following: + ``` + StorageOptions.grpc() + .setAttemptDirectPath(true) + .build() + ``` + 3. Internally the default host endpoint `https://storage.googleapis.com:443` will be transformed to the applicable `google-c2p-experimental:///storage.googleapis.com` + +3. Support for `java.time` types on model classes + 1. Points in time are now represented with `java.time.OffsetDateTime`, while durations are represented with `java.time.Duration` + 2. 
All existing `Long` centric methods are still present, but have been deprecated in favor of their corresponding `java.time` variant + 3. At the next major version, these deprecated methods will be replaced with types from `java.time` and the `java.time` variant methods will be deprecated + +4. `com.google.cloud.storage.Storage` now extends `java.lang.AutoCloseable` thereby allowing it to be used in a try-with-resources block. + 1. When using gRPC transport be sure to call `Storage#close()` when complete so it can clean up the gRPC middleware and resources. + 2. When using HTTP transport calling `Storage#close()` will gracefully no-op, allowing for the same style of use regardless of transport. + +5. When downloading an object via gRPC idle stream detection is now present which will restart a stream if it is determined to be idle and has remaining retry budget +6. Update equals()/hashCode() methods to follow the expected contract +7. The new gRPC transport based implementation continues to provide idempotency aware automatic retries the same as HTTP +8. Expanded test suite which should bring improved stability and reliability to both HTTP and gRPC transport implementations +9. New `com.google.cloud:google-cloud-storage-bom` maven bom available to use for coordinated dependency version resolution for multiple storage artifacts + +#### Not yet implemented +1. All ACL specific operations. + 1. These will be implemented in the near future + 2. In the interim, reading and setting of ACLs and Default Object ACLs can be performed via Object/Bucket operations + +2. All Notification related operations + 1. These will be implemented in the near future + 2. In the interim, please continue to use the HTTP transport + +3. `ReadChannel#capture()`, `RestorableState#restore()`, `WriteChannel#capture()`, `RestorableState#restore()`, `CopyWriter#capture()` and `RestorableState#capture()` are not yet implemented. + * These use cases will be implemented in the near future. 
We are still determining the route we want to take. + +4. Batch and "bulk" operations which depend on batch + 1. GCS gRPC does not currently define a batch method whereas HTTP does. This means `Storage#batch()` is only supported for HTTP transport. + 2. The following methods which currently depend on `Storage#batch()` are currently only supported for HTTP transport + * `com.google.cloud.storage.Storage#get(com.google.cloud.storage.BlobId...)` + * `com.google.cloud.storage.Storage#get(java.lang.Iterable)` + * `com.google.cloud.storage.Storage#update(com.google.cloud.storage.BlobInfo...)` + * `com.google.cloud.storage.Storage#update(java.lang.Iterable)` + * `com.google.cloud.storage.Storage#delete(com.google.cloud.storage.BlobId...)` + * `com.google.cloud.storage.Storage#delete(java.lang.Iterable)` + +#### One-Time Inconveniences +1. All classes under `com.google.cloud.storage` which are `Serializable` have new `serialVersionUIDs` and are incompatible with any previous version. + 1. Several classes had to change in order to support both HTTP and gRPC at the same time. We were able to preserve Source and Binary runtime level compatibility but had to break Serialization across versions. + 2. If you depend upon Java Serialization, please ensure you are using the same version of `google-cloud-storage` in both locations. + +2. The cause chains of some Exceptions have changed. + 1. When using gRPC, `StorageException` causes will use the corresponding `com.google.api.gax.rpc.ApiException` for the failure type instead of the HTTP `com.google.api.client.googleapis.json.GoogleJsonError` + * In an effort to preserve compatibility of your existing error handling code, we will translate from the gRPC error code to the similar HTTP Status code before constructing the `StorageException` preserving the integrity of `StorageException#getCode()` + 2. 
RetryHelper$RetryHelperException will no longer appear in exception cause chains for either HTTP or gRPC + + +#### Not Supported +Given the nature of the gRPC transport a few things are explicitly not supported when using gRPC, and require HTTP transport. Attempting to use any of the following methods will result in a runtime error stating they are not supported for gRPC transport. +1. `Storage#writer(URL)` does not work for gRPC. gRPC does not provide a means of exchanging an HTTP url for a resumable session id +2. `Storage#signUrl` is not supported for gRPC transport. Signed URLs explicitly generate HTTP urls and are only supported for the HTTP transport based implementation. +3. `Storage#generateSignedPostPolicyV4` is not supported for gRPC transport. Signed URLs explicitly generate HTTP urls and are only supported for the HTTP transport based implementation. + +#### Known Issues +1. https://github.com/googleapis/java-storage/issues/1736 +2. https://github.com/googleapis/java-storage/issues/1737 + + +### Features + +* Initial preview implementation of Storage over gRPC ([#1740](https://github.com/googleapis/java-storage/issues/1740)) ([95d7ad9](https://github.com/googleapis/java-storage/commit/95d7ad9e5cd4106cadb23d9b2221419797fed5c1)) + + +### Bug Fixes + +* Properly implement GrpcBlobReadChannel#isOpen ([#1733](https://github.com/googleapis/java-storage/issues/1733)) ([04e5166](https://github.com/googleapis/java-storage/commit/04e51662d1f62d987e89ec4e221a3a02c02eaec1)) +* Update BucketInfo.LifecycleRule.LifecycleCondition equals and hashCode to include match prefix and suffix ([#1729](https://github.com/googleapis/java-storage/issues/1729)) ([9664e8a](https://github.com/googleapis/java-storage/commit/9664e8a776dc6c7c59e61c1e59333ba88731fe76)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.5 ([#1725](https://github.com/googleapis/java-storage/issues/1725)) 
([09bc225](https://github.com/googleapis/java-storage/commit/09bc22534bb2a401507fa894123affb432a84350)) + +## [2.13.1](https://github.com/googleapis/java-storage/compare/v2.13.0...v2.13.1) (2022-10-20) + + +### Bug Fixes + +* Avoid unexpected initialization of `JacksonParser` in Graal 22.2 ([#1709](https://github.com/googleapis/java-storage/issues/1709)) ([eca1a03](https://github.com/googleapis/java-storage/commit/eca1a03c5d7656179a04058a2af94300f1dc0930)) +* Update BucketInfo.LifecycleRule#fromPb to wire through MatchesPrefix & MatchesSuffix ([#1717](https://github.com/googleapis/java-storage/issues/1717)) ([eae3cf2](https://github.com/googleapis/java-storage/commit/eae3cf2735cd4adab598d311fd64b8906972ffc3)) + + +### Dependencies + +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.15 ([#1711](https://github.com/googleapis/java-storage/issues/1711)) ([f188a07](https://github.com/googleapis/java-storage/commit/f188a07b09e0787f958f714333337fc92657a824)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.16 ([#1721](https://github.com/googleapis/java-storage/issues/1721)) ([631b98d](https://github.com/googleapis/java-storage/commit/631b98df1a7d44358fbeacb95f2e2a3d590198d1)) + +## [2.13.0](https://github.com/googleapis/java-storage/compare/v2.12.0...v2.13.0) (2022-10-03) + + +### Features + +* Regenerate gapic storage v2 client as of googleapis/googleapis@844d0f7 ([#1673](https://github.com/googleapis/java-storage/issues/1673)) ([bd818b3](https://github.com/googleapis/java-storage/commit/bd818b311186c37ceb527630ed56755d89e2b71b)) + + +### Documentation + +* Add preconditions to some samples ([#1600](https://github.com/googleapis/java-storage/issues/1600)) ([4b3be44](https://github.com/googleapis/java-storage/commit/4b3be44bd4c53613f08bbd01d3b4a9ded419565d)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.4 ([#1685](https://github.com/googleapis/java-storage/issues/1685)) 
([3d8f550](https://github.com/googleapis/java-storage/commit/3d8f550e4d646a7d4bda6f9e74bc5996d573d5b4)) +* Update dependency gcp-releasetool to v1.8.9 ([#1684](https://github.com/googleapis/java-storage/issues/1684)) ([54a5159](https://github.com/googleapis/java-storage/commit/54a5159707efd6a2d4abd0273ab7c279f507005b)) +* Update dependency importlib-metadata to v4.13.0 ([#1682](https://github.com/googleapis/java-storage/issues/1682)) ([2487aff](https://github.com/googleapis/java-storage/commit/2487affad5a1adf1c39589d8f8a976c7f245b0c9)) +* Update dependency importlib-metadata to v5 ([#1683](https://github.com/googleapis/java-storage/issues/1683)) ([020fe8c](https://github.com/googleapis/java-storage/commit/020fe8ce7991e8c72135a2912f615ed50cc68fd4)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.14 ([#1643](https://github.com/googleapis/java-storage/issues/1643)) ([ab3a198](https://github.com/googleapis/java-storage/commit/ab3a198115ae637710982f838231d2d36c029486)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.9.1 ([#1635](https://github.com/googleapis/java-storage/issues/1635)) ([ba85ab6](https://github.com/googleapis/java-storage/commit/ba85ab6ce9304b4c426538d2bacc99ef73ed2b0e)) + +## [2.12.0](https://github.com/googleapis/java-storage/compare/v2.11.3...v2.12.0) (2022-09-15) + + +### Features + +* Add toString method for CustomPlacementConfig ([#1602](https://github.com/googleapis/java-storage/issues/1602)) ([51aca10](https://github.com/googleapis/java-storage/commit/51aca10fafe685ed9e7cb41bc4ae79be10feb080)) + + +### Documentation + +* Add batch sample ([#1559](https://github.com/googleapis/java-storage/issues/1559)) ([583bf73](https://github.com/googleapis/java-storage/commit/583bf73f5d58aa5d79fbaa12b24407c558235eed)) +* Document thread safety of library ([#1566](https://github.com/googleapis/java-storage/issues/1566)) 
([c740899](https://github.com/googleapis/java-storage/commit/c7408999e811ba917edb0c136432afa29075e0f2)) +* Fix broken links in readme ([#1520](https://github.com/googleapis/java-storage/issues/1520)) ([840b08a](https://github.com/googleapis/java-storage/commit/840b08a03fa7c0535855140244c282f79403b458)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.2 ([#1611](https://github.com/googleapis/java-storage/issues/1611)) ([8a48aea](https://github.com/googleapis/java-storage/commit/8a48aea7e0049c64ef944b532a2874115b1e2323)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.3 ([#1620](https://github.com/googleapis/java-storage/issues/1620)) ([20e6378](https://github.com/googleapis/java-storage/commit/20e63785462e7876a7ff0ceeeea1363007cc160f)) + +## [2.11.3](https://github.com/googleapis/java-storage/compare/v2.11.2...v2.11.3) (2022-08-08) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-pubsub to v1.120.11 ([#1551](https://github.com/googleapis/java-storage/issues/1551)) ([0fc2d33](https://github.com/googleapis/java-storage/commit/0fc2d336c536deb3efe95e770d47fc898af12323)) + +## [2.11.2](https://github.com/googleapis/java-storage/compare/v2.11.1...v2.11.2) (2022-08-04) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-pubsub to v1.120.10 ([#1543](https://github.com/googleapis/java-storage/issues/1543)) ([8ff1dff](https://github.com/googleapis/java-storage/commit/8ff1dffb07141411daaf49ac9af570a1654eb2da)) + +## [2.11.1](https://github.com/googleapis/java-storage/compare/v2.11.0...v2.11.1) (2022-08-04) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-pubsub to v1.120.9 ([#1537](https://github.com/googleapis/java-storage/issues/1537)) ([ae17737](https://github.com/googleapis/java-storage/commit/ae17737342e262f7afd404e4c758a7fbf5530da5)) + +## 
[2.11.0](https://github.com/googleapis/java-storage/compare/v2.10.0...v2.11.0) (2022-08-03) + + +### Features + +* introduce RPC CancelResumableWriteRequest ([#1518](https://github.com/googleapis/java-storage/issues/1518)) ([f8811c6](https://github.com/googleapis/java-storage/commit/f8811c654109516116bbbe142f4b27ec7f63b5fb)) + + +### Documentation + +* Refactor Custom Dual Region sample to work with API changes ([#1516](https://github.com/googleapis/java-storage/issues/1516)) ([a60cace](https://github.com/googleapis/java-storage/commit/a60caced9584855f12cdb7cac8ad7606ba32a60a)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-pubsub to v1.120.5 ([#1527](https://github.com/googleapis/java-storage/issues/1527)) ([77072e3](https://github.com/googleapis/java-storage/commit/77072e3835bbeb802299fec1359c5f3ca13e3c8a)) +* update dependency com.google.cloud:google-cloud-pubsub to v1.120.7 ([#1530](https://github.com/googleapis/java-storage/issues/1530)) ([8c2ebad](https://github.com/googleapis/java-storage/commit/8c2ebad79affb766d842615aa30cc2a3dbe7d0de)) +* update dependency com.google.cloud:google-cloud-pubsub to v1.120.8 ([#1534](https://github.com/googleapis/java-storage/issues/1534)) ([723b100](https://github.com/googleapis/java-storage/commit/723b10014cafae257fa75d9c0bfd3fb0c34b9943)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v3 ([#1526](https://github.com/googleapis/java-storage/issues/1526)) ([f7fc0d2](https://github.com/googleapis/java-storage/commit/f7fc0d2f301bd22e96a0b43f8657cc738f49f278)) +* update dependency org.junit.vintage:junit-vintage-engine to v5.9.0 ([#1524](https://github.com/googleapis/java-storage/issues/1524)) ([0e74093](https://github.com/googleapis/java-storage/commit/0e74093f3dffd829f8fb9f2525c1502e1910fbe6)) + +## [2.10.0](https://github.com/googleapis/java-storage/compare/v2.9.3...v2.10.0) (2022-07-13) + + +### Features + +* Custom Placement Config Dual Region Support 
([#1470](https://github.com/googleapis/java-storage/issues/1470)) ([36440fc](https://github.com/googleapis/java-storage/commit/36440fc587976acc78f4dae480214b80ad3fc477)) + + +### Bug Fixes + +* enable longpaths support for windows test ([#1485](https://github.com/googleapis/java-storage/issues/1485)) ([#1506](https://github.com/googleapis/java-storage/issues/1506)) ([e36e809](https://github.com/googleapis/java-storage/commit/e36e80934644ccd5e873754c625367f24e5fc7f3)) + + +### Documentation + +* **sample:** upgrade maven-surefire and remove junit-platform-native ([#1476](https://github.com/googleapis/java-storage/issues/1476)) ([9b222e4](https://github.com/googleapis/java-storage/commit/9b222e483937bd0639123899442c1e863683718a)) + + +### Dependencies + +* update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.13 ([#1501](https://github.com/googleapis/java-storage/issues/1501)) ([36e3898](https://github.com/googleapis/java-storage/commit/36e389857ea3f86350af5763c829e5f1caaaa3ec)) + +## [2.9.3](https://github.com/googleapis/java-storage/compare/v2.9.2...v2.9.3) (2022-07-08) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20220705-1.32.1 ([#1494](https://github.com/googleapis/java-storage/issues/1494)) ([5612f0f](https://github.com/googleapis/java-storage/commit/5612f0f929582c9e720d67683f9441d96080fb14)) + +## [2.9.2](https://github.com/googleapis/java-storage/compare/v2.9.1...v2.9.2) (2022-07-06) + + +### Dependencies + +* update kms.version to v0.97.0 ([#1488](https://github.com/googleapis/java-storage/issues/1488)) ([852cafe](https://github.com/googleapis/java-storage/commit/852cafe7db018a6fd666c3490c96eeca34c4a1ed)) + +## [2.9.1](https://github.com/googleapis/java-storage/compare/v2.9.0...v2.9.1) (2022-07-06) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-pubsub to v1.120.0 ([#1487](https://github.com/googleapis/java-storage/issues/1487)) 
([5a8f6e5](https://github.com/googleapis/java-storage/commit/5a8f6e5f29e4dfb2dc03cad8c77e4b504bc83019)) + +## [2.9.0](https://github.com/googleapis/java-storage/compare/v2.8.1...v2.9.0) (2022-06-27) + + +### Features + +* Enable REST transport for most of Java and Go clients ([#1469](https://github.com/googleapis/java-storage/issues/1469)) ([cfb6fd8](https://github.com/googleapis/java-storage/commit/cfb6fd8c1f13cb2115e5ff7ea7af41a9e27986b1)) + + +### Documentation + +* Clarified how clients should work with resumable uploads ([#1457](https://github.com/googleapis/java-storage/issues/1457)) ([15baa27](https://github.com/googleapis/java-storage/commit/15baa27c97aacca115981f5c14d942a8ddd6cb1f)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.13.0 ([#1468](https://github.com/googleapis/java-storage/issues/1468)) ([1a817fe](https://github.com/googleapis/java-storage/commit/1a817fef87e626e74dab47eaff33c1d9d3c56bb5)) +* update dependency org.graalvm.buildtools:junit-platform-native to v0.9.12 ([#1463](https://github.com/googleapis/java-storage/issues/1463)) ([d42c570](https://github.com/googleapis/java-storage/commit/d42c5704e4fe7341dfdd0562fa0bdbcd4bc99c7e)) +* update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.12 ([#1464](https://github.com/googleapis/java-storage/issues/1464)) ([2f35a4f](https://github.com/googleapis/java-storage/commit/2f35a4f597870a4506119946510ba82fbb7f9449)) +* update kms.version to v0.96.3 ([#1471](https://github.com/googleapis/java-storage/issues/1471)) ([ceb7fa6](https://github.com/googleapis/java-storage/commit/ceb7fa665c35f6b00aebaa665246597b9ee4632a)) + +## [2.8.1](https://github.com/googleapis/java-storage/compare/v2.8.0...v2.8.1) (2022-06-13) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20220608-1.32.1 ([#1448](https://github.com/googleapis/java-storage/issues/1448)) 
([96676cd](https://github.com/googleapis/java-storage/commit/96676cd830aca27c23c08e02e8cc7c58dece686c)) + +## [2.8.0](https://github.com/googleapis/java-storage/compare/v2.7.2...v2.8.0) (2022-06-08) + + +### Features + +* Prefix/Suffix Matches Lifecycle Condition ([#1389](https://github.com/googleapis/java-storage/issues/1389)) ([20c8848](https://github.com/googleapis/java-storage/commit/20c88489d80d716da28f78fed628b54345f32ca4)) +* Support AbortIncompleteMultipartUpload LifecycleAction ([#1347](https://github.com/googleapis/java-storage/issues/1347)) ([7c3aba2](https://github.com/googleapis/java-storage/commit/7c3aba2f0a26ac550e4f37f9287ed6b041d75919)) + + +### Bug Fixes + +* update request method of HttpStorageRpc to properly configure offset on requests ([#1434](https://github.com/googleapis/java-storage/issues/1434)) ([72dc0df](https://github.com/googleapis/java-storage/commit/72dc0dff20d76875401dac721c0268c32e475e39)) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20220604-1.32.1 ([#1438](https://github.com/googleapis/java-storage/issues/1438)) ([df8fcd9](https://github.com/googleapis/java-storage/commit/df8fcd9925ef06c91ebebe0a3a7b5aedeb15ec4d)) +* update dependency com.google.cloud:google-cloud-pubsub to v1.119.0 ([#1426](https://github.com/googleapis/java-storage/issues/1426)) ([93ba28c](https://github.com/googleapis/java-storage/commit/93ba28cff16d428e0222078dc60dbf49fda7632a)) + +### [2.7.2](https://github.com/googleapis/java-storage/compare/v2.7.1...v2.7.2) (2022-05-27) + + +### Dependencies + +* update kms.version to v0.96.1 ([#1418](https://github.com/googleapis/java-storage/issues/1418)) ([d2f325b](https://github.com/googleapis/java-storage/commit/d2f325b3d31ac5da367873be1fb530fb6356036a)) + +### [2.7.1](https://github.com/googleapis/java-storage/compare/v2.7.0...v2.7.1) (2022-05-24) + + +### Dependencies + +* update kms.version to v0.96.0 
([#1408](https://github.com/googleapis/java-storage/issues/1408)) ([7501ffc](https://github.com/googleapis/java-storage/commit/7501ffc97d5a7943d3852ea26133b6c62cbbff1f)) + +## [2.7.0](https://github.com/googleapis/java-storage/compare/v2.6.1...v2.7.0) (2022-05-24) + + +### Features + +* add build scripts for native image testing in Java 17 ([#1440](https://github.com/googleapis/java-storage/issues/1440)) ([#1400](https://github.com/googleapis/java-storage/issues/1400)) ([274a373](https://github.com/googleapis/java-storage/commit/274a3733b72d2aa1e2916edf40a72c013aaf1711)) +* add Storage#downloadTo ([#1354](https://github.com/googleapis/java-storage/issues/1354)) ([5a565a7](https://github.com/googleapis/java-storage/commit/5a565a74cd6aaa85ed81a8cea026477512fbd5da)) +* change GCS gRPC API to get user billing project from gRPC metadata instead of CommonRequestParams, and remove latter ([#1396](https://github.com/googleapis/java-storage/issues/1396)) ([8a7755c](https://github.com/googleapis/java-storage/commit/8a7755cc8352b3ab21c252885fb86576474d7f09)) + + +### Documentation + +* add new storage_download_byte_range samples ([#1325](https://github.com/googleapis/java-storage/issues/1325)) ([cef3d13](https://github.com/googleapis/java-storage/commit/cef3d138fd11762437ac59adee6a198139acb7f5)) +* **sample:** removing unnecessary native-image-support dependency ([#1373](https://github.com/googleapis/java-storage/issues/1373)) ([3a246ef](https://github.com/googleapis/java-storage/commit/3a246ef4f0a75e52734df52772d34547632ab85f)) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20220509-1.32.1 ([#1386](https://github.com/googleapis/java-storage/issues/1386)) ([4e93c8e](https://github.com/googleapis/java-storage/commit/4e93c8e6f3c8259968a3dd35a15e752a81491af2)) +* update dependency com.google.cloud:google-cloud-pubsub to v1.116.4 ([#1360](https://github.com/googleapis/java-storage/issues/1360)) 
([66c7ffe](https://github.com/googleapis/java-storage/commit/66c7ffe112242915165286a972c44fc2568b67c8)) +* update dependency com.google.cloud:google-cloud-pubsub to v1.117.0 ([#1382](https://github.com/googleapis/java-storage/issues/1382)) ([0cd01a0](https://github.com/googleapis/java-storage/commit/0cd01a0eb498a994c330cc985c21b3248ecba8fa)) +* update dependency com.google.cloud:google-cloud-pubsub to v1.118.0 ([#1397](https://github.com/googleapis/java-storage/issues/1397)) ([fc0c187](https://github.com/googleapis/java-storage/commit/fc0c187096058f84a2f73704b29457c5c6d744fe)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.11.0 ([#1398](https://github.com/googleapis/java-storage/issues/1398)) ([8834423](https://github.com/googleapis/java-storage/commit/8834423f8772310b1a99aa393095e319a4169307)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.12.0 ([#1402](https://github.com/googleapis/java-storage/issues/1402)) ([32cded4](https://github.com/googleapis/java-storage/commit/32cded493442ed5e7b524cd2478e97f00fd90c3c)) +* update kms.version to v0.95.4 ([#1361](https://github.com/googleapis/java-storage/issues/1361)) ([2f42ba2](https://github.com/googleapis/java-storage/commit/2f42ba296bf5ace92159ee02885eaf0e4d9c9864)) + +### [2.6.1](https://github.com/googleapis/java-storage/compare/v2.6.0...v2.6.1) (2022-04-15) + + +### Bug Fixes + +* add gccl-invocation-id interceptor ([#1309](https://github.com/googleapis/java-storage/issues/1309)) ([335c267](https://github.com/googleapis/java-storage/commit/335c2679b70f0bcd4db895d9cb4cbe97175e8070)) +* **java:** add service account email to Native Image testing kokoro job ([#1348](https://github.com/googleapis/java-storage/issues/1348)) ([9f76fcc](https://github.com/googleapis/java-storage/commit/9f76fccfddcc0d3a671ec4281dab303da07b9959)) + + +### Documentation + +* Adding PubSub Notification Samples ([#1317](https://github.com/googleapis/java-storage/issues/1317)) 
([fa9920d](https://github.com/googleapis/java-storage/commit/fa9920d9097cfe6863c3e733a091c9b867d603ef)) +* Adding Samples for Creating Dual Region Buckets ([#1341](https://github.com/googleapis/java-storage/issues/1341)) ([9396061](https://github.com/googleapis/java-storage/commit/9396061ac71f98efd6784c34da3bbea04f48873d)) +* Update CreateBucketPubSubNotification to have line without horizontal scrolling ([#1335](https://github.com/googleapis/java-storage/issues/1335)) ([09b7842](https://github.com/googleapis/java-storage/commit/09b78424f7090c7c0469709a357a06143668e31b)) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20220401-1.32.1 ([#1337](https://github.com/googleapis/java-storage/issues/1337)) ([a5050e2](https://github.com/googleapis/java-storage/commit/a5050e230a620ba727a93c0a93f0bf82a011ce99)) +* update dependency com.google.cloud:google-cloud-pubsub to v1.116.3 ([#1327](https://github.com/googleapis/java-storage/issues/1327)) ([9d8c520](https://github.com/googleapis/java-storage/commit/9d8c520acca7f56f5af46348bc1db71bda1f93aa)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.10.0 ([#1352](https://github.com/googleapis/java-storage/issues/1352)) ([ab46f98](https://github.com/googleapis/java-storage/commit/ab46f985768c1539babf4c14a7e030083776ce0e)) +* update dependency com.google.cloud:native-image-support to v0.12.11 ([#1319](https://github.com/googleapis/java-storage/issues/1319)) ([c338c54](https://github.com/googleapis/java-storage/commit/c338c54210940dbe3b97aa0e7b13904e72ede91d)) +* update dependency com.google.cloud:native-image-support to v0.13.1 ([#1353](https://github.com/googleapis/java-storage/issues/1353)) ([0f76d27](https://github.com/googleapis/java-storage/commit/0f76d2773bc159b1c8a9eeddb54ae8406da86e7a)) +* update kms.version to v0.95.2 ([#1331](https://github.com/googleapis/java-storage/issues/1331)) 
([2ca4883](https://github.com/googleapis/java-storage/commit/2ca488362ce2cb2b620ed6dc846d76b095d44a31)) +* update kms.version to v0.95.3 ([#1346](https://github.com/googleapis/java-storage/issues/1346)) ([a4f9503](https://github.com/googleapis/java-storage/commit/a4f95038e56ac36badf68edd072705621fec1dbe)) + +## [2.6.0](https://github.com/googleapis/java-storage/compare/v2.5.1...v2.6.0) (2022-03-30) + + +### Features + +* replace enum with string representation for predefined ACLs and public_access_prevention ([#1323](https://github.com/googleapis/java-storage/issues/1323)) ([4dd1a88](https://github.com/googleapis/java-storage/commit/4dd1a8800317343bb0cd575864683e580f9ccd29)) + + +### Bug Fixes + +* **java:** add configurations for Storage tests ([#1305](https://github.com/googleapis/java-storage/issues/1305)) ([2bacf92](https://github.com/googleapis/java-storage/commit/2bacf92799e8a0fbdc1b5cfcfc6ef8d806a53fa3)) +* update boundary checking of BlobReadChannel when limit() is used ([#1324](https://github.com/googleapis/java-storage/issues/1324)) ([f21f624](https://github.com/googleapis/java-storage/commit/f21f624f1645b5ada350c04c774f9f113e76e971)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.9.0 ([#1321](https://github.com/googleapis/java-storage/issues/1321)) ([f48d8dd](https://github.com/googleapis/java-storage/commit/f48d8dd09e918ba4a54fccaebf65feaba4f6e206)) + + +### Documentation + +* adjust retry settings for hmac samples ([#1303](https://github.com/googleapis/java-storage/issues/1303)) ([d0c5361](https://github.com/googleapis/java-storage/commit/d0c5361e9e4996f8a99754381e5a28a843e6de2a)) + +### [2.5.1](https://github.com/googleapis/java-storage/compare/v2.5.0...v2.5.1) (2022-03-28) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-pubsub to v1.116.2 ([#1310](https://github.com/googleapis/java-storage/issues/1310)) 
([fb64493](https://github.com/googleapis/java-storage/commit/fb644932d4350b4e33481abda8cc8f498f9da85e)) + +## [2.5.0](https://github.com/googleapis/java-storage/compare/v2.4.5...v2.5.0) (2022-03-25) + + +### Features + +* allow limiting ReadChannel ([#1180](https://github.com/googleapis/java-storage/issues/1180)) ([2898ee8](https://github.com/googleapis/java-storage/commit/2898ee88545a93916d55c969fd0838e4fc703912)) +* expose the methods of Notifications ([#399](https://github.com/googleapis/java-storage/issues/399)) ([0bd17b1](https://github.com/googleapis/java-storage/commit/0bd17b1f70e47081941a44f018e3098b37ba2c47)) + + +### Documentation + +* Adding Samples for printing all Acls for a file and for a specific user ([#1288](https://github.com/googleapis/java-storage/issues/1288)) ([32fe388](https://github.com/googleapis/java-storage/commit/32fe388c8733cb237fc2a5b4676e36df76ef0dff)) +* Copy all storage samples from java-docs-samples ([#1258](https://github.com/googleapis/java-storage/issues/1258)) ([48b99be](https://github.com/googleapis/java-storage/commit/48b99beb692f529bea7e3de7ea5a36606876d96a)) +* **sample:** Add Native Image sample for Storage ([#1283](https://github.com/googleapis/java-storage/issues/1283)) ([375874d](https://github.com/googleapis/java-storage/commit/375874d38fc46bfec2df4c58e7d661c4f1f6c486)) + + +### Dependencies + +* allow snapshot to update properly ([#1311](https://github.com/googleapis/java-storage/issues/1311)) ([a5d32f2](https://github.com/googleapis/java-storage/commit/a5d32f2945247f76a21b030300a6e037084231b5)) +* update dependency com.google.cloud:native-image-support to v0.12.10 ([#1295](https://github.com/googleapis/java-storage/issues/1295)) ([3b3ecce](https://github.com/googleapis/java-storage/commit/3b3ecce262a3c7c95fbf0ddf3a5830a116022053)) +* update dependency org.graalvm.buildtools:junit-platform-native to v0.9.10 ([#1296](https://github.com/googleapis/java-storage/issues/1296)) 
([6f1b142](https://github.com/googleapis/java-storage/commit/6f1b1423d1de6aef9aedbf6b89ce42bbc72196e9)) +* update dependency org.graalvm.buildtools:junit-platform-native to v0.9.11 ([#1306](https://github.com/googleapis/java-storage/issues/1306)) ([1527ba0](https://github.com/googleapis/java-storage/commit/1527ba0abad38acd55542ce92214d5c66a9c62ee)) +* update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.10 ([#1297](https://github.com/googleapis/java-storage/issues/1297)) ([3f64f11](https://github.com/googleapis/java-storage/commit/3f64f117be7b7150a7a89c5240f09350d1add578)) +* update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.11 ([#1307](https://github.com/googleapis/java-storage/issues/1307)) ([e45ae07](https://github.com/googleapis/java-storage/commit/e45ae0783bd9e0bea7e8accee1437dee4e974333)) + +### [2.4.5](https://github.com/googleapis/java-storage/compare/v2.4.4...v2.4.5) (2022-03-04) + + +### Documentation + +* Adding Samples for Adding/Removing File Owners ([#1273](https://github.com/googleapis/java-storage/issues/1273)) ([6fad19c](https://github.com/googleapis/java-storage/commit/6fad19c184d108f30c85f62426d254a9f0ff715d)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.8.0 ([#1284](https://github.com/googleapis/java-storage/issues/1284)) ([0f71ae4](https://github.com/googleapis/java-storage/commit/0f71ae41fbabf6a3f38674a2f68fb55bd9809595)) +* update kms.version to v0.95.1 ([#1287](https://github.com/googleapis/java-storage/issues/1287)) ([8334d3c](https://github.com/googleapis/java-storage/commit/8334d3cb1f527b00ee8f19583dcf112f4f1b08ac)) + +### [2.4.4](https://github.com/googleapis/java-storage/compare/v2.4.3...v2.4.4) (2022-02-28) + + +### Dependencies + +* update actions/setup-java action to v3 ([#1274](https://github.com/googleapis/java-storage/issues/1274)) ([d29d19a](https://github.com/googleapis/java-storage/commit/d29d19a9936164e0ffe4d2f5fa14739a807369f6)) + +### 
[2.4.3](https://github.com/googleapis/java-storage/compare/v2.4.2...v2.4.3) (2022-02-25) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20220210-1.32.1 ([#1269](https://github.com/googleapis/java-storage/issues/1269)) ([b3933be](https://github.com/googleapis/java-storage/commit/b3933be393bcb8850e39635d28211159a3d0a091)) + + +### Documentation + +* Adding Samples for Add/Remove Bucket Default Owner ([#1260](https://github.com/googleapis/java-storage/issues/1260)) ([7223626](https://github.com/googleapis/java-storage/commit/7223626481930bf4442a04ccf49536f7f9e5fd32)) +* Adding Samples for Add/Remove Bucket Owner ([#1272](https://github.com/googleapis/java-storage/issues/1272)) ([9d25fa9](https://github.com/googleapis/java-storage/commit/9d25fa986ec6116eeb16ac5773b46e7fdbe10647)) +* Adding Samples for Printing Bucket ACLs and Printing Bucket ACL for a specific user. ([#1236](https://github.com/googleapis/java-storage/issues/1236)) ([d82333b](https://github.com/googleapis/java-storage/commit/d82333b01eadd9afd0c9d58455f86bc6457c99e3)) + +### [2.4.2](https://github.com/googleapis/java-storage/compare/v2.4.1...v2.4.2) (2022-02-11) + + +### Dependencies + +* update actions/github-script action to v6 ([#1241](https://github.com/googleapis/java-storage/issues/1241)) ([366d738](https://github.com/googleapis/java-storage/commit/366d7385c4f6ac5c7478ea71cf0f7f1546ad4607)) + +### [2.4.1](https://github.com/googleapis/java-storage/compare/v2.4.0...v2.4.1) (2022-02-08) + + +### Dependencies + +* update kms.version to v0.95.0 ([#1224](https://github.com/googleapis/java-storage/issues/1224)) ([5700c54](https://github.com/googleapis/java-storage/commit/5700c544da904bca75bf42314b150f109771f719)) + +## [2.4.0](https://github.com/googleapis/java-storage/compare/v2.3.0...v2.4.0) (2022-02-03) + + +### Features + +* Change RewriteObjectRequest to specify bucket name, object name and KMS key outside of Object resource 
([#1218](https://github.com/googleapis/java-storage/issues/1218)) ([8789e4f](https://github.com/googleapis/java-storage/commit/8789e4f73a3c5b36aa93246d172d07adb24027aa)) +* re-generate gapic client to include full GCS gRPC API ([#1189](https://github.com/googleapis/java-storage/issues/1189)) ([3099a22](https://github.com/googleapis/java-storage/commit/3099a2264d8b135f602d8dd06f3e91ac5b0ecdba)) +* Update definition of RewriteObjectRequest to bring to parity with JSON API support ([#1220](https://github.com/googleapis/java-storage/issues/1220)) ([7845c0e](https://github.com/googleapis/java-storage/commit/7845c0e8be5ba150f5e835172e9341ef2efc6054)) + + +### Bug Fixes + +* Remove post policy v4 client side validation ([#1210](https://github.com/googleapis/java-storage/issues/1210)) ([631741d](https://github.com/googleapis/java-storage/commit/631741df96a6dddd31a38dce099f3d3ff09ca7cf)) + + +### Dependencies + +* **java:** update actions/github-script action to v5 ([#1339](https://github.com/googleapis/java-storage/issues/1339)) ([#1215](https://github.com/googleapis/java-storage/issues/1215)) ([deb110b](https://github.com/googleapis/java-storage/commit/deb110b0b5ec4a7e6963d1c1ab0e63ca58240ae1)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.7.0 ([#1219](https://github.com/googleapis/java-storage/issues/1219)) ([623e68b](https://github.com/googleapis/java-storage/commit/623e68b8b678df425730b6472cf34d7b78841757)) + +## [2.3.0](https://github.com/googleapis/java-storage/compare/v2.2.3...v2.3.0) (2022-01-12) + + +### Features + +* Add RPO metadata settings ([#1105](https://github.com/googleapis/java-storage/issues/1105)) ([6f9dfdf](https://github.com/googleapis/java-storage/commit/6f9dfdfdbf9f1466839a17ef97489f207f18bec6)) + + +### Bug Fixes + +* **java:** run Maven in plain console-friendly mode ([#1301](https://github.com/googleapis/java-storage/issues/1301)) ([#1186](https://github.com/googleapis/java-storage/issues/1186)) 
([1e55dba](https://github.com/googleapis/java-storage/commit/1e55dba4cd5111472b9bb05db08ba7e47fafe762)) +* Remove all client side validation for OLM, allow nonspecific lif… ([#1160](https://github.com/googleapis/java-storage/issues/1160)) ([5a160ee](https://github.com/googleapis/java-storage/commit/5a160eee2b80e3d392df9d73dfc30ca9cd665764)) + + +### Dependencies + +* update dependency org.easymock:easymock to v4 ([#1198](https://github.com/googleapis/java-storage/issues/1198)) ([558520f](https://github.com/googleapis/java-storage/commit/558520f35ed64f0b36f7f8ada4491023a0fb759e)) +* update kms.version to v0.94.1 ([#1195](https://github.com/googleapis/java-storage/issues/1195)) ([cc999b1](https://github.com/googleapis/java-storage/commit/cc999b1ebaba051524ce6131052c824232ccb79a)) + +### [2.2.3](https://www.github.com/googleapis/java-storage/compare/v2.2.2...v2.2.3) (2022-01-07) + + +### Bug Fixes + +* do not cause a failure when encountering no bindings ([#1177](https://www.github.com/googleapis/java-storage/issues/1177)) ([16c2aef](https://www.github.com/googleapis/java-storage/commit/16c2aef4f09eccee59d1028e3bbf01c65b5982d6)) +* **java:** add -ntp flag to native image testing command ([#1169](https://www.github.com/googleapis/java-storage/issues/1169)) ([b8a6395](https://www.github.com/googleapis/java-storage/commit/b8a6395fcaa34423d42a90bd42f71809f89a6c3b)) +* update retry handling to retry idempotent requests that encounter unexpected EOF while parsing json responses ([#1155](https://www.github.com/googleapis/java-storage/issues/1155)) ([8fbe6ef](https://www.github.com/googleapis/java-storage/commit/8fbe6efab969d699e9ba9e5448db7a6ee10c0572)) + + +### Documentation + +* add new sample storage_configure_retries ([#1152](https://www.github.com/googleapis/java-storage/issues/1152)) ([8634c4b](https://www.github.com/googleapis/java-storage/commit/8634c4b5cb88d2818378558427170ecf6c403df5)) +* update comments 
([#1188](https://www.github.com/googleapis/java-storage/issues/1188)) ([d58e67c](https://www.github.com/googleapis/java-storage/commit/d58e67c217f38ca7b1926882ec48bd7b0c351ea7)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.6.0 ([#1191](https://www.github.com/googleapis/java-storage/issues/1191)) ([3b384cf](https://www.github.com/googleapis/java-storage/commit/3b384cf46876610ce33f2842ee8e9fc13e08443c)) +* update dependency org.apache.httpcomponents:httpcore to v4.4.15 ([#1171](https://www.github.com/googleapis/java-storage/issues/1171)) ([57f7a74](https://www.github.com/googleapis/java-storage/commit/57f7a743ee042c52261cd388fb0aec48c84e5d32)) + +### [2.2.2](https://www.github.com/googleapis/java-storage/compare/v2.2.1...v2.2.2) (2021-12-06) + + +### Bug Fixes + +* update StorageOptions to not overwrite any previously set host ([#1142](https://www.github.com/googleapis/java-storage/issues/1142)) ([05375c0](https://www.github.com/googleapis/java-storage/commit/05375c0b9b6f9fde2e6cefb1af6a695aa3b01732)) + + +### Documentation + +* Add comments to GCS gRPC API proto spec to describe how naming work ([#1139](https://www.github.com/googleapis/java-storage/issues/1139)) ([417c525](https://www.github.com/googleapis/java-storage/commit/417c5250eb7ad1a7b04a055a39d72e6536a63e18)) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20211201-1.32.1 ([#1165](https://www.github.com/googleapis/java-storage/issues/1165)) ([9031836](https://www.github.com/googleapis/java-storage/commit/90318368e69d7677c49e985eb58ff1b61d878ec9)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.5.1 ([#1163](https://www.github.com/googleapis/java-storage/issues/1163)) ([feca2c6](https://www.github.com/googleapis/java-storage/commit/feca2c6342786ef3fb699c459067c015bd374a13)) +* update kms.version to v0.94.0 ([#1164](https://www.github.com/googleapis/java-storage/issues/1164)) 
([8653783](https://www.github.com/googleapis/java-storage/commit/86537836a3b96f369e1cad59c692d350047414f7)) + +### [2.2.1](https://www.github.com/googleapis/java-storage/compare/v2.2.0...v2.2.1) (2021-11-15) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.5.0 ([#1146](https://www.github.com/googleapis/java-storage/issues/1146)) ([a5d13a9](https://www.github.com/googleapis/java-storage/commit/a5d13a97bae50b4ee8a2fcef180ddc26b77e3d16)) + +## [2.2.0](https://www.github.com/googleapis/java-storage/compare/v2.1.9...v2.2.0) (2021-11-02) + + +### Features + +* next release from mainline is 2.2.0 ([#1124](https://www.github.com/googleapis/java-storage/issues/1124)) ([53a755b](https://www.github.com/googleapis/java-storage/commit/53a755b315c0e739e33929fa5db92eb1daf32e8b)) +* update all automatic retry behavior to be idempotency aware ([#1132](https://www.github.com/googleapis/java-storage/issues/1132)) ([470b8cd](https://www.github.com/googleapis/java-storage/commit/470b8cd8a24c1c2b4be1b956d1691dbae8cf87fd)) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20211018-1.32.1 ([#1123](https://www.github.com/googleapis/java-storage/issues/1123)) ([edc0e00](https://www.github.com/googleapis/java-storage/commit/edc0e00a9f0d3c48ed7abbd5b01429837298ecfb)) +* update kms.version to v0.93.2 ([#1120](https://www.github.com/googleapis/java-storage/issues/1120)) ([a5c007d](https://www.github.com/googleapis/java-storage/commit/a5c007d306c5d7fc00927be39b6879dfc7a01fcb)) + +### [2.1.9](https://www.github.com/googleapis/java-storage/compare/v2.1.8...v2.1.9) (2021-10-19) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.4.0 ([#1115](https://www.github.com/googleapis/java-storage/issues/1115)) ([37d892b](https://www.github.com/googleapis/java-storage/commit/37d892b05ae3c7338b6c804cddfcecca80509ea3)) + +### 
[2.1.8](https://www.github.com/googleapis/java-storage/compare/v2.1.7...v2.1.8) (2021-10-18) + + +### Bug Fixes + +* regenerate google.cloud.storage.v2 protos ([a7e3b94](https://www.github.com/googleapis/java-storage/commit/a7e3b94e4a3e03599b0dbe51fbe574ed4ea1a0d8)) + + +### Dependencies + +* update kms.version to v0.93.1 ([#1079](https://www.github.com/googleapis/java-storage/issues/1079)) ([1c52b3d](https://www.github.com/googleapis/java-storage/commit/1c52b3db6699c2ad325853e95231e1a908da069f)) + +### [2.1.7](https://www.github.com/googleapis/java-storage/compare/v2.1.6...v2.1.7) (2021-10-04) + + +### Bug Fixes + +* update PAP to use inherited instead of unspecified ([#1051](https://www.github.com/googleapis/java-storage/issues/1051)) ([6d73e46](https://www.github.com/googleapis/java-storage/commit/6d73e4631777542996a0ea815b482f5c19a8927d)) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20210918-1.32.1 ([#1046](https://www.github.com/googleapis/java-storage/issues/1046)) ([2c79005](https://www.github.com/googleapis/java-storage/commit/2c79005d29ee0b279850c7008b1afbb302f9c90d)) +* update kms.version to v0.93.0 ([#1061](https://www.github.com/googleapis/java-storage/issues/1061)) ([97b1a2e](https://www.github.com/googleapis/java-storage/commit/97b1a2ebe411e48e2df095fe5518a867c5136851)) + +### [2.1.6](https://www.github.com/googleapis/java-storage/compare/v2.1.5...v2.1.6) (2021-09-24) + + +### Dependencies + +* update kms.version to v0.92.2 ([#1039](https://www.github.com/googleapis/java-storage/issues/1039)) ([d6a0542](https://www.github.com/googleapis/java-storage/commit/d6a0542f5fd290a0bdc2755f81a49f55724662b2)) + +### [2.1.5](https://www.github.com/googleapis/java-storage/compare/v2.1.4...v2.1.5) (2021-09-22) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.3.0 ([#1035](https://www.github.com/googleapis/java-storage/issues/1035)) 
([ae71c24](https://www.github.com/googleapis/java-storage/commit/ae71c2496f64a0601b24574032cc133afb423408)) + +### [2.1.4](https://www.github.com/googleapis/java-storage/compare/v2.1.3...v2.1.4) (2021-09-20) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20210914-1.32.1 ([#1025](https://www.github.com/googleapis/java-storage/issues/1025)) ([ff56d5e](https://www.github.com/googleapis/java-storage/commit/ff56d5e5632d925542ac918d293b68dfcb32b465)) +* update kms.version to v0.92.1 ([#1023](https://www.github.com/googleapis/java-storage/issues/1023)) ([ca1afcf](https://www.github.com/googleapis/java-storage/commit/ca1afcff085bd02b150b93128b102cb9a61e1b4d)) + +### [2.1.3](https://www.github.com/googleapis/java-storage/compare/v2.1.2...v2.1.3) (2021-09-15) + + +### Dependencies + +* update kms.version to v0.92.0 ([#1018](https://www.github.com/googleapis/java-storage/issues/1018)) ([f1c58db](https://www.github.com/googleapis/java-storage/commit/f1c58db517596a5ee65e0f8a6e4b9c561288594e)) + +### [2.1.2](https://www.github.com/googleapis/java-storage/compare/v2.1.1...v2.1.2) (2021-09-14) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.2.1 ([#1011](https://www.github.com/googleapis/java-storage/issues/1011)) ([0bf06a5](https://www.github.com/googleapis/java-storage/commit/0bf06a54e3b90b9d8cf425d490561b48d6b5d882)) + +### [2.1.1](https://www.github.com/googleapis/java-storage/compare/v2.1.0...v2.1.1) (2021-09-03) + + +### Documentation + +* Modify OLM notice to recommend upgrading to latest version. 
([#932](https://www.github.com/googleapis/java-storage/issues/932)) ([be72433](https://www.github.com/googleapis/java-storage/commit/be72433ef5446db880e44f103a7d120f444f183f)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-conformance-tests to v0.2.0 ([#982](https://www.github.com/googleapis/java-storage/issues/982)) ([c7460a3](https://www.github.com/googleapis/java-storage/commit/c7460a3ffef81ef2f651b582a97139c0523d1eab)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.2.0 ([#989](https://www.github.com/googleapis/java-storage/issues/989)) ([6745c9e](https://www.github.com/googleapis/java-storage/commit/6745c9e5a9d3a907873b989ca8f8a47edd833523)) +* update kms.version to v0.91.3 ([#991](https://www.github.com/googleapis/java-storage/issues/991)) ([1f15022](https://www.github.com/googleapis/java-storage/commit/1f15022a590bce4f80dcb86d150b8e3dbe43aec9)) + +## [2.1.0](https://www.github.com/googleapis/java-storage/compare/v2.0.2...v2.1.0) (2021-08-24) + + +### Features + +* fix post policy escape bug, update conformance tests ([#924](https://www.github.com/googleapis/java-storage/issues/924)) ([d8329c3](https://www.github.com/googleapis/java-storage/commit/d8329c34fe19fd8c6bba5579aa3c55490c1d4e6f)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.1.0 ([#976](https://www.github.com/googleapis/java-storage/issues/976)) ([5cac14d](https://www.github.com/googleapis/java-storage/commit/5cac14d7785ef3798c379d17cd44500958d9cc6a)) +* update kms.version to v0.91.2 ([#977](https://www.github.com/googleapis/java-storage/issues/977)) ([1c60e6e](https://www.github.com/googleapis/java-storage/commit/1c60e6e6a34f662478043989b5b0bddea32cc5bf)) + +### [2.0.2](https://www.github.com/googleapis/java-storage/compare/v2.0.1...v2.0.2) (2021-08-19) + + +### Dependencies + +* update kms.version to v0.91.1 ([#956](https://www.github.com/googleapis/java-storage/issues/956)) 
([53d24e9](https://www.github.com/googleapis/java-storage/commit/53d24e9d3e27c0319fa3b6837c926484b1bd56a4)) + +## [2.0.1](https://www.github.com/googleapis/java-storage/compare/v2.0.0...v2.0.1) (2021-08-11) + + +### Features + +* generate storage v2 gapic client ([#960](https://www.github.com/googleapis/java-storage/issues/960)) ([fb2f9d4](https://www.github.com/googleapis/java-storage/commit/fb2f9d489e42b57f61642ce9e0c1a65fe91c9c45)) + + +### Bug Fixes + +* incorrectly labeled span list(String,Map) ([#946](https://www.github.com/googleapis/java-storage/issues/946)) ([0c1fdcf](https://www.github.com/googleapis/java-storage/commit/0c1fdcfe89609b10c148f0dc6026084d2f49b1b7)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.0.1 ([#961](https://www.github.com/googleapis/java-storage/issues/961)) ([69543dc](https://www.github.com/googleapis/java-storage/commit/69543dcba2fce1028e5fac25a59e1defe6465f06)) + +## [2.0.0](https://www.github.com/googleapis/java-storage/compare/v1.118.1...v2.0.0) (2021-08-09) + + +### ⚠ BREAKING CHANGES + +* migrate to java8 (#950) + +### Features + +* migrate to java8 ([#950](https://www.github.com/googleapis/java-storage/issues/950)) ([839bcc1](https://www.github.com/googleapis/java-storage/commit/839bcc174ff1c2f5536130d880a5c6e2559b5793)) + +### [1.118.1](https://www.github.com/googleapis/java-storage/compare/v1.118.0...v1.118.1) (2021-08-06) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2 ([#941](https://www.github.com/googleapis/java-storage/issues/941)) ([effefa6](https://www.github.com/googleapis/java-storage/commit/effefa64336a6112dae1497b3bcde7c7f8b0ad41)) + +## [1.118.0](https://www.github.com/googleapis/java-storage/compare/v1.117.1...v1.118.0) (2021-07-13) + + +### Features + +* fix signed url mismatch in BlobWriteChannel ([#915](https://www.github.com/googleapis/java-storage/issues/915)) 
([8b05867](https://www.github.com/googleapis/java-storage/commit/8b0586757523cfc550c62ff264eea3eebbd7f32e)) + + +### Bug Fixes + +* correct lastChunk retry logic in BlobWriteChannel ([#918](https://www.github.com/googleapis/java-storage/issues/918)) ([ab0228c](https://www.github.com/googleapis/java-storage/commit/ab0228c95df831d79f4a9c993908e5700dab5aa7)) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20210127-1.32.1 ([#910](https://www.github.com/googleapis/java-storage/issues/910)) ([2c54acc](https://www.github.com/googleapis/java-storage/commit/2c54acca0653a96773ab3606a8d97299e9fdf045)) +* update kms.version to v0.90.0 ([#911](https://www.github.com/googleapis/java-storage/issues/911)) ([1050725](https://www.github.com/googleapis/java-storage/commit/1050725c91b4375340ba113568ba04538c7f52fc)) + +### [1.117.1](https://www.github.com/googleapis/java-storage/compare/v1.117.0...v1.117.1) (2021-06-30) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.4.0 ([#905](https://www.github.com/googleapis/java-storage/issues/905)) ([dd084d1](https://www.github.com/googleapis/java-storage/commit/dd084d16b5f0bbf54730f2e91ce2c04a61457e0f)) + +## [1.117.0](https://www.github.com/googleapis/java-storage/compare/v1.116.0...v1.117.0) (2021-06-28) + + +### Features + +* Add from and to storage url options for BlobId ([#888](https://www.github.com/googleapis/java-storage/issues/888)) ([1876a58](https://www.github.com/googleapis/java-storage/commit/1876a580f904d095ca6621c1e2f38c3a6e253276)) +* add support of public access prevention ([#636](https://www.github.com/googleapis/java-storage/issues/636)) ([3d1e482](https://www.github.com/googleapis/java-storage/commit/3d1e48208c44c35c8e3761913bcd05c438e81069)) + + +### Bug Fixes + +* Add `shopt -s nullglob` to dependencies script ([#894](https://www.github.com/googleapis/java-storage/issues/894)) 
([901fd33](https://www.github.com/googleapis/java-storage/commit/901fd335c8d2f2e49844dee2adfa318a98ed99ba)) +* Update dependencies.sh to not break on mac ([#879](https://www.github.com/googleapis/java-storage/issues/879)) ([bc6d1d9](https://www.github.com/googleapis/java-storage/commit/bc6d1d9e211fbbb1accd1019c8eed4bc55ca421c)) + + +### Documentation + +* add notice about broken OLM experience ([#898](https://www.github.com/googleapis/java-storage/issues/898)) ([73e7cdf](https://www.github.com/googleapis/java-storage/commit/73e7cdf162be76a8438160f4c7f2070fb6fb5ea6)) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20210127-1.31.5 ([#889](https://www.github.com/googleapis/java-storage/issues/889)) ([99138a4](https://www.github.com/googleapis/java-storage/commit/99138a4cd3523cc634e3c5283a775a1c245b6201)) + +## [1.116.0](https://www.github.com/googleapis/java-storage/compare/v1.115.0...v1.116.0) (2021-06-14) + + +### Features + +* Add shouldReturnRawInputStream option to Get requests ([#872](https://www.github.com/googleapis/java-storage/issues/872)) ([474dfae](https://www.github.com/googleapis/java-storage/commit/474dfaec09d591455cecc77b08461efff1010c3a)) + + +### Bug Fixes + +* **ci:** remove linkage-monitor to pass 1.106.1 patch ci ([#862](https://www.github.com/googleapis/java-storage/issues/862)) ([94a9159](https://www.github.com/googleapis/java-storage/commit/94a915958f888cfbf4110d06a7f64be135dc141e)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.3.0 ([#863](https://www.github.com/googleapis/java-storage/issues/863)) ([37bfd5e](https://www.github.com/googleapis/java-storage/commit/37bfd5e3cf1c62767ff8033366cca66c2e8e6e4c)) +* update kms.version ([#860](https://www.github.com/googleapis/java-storage/issues/860)) ([f1430ff](https://www.github.com/googleapis/java-storage/commit/f1430ffea07696ea808369fcd287187c14afc9a2)) +* update kms.version to v0.89.3 
([#873](https://www.github.com/googleapis/java-storage/issues/873)) ([ee7c236](https://www.github.com/googleapis/java-storage/commit/ee7c2368928c050befb809a2d61bd6ffc92bdc88)) + +## [1.115.0](https://www.github.com/googleapis/java-storage/compare/v1.114.0...v1.115.0) (2021-06-01) + + +### Features + +* add `gcf-owl-bot[bot]` to `ignoreAuthors` ([#837](https://www.github.com/googleapis/java-storage/issues/837)) ([fe8e98a](https://www.github.com/googleapis/java-storage/commit/fe8e98a229f472c1f29d206d937690660bfa1444)) + + +### Bug Fixes + +* improve error detection and reporting for BlobWriteChannel retry state ([#846](https://www.github.com/googleapis/java-storage/issues/846)) ([d0f2184](https://www.github.com/googleapis/java-storage/commit/d0f2184f4dd2d99a4315f260f35421358d14a2df)), closes [#839](https://www.github.com/googleapis/java-storage/issues/839) +* update BucketInfo translation code to properly handle lifecycle rules ([#852](https://www.github.com/googleapis/java-storage/issues/852)) ([3b1df1d](https://www.github.com/googleapis/java-storage/commit/3b1df1d00a459b134103bc8738f0294188502a37)), closes [#850](https://www.github.com/googleapis/java-storage/issues/850) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.2.0 ([#836](https://www.github.com/googleapis/java-storage/issues/836)) ([c1752ce](https://www.github.com/googleapis/java-storage/commit/c1752ce17d5d723d0ea36c41d98ae2bc9201fec2)) +* update kms.version to v0.88.4 ([#830](https://www.github.com/googleapis/java-storage/issues/830)) ([7e3dc28](https://www.github.com/googleapis/java-storage/commit/7e3dc287e4285a9312393179671a78c569e7e869)) +* update kms.version to v0.89.0 ([#855](https://www.github.com/googleapis/java-storage/issues/855)) ([29236e9](https://www.github.com/googleapis/java-storage/commit/29236e9d2eefb0e64b1b9bbfc532f4c3ae3e9ea4)) + +## [1.114.0](https://www.github.com/googleapis/java-storage/compare/v1.113.16...v1.114.0) (2021-05-13) + + 
+### Features + +* Remove client side validation for lifecycle conditions ([#816](https://www.github.com/googleapis/java-storage/issues/816)) ([5ec84cc](https://www.github.com/googleapis/java-storage/commit/5ec84cc2935a4787dd14a207d27501878f5849d5)) + + +### Bug Fixes + +* **test:** update blob paths used in storage.it.ITStorageTest#testDownloadPublicBlobWithoutAuthentication ([#759](https://www.github.com/googleapis/java-storage/issues/759)) ([#817](https://www.github.com/googleapis/java-storage/issues/817)) ([1a576ca](https://www.github.com/googleapis/java-storage/commit/1a576ca3945b51d7a678aa2414be91b3c6b2d55e)) + + +### Dependencies + +* update dependency com.google.api-client:google-api-client to v1.31.5 ([#820](https://www.github.com/googleapis/java-storage/issues/820)) ([9e1bc0b](https://www.github.com/googleapis/java-storage/commit/9e1bc0b42abdaab0b11d761ecdbb92f6116aacd2)) +* update dependency com.google.api.grpc:grpc-google-cloud-kms-v1 to v0.88.3 ([#797](https://www.github.com/googleapis/java-storage/issues/797)) ([747e7e4](https://www.github.com/googleapis/java-storage/commit/747e7e463c028b9cf8a406b7536b1916c1d52c01)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.1.0 ([#815](https://www.github.com/googleapis/java-storage/issues/815)) ([e210de9](https://www.github.com/googleapis/java-storage/commit/e210de93452243242be7d3d719d00da723632335)) + +### [1.113.16](https://www.github.com/googleapis/java-storage/compare/v1.113.15...v1.113.16) (2021-04-23) + + +### Bug Fixes + +* release scripts from issuing overlapping phases ([#784](https://www.github.com/googleapis/java-storage/issues/784)) ([36751f5](https://www.github.com/googleapis/java-storage/commit/36751f5de9708ac9e23550f67256fb05ebf1f69e)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.21.1 ([#789](https://www.github.com/googleapis/java-storage/issues/789)) 
([c005e87](https://www.github.com/googleapis/java-storage/commit/c005e877a7d64c4bbd2ed267526d8025ea29a9ad)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1 ([#794](https://www.github.com/googleapis/java-storage/issues/794)) ([195fead](https://www.github.com/googleapis/java-storage/commit/195fead94dea4c50f7e285e0a7a1578fa5b6265d)) + +### [1.113.15](https://www.github.com/googleapis/java-storage/compare/v1.113.14...v1.113.15) (2021-04-13) + + +### Bug Fixes + +* **test:** update blob paths used in storage.it.ITStorageTest#testDownloadPublicBlobWithoutAuthentication ([#759](https://www.github.com/googleapis/java-storage/issues/759)) ([9a6619c](https://www.github.com/googleapis/java-storage/commit/9a6619c39a89e2c2ee8d0000d595d09ac7b7825f)) +* typo ([#779](https://www.github.com/googleapis/java-storage/issues/779)) ([3c3d6b4](https://www.github.com/googleapis/java-storage/commit/3c3d6b487648fde4eb956ce8912cd680a4440f8d)) + + +### Dependencies + +* update dependency com.google.api-client:google-api-client to v1.31.4 ([#774](https://www.github.com/googleapis/java-storage/issues/774)) ([ad9ff7b](https://www.github.com/googleapis/java-storage/commit/ad9ff7b801d0c5fb39f72c7118c319f4e45084a0)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.21.0 ([#771](https://www.github.com/googleapis/java-storage/issues/771)) ([5605095](https://www.github.com/googleapis/java-storage/commit/5605095ed796327879a930c12526b3c5b1409b17)) +* update kms.version to v0.88.1 ([#758](https://www.github.com/googleapis/java-storage/issues/758)) ([3e57ea9](https://www.github.com/googleapis/java-storage/commit/3e57ea9a2f5f7013e997469c5ca32be8cef2a4a4)) +* update kms.version to v0.88.2 ([#778](https://www.github.com/googleapis/java-storage/issues/778)) ([6edfc4c](https://www.github.com/googleapis/java-storage/commit/6edfc4ced2bdae9878ecdbc5ef636ac39bdb5881)) +* update truth ([#767](https://www.github.com/googleapis/java-storage/issues/767)) 
([4e5ee03](https://www.github.com/googleapis/java-storage/commit/4e5ee0398e700baf4f88224f66309e426f9532d7)) + +### [1.113.14](https://www.github.com/googleapis/java-storage/compare/v1.113.13...v1.113.14) (2021-03-11) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.20.1 ([#749](https://www.github.com/googleapis/java-storage/issues/749)) ([bb42107](https://www.github.com/googleapis/java-storage/commit/bb42107ff10148e14e112ff78534753f2ebc7dd9)) +* update kms.version to v0.88.0 ([#753](https://www.github.com/googleapis/java-storage/issues/753)) ([eaedb64](https://www.github.com/googleapis/java-storage/commit/eaedb6456f2f427a7f2f3f3d6bd13d0d49fd269b)) + +### [1.113.13](https://www.github.com/googleapis/java-storage/compare/v1.113.12...v1.113.13) (2021-03-08) + + +### Bug Fixes + +* npe in createFrom ([#746](https://www.github.com/googleapis/java-storage/issues/746)) ([9ed9d13](https://www.github.com/googleapis/java-storage/commit/9ed9d1389e92766b66e2b8b4fb78b44d96d98803)) + + +### Dependencies + +* update dependency com.google.api-client:google-api-client to v1.31.3 ([#737](https://www.github.com/googleapis/java-storage/issues/737)) ([71b3842](https://www.github.com/googleapis/java-storage/commit/71b384233226531eabc1bd8eebf716ec53708afc)) + +### [1.113.12](https://www.github.com/googleapis/java-storage/compare/v1.113.11...v1.113.12) (2021-02-26) + + +### Bug Fixes + +* retrying get remote offset and recover from last chunk failures. 
([#726](https://www.github.com/googleapis/java-storage/issues/726)) ([b41b881](https://www.github.com/googleapis/java-storage/commit/b41b88109e13b5ebbd0393d1f264225c12876be6)) + + +### Dependencies + +* update dependency com.google.api-client:google-api-client to v1.31.2 ([#686](https://www.github.com/googleapis/java-storage/issues/686)) ([6b1f036](https://www.github.com/googleapis/java-storage/commit/6b1f0361376167719ec5456181134136d27d1d3c)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.20.0 ([#732](https://www.github.com/googleapis/java-storage/issues/732)) ([c98413d](https://www.github.com/googleapis/java-storage/commit/c98413df9d9514340aed78b5a4d5e596760bb616)) +* update kms.version to v0.87.7 ([#724](https://www.github.com/googleapis/java-storage/issues/724)) ([3229bd8](https://www.github.com/googleapis/java-storage/commit/3229bd860f3a4d700a969aa9e922bbf6b5c1ca10)) +* update kms.version to v0.87.8 ([#733](https://www.github.com/googleapis/java-storage/issues/733)) ([a21b75f](https://www.github.com/googleapis/java-storage/commit/a21b75fa846f373970298dd98f8f3520fc2b3c97)) + +### [1.113.11](https://www.github.com/googleapis/java-storage/compare/v1.113.10...v1.113.11) (2021-02-19) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.19.0 ([#719](https://www.github.com/googleapis/java-storage/issues/719)) ([5831bfa](https://www.github.com/googleapis/java-storage/commit/5831bfae3afeab9b044c8d53ebf6a2ce79bc9950)) + +### [1.113.10](https://www.github.com/googleapis/java-storage/compare/v1.113.9...v1.113.10) (2021-02-17) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20210127-1.31.0 ([#706](https://www.github.com/googleapis/java-storage/issues/706)) ([04db8f7](https://www.github.com/googleapis/java-storage/commit/04db8f7b87644559685d4c05a67a74e4c8bea364)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.18.0 
([#683](https://www.github.com/googleapis/java-storage/issues/683)) ([6f172eb](https://www.github.com/googleapis/java-storage/commit/6f172eba6fd6e9c11a1f49569249ea6e714ea91f)) +* update kms.version to v0.87.6 ([#702](https://www.github.com/googleapis/java-storage/issues/702)) ([a50c333](https://www.github.com/googleapis/java-storage/commit/a50c333f6e944fa4c6bdf9613cddca7c4fe79652)) + +### [1.113.9](https://www.github.com/googleapis/java-storage/compare/v1.113.8...v1.113.9) (2021-01-12) + + +### Bug Fixes + +* last chunk is retriable ([#677](https://www.github.com/googleapis/java-storage/issues/677)) ([44f49e0](https://www.github.com/googleapis/java-storage/commit/44f49e0a33c3e541d9f8b22622ffff17cc8b8eaa)) +* unnecessary options in resumable upload URL ([#679](https://www.github.com/googleapis/java-storage/issues/679)) ([d31a39b](https://www.github.com/googleapis/java-storage/commit/d31a39b88b2d8adb04549330f9b8ff1c1a516b69)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.17.1 ([#678](https://www.github.com/googleapis/java-storage/issues/678)) ([d4a237f](https://www.github.com/googleapis/java-storage/commit/d4a237f4dff9dd870a69d5da9d690c14d4e88610)) +* update kms.version to v0.87.5 ([#662](https://www.github.com/googleapis/java-storage/issues/662)) ([20e7c1f](https://www.github.com/googleapis/java-storage/commit/20e7c1f10a233df6d4660b31d26cd95a6d4002e9)) + +### [1.113.8](https://www.github.com/googleapis/java-storage/compare/v1.113.7...v1.113.8) (2020-12-16) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.17.0 ([#659](https://www.github.com/googleapis/java-storage/issues/659)) ([5fa03fa](https://www.github.com/googleapis/java-storage/commit/5fa03fa14aa9ee29e7b1b27b783ab873052b97c6)) + +### [1.113.7](https://www.github.com/googleapis/java-storage/compare/v1.113.6...v1.113.7) (2020-12-14) + + +### Dependencies + +* update dependency 
com.google.cloud:google-cloud-shared-dependencies to v0.16.1 ([f1fc884](https://www.github.com/googleapis/java-storage/commit/f1fc884851ee602d737f3e4191acb1f8450c8f2c)) + +### [1.113.6](https://www.github.com/googleapis/java-storage/compare/v1.113.5...v1.113.6) (2020-12-10) + + +### Bug Fixes + +* content-length missing in offset request ([#647](https://www.github.com/googleapis/java-storage/issues/647)) ([3cd3815](https://www.github.com/googleapis/java-storage/commit/3cd3815c62603d05d4c571ba1affeaf91e4d8040)) + + +### Dependencies + +* update kms.version to v0.87.3 ([#646](https://www.github.com/googleapis/java-storage/issues/646)) ([c93896a](https://www.github.com/googleapis/java-storage/commit/c93896a5007b48753809de806ddaf6c8df6e9d56)) + +### [1.113.5](https://www.github.com/googleapis/java-storage/compare/v1.113.4...v1.113.5) (2020-12-07) + + +### Dependencies + +* update dependency com.google.api-client:google-api-client to v1.31.1 ([#611](https://www.github.com/googleapis/java-storage/issues/611)) ([7c4c759](https://www.github.com/googleapis/java-storage/commit/7c4c759d8bca9c20252e06e02eb8ead3bd9f88d6)) +* update dependency com.google.api.grpc:grpc-google-cloud-kms-v1 to v0.87.2 ([#625](https://www.github.com/googleapis/java-storage/issues/625)) ([243a3cb](https://www.github.com/googleapis/java-storage/commit/243a3cb1506b2e2d609210dc4e9608637c06d7f3)) +* update dependency com.google.apis:google-api-services-storage to v1-rev20201112-1.30.10 ([#613](https://www.github.com/googleapis/java-storage/issues/613)) ([b0e24db](https://www.github.com/googleapis/java-storage/commit/b0e24db88c784fd05988a813bd8b29aeff0739f2)) +* update dependency com.google.apis:google-api-services-storage to v1-rev20201112-1.31.0 ([#641](https://www.github.com/googleapis/java-storage/issues/641)) ([11da9c7](https://www.github.com/googleapis/java-storage/commit/11da9c7e9058c508423e7b2f84c897ab3e9ab3f3)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.16.0 
([#639](https://www.github.com/googleapis/java-storage/issues/639)) ([68a3af9](https://www.github.com/googleapis/java-storage/commit/68a3af9b12c0e05d2cf59fb58aceab15323e29b1)) +* update dependency org.apache.httpcomponents:httpcore to v4.4.14 ([#637](https://www.github.com/googleapis/java-storage/issues/637)) ([af53902](https://www.github.com/googleapis/java-storage/commit/af5390239ffd1e157f066a1009b7bb18fa6264ec)) + +### [1.113.4](https://www.github.com/googleapis/java-storage/compare/v1.113.3...v1.113.4) (2020-11-13) + + +### Bug Fixes + +* retry using remote offset ([#604](https://www.github.com/googleapis/java-storage/issues/604)) ([216b52c](https://www.github.com/googleapis/java-storage/commit/216b52c54d34eaf1307788809a3512c461adf381)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.15.0 ([#610](https://www.github.com/googleapis/java-storage/issues/610)) ([ac65e5b](https://www.github.com/googleapis/java-storage/commit/ac65e5b0bd324d5726504bb3405c758675a56ddc)) + +### [1.113.3](https://www.github.com/googleapis/java-storage/compare/v1.113.2...v1.113.3) (2020-11-06) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.14.1 ([#592](https://www.github.com/googleapis/java-storage/issues/592)) ([25e8e6a](https://www.github.com/googleapis/java-storage/commit/25e8e6a01dde517fd42cfc8ae59b8555ea0a2831)) +* update kms.version to v0.87.1 ([#595](https://www.github.com/googleapis/java-storage/issues/595)) ([1e399cd](https://www.github.com/googleapis/java-storage/commit/1e399cd33755e647bf08f4a82af932320cab655d)) + +### [1.113.2](https://www.github.com/googleapis/java-storage/compare/v1.113.1...v1.113.2) (2020-10-26) + + +### Documentation + +* update libraries-bom ([#540](https://www.github.com/googleapis/java-storage/issues/540)) ([54987e1](https://www.github.com/googleapis/java-storage/commit/54987e1ba35d99db680ab2ad6ac86a6b74c7c705)) +* update libraries-bom 
([#552](https://www.github.com/googleapis/java-storage/issues/552)) ([c4df018](https://www.github.com/googleapis/java-storage/commit/c4df01875b8f088bd65bcd0353e1b74a18b9582c)) + + +### Dependencies + +* update dependency com.google.api-client:google-api-client to v1.30.11 ([#575](https://www.github.com/googleapis/java-storage/issues/575)) ([99838e6](https://www.github.com/googleapis/java-storage/commit/99838e63f9a71095c4d8f6c99622a9aee2e5d26d)) +* update dependency com.google.apis:google-api-services-storage to v1-rev20200927-1.30.10 ([#539](https://www.github.com/googleapis/java-storage/issues/539)) ([5e49013](https://www.github.com/googleapis/java-storage/commit/5e49013add340e4d8287e00b8d4a9c499df80205)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.10.0 ([#529](https://www.github.com/googleapis/java-storage/issues/529)) ([dc58856](https://www.github.com/googleapis/java-storage/commit/dc58856c2548013a495b62cc6bb696ada24d2557)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.10.2 ([#549](https://www.github.com/googleapis/java-storage/issues/549)) ([c59c28d](https://www.github.com/googleapis/java-storage/commit/c59c28d97a9eb4e811921c7cad637d67c2be16be)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.12.1 ([#566](https://www.github.com/googleapis/java-storage/issues/566)) ([f1dedfb](https://www.github.com/googleapis/java-storage/commit/f1dedfbf9f47c87c7f7fea5e6c1c7c1af35b060e)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.13.0 ([#570](https://www.github.com/googleapis/java-storage/issues/570)) ([ef55f49](https://www.github.com/googleapis/java-storage/commit/ef55f49230f58863195782b5fe0f84285a316aef)) + +### [1.113.1](https://www.github.com/googleapis/java-storage/compare/v1.113.0...v1.113.1) (2020-09-17) + + +### Bug Fixes + +* KMS Bad Key error when using existing Blob context to overwrite object 
([#507](https://www.github.com/googleapis/java-storage/issues/507)) ([4d9c490](https://www.github.com/googleapis/java-storage/commit/4d9c49027e4746ee273902694441886c2f43188d)) +* When passing a sub-array (offset, length) to the Storage#create method the array is needlessly cloned ([#506](https://www.github.com/googleapis/java-storage/issues/506)) ([9415bb7](https://www.github.com/googleapis/java-storage/commit/9415bb7bdb42d8012ca457a90070b616e6bbec19)), closes [#505](https://www.github.com/googleapis/java-storage/issues/505) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20200814-1.30.10 ([#499](https://www.github.com/googleapis/java-storage/issues/499)) ([af91d7d](https://www.github.com/googleapis/java-storage/commit/af91d7da4117fb22992d6a860af61f72906e0aa1)) + + +### Documentation + +* update libraries-bom ([#504](https://www.github.com/googleapis/java-storage/issues/504)) ([0e58c1c](https://www.github.com/googleapis/java-storage/commit/0e58c1cb2b6a890e567b043188613021592f2bc8)) + +## [1.113.0](https://www.github.com/googleapis/java-storage/compare/v1.112.0...v1.113.0) (2020-09-03) + + +### Features + +* expose timeStorageClassUpdated property of blob's ([#456](https://www.github.com/googleapis/java-storage/issues/456)) ([57853ec](https://www.github.com/googleapis/java-storage/commit/57853ec7fbc2f3188d8da991001660a4f6008632)) + + +### Bug Fixes + +* add missing FieldSelector inside BucketField and BlobField ([#484](https://www.github.com/googleapis/java-storage/issues/484)) ([c2aa9cf](https://www.github.com/googleapis/java-storage/commit/c2aa9cf6fb4c7f407cbfce85b338b735ceafe1dc)) +* prevent NPE in RemoteStorageHelper.cleanBuckets ([#492](https://www.github.com/googleapis/java-storage/issues/492)) ([db358c8](https://www.github.com/googleapis/java-storage/commit/db358c8b53f7ba3084c5566c9abf4033bf29783f)) +* set IT_SERVICE_ACCOUNT_EMAIL for nightly integration test 
([#479](https://www.github.com/googleapis/java-storage/issues/479)) ([23c379e](https://www.github.com/googleapis/java-storage/commit/23c379e4d28e4fb319db047c7d46654d9a8b9a61)) + + +### Documentation + +* update libraries-bom ([#494](https://www.github.com/googleapis/java-storage/issues/494)) ([6b015da](https://www.github.com/googleapis/java-storage/commit/6b015da57d42f468c9b3d1f86476407a61cd14ea)) +* update link ([#490](https://www.github.com/googleapis/java-storage/issues/490)) ([6cd5dfa](https://www.github.com/googleapis/java-storage/commit/6cd5dface9cc14f2ec6729e5b842bcee91c1ad34)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.9.0 ([#493](https://www.github.com/googleapis/java-storage/issues/493)) ([0e4f70f](https://www.github.com/googleapis/java-storage/commit/0e4f70f7f70784fee91be499def9734d8af61be9)) +* update kms.version to v0.87.0 ([#489](https://www.github.com/googleapis/java-storage/issues/489)) ([a045d54](https://www.github.com/googleapis/java-storage/commit/a045d5400234595f102a8b8d594539dbfd5f295e)) + +## [1.112.0](https://www.github.com/googleapis/java-storage/compare/v1.111.2...v1.112.0) (2020-08-27) + + +### Features + +* add support of customTime metadata ([#413](https://www.github.com/googleapis/java-storage/issues/413)) ([6f4585e](https://www.github.com/googleapis/java-storage/commit/6f4585eb6706390865cf5fb565fa8062d0071045)) +* add support of customTimeBefore and daysSinceCustomTime ([#396](https://www.github.com/googleapis/java-storage/issues/396)) ([1af8288](https://www.github.com/googleapis/java-storage/commit/1af8288016f2526ddbe221ef22dc705e28b18b77)) +* add support of daysSinceNoncurrentTime and noncurrentTimeBefore OLM options ([#335](https://www.github.com/googleapis/java-storage/issues/335)) ([1e3e88a](https://www.github.com/googleapis/java-storage/commit/1e3e88a391651421469e5c7a8216a788eaa4ba5a)) +* add support of null to remove the CORS configuration from bucket 
([#438](https://www.github.com/googleapis/java-storage/issues/438)) ([f8a4b12](https://www.github.com/googleapis/java-storage/commit/f8a4b12517c661881d7b7c65f796c1c8f1cf3ae9)) +* add support of startOffset and endOffset ([#430](https://www.github.com/googleapis/java-storage/issues/430)) ([38c1c34](https://www.github.com/googleapis/java-storage/commit/38c1c34937eeacd126cf6d62bf85fb9db90e1702)) +* auto content-type on blob creation ([#338](https://www.github.com/googleapis/java-storage/issues/338)) ([66d1eb7](https://www.github.com/googleapis/java-storage/commit/66d1eb793383b9e83992824b392cedd28d54609f)) +* expose updateTime field of the bucket ([#449](https://www.github.com/googleapis/java-storage/issues/449)) ([f0e945e](https://www.github.com/googleapis/java-storage/commit/f0e945e14662b86594298557b83151d3cb7e1ebb)) + + +### Bug Fixes + +* Ignore CONTRIBUTING.md ([#447](https://www.github.com/googleapis/java-storage/issues/447)) ([bdacdc9](https://www.github.com/googleapis/java-storage/commit/bdacdc93a107108add5bd9dc00473997534aa761)), closes [#446](https://www.github.com/googleapis/java-storage/issues/446) [#446](https://www.github.com/googleapis/java-storage/issues/446) +* PostPolicyV4 classes could be improved ([#442](https://www.github.com/googleapis/java-storage/issues/442)) ([8602b81](https://www.github.com/googleapis/java-storage/commit/8602b81eae95868e184fd4ab290396707bd21a8e)) +* **docs:** example of Storage#testIamPermissions ([#434](https://www.github.com/googleapis/java-storage/issues/434)) ([275f452](https://www.github.com/googleapis/java-storage/commit/275f452a5993f95a84fb603a5f4b436238b39439)) +* PostPolicyV4.PostFieldsV4.Builder.addCustomMetadataField() allows to add prefixed and not prefixed custom fields ([#398](https://www.github.com/googleapis/java-storage/issues/398)) ([02dc3b5](https://www.github.com/googleapis/java-storage/commit/02dc3b5e5377d8848c889647e72102cd9acc646d)) + + +### Dependencies + +* update dependency 
com.google.api-client:google-api-client to v1.30.10 ([#423](https://www.github.com/googleapis/java-storage/issues/423)) ([fbfa9ec](https://www.github.com/googleapis/java-storage/commit/fbfa9ecf277794e07d9a3c46d5b5022f54c37afd)) +* update dependency com.google.api.grpc:grpc-google-cloud-kms-v1 to v0.86.1 ([#463](https://www.github.com/googleapis/java-storage/issues/463)) ([cf94230](https://www.github.com/googleapis/java-storage/commit/cf94230a5f02dcc16e364aa528d97046d80f59a0)) +* update dependency com.google.api.grpc:proto-google-cloud-kms-v1 to v0.86.1 ([#464](https://www.github.com/googleapis/java-storage/issues/464)) ([6c372fa](https://www.github.com/googleapis/java-storage/commit/6c372fa81e49ac74bdda6f9b10914fac42767247)) +* update dependency com.google.apis:google-api-services-storage to v1-rev20200611-1.30.10 ([#428](https://www.github.com/googleapis/java-storage/issues/428)) ([6ef57eb](https://www.github.com/googleapis/java-storage/commit/6ef57ebc9eeddc90f13ef87274e8ab0b7eb53290)) +* update dependency com.google.apis:google-api-services-storage to v1-rev20200727-1.30.10 ([#457](https://www.github.com/googleapis/java-storage/issues/457)) ([edfd1e6](https://www.github.com/googleapis/java-storage/commit/edfd1e69e886adb04b98b54b3a63768c7e82b1e0)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.4 ([#452](https://www.github.com/googleapis/java-storage/issues/452)) ([12bc02d](https://www.github.com/googleapis/java-storage/commit/12bc02d7bc05e584cad4362628155333630fbcba)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.6 ([#458](https://www.github.com/googleapis/java-storage/issues/458)) ([f8d6e15](https://www.github.com/googleapis/java-storage/commit/f8d6e158a06aec926fb7bc42f10483d56696a37e)) + +### [1.111.2](https://www.github.com/googleapis/java-storage/compare/v1.111.1...v1.111.2) (2020-07-10) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.3 
([#425](https://www.github.com/googleapis/java-storage/issues/425)) ([727b173](https://www.github.com/googleapis/java-storage/commit/727b1739963f5dc86009587eeb998d20adb94448)) + +### [1.111.1](https://www.github.com/googleapis/java-storage/compare/v1.111.0...v1.111.1) (2020-07-01) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20200611-1.30.9 ([#406](https://www.github.com/googleapis/java-storage/issues/406)) ([b2ebea7](https://www.github.com/googleapis/java-storage/commit/b2ebea7a8fa0a2b2a2696c33da5f54a94b0f3d62)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.2 ([#414](https://www.github.com/googleapis/java-storage/issues/414)) ([4451887](https://www.github.com/googleapis/java-storage/commit/4451887bc58cdfa14488efcba6ad4040819ab71c)) +* update dependency google-cloud-shared-config to v0.9.0 ([#417](https://www.github.com/googleapis/java-storage/issues/417)) +* update dependency grpc-google-cloud-kms-v1 to v0.86.0 ([#417](https://www.github.com/googleapis/java-storage/issues/417)) +* update dependency proto-google-cloud-kms-v1 to v0.86.0 ([#417](https://www.github.com/googleapis/java-storage/issues/417)) + + +## [1.111.0](https://www.github.com/googleapis/java-storage/compare/v1.110.0...v1.111.0) (2020-06-25) + + +### Features + +* add storage.upload(path) ([#269](https://www.github.com/googleapis/java-storage/issues/269)) ([9457f3a](https://www.github.com/googleapis/java-storage/commit/9457f3a76ff18552adc5f9c82f62ab8f3c207d31)) +* Add support to disable logging from bucket ([#390](https://www.github.com/googleapis/java-storage/issues/390)) ([be72027](https://www.github.com/googleapis/java-storage/commit/be72027b1587b9b0a3e9e65e7a2231bdb2ae521f)) +* expose all the methods of notification ([#141](https://www.github.com/googleapis/java-storage/issues/141)) ([8dfc0cb](https://www.github.com/googleapis/java-storage/commit/8dfc0cbf8294a7fc426948e22e5c2182da97b630)) + + +### Reverts + +* 
Revert "feat: expose all the methods of notification (#141)" (#393) ([3e02b9c](https://www.github.com/googleapis/java-storage/commit/3e02b9c4ee1ce0fb785d15b04bd36754e31831a0)), closes [#141](https://www.github.com/googleapis/java-storage/issues/141) [#393](https://www.github.com/googleapis/java-storage/issues/393) + +## [1.110.0](https://www.github.com/googleapis/java-storage/compare/v1.109.1...v1.110.0) (2020-06-18) + + +### Features + +* delete bucket OLM rules ([#352](https://www.github.com/googleapis/java-storage/issues/352)) ([0a528c6](https://www.github.com/googleapis/java-storage/commit/0a528c6916f8b031916a4c6ecc96ce5e49ea99c7)) + +### [1.109.1](https://www.github.com/googleapis/java-storage/compare/v1.109.0...v1.109.1) (2020-06-15) + + +### Dependencies + +* bump shared-deps version and add back certain test deps ([#340](https://www.github.com/googleapis/java-storage/issues/340)) ([afd0339](https://www.github.com/googleapis/java-storage/commit/afd0339c1d62dfb82032e08e3ef50a14c80ad30a)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.1 ([#368](https://www.github.com/googleapis/java-storage/issues/368)) ([ccaf480](https://www.github.com/googleapis/java-storage/commit/ccaf48015f9e99fa2ee3b457eb5c04ad07c3253a)) + +## [1.109.0](https://www.github.com/googleapis/java-storage/compare/v1.108.0...v1.109.0) (2020-06-11) + + +### Features + +* adopt flatten-maven-plugin and java-shared-dependencies ([#325](https://www.github.com/googleapis/java-storage/issues/325)) ([209cae3](https://www.github.com/googleapis/java-storage/commit/209cae322932a4f87729fe4c5176a4f11962cfae)) +* stub implementation of StorageRpc for the sake of testing ([#351](https://www.github.com/googleapis/java-storage/issues/351)) ([dd58025](https://www.github.com/googleapis/java-storage/commit/dd5802555eb0351a5afa2f2f197cb93ca6d3b66e)) + + +### Bug Fixes + +* blob.reload() does not work as intuitively expected 
([#308](https://www.github.com/googleapis/java-storage/issues/308)) ([a2bab58](https://www.github.com/googleapis/java-storage/commit/a2bab58ccd89f48e8d4a8ee2dd776b201598420d)) + + +### Documentation + +* **fix:** update client documentation link ([#324](https://www.github.com/googleapis/java-storage/issues/324)) ([eb8940c](https://www.github.com/googleapis/java-storage/commit/eb8940cc6a88b5e2b3dea8d0ab2ffc1e350ab924)) +* Add doc for equals method in blob ([#311](https://www.github.com/googleapis/java-storage/issues/311)) ([91fc36a](https://www.github.com/googleapis/java-storage/commit/91fc36a6673e30d1cfa8c4da51b874e1fd0b0535)) +* catch actual exception in java doc comment ([#312](https://www.github.com/googleapis/java-storage/issues/312)) ([9201de5](https://www.github.com/googleapis/java-storage/commit/9201de559fe4218abd2e4fac47beac62454547cf)), closes [#309](https://www.github.com/googleapis/java-storage/issues/309) +* update CONTRIBUTING.md to include code formatting ([#534](https://www.github.com/googleapis/java-storage/issues/534)) ([#315](https://www.github.com/googleapis/java-storage/issues/315)) ([466d08f](https://www.github.com/googleapis/java-storage/commit/466d08f9835a0f1dd00b5c9b3a08551be68d03ad)) +* update readme to point client library documentation ([#317](https://www.github.com/googleapis/java-storage/issues/317)) ([8650f80](https://www.github.com/googleapis/java-storage/commit/8650f806736beec7bf7ab09a337b333bbf144f7b)) + + +### Dependencies + +* update dependency com.google.api.grpc:proto-google-common-protos to v1.18.0 ([#301](https://www.github.com/googleapis/java-storage/issues/301)) ([ff2dee2](https://www.github.com/googleapis/java-storage/commit/ff2dee2ce41d37787f0866ae740d3cd7f3b2bbd6)) +* update dependency com.google.apis:google-api-services-storage to v1-rev20200410-1.30.9 ([#296](https://www.github.com/googleapis/java-storage/issues/296)) 
([2e55aa2](https://www.github.com/googleapis/java-storage/commit/2e55aa2c8b9c78df9eebfe748fe72dcaae63ff81)) +* update dependency com.google.apis:google-api-services-storage to v1-rev20200430-1.30.9 ([#319](https://www.github.com/googleapis/java-storage/issues/319)) ([3d03fa3](https://www.github.com/googleapis/java-storage/commit/3d03fa3381cfbb76d1501ec3d2ad14742a8a58dd)) +* update dependency com.google.cloud:google-cloud-conformance-tests to v0.0.11 ([#320](https://www.github.com/googleapis/java-storage/issues/320)) ([6c18c88](https://www.github.com/googleapis/java-storage/commit/6c18c882cfe0c35b310a518e6044847e6fbeab94)) + +## [1.108.0](https://www.github.com/googleapis/java-storage/compare/v1.107.0...v1.108.0) (2020-04-30) + + +### Features + +* add mockito dependency ([#284](https://www.github.com/googleapis/java-storage/issues/284)) ([58692dd](https://www.github.com/googleapis/java-storage/commit/58692dd8eeb2d228d14c896e563184d723b25df1)) +* V4 POST policy ([#177](https://www.github.com/googleapis/java-storage/issues/177)) ([32d8ffa](https://www.github.com/googleapis/java-storage/commit/32d8fface1a994cb5ac928f08c0467edc3c9aab1)) + + +### Bug Fixes + +* Documentation for Blob.update() and Storage.update() methods is confusing/incorrect ([#261](https://www.github.com/googleapis/java-storage/issues/261)) ([876405f](https://www.github.com/googleapis/java-storage/commit/876405f81cf195f5619b353be8d1e8efcbf5e0b3)), closes [#252](https://www.github.com/googleapis/java-storage/issues/252) + + +### Dependencies + +* pin mockito version to work with java 7 ([#292](https://www.github.com/googleapis/java-storage/issues/292)) ([8eb2fff](https://www.github.com/googleapis/java-storage/commit/8eb2fff3f51c90af7f76f74d40ed1d6d6b4320b7)) +* update dependency com.google.api.grpc:grpc-google-cloud-kms-v1 to v0.85.1 ([#273](https://www.github.com/googleapis/java-storage/issues/273)) 
([7b5e7d1](https://www.github.com/googleapis/java-storage/commit/7b5e7d173cdac6b2de802c568e3a60b915d39d1c)) +* update dependency com.google.api.grpc:proto-google-cloud-kms-v1 to v0.85.1 ([#274](https://www.github.com/googleapis/java-storage/issues/274)) ([0ab4304](https://www.github.com/googleapis/java-storage/commit/0ab4304ea4e5e5668c05c67d2c96c6056f8c19c2)) +* update dependency com.google.cloud:google-cloud-conformance-tests to v0.0.10 ([#281](https://www.github.com/googleapis/java-storage/issues/281)) ([f3dee7e](https://www.github.com/googleapis/java-storage/commit/f3dee7ea0d0e305f0bc0c980aa65e538f7bf890c)) +* update dependency com.google.http-client:google-http-client-bom to v1.35.0 ([#282](https://www.github.com/googleapis/java-storage/issues/282)) ([1c1c1be](https://www.github.com/googleapis/java-storage/commit/1c1c1bee0d6382e76e74f9a00dca8e527cc390c6)) +* update dependency io.grpc:grpc-bom to v1.28.1 ([#250](https://www.github.com/googleapis/java-storage/issues/250)) ([b35e81c](https://www.github.com/googleapis/java-storage/commit/b35e81ce19fa72672aefe8bd956959bfa954194c)) +* update dependency io.grpc:grpc-bom to v1.29.0 ([#275](https://www.github.com/googleapis/java-storage/issues/275)) ([9b241b4](https://www.github.com/googleapis/java-storage/commit/9b241b468d4f3a73b81c5bc67c085c6fe7c6ea1e)) +* update dependency org.threeten:threetenbp to v1.4.4 ([#278](https://www.github.com/googleapis/java-storage/issues/278)) ([7bae49f](https://www.github.com/googleapis/java-storage/commit/7bae49f16ba5de0eeac8301a6a11b85bd4406ed5)) + + +### Documentation + +* label legacy storage classes in documentation ([#267](https://www.github.com/googleapis/java-storage/issues/267)) ([50e5938](https://www.github.com/googleapis/java-storage/commit/50e5938147f7bb2594b9a142e8087c6e555f4979)), closes [#254](https://www.github.com/googleapis/java-storage/issues/254) + +## [1.107.0](https://www.github.com/googleapis/java-storage/compare/v1.106.0...v1.107.0) (2020-04-14) + + +### Bug 
Fixes + +* Blob API Doc is confusing ([#233](https://www.github.com/googleapis/java-storage/issues/233)) ([b5208b8](https://www.github.com/googleapis/java-storage/commit/b5208b87e5469bfdf684bd5f250921be99a59ac8)) +* Blob.downloadTo() methods do not wrap RetryHelper$RetryHelperException ([#218](https://www.github.com/googleapis/java-storage/issues/218)) ([5599f29](https://www.github.com/googleapis/java-storage/commit/5599f299018cb363d600d4e39e35d2657b74f5bc)) +* implementations of FromHexString() for md5 and crc32c ([#246](https://www.github.com/googleapis/java-storage/issues/246)) ([c9b23b3](https://www.github.com/googleapis/java-storage/commit/c9b23b36874211681ea323ef89a69316438924af)) +* storage-client-lib-docs to right location ([#213](https://www.github.com/googleapis/java-storage/issues/213)) ([133d137](https://www.github.com/googleapis/java-storage/commit/133d1377781fd6bdc58dd4f494a75ec1d7b9e530)) +* surface storage interface expectations correctly. ([#241](https://www.github.com/googleapis/java-storage/issues/241)) ([130a641](https://www.github.com/googleapis/java-storage/commit/130a6413abbc1eacd0ee5c10dbbba699e1f528ea)) +* throw io exception instead of storage exception ([#229](https://www.github.com/googleapis/java-storage/issues/229)) ([4d42a4e](https://www.github.com/googleapis/java-storage/commit/4d42a4eb1feb2afc6a6a9f3a3797b33f33f50900)) + + +### Reverts + +* Revert "feat: add upload functionality (#214)" (#224) ([e87c731](https://www.github.com/googleapis/java-storage/commit/e87c7319c610454c9e7e052d0a4a4e7454e4d9a4)), closes [#214](https://www.github.com/googleapis/java-storage/issues/214) [#224](https://www.github.com/googleapis/java-storage/issues/224) +* grpc version update ([#248](https://www.github.com/googleapis/java-storage/issues/248)) ([0f6703e](https://www.github.com/googleapis/java-storage/commit/0f6703ea2d8374667728ebcb4c398c6681280c58)) + + +### Dependencies + +* update conformance test dep 
([#210](https://www.github.com/googleapis/java-storage/issues/210)) ([010c112](https://www.github.com/googleapis/java-storage/commit/010c1128761d9c74ba1af33bc34e9264f34b8c80)) +* update core dependencies ([#182](https://www.github.com/googleapis/java-storage/issues/182)) ([3f0c59c](https://www.github.com/googleapis/java-storage/commit/3f0c59c18ecfd844f718346768dc274a9e2f131d)) +* update core dependencies to v1.93.4 ([#231](https://www.github.com/googleapis/java-storage/issues/231)) ([1bb5787](https://www.github.com/googleapis/java-storage/commit/1bb578710148bab21c978e31b00608f7f9770128)) +* update dependency com.google.api:api-common to v1.9.0 ([#209](https://www.github.com/googleapis/java-storage/issues/209)) ([789ceaa](https://www.github.com/googleapis/java-storage/commit/789ceaa2be6163f85f483637205191e38029e0c2)) +* update dependency com.google.api.grpc:grpc-google-cloud-kms-v1 to v0.85.0 ([#222](https://www.github.com/googleapis/java-storage/issues/222)) ([03eace6](https://www.github.com/googleapis/java-storage/commit/03eace664dd13164c1db68b4895185d318d13d64)) +* update dependency com.google.api.grpc:proto-google-cloud-kms-v1 to v0.85.0 ([#223](https://www.github.com/googleapis/java-storage/issues/223)) ([aaf6a17](https://www.github.com/googleapis/java-storage/commit/aaf6a1728a9dd7e0bde1b6f52dd628c020cb73d3)) +* update dependency com.google.apis:google-api-services-storage to v1-rev20200326-1.30.9 ([#239](https://www.github.com/googleapis/java-storage/issues/239)) ([b9d0a70](https://www.github.com/googleapis/java-storage/commit/b9d0a70c2a9ca1febafd1c1b8699c25e9e30e9b2)) +* update dependency com.google.cloud.samples:shared-configuration to v1.0.14 ([#207](https://www.github.com/googleapis/java-storage/issues/207)) ([be74072](https://www.github.com/googleapis/java-storage/commit/be74072662f2e3a99e54ee3d3feff66cb39032b2)) +* update dependency com.google.guava:guava to v29 ([#240](https://www.github.com/googleapis/java-storage/issues/240)) 
([7824c15](https://www.github.com/googleapis/java-storage/commit/7824c15ab38ad89111c3eb9e77a499479a62742b)) +* update dependency org.threeten:threetenbp to v1.4.2 ([#200](https://www.github.com/googleapis/java-storage/issues/200)) ([84faad1](https://www.github.com/googleapis/java-storage/commit/84faad1a854c3a189d2997a121a8753988213f90)) +* update dependency org.threeten:threetenbp to v1.4.3 ([#228](https://www.github.com/googleapis/java-storage/issues/228)) ([be40a70](https://www.github.com/googleapis/java-storage/commit/be40a70fbe2d1556d26c7983c5ad62535ce6dfbd)) + + +### Documentation + +* clarify documentation on date formats ([#196](https://www.github.com/googleapis/java-storage/issues/196)) ([9b4af58](https://www.github.com/googleapis/java-storage/commit/9b4af5870ef38cae4e92b60a2f8e6efd3e93d06d)), closes [/github.com/googleapis/google-http-java-client/blob/master/google-http-client/src/main/java/com/google/api/client/util/DateTime.java#L53](https://www.github.com/googleapis//github.com/googleapis/google-http-java-client/blob/master/google-http-client/src/main/java/com/google/api/client/util/DateTime.java/issues/L53) + +## [1.106.0](https://www.github.com/googleapis/java-storage/compare/v1.105.2...v1.106.0) (2020-03-17) + + +### Bug Fixes + +* rely on google core for SSLException's ([#188](https://www.github.com/googleapis/java-storage/issues/188)) ([2581f3c](https://www.github.com/googleapis/java-storage/commit/2581f3cfff88ee6a1688ddb881baa30d9967b0c3)) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-storage to v1-rev20200226-1.30.9 ([#189](https://www.github.com/googleapis/java-storage/issues/189)) ([b61a820](https://www.github.com/googleapis/java-storage/commit/b61a820a5de4266cfacb76330977962b1940b1e5)) + +### [1.105.2](https://www.github.com/googleapis/java-storage/compare/v1.105.1...v1.105.2) (2020-03-13) + + +### Bug Fixes + +* connection closed prematurely in BlobReadChannel & ConnectionReset 
([#173](https://www.github.com/googleapis/java-storage/issues/173)) ([27bccda](https://www.github.com/googleapis/java-storage/commit/27bccda384da4a7b877b371fbaecc794d6304fbf)) + + +### Dependencies + +* update core dependencies ([#171](https://www.github.com/googleapis/java-storage/issues/171)) ([ef5f2c6](https://www.github.com/googleapis/java-storage/commit/ef5f2c6e5079debe8f7f37c3d2c501aac3dc82a6)) + +### [1.105.1](https://www.github.com/googleapis/java-storage/compare/v1.105.0...v1.105.1) (2020-03-09) + + +### Bug Fixes + +* use %s instead of %d format specifier in checkArgument ([#163](https://www.github.com/googleapis/java-storage/issues/163)) ([ee16197](https://www.github.com/googleapis/java-storage/commit/ee16197d784de167b3ce32eaacbb89d776ce3211)) + + +### Dependencies + +* update core dependencies to v1.93.1 ([#161](https://www.github.com/googleapis/java-storage/issues/161)) ([960572f](https://www.github.com/googleapis/java-storage/commit/960572f047ae94e69046b7a59cf9d0e71c6f2dc0)) +* update dependency com.google.api-client:google-api-client to v1.30.9 ([#154](https://www.github.com/googleapis/java-storage/issues/154)) ([84dfab9](https://www.github.com/googleapis/java-storage/commit/84dfab9a89d8cbe2c22dd9dea7b05ddcc7b3eb62)) +* update dependency com.google.apis:google-api-services-storage to v1-rev20191127-1.30.9 ([#164](https://www.github.com/googleapis/java-storage/issues/164)) ([d9ba7c7](https://www.github.com/googleapis/java-storage/commit/d9ba7c785e280c320a5a65cf3837dbca4c7293b7)) +* update dependency com.google.cloud:google-cloud-conformance-tests to v0.0.7 ([#160](https://www.github.com/googleapis/java-storage/issues/160)) ([cbf8082](https://www.github.com/googleapis/java-storage/commit/cbf8082891951966e83315666fd83b58f7ddc0d7)) +* update dependency org.apache.httpcomponents:httpclient to v4.5.12 ([#168](https://www.github.com/googleapis/java-storage/issues/168)) 
([45b3992](https://www.github.com/googleapis/java-storage/commit/45b39920cfef0c44e2f2ebf1efb94f7502fddd00)) + +## [1.105.0](https://www.github.com/googleapis/java-storage/compare/v1.104.0...v1.105.0) (2020-02-28) + + +### Features + +* add IAM Conditions support ([#120](https://www.github.com/googleapis/java-storage/issues/120)) ([8256f6d](https://www.github.com/googleapis/java-storage/commit/8256f6d9b479b2fb3c76f887325cb37b051e1654)) +* examples of creating a signed url for a blob with generation ([#140](https://www.github.com/googleapis/java-storage/issues/140)) ([420212a](https://www.github.com/googleapis/java-storage/commit/420212a71f675fc1823a7bfdd6a1c5325f17979f)) + + +### Dependencies + +* update core dependencies to v1.93.0 ([#153](https://www.github.com/googleapis/java-storage/issues/153)) ([836a2e7](https://www.github.com/googleapis/java-storage/commit/836a2e746011de5f10b28911388b508fef230d84)) +* update dependency com.google.api:gax-bom to v1.54.0 ([#152](https://www.github.com/googleapis/java-storage/issues/152)) ([e86051f](https://www.github.com/googleapis/java-storage/commit/e86051f45931269f62c8a372509367cb5e3be009)) +* update dependency com.google.cloud:google-cloud-conformance-tests to v0.0.6 ([#151](https://www.github.com/googleapis/java-storage/issues/151)) ([2627a93](https://www.github.com/googleapis/java-storage/commit/2627a938e8b2d295fcd46eebe6b001cbb2ba6784)) +* update dependency io.grpc:grpc-bom to v1.27.2 ([e56f8ce](https://www.github.com/googleapis/java-storage/commit/e56f8cefdf7a710b4d74004639af3e4ff086fd1f)) + +## [1.104.0](https://www.github.com/googleapis/java-storage/compare/v1.103.1...v1.104.0) (2020-02-19) + + +### Features + +* add delimiter BlobListOption ([#102](https://www.github.com/googleapis/java-storage/issues/102)) ([b30a675](https://www.github.com/googleapis/java-storage/commit/b30a6757de84e2ceebc9f28817bcfa5c34c20a30)) +* disableGzipContent option on create with InputStream 
([#36](https://www.github.com/googleapis/java-storage/issues/36)) ([#82](https://www.github.com/googleapis/java-storage/issues/82)) ([65d3739](https://www.github.com/googleapis/java-storage/commit/65d3739567427e49ca4abfd39702fd4022ee8e3c)) + + +### Bug Fixes + +* mismatch chunksize ([#135](https://www.github.com/googleapis/java-storage/issues/135)) ([5da3e8d](https://www.github.com/googleapis/java-storage/commit/5da3e8d3736eed0151e0f564a6d164fb5b429450)) + + +### Dependencies + +* update dependency com.google.api-client:google-api-client to v1.30.8 ([#111](https://www.github.com/googleapis/java-storage/issues/111)) ([47b1495](https://www.github.com/googleapis/java-storage/commit/47b149509478d211ff103419e695476f42b814f0)) +* update dependency com.google.api.grpc:grpc-google-cloud-kms-v1 to v0.83.1 ([#118](https://www.github.com/googleapis/java-storage/issues/118)) ([753d870](https://www.github.com/googleapis/java-storage/commit/753d8700175bdbb2d4c4a51d42399cb400017520)) +* update dependency com.google.api.grpc:proto-google-cloud-kms-v1 to v0.83.1 ([#119](https://www.github.com/googleapis/java-storage/issues/119)) ([2c8b9ec](https://www.github.com/googleapis/java-storage/commit/2c8b9ecd527f80397d5921c77aa72bf91fe0bd3c)) +* update dependency com.google.http-client:google-http-client-bom to v1.34.2 ([#131](https://www.github.com/googleapis/java-storage/issues/131)) ([fce5b33](https://www.github.com/googleapis/java-storage/commit/fce5b3335bd1d480eb82dcbccf71afc779a1fb25)) +* update dependency com.google.protobuf:protobuf-bom to v3.11.3 ([#113](https://www.github.com/googleapis/java-storage/issues/113)) ([044de39](https://www.github.com/googleapis/java-storage/commit/044de393b6523c68eb63c8d1e160288e0c4dc2a0)) +* update dependency com.google.protobuf:protobuf-bom to v3.11.4 ([#134](https://www.github.com/googleapis/java-storage/issues/134)) ([1af989e](https://www.github.com/googleapis/java-storage/commit/1af989e1d5745268bfca3d9ffd1ad8e331d94589)) +* update dependency 
io.opencensus:opencensus-api to v0.25.0 ([#129](https://www.github.com/googleapis/java-storage/issues/129)) ([3809576](https://www.github.com/googleapis/java-storage/commit/3809576429a27c13e0c65d986e5306f8aa50bb1a)) +* update to gRPC 1.27.0 ([#105](https://www.github.com/googleapis/java-storage/issues/105)) ([64f34bd](https://www.github.com/googleapis/java-storage/commit/64f34bd7a5735aaddecc6a1f76db4f35a320e305)) + +### [1.103.1](https://www.github.com/googleapis/java-storage/compare/v1.103.0...v1.103.1) (2020-01-27) + + +### Bug Fixes + +* make the getStorageClass() method public ([#22](https://www.github.com/googleapis/java-storage/issues/22)) ([7fb1f6c](https://www.github.com/googleapis/java-storage/commit/7fb1f6c2cb8c5d6ebbf9dcaccf1218d2a0aebb09)) + + +### Dependencies + +* update dependency com.google.truth:truth to v1.0.1 ([#60](https://www.github.com/googleapis/java-storage/issues/60)) ([3cedc8f](https://www.github.com/googleapis/java-storage/commit/3cedc8f7fcac0d87ca121197895fc7b36fc8f6d7)) +* update dependency org.threeten:threetenbp to v1.4.1 ([4c0f03a](https://www.github.com/googleapis/java-storage/commit/4c0f03a3cc22eed03f002bedf11b3a40e57c709e)) + +## [1.103.0](https://www.github.com/googleapis/java-storage/compare/1.102.0...v1.103.0) (2020-01-06) + + +### Features + +* add support for archive storage class ([#19](https://www.github.com/googleapis/java-storage/issues/19)) ([a3fbd67](https://www.github.com/googleapis/java-storage/commit/a3fbd67fb0789849922eb7e7b08dc33f3ea9efae)) +* make repo releasable ([#3](https://www.github.com/googleapis/java-storage/issues/3)) ([39ff6f6](https://www.github.com/googleapis/java-storage/commit/39ff6f67dc785d3cae070756ca502df749ac9f34)) + + +### Dependencies + +* update core transport dependencies ([#16](https://www.github.com/googleapis/java-storage/issues/16)) ([d0a82ab](https://www.github.com/googleapis/java-storage/commit/d0a82ab2b705246923a89a2b826ac1d6d1adba70)) +* update dependency 
com.google.apis:google-api-services-storage to v1-rev20191011-1.30.3 ([#7](https://www.github.com/googleapis/java-storage/issues/7)) ([5ac5b8a](https://www.github.com/googleapis/java-storage/commit/5ac5b8a802e5e6814ba629b0fdb238d3b337756b)) diff --git a/java-storage/README.md b/java-storage/README.md new file mode 100644 index 000000000000..4dcf0bc732db --- /dev/null +++ b/java-storage/README.md @@ -0,0 +1,495 @@ +# Google Cloud Storage Client for Java + +Java idiomatic client for [Cloud Storage][product-docs]. + +[![Maven][maven-version-image]][maven-version-link] +![Stability][stability-image] + +- [Product Documentation][product-docs] +- [Client Library Documentation][javadocs] + + +## Quickstart + +If you are using Maven with [BOM][libraries-bom], add this to your pom.xml file: + +```xml + + + + com.google.cloud + libraries-bom + 26.76.0 + pom + import + + + + + + + com.google.cloud + google-cloud-storage + + + com.google.cloud + google-cloud-storage-control + + + +``` + +If you are using Maven without the BOM, add this to your dependencies: + + +```xml + + com.google.cloud + google-cloud-storage + 2.63.0 + + + com.google.cloud + google-cloud-storage-control + 2.63.0 + + +``` + +If you are using Gradle 5.x or later, add this to your dependencies: + +```Groovy +implementation platform('com.google.cloud:libraries-bom:26.77.0') + +implementation 'com.google.cloud:google-cloud-storage' +``` +If you are using Gradle without BOM, add this to your dependencies: + +```Groovy +implementation 'com.google.cloud:google-cloud-storage:2.64.0' +``` + +If you are using SBT, add this to your dependencies: + +```Scala +libraryDependencies += "com.google.cloud" % "google-cloud-storage" % "2.64.0" +``` + +## Authentication + +See the [Authentication][authentication] section in the base directory's README. 
+ +## Authorization + +The client application making API calls must be granted [authorization scopes][auth-scopes] required for the desired Cloud Storage APIs, and the authenticated principal must have the [IAM role(s)][predefined-iam-roles] required to access GCP resources using the Cloud Storage API calls. + +## Getting Started + +### Prerequisites + +You will need a [Google Cloud Platform Console][developer-console] project with the Cloud Storage [API enabled][enable-api]. +You will need to [enable billing][enable-billing] to use Google Cloud Storage. +[Follow these instructions][create-project] to get your project set up. You will also need to set up the local development environment by +[installing the Google Cloud Command Line Interface][cloud-cli] and running the following commands in command line: +`gcloud auth login` and `gcloud config set project [YOUR PROJECT ID]`. + +### Installation and setup + +You'll need to obtain the `google-cloud-storage` library. See the [Quickstart](#quickstart) section +to add `google-cloud-storage` as a dependency in your code. + +## About Cloud Storage + + +[Cloud Storage][product-docs] is a durable and highly available object storage service. Google Cloud Storage is almost infinitely scalable and guarantees consistency: when a write succeeds, the latest copy of the object will be returned to any GET, globally. + +See the [Cloud Storage client library docs][javadocs] to learn how to +use this Cloud Storage Client Library. + + +#### Creating an authorized service object + +To make authenticated requests to Google Cloud Storage, you must create a service object with credentials. You can +then make API calls by calling methods on the Storage service object. The simplest way to authenticate is to use +[Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials). 
+These credentials are automatically inferred from your environment, so you only need the following code to create your +service object: + +```java +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +Storage storage = StorageOptions.getDefaultInstance().getService(); +``` + +For other authentication options, see the [Authentication](https://github.com/googleapis/google-cloud-java#authentication) page in Google Cloud Java. + +#### Storing data +Stored objects are called "blobs" in `google-cloud` and are organized into containers called "buckets". `Blob`, a +subclass of `BlobInfo`, adds a layer of service-related functionality over `BlobInfo`. Similarly, `Bucket` adds a +layer of service-related functionality over `BucketInfo`. In this code snippet, we will create a new bucket and +upload a blob to that bucket. + +Add the following imports at the top of your file: + +```java +import static java.nio.charset.StandardCharsets.UTF_8; + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +``` + +Then add the following code to create a bucket and upload a simple blob. + +*Important: Bucket names have to be globally unique (among all users of Cloud Storage). If you choose a bucket name +that already exists, you'll get a helpful error message telling you to choose another name. In the code below, replace +"my_unique_bucket" with a unique bucket name. 
See more about naming rules +[here](https://cloud.google.com/storage/docs/bucket-naming?hl=en#requirements).* + +```java +// Create a bucket +String bucketName = "my_unique_bucket"; // Change this to something unique +Bucket bucket = storage.create(BucketInfo.of(bucketName)); + +// Upload a blob to the newly created bucket +BlobId blobId = BlobId.of(bucketName, "my_blob_name"); +BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build(); +Blob blob = storage.create(blobInfo, "a simple blob".getBytes(UTF_8)); +``` + +A complete example for creating a blob can be found at +[UploadObject.java](https://github.com/googleapis/java-storage/blob/main/samples/snippets/src/main/java/com/example/storage/object/UploadObject.java). + +At this point, you will be able to see your newly created bucket and blob on the Google Developers Console. + +#### Retrieving data +Now that we have content uploaded to the server, we can see how to read data from the server. Add the following line +to your program to get back the blob we uploaded. + +```java +BlobId blobId = BlobId.of(bucketName, "my_blob_name"); +byte[] content = storage.readAllBytes(blobId); +String contentString = new String(content, UTF_8); +``` + +A complete example for accessing blobs can be found at +[DownloadObject.java](https://github.com/googleapis/java-storage/blob/main/samples/snippets/src/main/java/com/example/storage/object/DownloadObject.java). + +#### Updating data +Another thing we may want to do is update a blob. The following snippet shows how to update a Storage blob if it exists. 
+ +``` java +BlobId blobId = BlobId.of(bucketName, "my_blob_name"); +Blob blob = storage.get(blobId); +if (blob != null) { + byte[] prevContent = blob.getContent(); + System.out.println(new String(prevContent, UTF_8)); + WritableByteChannel channel = blob.writer(); + channel.write(ByteBuffer.wrap("Updated content".getBytes(UTF_8))); + channel.close(); +} +``` + +#### Listing buckets and contents of buckets +Suppose that you've added more buckets and blobs, and now you want to see the names of your buckets and the contents +of each one. Add the following code to list all your buckets and all the blobs inside each bucket. + +```java +// List all your buckets +System.out.println("My buckets:"); +for (Bucket bucket : storage.list().iterateAll()) { + System.out.println(bucket); + + // List all blobs in the bucket + System.out.println("Blobs in the bucket:"); + for (Blob blob : bucket.list().iterateAll()) { + System.out.println(blob); + } +} +``` + +#### Complete source code + +See [ListObjects.java](https://github.com/googleapis/java-storage/blob/main/samples/snippets/src/main/java/com/example/storage/object/ListObjects.java) for a complete example. + +### Example Applications + +- [`Bookshelf`](https://github.com/GoogleCloudPlatform/getting-started-java/tree/main/bookshelf) - An App Engine application that manages a virtual bookshelf. + - This app uses `google-cloud` to interface with Cloud Datastore and Cloud Storage. It also uses Cloud SQL, another Google Cloud Platform service. +- [`Flexible Environment/Storage example`](https://github.com/GoogleCloudPlatform/java-docs-samples/tree/main/flexible/cloudstorage) - An app that uploads files to a public Cloud Storage bucket on the App Engine Flexible Environment runtime. + + + + +## Samples + +Samples are in the [`samples/`](https://github.com/googleapis/google-cloud-java/tree/main/samples) directory. 
+ +| Sample | Source Code | Try it | +| --------------------------- | --------------------------------- | ------ | +| Configure Retries | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/ConfigureRetries.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/ConfigureRetries.java) | +| Generate Signed Post Policy V4 | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/GenerateSignedPostPolicyV4.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/GenerateSignedPostPolicyV4.java) | +| Get Service Account | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/GetServiceAccount.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/GetServiceAccount.java) | +| Quickstart Grpc Dp Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/QuickstartGrpcDpSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/QuickstartGrpcDpSample.java) | +| Quickstart Grpc Sample | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/QuickstartGrpcSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/QuickstartGrpcSample.java) | +| Quickstart Open Telemetry Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/QuickstartOpenTelemetrySample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/QuickstartOpenTelemetrySample.java) | +| Quickstart Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/QuickstartSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/QuickstartSample.java) | +| Quickstart Storage Control Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/QuickstartStorageControlSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/QuickstartStorageControlSample.java) | +| Add Bucket Default Owner | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketDefaultOwner.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/AddBucketDefaultOwner.java) | +| Add Bucket Iam Conditional Binding | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketIamConditionalBinding.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/AddBucketIamConditionalBinding.java) | +| Add Bucket Iam Member | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketIamMember.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/AddBucketIamMember.java) | +| Add Bucket Label | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketLabel.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/AddBucketLabel.java) | +| Add Bucket Owner | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketOwner.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/AddBucketOwner.java) | +| Change Default Storage Class | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/ChangeDefaultStorageClass.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/ChangeDefaultStorageClass.java) | +| Configure Bucket Cors | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/ConfigureBucketCors.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/ConfigureBucketCors.java) | +| Create Bucket | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucket.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/CreateBucket.java) | +| Create Bucket Dual Region | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketDualRegion.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketDualRegion.java) | 
+| Create Bucket Pub Sub Notification | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketPubSubNotification.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketPubSubNotification.java) | +| Create Bucket With Object Retention | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithObjectRetention.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithObjectRetention.java) | +| Create Bucket With Storage Class And Location | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithStorageClassAndLocation.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithStorageClassAndLocation.java) | +| Create Bucket With Turbo Replication | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithTurboReplication.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithTurboReplication.java) | +| Delete Bucket | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/DeleteBucket.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/DeleteBucket.java) | +| Delete Bucket Pub Sub Notification | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/DeleteBucketPubSubNotification.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/DeleteBucketPubSubNotification.java) | +| Disable Bucket Versioning | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/DisableBucketVersioning.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/DisableBucketVersioning.java) | +| Disable Default Event Based Hold | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/DisableDefaultEventBasedHold.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/DisableDefaultEventBasedHold.java) | +| Disable Lifecycle Management | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/DisableLifecycleManagement.java) | [![Open in 
Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/DisableLifecycleManagement.java) | +| Disable Requester Pays | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/DisableRequesterPays.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/DisableRequesterPays.java) | +| Disable Soft Delete | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/DisableSoftDelete.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/DisableSoftDelete.java) | +| Disable Uniform Bucket Level Access | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/DisableUniformBucketLevelAccess.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/DisableUniformBucketLevelAccess.java) | +| Enable Bucket Versioning | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/EnableBucketVersioning.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/EnableBucketVersioning.java) | +| Enable Default Event Based Hold | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/EnableDefaultEventBasedHold.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/EnableDefaultEventBasedHold.java) | +| Enable Lifecycle Management | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/EnableLifecycleManagement.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/EnableLifecycleManagement.java) | +| Enable Requester Pays | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/EnableRequesterPays.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/EnableRequesterPays.java) | +| Enable Uniform Bucket Level Access | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/EnableUniformBucketLevelAccess.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/EnableUniformBucketLevelAccess.java) | +| Get Bucket Autoclass | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/GetBucketAutoclass.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/GetBucketAutoclass.java) | +| Get Bucket Metadata | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/GetBucketMetadata.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/GetBucketMetadata.java) | +| Get Bucket Rpo | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/GetBucketRpo.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/GetBucketRpo.java) | +| Get Default Event Based Hold | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/GetDefaultEventBasedHold.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/GetDefaultEventBasedHold.java) | +| 
Get Public Access Prevention | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/GetPublicAccessPrevention.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/GetPublicAccessPrevention.java) | +| Get Requester Pays Status | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/GetRequesterPaysStatus.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/GetRequesterPaysStatus.java) | +| Get Retention Policy | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/GetRetentionPolicy.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/GetRetentionPolicy.java) | +| Get Soft Delete Policy | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/GetSoftDeletePolicy.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/GetSoftDeletePolicy.java) | +| Get Uniform Bucket Level Access | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/GetUniformBucketLevelAccess.java) | [![Open in 
Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/GetUniformBucketLevelAccess.java) | +| List Bucket Iam Members | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/ListBucketIamMembers.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/ListBucketIamMembers.java) | +| List Buckets | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/ListBuckets.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/ListBuckets.java) | +| List Buckets With Partial Success | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/ListBucketsWithPartialSuccess.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/ListBucketsWithPartialSuccess.java) | +| List Pub Sub Notifications | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/ListPubSubNotifications.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/ListPubSubNotifications.java) | +| Lock Retention Policy | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/LockRetentionPolicy.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/LockRetentionPolicy.java) | +| Make Bucket Public | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/MakeBucketPublic.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/MakeBucketPublic.java) | +| Print Bucket Acl | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/PrintBucketAcl.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/PrintBucketAcl.java) | +| Print Bucket Acl Filter By User | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/PrintBucketAclFilterByUser.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/PrintBucketAclFilterByUser.java) 
| +| Print Pub Sub Notification | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/PrintPubSubNotification.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/PrintPubSubNotification.java) | +| Remove Bucket Cors | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketCors.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketCors.java) | +| Remove Bucket Default Kms Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketDefaultKmsKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketDefaultKmsKey.java) | +| Remove Bucket Default Owner | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketDefaultOwner.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketDefaultOwner.java) | +| Remove Bucket Iam Conditional Binding | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketIamConditionalBinding.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketIamConditionalBinding.java) | +| Remove Bucket Iam Member | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketIamMember.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketIamMember.java) | +| Remove Bucket Label | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketLabel.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketLabel.java) | +| Remove Bucket Owner | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketOwner.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketOwner.java) | +| Remove Retention Policy | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/RemoveRetentionPolicy.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/RemoveRetentionPolicy.java) | +| Set Async Turbo Rpo | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/SetAsyncTurboRpo.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/SetAsyncTurboRpo.java) | +| Set Bucket Autoclass | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/SetBucketAutoclass.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/SetBucketAutoclass.java) | +| Set Bucket Default Kms Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/SetBucketDefaultKmsKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/SetBucketDefaultKmsKey.java) | +| Set Bucket Website Info | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/SetBucketWebsiteInfo.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/SetBucketWebsiteInfo.java) | +| Set Client Endpoint | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/SetClientEndpoint.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/SetClientEndpoint.java) | +| Set Default Rpo | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/SetDefaultRpo.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/SetDefaultRpo.java) | +| Set Public Access Prevention Enforced | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/SetPublicAccessPreventionEnforced.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/SetPublicAccessPreventionEnforced.java) | +| Set Public Access Prevention Inherited | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/SetPublicAccessPreventionInherited.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/SetPublicAccessPreventionInherited.java) | +| Set Retention Policy | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/SetRetentionPolicy.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/SetRetentionPolicy.java) | +| Set Soft Delete Policy | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/bucket/SetSoftDeletePolicy.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/bucket/SetSoftDeletePolicy.java) | +| Anywhere Cache Create | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheCreate.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheCreate.java) | +| Anywhere Cache Disable | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheDisable.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheDisable.java) | +| Anywhere Cache Get | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheGet.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheGet.java) | +| Anywhere Cache List | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheList.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheList.java) | +| Anywhere Cache Pause | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCachePause.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCachePause.java) | +| Anywhere Cache Resume | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheResume.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheResume.java) | +| Anywhere Cache Update | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheUpdate.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheUpdate.java) | +| Create Folder | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/control/v2/CreateFolder.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/control/v2/CreateFolder.java) | +| Create Hierarchical Namespace Bucket | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/control/v2/CreateHierarchicalNamespaceBucket.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/control/v2/CreateHierarchicalNamespaceBucket.java) | +| Delete Folder | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/control/v2/DeleteFolder.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/control/v2/DeleteFolder.java) | +| Get Folder | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/control/v2/GetFolder.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/control/v2/GetFolder.java) | +| List Folders | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/control/v2/ListFolders.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/control/v2/ListFolders.java) | +| Rename Folder | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/control/v2/RenameFolder.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/control/v2/RenameFolder.java) | +| Activate Hmac Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/hmac/ActivateHmacKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/hmac/ActivateHmacKey.java) | +| Create Hmac Key | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/hmac/CreateHmacKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/hmac/CreateHmacKey.java) | +| Deactivate Hmac Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/hmac/DeactivateHmacKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/hmac/DeactivateHmacKey.java) | +| Delete Hmac Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/hmac/DeleteHmacKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/hmac/DeleteHmacKey.java) | +| Get Hmac Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/hmac/GetHmacKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/hmac/GetHmacKey.java) | +| List Hmac Keys | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/hmac/ListHmacKeys.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/hmac/ListHmacKeys.java) | +| Create Managed Folder | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/managedfolders/CreateManagedFolder.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/managedfolders/CreateManagedFolder.java) | +| Delete Managed Folder | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/managedfolders/DeleteManagedFolder.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/managedfolders/DeleteManagedFolder.java) | +| Get Managed Folder | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/managedfolders/GetManagedFolder.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/managedfolders/GetManagedFolder.java) | +| List Managed Folders | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/managedfolders/ListManagedFolders.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/managedfolders/ListManagedFolders.java) | +| Abort Multipart Upload | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/multipartupload/AbortMultipartUpload.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/multipartupload/AbortMultipartUpload.java) | +| Complete Multipart Upload | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/multipartupload/CompleteMultipartUpload.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/multipartupload/CompleteMultipartUpload.java) | +| Create Multipart Upload | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/multipartupload/CreateMultipartUpload.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/multipartupload/CreateMultipartUpload.java) | +| List Parts | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/multipartupload/ListParts.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/multipartupload/ListParts.java) | +| Upload Part | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/multipartupload/UploadPart.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/multipartupload/UploadPart.java) | +| Add Blob Owner | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/AddBlobOwner.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/AddBlobOwner.java) | +| Atomic Move Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/AtomicMoveObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/AtomicMoveObject.java) | +| Batch Set Object Metadata | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/BatchSetObjectMetadata.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/BatchSetObjectMetadata.java) | +| Change Object Csek To Kms | 
[source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/ChangeObjectCsekToKms.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/ChangeObjectCsekToKms.java) | +| Change Object Storage Class | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/ChangeObjectStorageClass.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/ChangeObjectStorageClass.java) | +| Compose Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/ComposeObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/ComposeObject.java) | +| Copy Delete Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/CopyDeleteObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/CopyDeleteObject.java) | +| Copy Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/CopyObject.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/CopyObject.java) | +| Copy Old Version Of Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/CopyOldVersionOfObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/CopyOldVersionOfObject.java) | +| Create And Write Appendable Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/CreateAndWriteAppendableObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/CreateAndWriteAppendableObject.java) | +| Delete Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/DeleteObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/DeleteObject.java) | +| Delete Old Version Of Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/DeleteOldVersionOfObject.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/DeleteOldVersionOfObject.java) | +| Download Byte Range | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/DownloadByteRange.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/DownloadByteRange.java) | +| Download Encrypted Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/DownloadEncryptedObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/DownloadEncryptedObject.java) | +| Download Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/DownloadObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/DownloadObject.java) | +| Download Object Into Memory | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/DownloadObjectIntoMemory.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/DownloadObjectIntoMemory.java) | +| Download Public Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/DownloadPublicObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/DownloadPublicObject.java) | +| Download Requester Pays Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/DownloadRequesterPaysObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/DownloadRequesterPaysObject.java) | +| Finalize Appendable Object Upload | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/FinalizeAppendableObjectUpload.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/FinalizeAppendableObjectUpload.java) | +| Generate Encryption Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/GenerateEncryptionKey.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/GenerateEncryptionKey.java) | +| Generate V4 Get Object Signed Url | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/GenerateV4GetObjectSignedUrl.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/GenerateV4GetObjectSignedUrl.java) | +| Generate V4 Put Object Signed Url | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/GenerateV4PutObjectSignedUrl.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/GenerateV4PutObjectSignedUrl.java) | +| Get Object Contexts | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/GetObjectContexts.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/GetObjectContexts.java) | +| Get Object Metadata | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/GetObjectMetadata.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/GetObjectMetadata.java) | +| List Object Contexts | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/ListObjectContexts.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/ListObjectContexts.java) | +| List Objects | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/ListObjects.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/ListObjects.java) | +| List Objects With Old Versions | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/ListObjectsWithOldVersions.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/ListObjectsWithOldVersions.java) | +| List Objects With Prefix | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/ListObjectsWithPrefix.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/ListObjectsWithPrefix.java) | 
+| List Soft Deleted Objects | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/ListSoftDeletedObjects.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/ListSoftDeletedObjects.java) | +| List Soft Deleted Versions Of Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/ListSoftDeletedVersionsOfObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/ListSoftDeletedVersionsOfObject.java) | +| Make Object Public | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/MakeObjectPublic.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/MakeObjectPublic.java) | +| Open Multiple Objects Ranged Read | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/OpenMultipleObjectsRangedRead.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/OpenMultipleObjectsRangedRead.java) | +| Open Object Multiple Ranged Read | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/OpenObjectMultipleRangedRead.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/OpenObjectMultipleRangedRead.java) | +| Open Object Read Full Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/OpenObjectReadFullObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/OpenObjectReadFullObject.java) | +| Open Object Single Ranged Read | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/OpenObjectSingleRangedRead.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/OpenObjectSingleRangedRead.java) | +| Pause And Resume Appendable Object Upload | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/PauseAndResumeAppendableObjectUpload.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/PauseAndResumeAppendableObjectUpload.java) | +| Print Blob Acl | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/PrintBlobAcl.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/PrintBlobAcl.java) | +| Print Blob Acl For User | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/PrintBlobAclForUser.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/PrintBlobAclForUser.java) | +| Read Appendable Object Tail | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/ReadAppendableObjectTail.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/ReadAppendableObjectTail.java) | +| Release Event Based Hold | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/ReleaseEventBasedHold.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/ReleaseEventBasedHold.java) | +| Release Temporary Hold | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/ReleaseTemporaryHold.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/ReleaseTemporaryHold.java) | +| Remove Blob Owner | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/RemoveBlobOwner.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/RemoveBlobOwner.java) | +| Restore Soft Deleted Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/RestoreSoftDeletedObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/RestoreSoftDeletedObject.java) | +| Rotate Object Encryption Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/RotateObjectEncryptionKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/RotateObjectEncryptionKey.java) | +| Set Event Based Hold | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/SetEventBasedHold.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/SetEventBasedHold.java) | +| Set Object Contexts | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/SetObjectContexts.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/SetObjectContexts.java) | +| Set Object Metadata | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/SetObjectMetadata.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/SetObjectMetadata.java) | +| Set Object Retention Policy | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/SetObjectRetentionPolicy.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/SetObjectRetentionPolicy.java) | +| Set Temporary Hold | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/SetTemporaryHold.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/SetTemporaryHold.java) | +| 
Stream Object Download | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/StreamObjectDownload.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/StreamObjectDownload.java) | +| Stream Object Upload | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/StreamObjectUpload.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/StreamObjectUpload.java) | +| Upload Encrypted Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/UploadEncryptedObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/UploadEncryptedObject.java) | +| Upload Kms Encrypted Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/UploadKmsEncryptedObject.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/UploadKmsEncryptedObject.java) | +| Upload Object | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/UploadObject.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/UploadObject.java) | +| Upload Object From Memory | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/object/UploadObjectFromMemory.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/object/UploadObjectFromMemory.java) | +| Allow Divide And Conquer Download | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/transfermanager/AllowDivideAndConquerDownload.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/transfermanager/AllowDivideAndConquerDownload.java) | +| Allow Parallel Composite Upload | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/transfermanager/AllowParallelCompositeUpload.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/transfermanager/AllowParallelCompositeUpload.java) | +| Download Bucket | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/transfermanager/DownloadBucket.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/transfermanager/DownloadBucket.java) | +| Download Many | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/transfermanager/DownloadMany.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/transfermanager/DownloadMany.java) | +| Upload Directory | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/transfermanager/UploadDirectory.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/transfermanager/UploadDirectory.java) | +| Upload Many | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/storage/transfermanager/UploadMany.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/storage/transfermanager/UploadMany.java) | + + + +## Troubleshooting + +To get help, follow the instructions in the [shared Troubleshooting document][troubleshooting]. + +## Transport + +Cloud Storage uses HTTP/JSON for the transport layer. + +## Supported Java Versions + +Java 8 or above is required for using this client. 
+ +Google's Java client libraries, +[Google Cloud Client Libraries][cloudlibs] +and +[Google Cloud API Libraries][apilibs], +follow the +[Oracle Java SE support roadmap][oracle] +(see the Oracle Java SE Product Releases section). + +### For new development + +In general, new feature development occurs with support for the lowest Java +LTS version covered by Oracle's Premier Support (which typically lasts 5 years +from initial General Availability). If the minimum required JVM for a given +library is changed, it is accompanied by a [semver][semver] major release. + +Java 11 and (in September 2021) Java 17 are the best choices for new +development. + +### Keeping production systems current + +Google tests its client libraries with all current LTS versions covered by +Oracle's Extended Support (which typically lasts 8 years from initial +General Availability). + +#### Legacy support + +Google's client libraries support legacy versions of Java runtimes with long +term stable libraries that don't receive feature updates on a best-efforts basis +as it may not be possible to backport all patches. + +Google provides updates on a best-efforts basis to apps that continue to use +Java 7, though apps might need to upgrade to current versions of the library +that support their JVM. + +#### Where to find specific information + +The latest versions and the supported Java versions are identified on +the individual GitHub repository `github.com/GoogleAPIs/java-SERVICENAME` +and on [google-cloud-java][g-c-j]. + +## Versioning + +This library follows [Semantic Versioning](http://semver.org/), but does update [Storage interface](src/main/java/com/google/cloud/storage/Storage.java) +to introduce new methods which can break your implementations if you implement this interface for testing purposes. + + + +## Contributing + + +Contributions to this library are always welcome and highly encouraged. + +See [CONTRIBUTING][contributing] for more information on how to get started. 
+ +Please note that this project is released with a Contributor Code of Conduct. By participating in +this project you agree to abide by its terms. See [Code of Conduct][code-of-conduct] for more +information. + + +## License + +Apache 2.0 - See [LICENSE][license] for more information. + +Java is a registered trademark of Oracle and/or its affiliates. + +[product-docs]: https://cloud.google.com/storage +[javadocs]: https://cloud.google.com/java/docs/reference/google-cloud-storage/latest/history +[stability-image]: https://img.shields.io/badge/stability-stable-green +[maven-version-image]: https://img.shields.io/maven-central/v/com.google.cloud/google-cloud-storage.svg +[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-storage/2.64.0 +[authentication]: https://github.com/googleapis/google-cloud-java#authentication +[auth-scopes]: https://developers.google.com/identity/protocols/oauth2/scopes +[predefined-iam-roles]: https://cloud.google.com/iam/docs/understanding-roles#predefined_roles +[iam-policy]: https://cloud.google.com/iam/docs/overview#cloud-iam-policy +[developer-console]: https://console.developers.google.com/ +[create-project]: https://cloud.google.com/resource-manager/docs/creating-managing-projects +[cloud-cli]: https://cloud.google.com/cli +[troubleshooting]: https://github.com/googleapis/google-cloud-java/blob/main/TROUBLESHOOTING.md +[contributing]: https://github.com/googleapis/google-cloud-java/blob/main/CONTRIBUTING.md +[code-of-conduct]: https://github.com/googleapis/google-cloud-java/blob/main/CODE_OF_CONDUCT.md#contributor-code-of-conduct +[license]: https://github.com/googleapis/google-cloud-java/blob/main/LICENSE +[enable-billing]: https://cloud.google.com/apis/docs/getting-started#enabling_billing +[enable-api]: https://console.cloud.google.com/flows/enableapi?apiid=storage.googleapis.com +[libraries-bom]: 
https://github.com/GoogleCloudPlatform/cloud-opensource-java/wiki/The-Google-Cloud-Platform-Libraries-BOM +[shell_img]: https://gstatic.com/cloudssh/images/open-btn.png + +[semver]: https://semver.org/ +[cloudlibs]: https://cloud.google.com/apis/docs/client-libraries-explained +[apilibs]: https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries +[oracle]: https://www.oracle.com/java/technologies/java-se-support-roadmap.html +[g-c-j]: http://github.com/googleapis/google-cloud-java diff --git a/java-storage/gapic-google-cloud-storage-v2/clirr-ignored-differences.xml b/java-storage/gapic-google-cloud-storage-v2/clirr-ignored-differences.xml new file mode 100644 index 000000000000..75f5fd52cb38 --- /dev/null +++ b/java-storage/gapic-google-cloud-storage-v2/clirr-ignored-differences.xml @@ -0,0 +1,86 @@ + + + + + + 7002 + com/google/storage/v2/StorageSettings + * *HttpJson*Builder() + + + 7002 + com/google/storage/v2/stub/StorageStubSettings + * *HttpJson*Builder() + + + 7002 + com/google/storage/v2/stub/StorageStubSettings + * defaultGrpcApiClientHeaderProviderBuilder() + + + + 8001 + com/google/storage/v2/stub/HttpJsonStorageCallableFactory + + + 8001 + com/google/storage/v2/stub/HttpJsonStorageStub + + + 8001 + com/google/storage/v2/StorageClient$ListHmacKeysFixedSizeCollection + + + 8001 + com/google/storage/v2/StorageClient$ListHmacKeysPage + + + 8001 + com/google/storage/v2/StorageClient$ListHmacKeysPagedResponse + + + + + 7002 + com/google/storage/v2/* + * *Notification*(*) + + + 7002 + com/google/storage/v2/stub/* + * *Notification*(*) + + + 7002 + com/google/storage/v2/* + * *Hmac*(*) + + + 7002 + com/google/storage/v2/stub/* + * *Hmac*(*) + + + 7002 + com/google/storage/v2/* + * *ServiceAccount*(*) + + + 7002 + com/google/storage/v2/stub/* + * *ServiceAccount*(*) + + + 8001 + com/google/storage/v2/StorageClient$ListNotification* + + + + + 7005 + com/google/storage/v2/StorageClient + * *(*Name*) + * *(*Name*) + + + diff --git 
a/java-storage/gapic-google-cloud-storage-v2/pom.xml b/java-storage/gapic-google-cloud-storage-v2/pom.xml new file mode 100644 index 000000000000..26952f846f9f --- /dev/null +++ b/java-storage/gapic-google-cloud-storage-v2/pom.xml @@ -0,0 +1,93 @@ + + 4.0.0 + com.google.api.grpc + gapic-google-cloud-storage-v2 + 2.64.1-SNAPSHOT + gapic-google-cloud-storage-v2 + GRPC library for gapic-google-cloud-storage-v2 + + com.google.cloud + google-cloud-storage-parent + 2.64.1-SNAPSHOT + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-cloud-storage-v2 + + + com.google.api.grpc + grpc-google-cloud-storage-v2 + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api + api-common + + + com.google.api + gax + + + com.google.api.grpc + proto-google-iam-v1 + + + + com.google.api + gax-grpc + + + com.google.guava + guava + + + junit + junit + + + + com.google.api + gax-grpc + testlib + test + + + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + diff --git a/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StorageClient.java b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StorageClient.java new file mode 100644 index 000000000000..fd46bdab8079 --- /dev/null +++ b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StorageClient.java @@ -0,0 +1,4279 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.v2; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.paging.AbstractFixedSizeCollection; +import com.google.api.gax.paging.AbstractPage; +import com.google.api.gax.paging.AbstractPagedListResponse; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.api.resourcenames.ResourceName; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.storage.v2.stub.StorageStub; +import com.google.storage.v2.stub.StorageStubSettings; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: ## API Overview and Naming Syntax + * + *

The Cloud Storage gRPC API allows applications to read and write data through the abstractions + * of buckets and objects. For a description of these abstractions please see [Cloud Storage + * documentation](https://cloud.google.com/storage/docs). + * + *

Resources are named as follows: + * + *

- Projects are referred to as they are defined by the Resource Manager API, using strings like + * `projects/123456` or `projects/my-string-id`. - Buckets are named using string names of the form: + * `projects/{project}/buckets/{bucket}`. For globally unique buckets, `_` might be substituted for + * the project. - Objects are uniquely identified by their name along with the name of the bucket + * they belong to, as separate strings in this API. For example: + * + *

``` ReadObjectRequest { bucket: 'projects/_/buckets/my-bucket' object: 'my-object' } ``` + * + *

Note that object names can contain `/` characters, which are treated as any other character + * (no special directory semantics). + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (StorageClient storageClient = StorageClient.create()) {
+ *   BucketName name = BucketName.of("[PROJECT]", "[BUCKET]");
+ *   storageClient.deleteBucket(name);
+ * }
+ * }
+ * + *

Note: close() needs to be called on the StorageClient object to clean up resources such as + * threads. In the example above, try-with-resources is used, which automatically calls close(). + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Methods
MethodDescriptionMethod Variants

DeleteBucket

Permanently deletes an empty bucket. The request fails if there are any live or noncurrent objects in the bucket, but the request succeeds if the bucket only contains soft-deleted objects or incomplete uploads, such as ongoing XML API multipart uploads. Does not permanently delete soft-deleted objects. + *

When this API is used to delete a bucket containing an object that has a soft delete policy enabled, the object becomes soft deleted, and the `softDeleteTime` and `hardDeleteTime` properties are set on the object. + *

Objects and multipart uploads that were in the bucket at the time of deletion are also retained for the specified retention duration. When a soft-deleted bucket reaches the end of its retention duration, it is permanently deleted. The `hardDeleteTime` of the bucket always equals or exceeds the expiration time of the last soft-deleted object in the bucket. + *

**IAM Permissions**: + *

Requires `storage.buckets.delete` IAM permission on the bucket.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteBucket(DeleteBucketRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • deleteBucket(BucketName name) + *

  • deleteBucket(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteBucketCallable() + *

+ *

GetBucket

Returns metadata for the specified bucket. + *

**IAM Permissions**: + *

Requires `storage.buckets.get` IAM permission on the bucket. Additionally, to return specific bucket metadata, the authenticated user must have the following permissions: + *

- To return the IAM policies: `storage.buckets.getIamPolicy` - To return the bucket IP filtering rules: `storage.buckets.getIpFilter`

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getBucket(GetBucketRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getBucket(BucketName name) + *

  • getBucket(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getBucketCallable() + *

+ *

CreateBucket

Creates a new bucket. + *

**IAM Permissions**: + *

Requires `storage.buckets.create` IAM permission on the bucket. Additionally, to enable specific bucket features, the authenticated user must have the following permissions: + *

- To enable object retention using the `enableObjectRetention` query parameter: `storage.buckets.enableObjectRetention` - To set the bucket IP filtering rules: `storage.buckets.setIpFilter`

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createBucket(CreateBucketRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • createBucket(ProjectName parent, Bucket bucket, String bucketId) + *

  • createBucket(String parent, Bucket bucket, String bucketId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createBucketCallable() + *

+ *

ListBuckets

Retrieves a list of buckets for a given project, ordered lexicographically by name. + *

**IAM Permissions**: + *

Requires `storage.buckets.list` IAM permission on the bucket. Additionally, to enable specific bucket features, the authenticated user must have the following permissions: + *

- To list the IAM policies: `storage.buckets.getIamPolicy` - To list the bucket IP filtering rules: `storage.buckets.getIpFilter`

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listBuckets(ListBucketsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listBuckets(ProjectName parent) + *

  • listBuckets(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listBucketsPagedCallable() + *

  • listBucketsCallable() + *

+ *

LockBucketRetentionPolicy

Permanently locks the retention policy that is currently applied to the specified bucket. + *

Caution: Locking a bucket is an irreversible action. Once you lock a bucket: + *

- You cannot remove the retention policy from the bucket. - You cannot decrease the retention period for the policy. + *

Once locked, you must delete the entire bucket in order to remove the bucket's retention policy. However, before you can delete the bucket, you must delete all the objects in the bucket, which is only possible if all the objects have reached the retention period set by the retention policy. + *

**IAM Permissions**: + *

Requires `storage.buckets.update` IAM permission on the bucket.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • lockBucketRetentionPolicy(LockBucketRetentionPolicyRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • lockBucketRetentionPolicy(BucketName bucket) + *

  • lockBucketRetentionPolicy(String bucket) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • lockBucketRetentionPolicyCallable() + *

+ *

GetIamPolicy

Gets the IAM policy for a specified bucket or managed folder. The `resource` field in the request should be `projects/_/buckets/{bucket}` for a bucket, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + *

**IAM Permissions**: + *

Requires `storage.buckets.getIamPolicy` on the bucket or `storage.managedFolders.getIamPolicy` IAM permission on the managed folder.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getIamPolicy(GetIamPolicyRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getIamPolicy(ResourceName resource) + *

  • getIamPolicy(String resource) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getIamPolicyCallable() + *

+ *

SetIamPolicy

Updates an IAM policy for the specified bucket or managed folder. The `resource` field in the request should be `projects/_/buckets/{bucket}` for a bucket, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • setIamPolicy(SetIamPolicyRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • setIamPolicy(ResourceName resource, Policy policy) + *

  • setIamPolicy(String resource, Policy policy) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • setIamPolicyCallable() + *

+ *

TestIamPermissions

Tests a set of permissions on the given bucket, object, or managed folder to see which, if any, are held by the caller. The `resource` field in the request should be `projects/_/buckets/{bucket}` for a bucket, `projects/_/buckets/{bucket}/objects/{object}` for an object, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • testIamPermissions(TestIamPermissionsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • testIamPermissions(ResourceName resource, List<String> permissions) + *

  • testIamPermissions(String resource, List<String> permissions) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • testIamPermissionsCallable() + *

+ *

UpdateBucket

Updates a bucket. Changes to the bucket are readable immediately after writing, but configuration changes might take time to propagate. This method supports `patch` semantics. + *

**IAM Permissions**: + *

Requires `storage.buckets.update` IAM permission on the bucket. Additionally, to enable specific bucket features, the authenticated user must have the following permissions: + *

- To set bucket IP filtering rules: `storage.buckets.setIpFilter` - To update public access prevention policies or access control lists (ACLs): `storage.buckets.setIamPolicy`

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateBucket(UpdateBucketRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • updateBucket(Bucket bucket, FieldMask updateMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateBucketCallable() + *

+ *

ComposeObject

Concatenates a list of existing objects into a new object in the same bucket. The existing source objects are unaffected by this operation. + *

**IAM Permissions**: + *

Requires the `storage.objects.create` and `storage.objects.get` IAM permissions to use this method. If the new composite object overwrites an existing object, the authenticated user must also have the `storage.objects.delete` permission. If the request body includes the retention property, the authenticated user must also have the `storage.objects.setRetention` IAM permission.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • composeObject(ComposeObjectRequest request) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • composeObjectCallable() + *

+ *

DeleteObject

Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used, or if soft delete is not enabled for the bucket. When this API is used to delete an object from a bucket that has soft delete policy enabled, the object becomes soft deleted, and the `softDeleteTime` and `hardDeleteTime` properties are set on the object. This API cannot be used to permanently delete soft-deleted objects. Soft-deleted objects are permanently deleted according to their `hardDeleteTime`. + *

You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject] API to restore soft-deleted objects until the soft delete retention period has passed. + *

**IAM Permissions**: + *

Requires `storage.objects.delete` IAM permission on the bucket.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteObject(DeleteObjectRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • deleteObject(BucketName bucket, String object) + *

  • deleteObject(String bucket, String object) + *

  • deleteObject(BucketName bucket, String object, long generation) + *

  • deleteObject(String bucket, String object, long generation) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteObjectCallable() + *

+ *

RestoreObject

Restores a soft-deleted object. When a soft-deleted object is restored, a new copy of that object is created in the same bucket and inherits the same metadata as the soft-deleted object. The inherited metadata is the metadata that existed when the original object became soft deleted, with the following exceptions: + *

- The `createTime` of the new object is set to the time at which the soft-deleted object was restored. - The `softDeleteTime` and `hardDeleteTime` values are cleared. - A new generation is assigned and the metageneration is reset to 1. - If the soft-deleted object was in a bucket that had Autoclass enabled, the new object is restored to Standard storage. - The restored object inherits the bucket's default object ACL, unless `copySourceAcl` is `true`. + *

If a live object using the same name already exists in the bucket and becomes overwritten, the live object becomes a noncurrent object if Object Versioning is enabled on the bucket. If Object Versioning is not enabled, the live object becomes soft deleted. + *

**IAM Permissions**: + *

Requires the following IAM permissions to use this method: + *

- `storage.objects.restore` - `storage.objects.create` - `storage.objects.delete` (only required if overwriting an existing object) - `storage.objects.getIamPolicy` (only required if `projection` is `full` and the relevant bucket has uniform bucket-level access disabled) - `storage.objects.setIamPolicy` (only required if `copySourceAcl` is `true` and the relevant bucket has uniform bucket-level access disabled)

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • restoreObject(RestoreObjectRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • restoreObject(BucketName bucket, String object, long generation) + *

  • restoreObject(String bucket, String object, long generation) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • restoreObjectCallable() + *

+ *

CancelResumableWrite

Cancels an in-progress resumable upload. + *

Any attempts to write to the resumable upload after cancelling the upload fail. + *

The behavior for any in-progress write operations is not guaranteed; they could either complete before the cancellation or fail if the cancellation completes first.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • cancelResumableWrite(CancelResumableWriteRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • cancelResumableWrite(String uploadId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • cancelResumableWriteCallable() + *

+ *

GetObject

Retrieves object metadata. + *

**IAM Permissions**: + *

Requires `storage.objects.get` IAM permission on the bucket. To return object ACLs, the authenticated user must also have the `storage.objects.getIamPolicy` permission.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getObject(GetObjectRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getObject(BucketName bucket, String object) + *

  • getObject(String bucket, String object) + *

  • getObject(BucketName bucket, String object, long generation) + *

  • getObject(String bucket, String object, long generation) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getObjectCallable() + *

+ *

ReadObject

Retrieves object data. + *

**IAM Permissions**: + *

Requires `storage.objects.get` IAM permission on the bucket.

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • readObjectCallable() + *

+ *

BidiReadObject

Reads an object's data. + *

This bi-directional API reads data from an object, allowing you to request multiple data ranges within a single stream, even across several messages. If an error occurs with any request, the stream closes with a relevant error code. Since you can have multiple outstanding requests, the error response includes a `BidiReadObjectError` proto in its `details` field, reporting the specific error, if any, for each pending `read_id`. + *

**IAM Permissions**: + *

Requires `storage.objects.get` IAM permission on the bucket.

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • bidiReadObjectCallable() + *

+ *

UpdateObject

Updates an object's metadata. Equivalent to JSON API's `storage.objects.patch` method. + *

**IAM Permissions**: + *

Requires `storage.objects.update` IAM permission on the bucket.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateObject(UpdateObjectRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • updateObject(Object object, FieldMask updateMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateObjectCallable() + *

+ *

WriteObject

Stores a new object and metadata. + *

An object can be written either in a single message stream or in a resumable sequence of message streams. To write using a single stream, the client should include in the first message of the stream an `WriteObjectSpec` describing the destination bucket, object, and any preconditions. Additionally, the final message must set 'finish_write' to true, or else it is an error. + *

For a resumable write, the client should instead call `StartResumableWrite()`, populating a `WriteObjectSpec` into that request. They should then attach the returned `upload_id` to the first message of each following call to `WriteObject`. If the stream is closed before finishing the upload (either explicitly by the client or due to a network error or an error response from the server), the client should do as follows: + *

- Check the result Status of the stream, to determine if writing can be resumed on this stream or must be restarted from scratch (by calling `StartResumableWrite()`). The resumable errors are `DEADLINE_EXCEEDED`, `INTERNAL`, and `UNAVAILABLE`. For each case, the client should use binary exponential backoff before retrying. Additionally, writes can be resumed after `RESOURCE_EXHAUSTED` errors, but only after taking appropriate measures, which might include reducing aggregate send rate across clients and/or requesting a quota increase for your project. - If the call to `WriteObject` returns `ABORTED`, that indicates concurrent attempts to update the resumable write, caused either by multiple racing clients or by a single client where the previous request was timed out on the client side but nonetheless reached the server. In this case the client should take steps to prevent further concurrent writes. For example, increase the timeouts and stop using more than one process to perform the upload. Follow the steps below for resuming the upload. - For resumable errors, the client should call `QueryWriteStatus()` and then continue writing from the returned `persisted_size`. This might be less than the amount of data the client previously sent. Note also that it is acceptable to send data starting at an offset earlier than the returned `persisted_size`; in this case, the service skips data at offsets that were already persisted (without checking that it matches the previously written data), and write only the data starting from the persisted offset. Even though the data isn't written, it might still incur a performance cost over resuming at the correct write offset. This behavior can make client-side handling simpler in some cases. - Clients must only send data that is a multiple of 256 KiB per message, unless the object is being finished with `finish_write` set to `true`. + *

The service does not view the object as complete until the client has sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any requests on a stream after sending a request with `finish_write` set to `true` causes an error. The client must check the response it receives to determine how much data the service is able to commit and whether the service views the object as complete. + *

Attempting to resume an already finalized object results in an `OK` status, with a `WriteObjectResponse` containing the finalized object's metadata. + *

Alternatively, you can use the `BidiWriteObject` operation to write an object with controls over flushing and the ability to determine the current persisted size.

**IAM Permissions**: + *

Requires `storage.objects.create` IAM permission on the bucket.

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • writeObjectCallable() + *

+ *

BidiWriteObject

Stores a new object and metadata. + *

This is similar to the `WriteObject` call with the added support for manual flushing of persisted state, and the ability to determine current persisted size without closing the stream. + *

The client might specify one or both of the `state_lookup` and `flush` fields in each `BidiWriteObjectRequest`. If `flush` is specified, the data written so far is persisted to storage. If `state_lookup` is specified, the service responds with a `BidiWriteObjectResponse` that contains the persisted size. If both `flush` and `state_lookup` are specified, the flush always occurs before a `state_lookup`, so that both might be set in the same request and the returned state is the state of the object post-flush. When the stream is closed, a `BidiWriteObjectResponse` is always sent to the client, regardless of the value of `state_lookup`.

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • bidiWriteObjectCallable() + *

+ *

ListObjects

Retrieves a list of objects matching the criteria. + *

**IAM Permissions**: + *

The authenticated user requires `storage.objects.list` IAM permission to use this method. To return object ACLs, the authenticated user must also have the `storage.objects.getIamPolicy` permission.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listObjects(ListObjectsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listObjects(BucketName parent) + *

  • listObjects(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listObjectsPagedCallable() + *

  • listObjectsCallable() + *

+ *

RewriteObject

Rewrites a source object to a destination object. Optionally overrides metadata.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • rewriteObject(RewriteObjectRequest request) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • rewriteObjectCallable() + *

+ *

StartResumableWrite

Starts a resumable write operation. This method is part of the Resumable upload feature. This allows you to upload large objects in multiple chunks, which is more resilient to network interruptions than a single upload. The validity duration of the write operation, and the consequences of it becoming invalid, are service-dependent. + *

**IAM Permissions**: + *

Requires `storage.objects.create` IAM permission on the bucket.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • startResumableWrite(StartResumableWriteRequest request) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • startResumableWriteCallable() + *

+ *

QueryWriteStatus

Determines the `persisted_size` of an object that is being written. This method is part of the resumable upload feature. The returned value is the size of the object that has been persisted so far. The value can be used as the `write_offset` for the next `Write()` call. + *

If the object does not exist, meaning if it was deleted, or the first `Write()` has not yet reached the service, this method returns the error `NOT_FOUND`. + *

This method is useful for clients that buffer data and need to know which data can be safely evicted. The client can call `QueryWriteStatus()` at any time to determine how much data has been logged for this object. For any sequence of `QueryWriteStatus()` calls for a given object name, the sequence of returned `persisted_size` values are non-decreasing.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • queryWriteStatus(QueryWriteStatusRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • queryWriteStatus(String uploadId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • queryWriteStatusCallable() + *

+ *

MoveObject

Moves the source object to the destination object in the same bucket. This operation moves a source object to a destination object in the same bucket by renaming the object. The move itself is an atomic transaction, ensuring all steps either complete successfully or no changes are made. + *

**IAM Permissions**: + *

Requires the following IAM permissions to use this method: + *

- `storage.objects.move` - `storage.objects.create` - `storage.objects.delete` (only required if overwriting an existing object)

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • moveObject(MoveObjectRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • moveObject(BucketName bucket, String sourceObject, String destinationObject) + *

  • moveObject(String bucket, String sourceObject, String destinationObject) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • moveObjectCallable() + *

+ *
See the individual methods for example code.

Many parameters require resource names to be formatted in a particular way. To assist with these names, this class includes a format method for each type of name, and additionally a parse method to extract the individual identifiers contained within names that are returned.

This class can be customized by passing in a custom instance of StorageSettings to create(). For example:

To customize credentials:

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * StorageSettings storageSettings =
+ *     StorageSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * StorageClient storageClient = StorageClient.create(storageSettings);
+ * }
+ * + *

To customize the endpoint: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * StorageSettings storageSettings = StorageSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * StorageClient storageClient = StorageClient.create(storageSettings);
+ * }
+ * + *

Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@Generated("by gapic-generator-java") +public class StorageClient implements BackgroundResource { + private final StorageSettings settings; + private final StorageStub stub; + + /** Constructs an instance of StorageClient with default settings. */ + public static final StorageClient create() throws IOException { + return create(StorageSettings.newBuilder().build()); + } + + /** + * Constructs an instance of StorageClient, using the given settings. The channels are created + * based on the settings passed in, or defaults for any settings that are not set. + */ + public static final StorageClient create(StorageSettings settings) throws IOException { + return new StorageClient(settings); + } + + /** + * Constructs an instance of StorageClient, using the given stub for making calls. This is for + * advanced usage - prefer using create(StorageSettings). + */ + public static final StorageClient create(StorageStub stub) { + return new StorageClient(stub); + } + + /** + * Constructs an instance of StorageClient, using the given settings. This is protected so that it + * is easy to make a subclass, but otherwise, the static factory methods should be preferred. + */ + protected StorageClient(StorageSettings settings) throws IOException { + this.settings = settings; + this.stub = ((StorageStubSettings) settings.getStubSettings()).createStub(); + } + + protected StorageClient(StorageStub stub) { + this.settings = null; + this.stub = stub; + } + + public final StorageSettings getSettings() { + return settings; + } + + public StorageStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Permanently deletes an empty bucket. The request fails if there are any live or noncurrent + * objects in the bucket, but the request succeeds if the bucket only contains soft-deleted + * objects or incomplete uploads, such as ongoing XML API multipart uploads. 
Does not permanently + * delete soft-deleted objects. + * + *

When this API is used to delete a bucket containing an object that has a soft delete policy + * enabled, the object becomes soft deleted, and the `softDeleteTime` and `hardDeleteTime` + * properties are set on the object. + * + *

Objects and multipart uploads that were in the bucket at the time of deletion are also + * retained for the specified retention duration. When a soft-deleted bucket reaches the end of + * its retention duration, it is permanently deleted. The `hardDeleteTime` of the bucket always + * equals or exceeds the expiration time of the last soft-deleted object in the bucket. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.delete` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   BucketName name = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   storageClient.deleteBucket(name);
+   * }
+   * }
+ * + * @param name Required. Name of a bucket to delete. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBucket(BucketName name) { + DeleteBucketRequest request = + DeleteBucketRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + deleteBucket(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Permanently deletes an empty bucket. The request fails if there are any live or noncurrent + * objects in the bucket, but the request succeeds if the bucket only contains soft-deleted + * objects or incomplete uploads, such as ongoing XML API multipart uploads. Does not permanently + * delete soft-deleted objects. + * + *

When this API is used to delete a bucket containing an object that has a soft delete policy + * enabled, the object becomes soft deleted, and the `softDeleteTime` and `hardDeleteTime` + * properties are set on the object. + * + *

Objects and multipart uploads that were in the bucket at the time of deletion are also + * retained for the specified retention duration. When a soft-deleted bucket reaches the end of + * its retention duration, it is permanently deleted. The `hardDeleteTime` of the bucket always + * equals or exceeds the expiration time of the last soft-deleted object in the bucket. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.delete` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String name = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   storageClient.deleteBucket(name);
+   * }
+   * }
+ * + * @param name Required. Name of a bucket to delete. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBucket(String name) { + DeleteBucketRequest request = DeleteBucketRequest.newBuilder().setName(name).build(); + deleteBucket(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Permanently deletes an empty bucket. The request fails if there are any live or noncurrent + * objects in the bucket, but the request succeeds if the bucket only contains soft-deleted + * objects or incomplete uploads, such as ongoing XML API multipart uploads. Does not permanently + * delete soft-deleted objects. + * + *

When this API is used to delete a bucket containing an object that has a soft delete policy + * enabled, the object becomes soft deleted, and the `softDeleteTime` and `hardDeleteTime` + * properties are set on the object. + * + *

Objects and multipart uploads that were in the bucket at the time of deletion are also + * retained for the specified retention duration. When a soft-deleted bucket reaches the end of + * its retention duration, it is permanently deleted. The `hardDeleteTime` of the bucket always + * equals or exceeds the expiration time of the last soft-deleted object in the bucket. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.delete` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   DeleteBucketRequest request =
+   *       DeleteBucketRequest.newBuilder()
+   *           .setName(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .build();
+   *   storageClient.deleteBucket(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBucket(DeleteBucketRequest request) { + deleteBucketCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Permanently deletes an empty bucket. The request fails if there are any live or noncurrent + * objects in the bucket, but the request succeeds if the bucket only contains soft-deleted + * objects or incomplete uploads, such as ongoing XML API multipart uploads. Does not permanently + * delete soft-deleted objects. + * + *

When this API is used to delete a bucket containing an object that has a soft delete policy + * enabled, the object becomes soft deleted, and the `softDeleteTime` and `hardDeleteTime` + * properties are set on the object. + * + *

Objects and multipart uploads that were in the bucket at the time of deletion are also + * retained for the specified retention duration. When a soft-deleted bucket reaches the end of + * its retention duration, it is permanently deleted. The `hardDeleteTime` of the bucket always + * equals or exceeds the expiration time of the last soft-deleted object in the bucket. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.delete` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   DeleteBucketRequest request =
+   *       DeleteBucketRequest.newBuilder()
+   *           .setName(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .build();
+   *   ApiFuture future = storageClient.deleteBucketCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable deleteBucketCallable() { + return stub.deleteBucketCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns metadata for the specified bucket. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.get` IAM permission on the bucket. Additionally, to return + * specific bucket metadata, the authenticated user must have the following permissions: + * + *

- To return the IAM policies: `storage.buckets.getIamPolicy` - To return the bucket IP + * filtering rules: `storage.buckets.getIpFilter` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   BucketName name = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   Bucket response = storageClient.getBucket(name);
+   * }
+   * }
+ * + * @param name Required. Name of a bucket. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Bucket getBucket(BucketName name) { + GetBucketRequest request = + GetBucketRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getBucket(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns metadata for the specified bucket. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.get` IAM permission on the bucket. Additionally, to return + * specific bucket metadata, the authenticated user must have the following permissions: + * + *

- To return the IAM policies: `storage.buckets.getIamPolicy` - To return the bucket IP + * filtering rules: `storage.buckets.getIpFilter` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String name = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   Bucket response = storageClient.getBucket(name);
+   * }
+   * }
+ * + * @param name Required. Name of a bucket. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Bucket getBucket(String name) { + GetBucketRequest request = GetBucketRequest.newBuilder().setName(name).build(); + return getBucket(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns metadata for the specified bucket. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.get` IAM permission on the bucket. Additionally, to return + * specific bucket metadata, the authenticated user must have the following permissions: + * + *

- To return the IAM policies: `storage.buckets.getIamPolicy` - To return the bucket IP + * filtering rules: `storage.buckets.getIpFilter` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   GetBucketRequest request =
+   *       GetBucketRequest.newBuilder()
+   *           .setName(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setReadMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   Bucket response = storageClient.getBucket(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Bucket getBucket(GetBucketRequest request) { + return getBucketCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns metadata for the specified bucket. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.get` IAM permission on the bucket. Additionally, to return + * specific bucket metadata, the authenticated user must have the following permissions: + * + *

- To return the IAM policies: `storage.buckets.getIamPolicy` - To return the bucket IP + * filtering rules: `storage.buckets.getIpFilter` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   GetBucketRequest request =
+   *       GetBucketRequest.newBuilder()
+   *           .setName(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setReadMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = storageClient.getBucketCallable().futureCall(request);
+   *   // Do something.
+   *   Bucket response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getBucketCallable() { + return stub.getBucketCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new bucket. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.create` IAM permission on the bucket. Additionally, to enable + * specific bucket features, the authenticated user must have the following permissions: + * + *

- To enable object retention using the `enableObjectRetention` query parameter: + * `storage.buckets.enableObjectRetention` - To set the bucket IP filtering rules: + * `storage.buckets.setIpFilter` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ProjectName parent = ProjectName.of("[PROJECT]");
+   *   Bucket bucket = Bucket.newBuilder().build();
+   *   String bucketId = "bucketId-1603305307";
+   *   Bucket response = storageClient.createBucket(parent, bucket, bucketId);
+   * }
+   * }
+ * + * @param parent Required. The project to which this bucket belongs. This field must either be + * empty or `projects/_`. The project ID that owns this bucket should be specified in the + * `bucket.project` field. + * @param bucket Optional. Properties of the new bucket being inserted. The name of the bucket is + * specified in the `bucket_id` field. Populating `bucket.name` field results in an error. The + * project of the bucket must be specified in the `bucket.project` field. This field must be + * in `projects/{projectIdentifier}` format, {projectIdentifier} can be the project ID or + * project number. The `parent` field must be either empty or `projects/_`. + * @param bucketId Required. The ID to use for this bucket, which becomes the final component of + * the bucket's resource name. For example, the value `foo` might result in a bucket with the + * name `projects/123456/buckets/foo`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Bucket createBucket(ProjectName parent, Bucket bucket, String bucketId) { + CreateBucketRequest request = + CreateBucketRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setBucket(bucket) + .setBucketId(bucketId) + .build(); + return createBucket(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new bucket. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.create` IAM permission on the bucket. Additionally, to enable + * specific bucket features, the authenticated user must have the following permissions: + * + *

- To enable object retention using the `enableObjectRetention` query parameter: + * `storage.buckets.enableObjectRetention` - To set the bucket IP filtering rules: + * `storage.buckets.setIpFilter` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String parent = ProjectName.of("[PROJECT]").toString();
+   *   Bucket bucket = Bucket.newBuilder().build();
+   *   String bucketId = "bucketId-1603305307";
+   *   Bucket response = storageClient.createBucket(parent, bucket, bucketId);
+   * }
+   * }
+ * + * @param parent Required. The project to which this bucket belongs. This field must either be + * empty or `projects/_`. The project ID that owns this bucket should be specified in the + * `bucket.project` field. + * @param bucket Optional. Properties of the new bucket being inserted. The name of the bucket is + * specified in the `bucket_id` field. Populating `bucket.name` field results in an error. The + * project of the bucket must be specified in the `bucket.project` field. This field must be + * in `projects/{projectIdentifier}` format, {projectIdentifier} can be the project ID or + * project number. The `parent` field must be either empty or `projects/_`. + * @param bucketId Required. The ID to use for this bucket, which becomes the final component of + * the bucket's resource name. For example, the value `foo` might result in a bucket with the + * name `projects/123456/buckets/foo`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Bucket createBucket(String parent, Bucket bucket, String bucketId) { + CreateBucketRequest request = + CreateBucketRequest.newBuilder() + .setParent(parent) + .setBucket(bucket) + .setBucketId(bucketId) + .build(); + return createBucket(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new bucket. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.create` IAM permission on the bucket. Additionally, to enable + * specific bucket features, the authenticated user must have the following permissions: + * + *

- To enable object retention using the `enableObjectRetention` query parameter: + * `storage.buckets.enableObjectRetention` - To set the bucket IP filtering rules: + * `storage.buckets.setIpFilter` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   CreateBucketRequest request =
+   *       CreateBucketRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setBucket(Bucket.newBuilder().build())
+   *           .setBucketId("bucketId-1603305307")
+   *           .setPredefinedAcl("predefinedAcl1207041188")
+   *           .setPredefinedDefaultObjectAcl("predefinedDefaultObjectAcl2109168048")
+   *           .setEnableObjectRetention(true)
+   *           .build();
+   *   Bucket response = storageClient.createBucket(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Bucket createBucket(CreateBucketRequest request) { + return createBucketCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new bucket. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.create` IAM permission on the bucket. Additionally, to enable + * specific bucket features, the authenticated user must have the following permissions: + * + *

- To enable object retention using the `enableObjectRetention` query parameter: + * `storage.buckets.enableObjectRetention` - To set the bucket IP filtering rules: + * `storage.buckets.setIpFilter` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   CreateBucketRequest request =
+   *       CreateBucketRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setBucket(Bucket.newBuilder().build())
+   *           .setBucketId("bucketId-1603305307")
+   *           .setPredefinedAcl("predefinedAcl1207041188")
+   *           .setPredefinedDefaultObjectAcl("predefinedDefaultObjectAcl2109168048")
+   *           .setEnableObjectRetention(true)
+   *           .build();
+   *   ApiFuture future = storageClient.createBucketCallable().futureCall(request);
+   *   // Do something.
+   *   Bucket response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable createBucketCallable() { + return stub.createBucketCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of buckets for a given project, ordered lexicographically by name. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.list` IAM permission on the bucket. Additionally, to enable + * specific bucket features, the authenticated user must have the following permissions: + * + *

- To list the IAM policies: `storage.buckets.getIamPolicy` - To list the bucket IP filtering + * rules: `storage.buckets.getIpFilter` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ProjectName parent = ProjectName.of("[PROJECT]");
+   *   for (Bucket element : storageClient.listBuckets(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The project whose buckets we are listing. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBucketsPagedResponse listBuckets(ProjectName parent) { + ListBucketsRequest request = + ListBucketsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listBuckets(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of buckets for a given project, ordered lexicographically by name. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.list` IAM permission on the bucket. Additionally, to enable + * specific bucket features, the authenticated user must have the following permissions: + * + *

- To list the IAM policies: `storage.buckets.getIamPolicy` - To list the bucket IP filtering + * rules: `storage.buckets.getIpFilter` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String parent = ProjectName.of("[PROJECT]").toString();
+   *   for (Bucket element : storageClient.listBuckets(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The project whose buckets we are listing. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBucketsPagedResponse listBuckets(String parent) { + ListBucketsRequest request = ListBucketsRequest.newBuilder().setParent(parent).build(); + return listBuckets(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of buckets for a given project, ordered lexicographically by name. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.list` IAM permission on the bucket. Additionally, to enable + * specific bucket features, the authenticated user must have the following permissions: + * + *

- To list the IAM policies: `storage.buckets.getIamPolicy` - To list the bucket IP filtering + * rules: `storage.buckets.getIpFilter` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ListBucketsRequest request =
+   *       ListBucketsRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setPrefix("prefix-980110702")
+   *           .setReadMask(FieldMask.newBuilder().build())
+   *           .setReturnPartialSuccess(true)
+   *           .build();
+   *   for (Bucket element : storageClient.listBuckets(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBucketsPagedResponse listBuckets(ListBucketsRequest request) { + return listBucketsPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of buckets for a given project, ordered lexicographically by name. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.list` IAM permission on the bucket. Additionally, to enable + * specific bucket features, the authenticated user must have the following permissions: + * + *

- To list the IAM policies: `storage.buckets.getIamPolicy` - To list the bucket IP filtering + * rules: `storage.buckets.getIpFilter` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ListBucketsRequest request =
+   *       ListBucketsRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setPrefix("prefix-980110702")
+   *           .setReadMask(FieldMask.newBuilder().build())
+   *           .setReturnPartialSuccess(true)
+   *           .build();
+   *   ApiFuture future = storageClient.listBucketsPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Bucket element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listBucketsPagedCallable() { + return stub.listBucketsPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of buckets for a given project, ordered lexicographically by name. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.list` IAM permission on the bucket. Additionally, to enable + * specific bucket features, the authenticated user must have the following permissions: + * + *

- To list the IAM policies: `storage.buckets.getIamPolicy` - To list the bucket IP filtering + * rules: `storage.buckets.getIpFilter` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ListBucketsRequest request =
+   *       ListBucketsRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setPrefix("prefix-980110702")
+   *           .setReadMask(FieldMask.newBuilder().build())
+   *           .setReturnPartialSuccess(true)
+   *           .build();
+   *   while (true) {
+   *     ListBucketsResponse response = storageClient.listBucketsCallable().call(request);
+   *     for (Bucket element : response.getBucketsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable listBucketsCallable() { + return stub.listBucketsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Permanently locks the retention policy that is currently applied to the specified bucket. + * + *

Caution: Locking a bucket is an irreversible action. Once you lock a bucket: + * + *

- You cannot remove the retention policy from the bucket. - You cannot decrease the + * retention period for the policy. + * + *

Once locked, you must delete the entire bucket in order to remove the bucket's retention + * policy. However, before you can delete the bucket, you must delete all the objects in the + * bucket, which is only possible if all the objects have reached the retention period set by the + * retention policy. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.update` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   Bucket response = storageClient.lockBucketRetentionPolicy(bucket);
+   * }
+   * }
+ * + * @param bucket Required. Name of a bucket. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Bucket lockBucketRetentionPolicy(BucketName bucket) { + LockBucketRetentionPolicyRequest request = + LockBucketRetentionPolicyRequest.newBuilder() + .setBucket(bucket == null ? null : bucket.toString()) + .build(); + return lockBucketRetentionPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Permanently locks the retention policy that is currently applied to the specified bucket. + * + *

Caution: Locking a bucket is an irreversible action. Once you lock a bucket: + * + *

- You cannot remove the retention policy from the bucket. - You cannot decrease the + * retention period for the policy. + * + *

Once locked, you must delete the entire bucket in order to remove the bucket's retention + * policy. However, before you can delete the bucket, you must delete all the objects in the + * bucket, which is only possible if all the objects have reached the retention period set by the + * retention policy. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.update` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String bucket = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   Bucket response = storageClient.lockBucketRetentionPolicy(bucket);
+   * }
+   * }
+ * + * @param bucket Required. Name of a bucket. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Bucket lockBucketRetentionPolicy(String bucket) { + LockBucketRetentionPolicyRequest request = + LockBucketRetentionPolicyRequest.newBuilder().setBucket(bucket).build(); + return lockBucketRetentionPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Permanently locks the retention policy that is currently applied to the specified bucket. + * + *

Caution: Locking a bucket is an irreversible action. Once you lock a bucket: + * + *

- You cannot remove the retention policy from the bucket. - You cannot decrease the + * retention period for the policy. + * + *

Once locked, you must delete the entire bucket in order to remove the bucket's retention + * policy. However, before you can delete the bucket, you must delete all the objects in the + * bucket, which is only possible if all the objects have reached the retention period set by the + * retention policy. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.update` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   LockBucketRetentionPolicyRequest request =
+   *       LockBucketRetentionPolicyRequest.newBuilder()
+   *           .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .build();
+   *   Bucket response = storageClient.lockBucketRetentionPolicy(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Bucket lockBucketRetentionPolicy(LockBucketRetentionPolicyRequest request) { + return lockBucketRetentionPolicyCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Permanently locks the retention policy that is currently applied to the specified bucket. + * + *

Caution: Locking a bucket is an irreversible action. Once you lock a bucket: + * + *

- You cannot remove the retention policy from the bucket. - You cannot decrease the + * retention period for the policy. + * + *

Once locked, you must delete the entire bucket in order to remove the bucket's retention + * policy. However, before you can delete the bucket, you must delete all the objects in the + * bucket, which is only possible if all the objects have reached the retention period set by the + * retention policy. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.update` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   LockBucketRetentionPolicyRequest request =
+   *       LockBucketRetentionPolicyRequest.newBuilder()
+   *           .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .build();
+   *   ApiFuture future =
+   *       storageClient.lockBucketRetentionPolicyCallable().futureCall(request);
+   *   // Do something.
+   *   Bucket response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + lockBucketRetentionPolicyCallable() { + return stub.lockBucketRetentionPolicyCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the IAM policy for a specified bucket or managed folder. The `resource` field in the + * request should be `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.getIamPolicy` on the bucket or + * `storage.managedFolders.getIamPolicy` IAM permission on the managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ResourceName resource =
+   *       CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]");
+   *   Policy response = storageClient.getIamPolicy(resource);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being requested. See the + * operation documentation for the appropriate value for this field. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy getIamPolicy(ResourceName resource) { + GetIamPolicyRequest request = + GetIamPolicyRequest.newBuilder() + .setResource(resource == null ? null : resource.toString()) + .build(); + return getIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the IAM policy for a specified bucket or managed folder. The `resource` field in the + * request should be `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.getIamPolicy` on the bucket or + * `storage.managedFolders.getIamPolicy` IAM permission on the managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String resource =
+   *       CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]").toString();
+   *   Policy response = storageClient.getIamPolicy(resource);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being requested. See the + * operation documentation for the appropriate value for this field. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy getIamPolicy(String resource) { + GetIamPolicyRequest request = GetIamPolicyRequest.newBuilder().setResource(resource).build(); + return getIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the IAM policy for a specified bucket or managed folder. The `resource` field in the + * request should be `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.getIamPolicy` on the bucket or + * `storage.managedFolders.getIamPolicy` IAM permission on the managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   GetIamPolicyRequest request =
+   *       GetIamPolicyRequest.newBuilder()
+   *           .setResource(
+   *               CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]")
+   *                   .toString())
+   *           .setOptions(GetPolicyOptions.newBuilder().build())
+   *           .build();
+   *   Policy response = storageClient.getIamPolicy(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy getIamPolicy(GetIamPolicyRequest request) { + return getIamPolicyCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the IAM policy for a specified bucket or managed folder. The `resource` field in the + * request should be `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.getIamPolicy` on the bucket or + * `storage.managedFolders.getIamPolicy` IAM permission on the managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   GetIamPolicyRequest request =
+   *       GetIamPolicyRequest.newBuilder()
+   *           .setResource(
+   *               CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]")
+   *                   .toString())
+   *           .setOptions(GetPolicyOptions.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = storageClient.getIamPolicyCallable().futureCall(request);
+   *   // Do something.
+   *   Policy response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getIamPolicyCallable() { + return stub.getIamPolicyCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an IAM policy for the specified bucket or managed folder. The `resource` field in the + * request should be `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ResourceName resource =
+   *       CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]");
+   *   Policy policy = Policy.newBuilder().build();
+   *   Policy response = storageClient.setIamPolicy(resource, policy);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being specified. See the + * operation documentation for the appropriate value for this field. + * @param policy REQUIRED: The complete policy to be applied to the `resource`. The size of the + * policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud + * Platform services (such as Projects) might reject them. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy setIamPolicy(ResourceName resource, Policy policy) { + SetIamPolicyRequest request = + SetIamPolicyRequest.newBuilder() + .setResource(resource == null ? null : resource.toString()) + .setPolicy(policy) + .build(); + return setIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an IAM policy for the specified bucket or managed folder. The `resource` field in the + * request should be `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String resource =
+   *       CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]").toString();
+   *   Policy policy = Policy.newBuilder().build();
+   *   Policy response = storageClient.setIamPolicy(resource, policy);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being specified. See the + * operation documentation for the appropriate value for this field. + * @param policy REQUIRED: The complete policy to be applied to the `resource`. The size of the + * policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud + * Platform services (such as Projects) might reject them. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy setIamPolicy(String resource, Policy policy) { + SetIamPolicyRequest request = + SetIamPolicyRequest.newBuilder().setResource(resource).setPolicy(policy).build(); + return setIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an IAM policy for the specified bucket or managed folder. The `resource` field in the + * request should be `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   SetIamPolicyRequest request =
+   *       SetIamPolicyRequest.newBuilder()
+   *           .setResource(
+   *               CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]")
+   *                   .toString())
+   *           .setPolicy(Policy.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   Policy response = storageClient.setIamPolicy(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy setIamPolicy(SetIamPolicyRequest request) { + return setIamPolicyCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an IAM policy for the specified bucket or managed folder. The `resource` field in the + * request should be `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   SetIamPolicyRequest request =
+   *       SetIamPolicyRequest.newBuilder()
+   *           .setResource(
+   *               CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]")
+   *                   .toString())
+   *           .setPolicy(Policy.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = storageClient.setIamPolicyCallable().futureCall(request);
+   *   // Do something.
+   *   Policy response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable setIamPolicyCallable() { + return stub.setIamPolicyCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Tests a set of permissions on the given bucket, object, or managed folder to see which, if any, + * are held by the caller. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, `projects/_/buckets/{bucket}/objects/{object}` for + * an object, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed + * folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ResourceName resource =
+   *       CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]");
+   *   List permissions = new ArrayList<>();
+   *   TestIamPermissionsResponse response = storageClient.testIamPermissions(resource, permissions);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy detail is being requested. See the + * operation documentation for the appropriate value for this field. + * @param permissions The set of permissions to check for the `resource`. Permissions with + * wildcards (such as '*' or 'storage.*') are not allowed. For more information see + * [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final TestIamPermissionsResponse testIamPermissions( + ResourceName resource, List permissions) { + TestIamPermissionsRequest request = + TestIamPermissionsRequest.newBuilder() + .setResource(resource == null ? null : resource.toString()) + .addAllPermissions(permissions) + .build(); + return testIamPermissions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Tests a set of permissions on the given bucket, object, or managed folder to see which, if any, + * are held by the caller. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, `projects/_/buckets/{bucket}/objects/{object}` for + * an object, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed + * folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String resource =
+   *       CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]").toString();
+   *   List permissions = new ArrayList<>();
+   *   TestIamPermissionsResponse response = storageClient.testIamPermissions(resource, permissions);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy detail is being requested. See the + * operation documentation for the appropriate value for this field. + * @param permissions The set of permissions to check for the `resource`. Permissions with + * wildcards (such as '*' or 'storage.*') are not allowed. For more information see + * [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final TestIamPermissionsResponse testIamPermissions( + String resource, List permissions) { + TestIamPermissionsRequest request = + TestIamPermissionsRequest.newBuilder() + .setResource(resource) + .addAllPermissions(permissions) + .build(); + return testIamPermissions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Tests a set of permissions on the given bucket, object, or managed folder to see which, if any, + * are held by the caller. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, `projects/_/buckets/{bucket}/objects/{object}` for + * an object, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed + * folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   TestIamPermissionsRequest request =
+   *       TestIamPermissionsRequest.newBuilder()
+   *           .setResource(
+   *               CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]")
+   *                   .toString())
+   *           .addAllPermissions(new ArrayList())
+   *           .build();
+   *   TestIamPermissionsResponse response = storageClient.testIamPermissions(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final TestIamPermissionsResponse testIamPermissions(TestIamPermissionsRequest request) { + return testIamPermissionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Tests a set of permissions on the given bucket, object, or managed folder to see which, if any, + * are held by the caller. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, `projects/_/buckets/{bucket}/objects/{object}` for + * an object, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed + * folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   TestIamPermissionsRequest request =
+   *       TestIamPermissionsRequest.newBuilder()
+   *           .setResource(
+   *               CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]")
+   *                   .toString())
+   *           .addAllPermissions(new ArrayList())
+   *           .build();
+   *   ApiFuture future =
+   *       storageClient.testIamPermissionsCallable().futureCall(request);
+   *   // Do something.
+   *   TestIamPermissionsResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + testIamPermissionsCallable() { + return stub.testIamPermissionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a bucket. Changes to the bucket are readable immediately after writing, but + * configuration changes might take time to propagate. This method supports `patch` semantics. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.update` IAM permission on the bucket. Additionally, to enable + * specific bucket features, the authenticated user must have the following permissions: + * + *

- To set bucket IP filtering rules: `storage.buckets.setIpFilter` - To update public access + * prevention policies or access control lists (ACLs): `storage.buckets.setIamPolicy` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   Bucket bucket = Bucket.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   Bucket response = storageClient.updateBucket(bucket, updateMask);
+   * }
+   * }
+ * + * @param bucket Required. The bucket to update. The bucket's `name` field is used to identify the + * bucket. + * @param updateMask Required. List of fields to be updated. + *

To specify ALL fields, equivalent to the JSON API's "update" function, specify a single + * field with the value `*`. Note: not recommended. If a new field is introduced at a + * later time, an older client updating with the `*` might accidentally reset the new + * field's value. + *

Not specifying any fields is an error. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Bucket updateBucket(Bucket bucket, FieldMask updateMask) { + UpdateBucketRequest request = + UpdateBucketRequest.newBuilder().setBucket(bucket).setUpdateMask(updateMask).build(); + return updateBucket(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a bucket. Changes to the bucket are readable immediately after writing, but + * configuration changes might take time to propagate. This method supports `patch` semantics. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.update` IAM permission on the bucket. Additionally, to enable + * specific bucket features, the authenticated user must have the following permissions: + * + *

- To set bucket IP filtering rules: `storage.buckets.setIpFilter` - To update public access + * prevention policies or access control lists (ACLs): `storage.buckets.setIamPolicy` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   UpdateBucketRequest request =
+   *       UpdateBucketRequest.newBuilder()
+   *           .setBucket(Bucket.newBuilder().build())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setPredefinedAcl("predefinedAcl1207041188")
+   *           .setPredefinedDefaultObjectAcl("predefinedDefaultObjectAcl2109168048")
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   Bucket response = storageClient.updateBucket(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Bucket updateBucket(UpdateBucketRequest request) { + return updateBucketCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a bucket. Changes to the bucket are readable immediately after writing, but + * configuration changes might take time to propagate. This method supports `patch` semantics. + * + *

**IAM Permissions**: + * + *

Requires `storage.buckets.update` IAM permission on the bucket. Additionally, to enable + * specific bucket features, the authenticated user must have the following permissions: + * + *

- To set bucket IP filtering rules: `storage.buckets.setIpFilter` - To update public access + * prevention policies or access control lists (ACLs): `storage.buckets.setIamPolicy` + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   UpdateBucketRequest request =
+   *       UpdateBucketRequest.newBuilder()
+   *           .setBucket(Bucket.newBuilder().build())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setPredefinedAcl("predefinedAcl1207041188")
+   *           .setPredefinedDefaultObjectAcl("predefinedDefaultObjectAcl2109168048")
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = storageClient.updateBucketCallable().futureCall(request);
+   *   // Do something.
+   *   Bucket response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable updateBucketCallable() { + return stub.updateBucketCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Concatenates a list of existing objects into a new object in the same bucket. The existing + * source objects are unaffected by this operation. + * + *

**IAM Permissions**: + * + *

Requires the `storage.objects.create` and `storage.objects.get` IAM permissions to use this + * method. If the new composite object overwrites an existing object, the authenticated user must + * also have the `storage.objects.delete` permission. If the request body includes the retention + * property, the authenticated user must also have the `storage.objects.setRetention` IAM + * permission. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ComposeObjectRequest request =
+   *       ComposeObjectRequest.newBuilder()
+   *           .setDestination(Object.newBuilder().build())
+   *           .addAllSourceObjects(new ArrayList())
+   *           .setDestinationPredefinedAcl("destinationPredefinedAcl1111125814")
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setKmsKey(
+   *               CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]")
+   *                   .toString())
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .setObjectChecksums(ObjectChecksums.newBuilder().build())
+   *           .setDeleteSourceObjects(true)
+   *           .build();
+   *   Object response = storageClient.composeObject(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Object composeObject(ComposeObjectRequest request) { + return composeObjectCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Concatenates a list of existing objects into a new object in the same bucket. The existing + * source objects are unaffected by this operation. + * + *

**IAM Permissions**: + * + *

Requires the `storage.objects.create` and `storage.objects.get` IAM permissions to use this + * method. If the new composite object overwrites an existing object, the authenticated user must + * also have the `storage.objects.delete` permission. If the request body includes the retention + * property, the authenticated user must also have the `storage.objects.setRetention` IAM + * permission. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ComposeObjectRequest request =
+   *       ComposeObjectRequest.newBuilder()
+   *           .setDestination(Object.newBuilder().build())
+   *           .addAllSourceObjects(new ArrayList())
+   *           .setDestinationPredefinedAcl("destinationPredefinedAcl1111125814")
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setKmsKey(
+   *               CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]")
+   *                   .toString())
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .setObjectChecksums(ObjectChecksums.newBuilder().build())
+   *           .setDeleteSourceObjects(true)
+   *           .build();
+   *   ApiFuture future = storageClient.composeObjectCallable().futureCall(request);
+   *   // Do something.
+   *   Object response = future.get();
+   * }
+   * }
+   */
+  public final UnaryCallable composeObjectCallable() {
+    return stub.composeObjectCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for
+   * the bucket, or if the generation parameter is used, or if soft delete is not enabled for the
+   * bucket. When this API is used to delete an object from a bucket that has soft delete policy
+   * enabled, the object becomes soft deleted, and the `softDeleteTime` and `hardDeleteTime`
+   * properties are set on the object. This API cannot be used to permanently delete soft-deleted
+   * objects. Soft-deleted objects are permanently deleted according to their `hardDeleteTime`.
+   *
+   * 

You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject] API to restore + * soft-deleted objects until the soft delete retention period has passed. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.delete` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   String object = "object-1023368385";
+   *   storageClient.deleteObject(bucket, object);
+   * }
+   * }
+ * + * @param bucket Required. Name of the bucket in which the object resides. + * @param object Required. The name of the finalized object to delete. Note: If you want to delete + * an unfinalized resumable upload please use `CancelResumableWrite`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteObject(BucketName bucket, String object) { + DeleteObjectRequest request = + DeleteObjectRequest.newBuilder() + .setBucket(bucket == null ? null : bucket.toString()) + .setObject(object) + .build(); + deleteObject(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for + * the bucket, or if the generation parameter is used, or if soft delete is not enabled for the + * bucket. When this API is used to delete an object from a bucket that has soft delete policy + * enabled, the object becomes soft deleted, and the `softDeleteTime` and `hardDeleteTime` + * properties are set on the object. This API cannot be used to permanently delete soft-deleted + * objects. Soft-deleted objects are permanently deleted according to their `hardDeleteTime`. + * + *

You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject] API to restore + * soft-deleted objects until the soft delete retention period has passed. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.delete` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String bucket = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   String object = "object-1023368385";
+   *   storageClient.deleteObject(bucket, object);
+   * }
+   * }
+ * + * @param bucket Required. Name of the bucket in which the object resides. + * @param object Required. The name of the finalized object to delete. Note: If you want to delete + * an unfinalized resumable upload please use `CancelResumableWrite`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteObject(String bucket, String object) { + DeleteObjectRequest request = + DeleteObjectRequest.newBuilder().setBucket(bucket).setObject(object).build(); + deleteObject(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for + * the bucket, or if the generation parameter is used, or if soft delete is not enabled for the + * bucket. When this API is used to delete an object from a bucket that has soft delete policy + * enabled, the object becomes soft deleted, and the `softDeleteTime` and `hardDeleteTime` + * properties are set on the object. This API cannot be used to permanently delete soft-deleted + * objects. Soft-deleted objects are permanently deleted according to their `hardDeleteTime`. + * + *

You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject] API to restore + * soft-deleted objects until the soft delete retention period has passed. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.delete` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   String object = "object-1023368385";
+   *   long generation = 305703192;
+   *   storageClient.deleteObject(bucket, object, generation);
+   * }
+   * }
+ * + * @param bucket Required. Name of the bucket in which the object resides. + * @param object Required. The name of the finalized object to delete. Note: If you want to delete + * an unfinalized resumable upload please use `CancelResumableWrite`. + * @param generation Optional. If present, permanently deletes a specific revision of this object + * (as opposed to the latest version, the default). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteObject(BucketName bucket, String object, long generation) { + DeleteObjectRequest request = + DeleteObjectRequest.newBuilder() + .setBucket(bucket == null ? null : bucket.toString()) + .setObject(object) + .setGeneration(generation) + .build(); + deleteObject(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for + * the bucket, or if the generation parameter is used, or if soft delete is not enabled for the + * bucket. When this API is used to delete an object from a bucket that has soft delete policy + * enabled, the object becomes soft deleted, and the `softDeleteTime` and `hardDeleteTime` + * properties are set on the object. This API cannot be used to permanently delete soft-deleted + * objects. Soft-deleted objects are permanently deleted according to their `hardDeleteTime`. + * + *

You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject] API to restore + * soft-deleted objects until the soft delete retention period has passed. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.delete` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String bucket = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   String object = "object-1023368385";
+   *   long generation = 305703192;
+   *   storageClient.deleteObject(bucket, object, generation);
+   * }
+   * }
+ * + * @param bucket Required. Name of the bucket in which the object resides. + * @param object Required. The name of the finalized object to delete. Note: If you want to delete + * an unfinalized resumable upload please use `CancelResumableWrite`. + * @param generation Optional. If present, permanently deletes a specific revision of this object + * (as opposed to the latest version, the default). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteObject(String bucket, String object, long generation) { + DeleteObjectRequest request = + DeleteObjectRequest.newBuilder() + .setBucket(bucket) + .setObject(object) + .setGeneration(generation) + .build(); + deleteObject(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for + * the bucket, or if the generation parameter is used, or if soft delete is not enabled for the + * bucket. When this API is used to delete an object from a bucket that has soft delete policy + * enabled, the object becomes soft deleted, and the `softDeleteTime` and `hardDeleteTime` + * properties are set on the object. This API cannot be used to permanently delete soft-deleted + * objects. Soft-deleted objects are permanently deleted according to their `hardDeleteTime`. + * + *

You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject] API to restore + * soft-deleted objects until the soft delete retention period has passed. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.delete` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   DeleteObjectRequest request =
+   *       DeleteObjectRequest.newBuilder()
+   *           .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setObject("object-1023368385")
+   *           .setGeneration(305703192)
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfGenerationNotMatch(1475720404)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .build();
+   *   storageClient.deleteObject(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteObject(DeleteObjectRequest request) { + deleteObjectCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for + * the bucket, or if the generation parameter is used, or if soft delete is not enabled for the + * bucket. When this API is used to delete an object from a bucket that has soft delete policy + * enabled, the object becomes soft deleted, and the `softDeleteTime` and `hardDeleteTime` + * properties are set on the object. This API cannot be used to permanently delete soft-deleted + * objects. Soft-deleted objects are permanently deleted according to their `hardDeleteTime`. + * + *

You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject] API to restore + * soft-deleted objects until the soft delete retention period has passed. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.delete` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   DeleteObjectRequest request =
+   *       DeleteObjectRequest.newBuilder()
+   *           .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setObject("object-1023368385")
+   *           .setGeneration(305703192)
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfGenerationNotMatch(1475720404)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = storageClient.deleteObjectCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable deleteObjectCallable() { + return stub.deleteObjectCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Restores a soft-deleted object. When a soft-deleted object is restored, a new copy of that + * object is created in the same bucket and inherits the same metadata as the soft-deleted object. + * The inherited metadata is the metadata that existed when the original object became soft + * deleted, with the following exceptions: + * + *

- The `createTime` of the new object is set to the time at which the soft-deleted object was + * restored. - The `softDeleteTime` and `hardDeleteTime` values are cleared. - A new generation is + * assigned and the metageneration is reset to 1. - If the soft-deleted object was in a bucket + * that had Autoclass enabled, the new object is restored to Standard storage. - The restored + * object inherits the bucket's default object ACL, unless `copySourceAcl` is `true`. + * + *

If a live object using the same name already exists in the bucket and becomes overwritten, + * the live object becomes a noncurrent object if Object Versioning is enabled on the bucket. If + * Object Versioning is not enabled, the live object becomes soft deleted. + * + *

**IAM Permissions**: + * + *

Requires the following IAM permissions to use this method: + * + *

- `storage.objects.restore` - `storage.objects.create` - `storage.objects.delete` (only + * required if overwriting an existing object) - `storage.objects.getIamPolicy` (only required if + * `projection` is `full` and the relevant bucket has uniform bucket-level access disabled) - + * `storage.objects.setIamPolicy` (only required if `copySourceAcl` is `true` and the relevant + * bucket has uniform bucket-level access disabled) + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   String object = "object-1023368385";
+   *   long generation = 305703192;
+   *   Object response = storageClient.restoreObject(bucket, object, generation);
+   * }
+   * }
+ * + * @param bucket Required. Name of the bucket in which the object resides. + * @param object Required. The name of the object to restore. + * @param generation Required. The specific revision of the object to restore. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Object restoreObject(BucketName bucket, String object, long generation) { + RestoreObjectRequest request = + RestoreObjectRequest.newBuilder() + .setBucket(bucket == null ? null : bucket.toString()) + .setObject(object) + .setGeneration(generation) + .build(); + return restoreObject(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Restores a soft-deleted object. When a soft-deleted object is restored, a new copy of that + * object is created in the same bucket and inherits the same metadata as the soft-deleted object. + * The inherited metadata is the metadata that existed when the original object became soft + * deleted, with the following exceptions: + * + *

- The `createTime` of the new object is set to the time at which the soft-deleted object was + * restored. - The `softDeleteTime` and `hardDeleteTime` values are cleared. - A new generation is + * assigned and the metageneration is reset to 1. - If the soft-deleted object was in a bucket + * that had Autoclass enabled, the new object is restored to Standard storage. - The restored + * object inherits the bucket's default object ACL, unless `copySourceAcl` is `true`. + * + *

If a live object using the same name already exists in the bucket and becomes overwritten, + * the live object becomes a noncurrent object if Object Versioning is enabled on the bucket. If + * Object Versioning is not enabled, the live object becomes soft deleted. + * + *

**IAM Permissions**: + * + *

Requires the following IAM permissions to use this method: + * + *

- `storage.objects.restore` - `storage.objects.create` - `storage.objects.delete` (only + * required if overwriting an existing object) - `storage.objects.getIamPolicy` (only required if + * `projection` is `full` and the relevant bucket has uniform bucket-level access disabled) - + * `storage.objects.setIamPolicy` (only required if `copySourceAcl` is `true` and the relevant + * bucket has uniform bucket-level access disabled) + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String bucket = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   String object = "object-1023368385";
+   *   long generation = 305703192;
+   *   Object response = storageClient.restoreObject(bucket, object, generation);
+   * }
+   * }
+ * + * @param bucket Required. Name of the bucket in which the object resides. + * @param object Required. The name of the object to restore. + * @param generation Required. The specific revision of the object to restore. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Object restoreObject(String bucket, String object, long generation) { + RestoreObjectRequest request = + RestoreObjectRequest.newBuilder() + .setBucket(bucket) + .setObject(object) + .setGeneration(generation) + .build(); + return restoreObject(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Restores a soft-deleted object. When a soft-deleted object is restored, a new copy of that + * object is created in the same bucket and inherits the same metadata as the soft-deleted object. + * The inherited metadata is the metadata that existed when the original object became soft + * deleted, with the following exceptions: + * + *

- The `createTime` of the new object is set to the time at which the soft-deleted object was + * restored. - The `softDeleteTime` and `hardDeleteTime` values are cleared. - A new generation is + * assigned and the metageneration is reset to 1. - If the soft-deleted object was in a bucket + * that had Autoclass enabled, the new object is restored to Standard storage. - The restored + * object inherits the bucket's default object ACL, unless `copySourceAcl` is `true`. + * + *

If a live object using the same name already exists in the bucket and becomes overwritten, + * the live object becomes a noncurrent object if Object Versioning is enabled on the bucket. If + * Object Versioning is not enabled, the live object becomes soft deleted. + * + *

**IAM Permissions**: + * + *

Requires the following IAM permissions to use this method: + * + *

- `storage.objects.restore` - `storage.objects.create` - `storage.objects.delete` (only + * required if overwriting an existing object) - `storage.objects.getIamPolicy` (only required if + * `projection` is `full` and the relevant bucket has uniform bucket-level access disabled) - + * `storage.objects.setIamPolicy` (only required if `copySourceAcl` is `true` and the relevant + * bucket has uniform bucket-level access disabled) + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   RestoreObjectRequest request =
+   *       RestoreObjectRequest.newBuilder()
+   *           .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setObject("object-1023368385")
+   *           .setGeneration(305703192)
+   *           .setRestoreToken("restoreToken1638686731")
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfGenerationNotMatch(1475720404)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setCopySourceAcl(true)
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .build();
+   *   Object response = storageClient.restoreObject(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Object restoreObject(RestoreObjectRequest request) { + return restoreObjectCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Restores a soft-deleted object. When a soft-deleted object is restored, a new copy of that + * object is created in the same bucket and inherits the same metadata as the soft-deleted object. + * The inherited metadata is the metadata that existed when the original object became soft + * deleted, with the following exceptions: + * + *

- The `createTime` of the new object is set to the time at which the soft-deleted object was + * restored. - The `softDeleteTime` and `hardDeleteTime` values are cleared. - A new generation is + * assigned and the metageneration is reset to 1. - If the soft-deleted object was in a bucket + * that had Autoclass enabled, the new object is restored to Standard storage. - The restored + * object inherits the bucket's default object ACL, unless `copySourceAcl` is `true`. + * + *

If a live object using the same name already exists in the bucket and becomes overwritten, + * the live object becomes a noncurrent object if Object Versioning is enabled on the bucket. If + * Object Versioning is not enabled, the live object becomes soft deleted. + * + *

**IAM Permissions**: + * + *

Requires the following IAM permissions to use this method: + * + *

- `storage.objects.restore` - `storage.objects.create` - `storage.objects.delete` (only + * required if overwriting an existing object) - `storage.objects.getIamPolicy` (only required if + * `projection` is `full` and the relevant bucket has uniform bucket-level access disabled) - + * `storage.objects.setIamPolicy` (only required if `copySourceAcl` is `true` and the relevant + * bucket has uniform bucket-level access disabled) + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   RestoreObjectRequest request =
+   *       RestoreObjectRequest.newBuilder()
+   *           .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setObject("object-1023368385")
+   *           .setGeneration(305703192)
+   *           .setRestoreToken("restoreToken1638686731")
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfGenerationNotMatch(1475720404)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setCopySourceAcl(true)
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = storageClient.restoreObjectCallable().futureCall(request);
+   *   // Do something.
+   *   Object response = future.get();
+   * }
+   * }
+   */
+  public final UnaryCallable restoreObjectCallable() {
+    return stub.restoreObjectCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Cancels an in-progress resumable upload.
+   *
+   * 

Any attempts to write to the resumable upload after cancelling the upload fail. + * + *

The behavior for any in-progress write operations is not guaranteed; they could either + * complete before the cancellation or fail if the cancellation completes first. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String uploadId = "uploadId1563990780";
+   *   CancelResumableWriteResponse response = storageClient.cancelResumableWrite(uploadId);
+   * }
+   * }
+ * + * @param uploadId Required. The upload_id of the resumable upload to cancel. This should be + * copied from the `upload_id` field of `StartResumableWriteResponse`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final CancelResumableWriteResponse cancelResumableWrite(String uploadId) { + CancelResumableWriteRequest request = + CancelResumableWriteRequest.newBuilder().setUploadId(uploadId).build(); + return cancelResumableWrite(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Cancels an in-progress resumable upload. + * + *

Any attempts to write to the resumable upload after cancelling the upload fail. + * + *

The behavior for any in-progress write operations is not guaranteed; they could either + * complete before the cancellation or fail if the cancellation completes first. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   CancelResumableWriteRequest request =
+   *       CancelResumableWriteRequest.newBuilder().setUploadId("uploadId1563990780").build();
+   *   CancelResumableWriteResponse response = storageClient.cancelResumableWrite(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final CancelResumableWriteResponse cancelResumableWrite( + CancelResumableWriteRequest request) { + return cancelResumableWriteCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Cancels an in-progress resumable upload. + * + *

Any attempts to write to the resumable upload after cancelling the upload fail. + * + *

The behavior for any in-progress write operations is not guaranteed; they could either + * complete before the cancellation or fail if the cancellation completes first. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   CancelResumableWriteRequest request =
+   *       CancelResumableWriteRequest.newBuilder().setUploadId("uploadId1563990780").build();
+   *   ApiFuture future =
+   *       storageClient.cancelResumableWriteCallable().futureCall(request);
+   *   // Do something.
+   *   CancelResumableWriteResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + cancelResumableWriteCallable() { + return stub.cancelResumableWriteCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves object metadata. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.get` IAM permission on the bucket. To return object ACLs, the + * authenticated user must also have the `storage.objects.getIamPolicy` permission. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   String object = "object-1023368385";
+   *   Object response = storageClient.getObject(bucket, object);
+   * }
+   * }
+ * + * @param bucket Required. Name of the bucket in which the object resides. + * @param object Required. Name of the object. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Object getObject(BucketName bucket, String object) { + GetObjectRequest request = + GetObjectRequest.newBuilder() + .setBucket(bucket == null ? null : bucket.toString()) + .setObject(object) + .build(); + return getObject(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves object metadata. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.get` IAM permission on the bucket. To return object ACLs, the + * authenticated user must also have the `storage.objects.getIamPolicy` permission. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String bucket = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   String object = "object-1023368385";
+   *   Object response = storageClient.getObject(bucket, object);
+   * }
+   * }
+ * + * @param bucket Required. Name of the bucket in which the object resides. + * @param object Required. Name of the object. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Object getObject(String bucket, String object) { + GetObjectRequest request = + GetObjectRequest.newBuilder().setBucket(bucket).setObject(object).build(); + return getObject(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves object metadata. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.get` IAM permission on the bucket. To return object ACLs, the + * authenticated user must also have the `storage.objects.getIamPolicy` permission. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   String object = "object-1023368385";
+   *   long generation = 305703192;
+   *   Object response = storageClient.getObject(bucket, object, generation);
+   * }
+   * }
+ * + * @param bucket Required. Name of the bucket in which the object resides. + * @param object Required. Name of the object. + * @param generation Optional. If present, selects a specific revision of this object (as opposed + * to the latest version, the default). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Object getObject(BucketName bucket, String object, long generation) { + GetObjectRequest request = + GetObjectRequest.newBuilder() + .setBucket(bucket == null ? null : bucket.toString()) + .setObject(object) + .setGeneration(generation) + .build(); + return getObject(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves object metadata. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.get` IAM permission on the bucket. To return object ACLs, the + * authenticated user must also have the `storage.objects.getIamPolicy` permission. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String bucket = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   String object = "object-1023368385";
+   *   long generation = 305703192;
+   *   Object response = storageClient.getObject(bucket, object, generation);
+   * }
+   * }
+ * + * @param bucket Required. Name of the bucket in which the object resides. + * @param object Required. Name of the object. + * @param generation Optional. If present, selects a specific revision of this object (as opposed + * to the latest version, the default). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Object getObject(String bucket, String object, long generation) { + GetObjectRequest request = + GetObjectRequest.newBuilder() + .setBucket(bucket) + .setObject(object) + .setGeneration(generation) + .build(); + return getObject(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves object metadata. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.get` IAM permission on the bucket. To return object ACLs, the + * authenticated user must also have the `storage.objects.getIamPolicy` permission. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   GetObjectRequest request =
+   *       GetObjectRequest.newBuilder()
+   *           .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setObject("object-1023368385")
+   *           .setGeneration(305703192)
+   *           .setSoftDeleted(true)
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfGenerationNotMatch(1475720404)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .setReadMask(FieldMask.newBuilder().build())
+   *           .setRestoreToken("restoreToken1638686731")
+   *           .build();
+   *   Object response = storageClient.getObject(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Object getObject(GetObjectRequest request) { + return getObjectCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves object metadata. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.get` IAM permission on the bucket. To return object ACLs, the + * authenticated user must also have the `storage.objects.getIamPolicy` permission. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   GetObjectRequest request =
+   *       GetObjectRequest.newBuilder()
+   *           .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setObject("object-1023368385")
+   *           .setGeneration(305703192)
+   *           .setSoftDeleted(true)
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfGenerationNotMatch(1475720404)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .setReadMask(FieldMask.newBuilder().build())
+   *           .setRestoreToken("restoreToken1638686731")
+   *           .build();
+   *   ApiFuture<Object> future = storageClient.getObjectCallable().futureCall(request);
+   *   // Do something.
+   *   Object response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<GetObjectRequest, Object> getObjectCallable() {
+    return stub.getObjectCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Retrieves object data.
+   *
+   * <p>**IAM Permissions**:
+   *
+   * <p>Requires `storage.objects.get` IAM permission on the bucket.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ReadObjectRequest request =
+   *       ReadObjectRequest.newBuilder()
+   *           .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setObject("object-1023368385")
+   *           .setGeneration(305703192)
+   *           .setReadOffset(-715377828)
+   *           .setReadLimit(-164298798)
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfGenerationNotMatch(1475720404)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .setReadMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ServerStream<ReadObjectResponse> stream = storageClient.readObjectCallable().call(request);
+   *   for (ReadObjectResponse response : stream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }</pre>
+   */
+  public final ServerStreamingCallable<ReadObjectRequest, ReadObjectResponse>
+      readObjectCallable() {
+    return stub.readObjectCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Reads an object's data.
+   *
+   * <p>This bi-directional API reads data from an object, allowing you to request multiple data
+   * ranges within a single stream, even across several messages. If an error occurs with any
+   * request, the stream closes with a relevant error code. Since you can have multiple outstanding
+   * requests, the error response includes a `BidiReadObjectError` proto in its `details` field,
+   * reporting the specific error, if any, for each pending `read_id`.
+   *
+   * <p>**IAM Permissions**:
+   *
+   * <p>Requires `storage.objects.get` IAM permission on the bucket.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   BidiStream<BidiReadObjectRequest, BidiReadObjectResponse> bidiStream =
+   *       storageClient.bidiReadObjectCallable().call();
+   *   BidiReadObjectRequest request =
+   *       BidiReadObjectRequest.newBuilder()
+   *           .setReadObjectSpec(BidiReadObjectSpec.newBuilder().build())
+   *           .addAllReadRanges(new ArrayList<ReadRange>())
+   *           .build();
+   *   bidiStream.send(request);
+   *   for (BidiReadObjectResponse response : bidiStream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }</pre>
+   */
+  public final BidiStreamingCallable<BidiReadObjectRequest, BidiReadObjectResponse>
+      bidiReadObjectCallable() {
+    return stub.bidiReadObjectCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Updates an object's metadata. Equivalent to JSON API's `storage.objects.patch` method.
+   *
+   * <p>**IAM Permissions**:
+   *
+   * <p>Requires `storage.objects.update` IAM permission on the bucket.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   Object object = Object.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   Object response = storageClient.updateObject(object, updateMask);
+   * }
+   * }</pre>
+   *
+   * @param object Required. The object to update. The object's bucket and name fields are used to
+   *     identify the object to update. If present, the object's generation field selects a specific
+   *     revision of this object whose metadata should be updated. Otherwise, assumes the live
+   *     version of the object.
+   * @param updateMask Required. List of fields to be updated.
+   *     <p>To specify ALL fields, equivalent to the JSON API's "update" function, specify a single
+   *     field with the value `*`. Note: not recommended. If a new field is introduced at a
+   *     later time, an older client updating with the `*` might accidentally reset the new
+   *     field's value.
+   *     <p>Not specifying any fields is an error.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Object updateObject(Object object, FieldMask updateMask) {
+    UpdateObjectRequest request =
+        UpdateObjectRequest.newBuilder().setObject(object).setUpdateMask(updateMask).build();
+    return updateObject(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Updates an object's metadata. Equivalent to JSON API's `storage.objects.patch` method.
+   *
+   * <p>**IAM Permissions**:
+   *
+   * <p>Requires `storage.objects.update` IAM permission on the bucket.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   UpdateObjectRequest request =
+   *       UpdateObjectRequest.newBuilder()
+   *           .setObject(Object.newBuilder().build())
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfGenerationNotMatch(1475720404)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setPredefinedAcl("predefinedAcl1207041188")
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .setOverrideUnlockedRetention(true)
+   *           .build();
+   *   Object response = storageClient.updateObject(request);
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Object updateObject(UpdateObjectRequest request) {
+    return updateObjectCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Updates an object's metadata. Equivalent to JSON API's `storage.objects.patch` method.
+   *
+   * <p>**IAM Permissions**:
+   *
+   * <p>Requires `storage.objects.update` IAM permission on the bucket.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   UpdateObjectRequest request =
+   *       UpdateObjectRequest.newBuilder()
+   *           .setObject(Object.newBuilder().build())
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfGenerationNotMatch(1475720404)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setPredefinedAcl("predefinedAcl1207041188")
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .setOverrideUnlockedRetention(true)
+   *           .build();
+   *   ApiFuture<Object> future = storageClient.updateObjectCallable().futureCall(request);
+   *   // Do something.
+   *   Object response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<UpdateObjectRequest, Object> updateObjectCallable() {
+    return stub.updateObjectCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Stores a new object and metadata.
+   *
+   * <p>An object can be written either in a single message stream or in a resumable sequence of
+   * message streams. To write using a single stream, the client should include in the first message
+   * of the stream an `WriteObjectSpec` describing the destination bucket, object, and any
+   * preconditions. Additionally, the final message must set 'finish_write' to true, or else it is
+   * an error.
+   *
+   * <p>For a resumable write, the client should instead call `StartResumableWrite()`, populating a
+   * `WriteObjectSpec` into that request. They should then attach the returned `upload_id` to the
+   * first message of each following call to `WriteObject`. If the stream is closed before finishing
+   * the upload (either explicitly by the client or due to a network error or an error response from
+   * the server), the client should do as follows:
+   *
+   * <p>- Check the result Status of the stream, to determine if writing can be resumed on this
+   * stream or must be restarted from scratch (by calling `StartResumableWrite()`). The resumable
+   * errors are `DEADLINE_EXCEEDED`, `INTERNAL`, and `UNAVAILABLE`. For each case, the client should
+   * use binary exponential backoff before retrying. Additionally, writes can be resumed after
+   * `RESOURCE_EXHAUSTED` errors, but only after taking appropriate measures, which might include
+   * reducing aggregate send rate across clients and/or requesting a quota increase for your
+   * project. - If the call to `WriteObject` returns `ABORTED`, that indicates concurrent attempts
+   * to update the resumable write, caused either by multiple racing clients or by a single client
+   * where the previous request was timed out on the client side but nonetheless reached the server.
+   * In this case the client should take steps to prevent further concurrent writes. For example,
+   * increase the timeouts and stop using more than one process to perform the upload. Follow the
+   * steps below for resuming the upload. - For resumable errors, the client should call
+   * `QueryWriteStatus()` and then continue writing from the returned `persisted_size`. This might
+   * be less than the amount of data the client previously sent. Note also that it is acceptable to
+   * send data starting at an offset earlier than the returned `persisted_size`; in this case, the
+   * service skips data at offsets that were already persisted (without checking that it matches the
+   * previously written data), and write only the data starting from the persisted offset. Even
+   * though the data isn't written, it might still incur a performance cost over resuming at the
+   * correct write offset. This behavior can make client-side handling simpler in some cases. -
+   * Clients must only send data that is a multiple of 256 KiB per message, unless the object is
+   * being finished with `finish_write` set to `true`.
+   *
+   * <p>The service does not view the object as complete until the client has sent a
+   * `WriteObjectRequest` with `finish_write` set to `true`. Sending any requests on a stream after
+   * sending a request with `finish_write` set to `true` causes an error. The client must check the
+   * response it receives to determine how much data the service is able to commit and whether the
+   * service views the object as complete.
+   *
+   * <p>Attempting to resume an already finalized object results in an `OK` status, with a
+   * `WriteObjectResponse` containing the finalized object's metadata.
+   *
+   * <p>Alternatively, you can use the `BidiWriteObject` operation to write an object with controls
+   * over flushing and the ability to fetch the ability to determine the current persisted size.
+   *
+   * <p>**IAM Permissions**:
+   *
+   * <p>Requires `storage.objects.create` IAM permission on the bucket.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ApiStreamObserver responseObserver =
+   *       new ApiStreamObserver() {
+   *         {@literal @}Override
+   *         public void onNext(WriteObjectResponse response) {
+   *           // Do something when a response is received.
+   *         }
+   *
+   *         {@literal @}Override
+   *         public void onError(Throwable t) {
+   *           // Add error-handling
+   *         }
+   *
+   *         {@literal @}Override
+   *         public void onCompleted() {
+   *           // Do something when complete.
+   *         }
+   *       };
+   *   ApiStreamObserver requestObserver =
+   *       storageClient.writeObject().clientStreamingCall(responseObserver);
+   *   WriteObjectRequest request =
+   *       WriteObjectRequest.newBuilder()
+   *           .setWriteOffset(-1559543565)
+   *           .setObjectChecksums(ObjectChecksums.newBuilder().build())
+   *           .setFinishWrite(true)
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .build();
+   *   requestObserver.onNext(request);
+   * }
+   * }</pre>
+   */
+  public final ClientStreamingCallable<WriteObjectRequest, WriteObjectResponse>
+      writeObjectCallable() {
+    return stub.writeObjectCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Stores a new object and metadata.
+   *
+   * <p>This is similar to the `WriteObject` call with the added support for manual flushing of
+   * persisted state, and the ability to determine current persisted size without closing the
+   * stream.
+   *
+   * <p>The client might specify one or both of the `state_lookup` and `flush` fields in each
+   * `BidiWriteObjectRequest`. If `flush` is specified, the data written so far is persisted to
+   * storage. If `state_lookup` is specified, the service responds with a `BidiWriteObjectResponse`
+   * that contains the persisted size. If both `flush` and `state_lookup` are specified, the flush
+   * always occurs before a `state_lookup`, so that both might be set in the same request and the
+   * returned state is the state of the object post-flush. When the stream is closed, a
+   * `BidiWriteObjectResponse` is always sent to the client, regardless of the value of
+   * `state_lookup`.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   BidiStream<BidiWriteObjectRequest, BidiWriteObjectResponse> bidiStream =
+   *       storageClient.bidiWriteObjectCallable().call();
+   *   BidiWriteObjectRequest request =
+   *       BidiWriteObjectRequest.newBuilder()
+   *           .setWriteOffset(-1559543565)
+   *           .setObjectChecksums(ObjectChecksums.newBuilder().build())
+   *           .setStateLookup(true)
+   *           .setFlush(true)
+   *           .setFinishWrite(true)
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .build();
+   *   bidiStream.send(request);
+   *   for (BidiWriteObjectResponse response : bidiStream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }</pre>
+   */
+  public final BidiStreamingCallable<BidiWriteObjectRequest, BidiWriteObjectResponse>
+      bidiWriteObjectCallable() {
+    return stub.bidiWriteObjectCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Retrieves a list of objects matching the criteria.
+   *
+   * <p>**IAM Permissions**:
+   *
+   * <p>The authenticated user requires `storage.objects.list` IAM permission to use this method. To
+   * return object ACLs, the authenticated user must also have the `storage.objects.getIamPolicy`
+   * permission.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   for (Object element : storageClient.listObjects(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   *
+   * @param parent Required. Name of the bucket in which to look for objects.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListObjectsPagedResponse listObjects(BucketName parent) {
+    ListObjectsRequest request =
+        ListObjectsRequest.newBuilder()
+            .setParent(parent == null ? null : parent.toString())
+            .build();
+    return listObjects(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Retrieves a list of objects matching the criteria.
+   *
+   * <p>**IAM Permissions**:
+   *
+   * <p>The authenticated user requires `storage.objects.list` IAM permission to use this method. To
+   * return object ACLs, the authenticated user must also have the `storage.objects.getIamPolicy`
+   * permission.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String parent = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   for (Object element : storageClient.listObjects(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   *
+   * @param parent Required. Name of the bucket in which to look for objects.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListObjectsPagedResponse listObjects(String parent) {
+    ListObjectsRequest request = ListObjectsRequest.newBuilder().setParent(parent).build();
+    return listObjects(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Retrieves a list of objects matching the criteria.
+   *
+   * <p>**IAM Permissions**:
+   *
+   * <p>The authenticated user requires `storage.objects.list` IAM permission to use this method. To
+   * return object ACLs, the authenticated user must also have the `storage.objects.getIamPolicy`
+   * permission.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ListObjectsRequest request =
+   *       ListObjectsRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setDelimiter("delimiter-250518009")
+   *           .setIncludeTrailingDelimiter(true)
+   *           .setPrefix("prefix-980110702")
+   *           .setVersions(true)
+   *           .setReadMask(FieldMask.newBuilder().build())
+   *           .setLexicographicStart("lexicographicStart-2093413008")
+   *           .setLexicographicEnd("lexicographicEnd1646968169")
+   *           .setSoftDeleted(true)
+   *           .setIncludeFoldersAsPrefixes(true)
+   *           .setMatchGlob("matchGlob613636317")
+   *           .setFilter("filter-1274492040")
+   *           .build();
+   *   for (Object element : storageClient.listObjects(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListObjectsPagedResponse listObjects(ListObjectsRequest request) {
+    return listObjectsPagedCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Retrieves a list of objects matching the criteria.
+   *
+   * <p>**IAM Permissions**:
+   *
+   * <p>The authenticated user requires `storage.objects.list` IAM permission to use this method. To
+   * return object ACLs, the authenticated user must also have the `storage.objects.getIamPolicy`
+   * permission.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ListObjectsRequest request =
+   *       ListObjectsRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setDelimiter("delimiter-250518009")
+   *           .setIncludeTrailingDelimiter(true)
+   *           .setPrefix("prefix-980110702")
+   *           .setVersions(true)
+   *           .setReadMask(FieldMask.newBuilder().build())
+   *           .setLexicographicStart("lexicographicStart-2093413008")
+   *           .setLexicographicEnd("lexicographicEnd1646968169")
+   *           .setSoftDeleted(true)
+   *           .setIncludeFoldersAsPrefixes(true)
+   *           .setMatchGlob("matchGlob613636317")
+   *           .setFilter("filter-1274492040")
+   *           .build();
+   *   ApiFuture<Object> future = storageClient.listObjectsPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Object element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<ListObjectsRequest, ListObjectsPagedResponse>
+      listObjectsPagedCallable() {
+    return stub.listObjectsPagedCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Retrieves a list of objects matching the criteria.
+   *
+   * <p>**IAM Permissions**:
+   *
+   * <p>The authenticated user requires `storage.objects.list` IAM permission to use this method. To
+   * return object ACLs, the authenticated user must also have the `storage.objects.getIamPolicy`
+   * permission.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   ListObjectsRequest request =
+   *       ListObjectsRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setDelimiter("delimiter-250518009")
+   *           .setIncludeTrailingDelimiter(true)
+   *           .setPrefix("prefix-980110702")
+   *           .setVersions(true)
+   *           .setReadMask(FieldMask.newBuilder().build())
+   *           .setLexicographicStart("lexicographicStart-2093413008")
+   *           .setLexicographicEnd("lexicographicEnd1646968169")
+   *           .setSoftDeleted(true)
+   *           .setIncludeFoldersAsPrefixes(true)
+   *           .setMatchGlob("matchGlob613636317")
+   *           .setFilter("filter-1274492040")
+   *           .build();
+   *   while (true) {
+   *     ListObjectsResponse response = storageClient.listObjectsCallable().call(request);
+   *     for (Object element : response.getObjectsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<ListObjectsRequest, ListObjectsResponse> listObjectsCallable() {
+    return stub.listObjectsCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Rewrites a source object to a destination object. Optionally overrides metadata.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   RewriteObjectRequest request =
+   *       RewriteObjectRequest.newBuilder()
+   *           .setDestinationName("destinationName-1762755655")
+   *           .setDestinationBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setDestinationKmsKey(
+   *               CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]")
+   *                   .toString())
+   *           .setDestination(Object.newBuilder().build())
+   *           .setSourceBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setSourceObject("sourceObject1196439354")
+   *           .setSourceGeneration(1232209852)
+   *           .setRewriteToken("rewriteToken80654285")
+   *           .setDestinationPredefinedAcl("destinationPredefinedAcl1111125814")
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfGenerationNotMatch(1475720404)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setIfSourceGenerationMatch(-1427877280)
+   *           .setIfSourceGenerationNotMatch(1575612532)
+   *           .setIfSourceMetagenerationMatch(1143319909)
+   *           .setIfSourceMetagenerationNotMatch(1900822777)
+   *           .setMaxBytesRewrittenPerCall(1178170730)
+   *           .setCopySourceEncryptionAlgorithm("copySourceEncryptionAlgorithm-1524952548")
+   *           .setCopySourceEncryptionKeyBytes(ByteString.EMPTY)
+   *           .setCopySourceEncryptionKeySha256Bytes(ByteString.EMPTY)
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .setObjectChecksums(ObjectChecksums.newBuilder().build())
+   *           .build();
+   *   RewriteResponse response = storageClient.rewriteObject(request);
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final RewriteResponse rewriteObject(RewriteObjectRequest request) {
+    return rewriteObjectCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Rewrites a source object to a destination object. Optionally overrides metadata.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   RewriteObjectRequest request =
+   *       RewriteObjectRequest.newBuilder()
+   *           .setDestinationName("destinationName-1762755655")
+   *           .setDestinationBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setDestinationKmsKey(
+   *               CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]")
+   *                   .toString())
+   *           .setDestination(Object.newBuilder().build())
+   *           .setSourceBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setSourceObject("sourceObject1196439354")
+   *           .setSourceGeneration(1232209852)
+   *           .setRewriteToken("rewriteToken80654285")
+   *           .setDestinationPredefinedAcl("destinationPredefinedAcl1111125814")
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfGenerationNotMatch(1475720404)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setIfSourceGenerationMatch(-1427877280)
+   *           .setIfSourceGenerationNotMatch(1575612532)
+   *           .setIfSourceMetagenerationMatch(1143319909)
+   *           .setIfSourceMetagenerationNotMatch(1900822777)
+   *           .setMaxBytesRewrittenPerCall(1178170730)
+   *           .setCopySourceEncryptionAlgorithm("copySourceEncryptionAlgorithm-1524952548")
+   *           .setCopySourceEncryptionKeyBytes(ByteString.EMPTY)
+   *           .setCopySourceEncryptionKeySha256Bytes(ByteString.EMPTY)
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .setObjectChecksums(ObjectChecksums.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = storageClient.rewriteObjectCallable().futureCall(request);
+   *   // Do something.
+   *   RewriteResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable rewriteObjectCallable() { + return stub.rewriteObjectCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts a resumable write operation. This method is part of the Resumable upload feature. This + * allows you to upload large objects in multiple chunks, which is more resilient to network + * interruptions than a single upload. The validity duration of the write operation, and the + * consequences of it becoming invalid, are service-dependent. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.create` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   StartResumableWriteRequest request =
+   *       StartResumableWriteRequest.newBuilder()
+   *           .setWriteObjectSpec(WriteObjectSpec.newBuilder().build())
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .setObjectChecksums(ObjectChecksums.newBuilder().build())
+   *           .build();
+   *   StartResumableWriteResponse response = storageClient.startResumableWrite(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final StartResumableWriteResponse startResumableWrite(StartResumableWriteRequest request) { + return startResumableWriteCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts a resumable write operation. This method is part of the Resumable upload feature. This + * allows you to upload large objects in multiple chunks, which is more resilient to network + * interruptions than a single upload. The validity duration of the write operation, and the + * consequences of it becoming invalid, are service-dependent. + * + *

**IAM Permissions**: + * + *

Requires `storage.objects.create` IAM permission on the bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   StartResumableWriteRequest request =
+   *       StartResumableWriteRequest.newBuilder()
+   *           .setWriteObjectSpec(WriteObjectSpec.newBuilder().build())
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .setObjectChecksums(ObjectChecksums.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       storageClient.startResumableWriteCallable().futureCall(request);
+   *   // Do something.
+   *   StartResumableWriteResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + startResumableWriteCallable() { + return stub.startResumableWriteCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Determines the `persisted_size` of an object that is being written. This method is part of the + * resumable upload feature. The returned value is the size of the object that has been persisted + * so far. The value can be used as the `write_offset` for the next `Write()` call. + * + *

If the object does not exist, meaning if it was deleted, or the first `Write()` has not yet + * reached the service, this method returns the error `NOT_FOUND`. + * + *

This method is useful for clients that buffer data and need to know which data can be safely + * evicted. The client can call `QueryWriteStatus()` at any time to determine how much data has + * been logged for this object. For any sequence of `QueryWriteStatus()` calls for a given object + * name, the sequence of returned `persisted_size` values are non-decreasing. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String uploadId = "uploadId1563990780";
+   *   QueryWriteStatusResponse response = storageClient.queryWriteStatus(uploadId);
+   * }
+   * }
+ * + * @param uploadId Required. The name of the resume token for the object whose write status is + * being requested. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final QueryWriteStatusResponse queryWriteStatus(String uploadId) { + QueryWriteStatusRequest request = + QueryWriteStatusRequest.newBuilder().setUploadId(uploadId).build(); + return queryWriteStatus(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Determines the `persisted_size` of an object that is being written. This method is part of the + * resumable upload feature. The returned value is the size of the object that has been persisted + * so far. The value can be used as the `write_offset` for the next `Write()` call. + * + *

If the object does not exist, meaning if it was deleted, or the first `Write()` has not yet + * reached the service, this method returns the error `NOT_FOUND`. + * + *

This method is useful for clients that buffer data and need to know which data can be safely + * evicted. The client can call `QueryWriteStatus()` at any time to determine how much data has + * been logged for this object. For any sequence of `QueryWriteStatus()` calls for a given object + * name, the sequence of returned `persisted_size` values are non-decreasing. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   QueryWriteStatusRequest request =
+   *       QueryWriteStatusRequest.newBuilder()
+   *           .setUploadId("uploadId1563990780")
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .build();
+   *   QueryWriteStatusResponse response = storageClient.queryWriteStatus(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final QueryWriteStatusResponse queryWriteStatus(QueryWriteStatusRequest request) { + return queryWriteStatusCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Determines the `persisted_size` of an object that is being written. This method is part of the + * resumable upload feature. The returned value is the size of the object that has been persisted + * so far. The value can be used as the `write_offset` for the next `Write()` call. + * + *

If the object does not exist, meaning if it was deleted, or the first `Write()` has not yet + * reached the service, this method returns the error `NOT_FOUND`. + * + *

This method is useful for clients that buffer data and need to know which data can be safely + * evicted. The client can call `QueryWriteStatus()` at any time to determine how much data has + * been logged for this object. For any sequence of `QueryWriteStatus()` calls for a given object + * name, the sequence of returned `persisted_size` values are non-decreasing. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   QueryWriteStatusRequest request =
+   *       QueryWriteStatusRequest.newBuilder()
+   *           .setUploadId("uploadId1563990780")
+   *           .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       storageClient.queryWriteStatusCallable().futureCall(request);
+   *   // Do something.
+   *   QueryWriteStatusResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + queryWriteStatusCallable() { + return stub.queryWriteStatusCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Moves the source object to the destination object in the same bucket. This operation moves a + * source object to a destination object in the same bucket by renaming the object. The move + * itself is an atomic transaction, ensuring all steps either complete successfully or no changes + * are made. + * + *

**IAM Permissions**: + * + *

Requires the following IAM permissions to use this method: + * + *

- `storage.objects.move` - `storage.objects.create` - `storage.objects.delete` (only + * required if overwriting an existing object) + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   String sourceObject = "sourceObject1196439354";
+   *   String destinationObject = "destinationObject-1761603347";
+   *   Object response = storageClient.moveObject(bucket, sourceObject, destinationObject);
+   * }
+   * }
+ * + * @param bucket Required. Name of the bucket in which the object resides. + * @param sourceObject Required. Name of the source object. + * @param destinationObject Required. Name of the destination object. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Object moveObject(BucketName bucket, String sourceObject, String destinationObject) { + MoveObjectRequest request = + MoveObjectRequest.newBuilder() + .setBucket(bucket == null ? null : bucket.toString()) + .setSourceObject(sourceObject) + .setDestinationObject(destinationObject) + .build(); + return moveObject(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Moves the source object to the destination object in the same bucket. This operation moves a + * source object to a destination object in the same bucket by renaming the object. The move + * itself is an atomic transaction, ensuring all steps either complete successfully or no changes + * are made. + * + *

**IAM Permissions**: + * + *

Requires the following IAM permissions to use this method: + * + *

- `storage.objects.move` - `storage.objects.create` - `storage.objects.delete` (only + * required if overwriting an existing object) + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   String bucket = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   String sourceObject = "sourceObject1196439354";
+   *   String destinationObject = "destinationObject-1761603347";
+   *   Object response = storageClient.moveObject(bucket, sourceObject, destinationObject);
+   * }
+   * }
+ * + * @param bucket Required. Name of the bucket in which the object resides. + * @param sourceObject Required. Name of the source object. + * @param destinationObject Required. Name of the destination object. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Object moveObject(String bucket, String sourceObject, String destinationObject) { + MoveObjectRequest request = + MoveObjectRequest.newBuilder() + .setBucket(bucket) + .setSourceObject(sourceObject) + .setDestinationObject(destinationObject) + .build(); + return moveObject(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Moves the source object to the destination object in the same bucket. This operation moves a + * source object to a destination object in the same bucket by renaming the object. The move + * itself is an atomic transaction, ensuring all steps either complete successfully or no changes + * are made. + * + *

**IAM Permissions**: + * + *

Requires the following IAM permissions to use this method: + * + *

- `storage.objects.move` - `storage.objects.create` - `storage.objects.delete` (only + * required if overwriting an existing object) + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   MoveObjectRequest request =
+   *       MoveObjectRequest.newBuilder()
+   *           .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setSourceObject("sourceObject1196439354")
+   *           .setDestinationObject("destinationObject-1761603347")
+   *           .setIfSourceGenerationMatch(-1427877280)
+   *           .setIfSourceGenerationNotMatch(1575612532)
+   *           .setIfSourceMetagenerationMatch(1143319909)
+   *           .setIfSourceMetagenerationNotMatch(1900822777)
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfGenerationNotMatch(1475720404)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .build();
+   *   Object response = storageClient.moveObject(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Object moveObject(MoveObjectRequest request) { + return moveObjectCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Moves the source object to the destination object in the same bucket. This operation moves a + * source object to a destination object in the same bucket by renaming the object. The move + * itself is an atomic transaction, ensuring all steps either complete successfully or no changes + * are made. + * + *

**IAM Permissions**: + * + *

Requires the following IAM permissions to use this method: + * + *

- `storage.objects.move` - `storage.objects.create` - `storage.objects.delete` (only + * required if overwriting an existing object) + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageClient storageClient = StorageClient.create()) {
+   *   MoveObjectRequest request =
+   *       MoveObjectRequest.newBuilder()
+   *           .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setSourceObject("sourceObject1196439354")
+   *           .setDestinationObject("destinationObject-1761603347")
+   *           .setIfSourceGenerationMatch(-1427877280)
+   *           .setIfSourceGenerationNotMatch(1575612532)
+   *           .setIfSourceMetagenerationMatch(1143319909)
+   *           .setIfSourceMetagenerationNotMatch(1900822777)
+   *           .setIfGenerationMatch(-1086241088)
+   *           .setIfGenerationNotMatch(1475720404)
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .build();
+   *   ApiFuture future = storageClient.moveObjectCallable().futureCall(request);
+   *   // Do something.
+   *   Object response = future.get();
+   * }
+   * }
+   */
+  public final UnaryCallable moveObjectCallable() {
+    return stub.moveObjectCallable();
+  }
+
+  @Override
+  public final void close() {
+    stub.close();
+  }
+
+  @Override
+  public void shutdown() {
+    stub.shutdown();
+  }
+
+  @Override
+  public boolean isShutdown() {
+    return stub.isShutdown();
+  }
+
+  @Override
+  public boolean isTerminated() {
+    return stub.isTerminated();
+  }
+
+  @Override
+  public void shutdownNow() {
+    stub.shutdownNow();
+  }
+
+  @Override
+  public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException {
+    return stub.awaitTermination(duration, unit);
+  }
+
+  public static class ListBucketsPagedResponse
+      extends AbstractPagedListResponse<
+          ListBucketsRequest,
+          ListBucketsResponse,
+          Bucket,
+          ListBucketsPage,
+          ListBucketsFixedSizeCollection> {
+
+    public static ApiFuture createAsync(
+        PageContext context,
+        ApiFuture futureResponse) {
+      ApiFuture futurePage =
+          ListBucketsPage.createEmptyPage().createPageAsync(context, futureResponse);
+      return ApiFutures.transform(
+          futurePage, input -> new ListBucketsPagedResponse(input), MoreExecutors.directExecutor());
+    }
+
+    private ListBucketsPagedResponse(ListBucketsPage page) {
+      super(page, ListBucketsFixedSizeCollection.createEmptyCollection());
+    }
+  }
+
+  public static class ListBucketsPage
+      extends AbstractPage {
+
+    private ListBucketsPage(
+        PageContext context,
+        ListBucketsResponse response) {
+      super(context, response);
+    }
+
+    private static ListBucketsPage createEmptyPage() {
+      return new ListBucketsPage(null, null);
+    }
+
+    @Override
+    protected ListBucketsPage createPage(
+        PageContext context,
+        ListBucketsResponse response) {
+      return new ListBucketsPage(context, response);
+    }
+
+    @Override
+    public ApiFuture createPageAsync(
+        PageContext context,
+        ApiFuture futureResponse) {
+      return super.createPageAsync(context, futureResponse);
+    }
+  }
+
+  public static class ListBucketsFixedSizeCollection
+      extends AbstractFixedSizeCollection<
+          ListBucketsRequest,
+          ListBucketsResponse,
+          Bucket,
+          ListBucketsPage,
+          ListBucketsFixedSizeCollection> {
+
+    private ListBucketsFixedSizeCollection(List pages, int collectionSize) {
+      super(pages, collectionSize);
+    }
+
+    private static ListBucketsFixedSizeCollection createEmptyCollection() {
+      return new ListBucketsFixedSizeCollection(null, 0);
+    }
+
+    @Override
+    protected ListBucketsFixedSizeCollection createCollection(
+        List pages, int collectionSize) {
+      return new ListBucketsFixedSizeCollection(pages, collectionSize);
+    }
+  }
+
+  public static class ListObjectsPagedResponse
+      extends AbstractPagedListResponse<
+          ListObjectsRequest,
+          ListObjectsResponse,
+          Object,
+          ListObjectsPage,
+          ListObjectsFixedSizeCollection> {
+
+    public static ApiFuture createAsync(
+        PageContext context,
+        ApiFuture futureResponse) {
+      ApiFuture futurePage =
+          ListObjectsPage.createEmptyPage().createPageAsync(context, futureResponse);
+      return ApiFutures.transform(
+          futurePage, input -> new ListObjectsPagedResponse(input), MoreExecutors.directExecutor());
+    }
+
+    private ListObjectsPagedResponse(ListObjectsPage page) {
+      super(page, ListObjectsFixedSizeCollection.createEmptyCollection());
+    }
+  }
+
+  public static class ListObjectsPage
+      extends AbstractPage {
+
+    private ListObjectsPage(
+        PageContext context,
+        ListObjectsResponse response) {
+      super(context, response);
+    }
+
+    private static ListObjectsPage createEmptyPage() {
+      return new ListObjectsPage(null, null);
+    }
+
+    @Override
+    protected ListObjectsPage createPage(
+        PageContext context,
+        ListObjectsResponse response) {
+      return new ListObjectsPage(context, response);
+    }
+
+    @Override
+    public ApiFuture createPageAsync(
+        PageContext context,
+        ApiFuture futureResponse) {
+      return super.createPageAsync(context, futureResponse);
+    }
+  }
+
+  public static class ListObjectsFixedSizeCollection
+      extends AbstractFixedSizeCollection<
+          ListObjectsRequest,
+          ListObjectsResponse,
+          Object,
+          ListObjectsPage,
+          ListObjectsFixedSizeCollection> {
+
+    private ListObjectsFixedSizeCollection(List pages, int collectionSize) {
+      super(pages, collectionSize);
+    }
+
+    private static ListObjectsFixedSizeCollection createEmptyCollection() {
+      return new ListObjectsFixedSizeCollection(null, 0);
+    }
+
+    @Override
+    protected ListObjectsFixedSizeCollection createCollection(
+        List pages, int collectionSize) {
+      return new ListObjectsFixedSizeCollection(pages, collectionSize);
+    }
+  }
+}
diff --git a/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StorageSettings.java b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StorageSettings.java
new file mode 100644
index 000000000000..39158d843b94
--- /dev/null
+++ b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StorageSettings.java
@@ -0,0 +1,460 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.storage.v2;
+
+import static com.google.storage.v2.StorageClient.ListBucketsPagedResponse;
+import static com.google.storage.v2.StorageClient.ListObjectsPagedResponse;
+
+import com.google.api.core.ApiFunction;
+import com.google.api.gax.core.GoogleCredentialsProvider;
+import com.google.api.gax.core.InstantiatingExecutorProvider;
+import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider;
+import com.google.api.gax.rpc.ApiClientHeaderProvider;
+import com.google.api.gax.rpc.ClientContext;
+import com.google.api.gax.rpc.ClientSettings;
+import com.google.api.gax.rpc.PagedCallSettings;
+import com.google.api.gax.rpc.ServerStreamingCallSettings;
+import com.google.api.gax.rpc.StreamingCallSettings;
+import com.google.api.gax.rpc.TransportChannelProvider;
+import com.google.api.gax.rpc.UnaryCallSettings;
+import com.google.iam.v1.GetIamPolicyRequest;
+import com.google.iam.v1.Policy;
+import com.google.iam.v1.SetIamPolicyRequest;
+import com.google.iam.v1.TestIamPermissionsRequest;
+import com.google.iam.v1.TestIamPermissionsResponse;
+import com.google.protobuf.Empty;
+import com.google.storage.v2.stub.StorageStubSettings;
+import java.io.IOException;
+import java.util.List;
+import javax.annotation.Generated;
+
+// AUTO-GENERATED DOCUMENTATION AND CLASS.
+/**
+ * Settings class to configure an instance of {@link StorageClient}.
+ *
+ * 

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (storage.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of deleteBucket: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * StorageSettings.Builder storageSettingsBuilder = StorageSettings.newBuilder();
+ * storageSettingsBuilder
+ *     .deleteBucketSettings()
+ *     .setRetrySettings(
+ *         storageSettingsBuilder
+ *             .deleteBucketSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * StorageSettings storageSettings = storageSettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@Generated("by gapic-generator-java") +public class StorageSettings extends ClientSettings { + + /** Returns the object with the settings used for calls to deleteBucket. */ + public UnaryCallSettings deleteBucketSettings() { + return ((StorageStubSettings) getStubSettings()).deleteBucketSettings(); + } + + /** Returns the object with the settings used for calls to getBucket. */ + public UnaryCallSettings getBucketSettings() { + return ((StorageStubSettings) getStubSettings()).getBucketSettings(); + } + + /** Returns the object with the settings used for calls to createBucket. */ + public UnaryCallSettings createBucketSettings() { + return ((StorageStubSettings) getStubSettings()).createBucketSettings(); + } + + /** Returns the object with the settings used for calls to listBuckets. */ + public PagedCallSettings + listBucketsSettings() { + return ((StorageStubSettings) getStubSettings()).listBucketsSettings(); + } + + /** Returns the object with the settings used for calls to lockBucketRetentionPolicy. */ + public UnaryCallSettings + lockBucketRetentionPolicySettings() { + return ((StorageStubSettings) getStubSettings()).lockBucketRetentionPolicySettings(); + } + + /** Returns the object with the settings used for calls to getIamPolicy. */ + public UnaryCallSettings getIamPolicySettings() { + return ((StorageStubSettings) getStubSettings()).getIamPolicySettings(); + } + + /** Returns the object with the settings used for calls to setIamPolicy. */ + public UnaryCallSettings setIamPolicySettings() { + return ((StorageStubSettings) getStubSettings()).setIamPolicySettings(); + } + + /** Returns the object with the settings used for calls to testIamPermissions. 
*/ + public UnaryCallSettings + testIamPermissionsSettings() { + return ((StorageStubSettings) getStubSettings()).testIamPermissionsSettings(); + } + + /** Returns the object with the settings used for calls to updateBucket. */ + public UnaryCallSettings updateBucketSettings() { + return ((StorageStubSettings) getStubSettings()).updateBucketSettings(); + } + + /** Returns the object with the settings used for calls to composeObject. */ + public UnaryCallSettings composeObjectSettings() { + return ((StorageStubSettings) getStubSettings()).composeObjectSettings(); + } + + /** Returns the object with the settings used for calls to deleteObject. */ + public UnaryCallSettings deleteObjectSettings() { + return ((StorageStubSettings) getStubSettings()).deleteObjectSettings(); + } + + /** Returns the object with the settings used for calls to restoreObject. */ + public UnaryCallSettings restoreObjectSettings() { + return ((StorageStubSettings) getStubSettings()).restoreObjectSettings(); + } + + /** Returns the object with the settings used for calls to cancelResumableWrite. */ + public UnaryCallSettings + cancelResumableWriteSettings() { + return ((StorageStubSettings) getStubSettings()).cancelResumableWriteSettings(); + } + + /** Returns the object with the settings used for calls to getObject. */ + public UnaryCallSettings getObjectSettings() { + return ((StorageStubSettings) getStubSettings()).getObjectSettings(); + } + + /** Returns the object with the settings used for calls to readObject. */ + public ServerStreamingCallSettings readObjectSettings() { + return ((StorageStubSettings) getStubSettings()).readObjectSettings(); + } + + /** Returns the object with the settings used for calls to bidiReadObject. */ + public StreamingCallSettings + bidiReadObjectSettings() { + return ((StorageStubSettings) getStubSettings()).bidiReadObjectSettings(); + } + + /** Returns the object with the settings used for calls to updateObject. 
*/ + public UnaryCallSettings updateObjectSettings() { + return ((StorageStubSettings) getStubSettings()).updateObjectSettings(); + } + + /** Returns the object with the settings used for calls to writeObject. */ + public StreamingCallSettings writeObjectSettings() { + return ((StorageStubSettings) getStubSettings()).writeObjectSettings(); + } + + /** Returns the object with the settings used for calls to bidiWriteObject. */ + public StreamingCallSettings + bidiWriteObjectSettings() { + return ((StorageStubSettings) getStubSettings()).bidiWriteObjectSettings(); + } + + /** Returns the object with the settings used for calls to listObjects. */ + public PagedCallSettings + listObjectsSettings() { + return ((StorageStubSettings) getStubSettings()).listObjectsSettings(); + } + + /** Returns the object with the settings used for calls to rewriteObject. */ + public UnaryCallSettings rewriteObjectSettings() { + return ((StorageStubSettings) getStubSettings()).rewriteObjectSettings(); + } + + /** Returns the object with the settings used for calls to startResumableWrite. */ + public UnaryCallSettings + startResumableWriteSettings() { + return ((StorageStubSettings) getStubSettings()).startResumableWriteSettings(); + } + + /** Returns the object with the settings used for calls to queryWriteStatus. */ + public UnaryCallSettings + queryWriteStatusSettings() { + return ((StorageStubSettings) getStubSettings()).queryWriteStatusSettings(); + } + + /** Returns the object with the settings used for calls to moveObject. */ + public UnaryCallSettings moveObjectSettings() { + return ((StorageStubSettings) getStubSettings()).moveObjectSettings(); + } + + public static final StorageSettings create(StorageStubSettings stub) throws IOException { + return new StorageSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. 
*/ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return StorageStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return StorageStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return StorageStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return StorageStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return StorageStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return StorageStubSettings.defaultTransportChannelProvider(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return StorageStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected StorageSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for StorageSettings. 
*/ + public static class Builder extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(StorageStubSettings.newBuilder(clientContext)); + } + + protected Builder(StorageSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(StorageStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(StorageStubSettings.newBuilder()); + } + + public StorageStubSettings.Builder getStubSettingsBuilder() { + return ((StorageStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to deleteBucket. */ + public UnaryCallSettings.Builder deleteBucketSettings() { + return getStubSettingsBuilder().deleteBucketSettings(); + } + + /** Returns the builder for the settings used for calls to getBucket. */ + public UnaryCallSettings.Builder getBucketSettings() { + return getStubSettingsBuilder().getBucketSettings(); + } + + /** Returns the builder for the settings used for calls to createBucket. */ + public UnaryCallSettings.Builder createBucketSettings() { + return getStubSettingsBuilder().createBucketSettings(); + } + + /** Returns the builder for the settings used for calls to listBuckets. */ + public PagedCallSettings.Builder< + ListBucketsRequest, ListBucketsResponse, ListBucketsPagedResponse> + listBucketsSettings() { + return getStubSettingsBuilder().listBucketsSettings(); + } + + /** Returns the builder for the settings used for calls to lockBucketRetentionPolicy. */ + public UnaryCallSettings.Builder + lockBucketRetentionPolicySettings() { + return getStubSettingsBuilder().lockBucketRetentionPolicySettings(); + } + + /** Returns the builder for the settings used for calls to getIamPolicy. */ + public UnaryCallSettings.Builder getIamPolicySettings() { + return getStubSettingsBuilder().getIamPolicySettings(); + } + + /** Returns the builder for the settings used for calls to setIamPolicy. */ + public UnaryCallSettings.Builder setIamPolicySettings() { + return getStubSettingsBuilder().setIamPolicySettings(); + } + + /** Returns the builder for the settings used for calls to testIamPermissions. 
*/ + public UnaryCallSettings.Builder + testIamPermissionsSettings() { + return getStubSettingsBuilder().testIamPermissionsSettings(); + } + + /** Returns the builder for the settings used for calls to updateBucket. */ + public UnaryCallSettings.Builder updateBucketSettings() { + return getStubSettingsBuilder().updateBucketSettings(); + } + + /** Returns the builder for the settings used for calls to composeObject. */ + public UnaryCallSettings.Builder composeObjectSettings() { + return getStubSettingsBuilder().composeObjectSettings(); + } + + /** Returns the builder for the settings used for calls to deleteObject. */ + public UnaryCallSettings.Builder deleteObjectSettings() { + return getStubSettingsBuilder().deleteObjectSettings(); + } + + /** Returns the builder for the settings used for calls to restoreObject. */ + public UnaryCallSettings.Builder restoreObjectSettings() { + return getStubSettingsBuilder().restoreObjectSettings(); + } + + /** Returns the builder for the settings used for calls to cancelResumableWrite. */ + public UnaryCallSettings.Builder + cancelResumableWriteSettings() { + return getStubSettingsBuilder().cancelResumableWriteSettings(); + } + + /** Returns the builder for the settings used for calls to getObject. */ + public UnaryCallSettings.Builder getObjectSettings() { + return getStubSettingsBuilder().getObjectSettings(); + } + + /** Returns the builder for the settings used for calls to readObject. */ + public ServerStreamingCallSettings.Builder + readObjectSettings() { + return getStubSettingsBuilder().readObjectSettings(); + } + + /** Returns the builder for the settings used for calls to bidiReadObject. */ + public StreamingCallSettings.Builder + bidiReadObjectSettings() { + return getStubSettingsBuilder().bidiReadObjectSettings(); + } + + /** Returns the builder for the settings used for calls to updateObject. 
*/ + public UnaryCallSettings.Builder updateObjectSettings() { + return getStubSettingsBuilder().updateObjectSettings(); + } + + /** Returns the builder for the settings used for calls to writeObject. */ + public StreamingCallSettings.Builder + writeObjectSettings() { + return getStubSettingsBuilder().writeObjectSettings(); + } + + /** Returns the builder for the settings used for calls to bidiWriteObject. */ + public StreamingCallSettings.Builder + bidiWriteObjectSettings() { + return getStubSettingsBuilder().bidiWriteObjectSettings(); + } + + /** Returns the builder for the settings used for calls to listObjects. */ + public PagedCallSettings.Builder< + ListObjectsRequest, ListObjectsResponse, ListObjectsPagedResponse> + listObjectsSettings() { + return getStubSettingsBuilder().listObjectsSettings(); + } + + /** Returns the builder for the settings used for calls to rewriteObject. */ + public UnaryCallSettings.Builder + rewriteObjectSettings() { + return getStubSettingsBuilder().rewriteObjectSettings(); + } + + /** Returns the builder for the settings used for calls to startResumableWrite. */ + public UnaryCallSettings.Builder + startResumableWriteSettings() { + return getStubSettingsBuilder().startResumableWriteSettings(); + } + + /** Returns the builder for the settings used for calls to queryWriteStatus. */ + public UnaryCallSettings.Builder + queryWriteStatusSettings() { + return getStubSettingsBuilder().queryWriteStatusSettings(); + } + + /** Returns the builder for the settings used for calls to moveObject. 
*/ + public UnaryCallSettings.Builder moveObjectSettings() { + return getStubSettingsBuilder().moveObjectSettings(); + } + + @Override + public StorageSettings build() throws IOException { + return new StorageSettings(this); + } + } +} diff --git a/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/package-info.java b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/package-info.java new file mode 100644 index 000000000000..8bd261cb4ac6 --- /dev/null +++ b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/package-info.java @@ -0,0 +1,60 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to Cloud Storage API + * + *

The interfaces provided are listed below, along with usage samples. + * + *

======================= StorageClient ======================= + * + *

Service Description: ## API Overview and Naming Syntax + * + *

The Cloud Storage gRPC API allows applications to read and write data through the abstractions + * of buckets and objects. For a description of these abstractions please see [Cloud Storage + * documentation](https://cloud.google.com/storage/docs). + * + *

Resources are named as follows: + * + *

- Projects are referred to as they are defined by the Resource Manager API, using strings like + * `projects/123456` or `projects/my-string-id`. - Buckets are named using string names of the form: + * `projects/{project}/buckets/{bucket}`. For globally unique buckets, `_` might be substituted for + * the project. - Objects are uniquely identified by their name along with the name of the bucket + * they belong to, as separate strings in this API. For example: + * + *

``` ReadObjectRequest { bucket: 'projects/_/buckets/my-bucket' object: 'my-object' } ``` + * + *

Note that object names can contain `/` characters, which are treated as any other character + * (no special directory semantics). + * + *

Sample for StorageClient: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (StorageClient storageClient = StorageClient.create()) {
+ *   BucketName name = BucketName.of("[PROJECT]", "[BUCKET]");
+ *   storageClient.deleteBucket(name);
+ * }
+ * }
+ */ +@Generated("by gapic-generator-java") +package com.google.storage.v2; + +import javax.annotation.Generated; diff --git a/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/GrpcStorageCallableFactory.java b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/GrpcStorageCallableFactory.java new file mode 100644 index 000000000000..272e2c8c22c6 --- /dev/null +++ b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/GrpcStorageCallableFactory.java @@ -0,0 +1,113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.v2.stub; + +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the Storage service API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class GrpcStorageCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings 
grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/GrpcStorageStub.java b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/GrpcStorageStub.java new file mode 100644 index 000000000000..096ef7ae237a --- /dev/null +++ b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/GrpcStorageStub.java @@ -0,0 +1,1011 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.v2.stub; + +import static com.google.storage.v2.StorageClient.ListBucketsPagedResponse; +import static com.google.storage.v2.StorageClient.ListObjectsPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.api.pathtemplate.PathTemplate; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.protobuf.Empty; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.BidiReadObjectResponse; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.Bucket; +import com.google.storage.v2.CancelResumableWriteRequest; +import com.google.storage.v2.CancelResumableWriteResponse; +import com.google.storage.v2.ComposeObjectRequest; +import com.google.storage.v2.CreateBucketRequest; +import com.google.storage.v2.DeleteBucketRequest; +import com.google.storage.v2.DeleteObjectRequest; +import com.google.storage.v2.GetBucketRequest; +import com.google.storage.v2.GetObjectRequest; +import com.google.storage.v2.ListBucketsRequest; +import com.google.storage.v2.ListBucketsResponse; +import com.google.storage.v2.ListObjectsRequest; +import com.google.storage.v2.ListObjectsResponse; +import 
com.google.storage.v2.LockBucketRetentionPolicyRequest; +import com.google.storage.v2.MoveObjectRequest; +import com.google.storage.v2.Object; +import com.google.storage.v2.QueryWriteStatusRequest; +import com.google.storage.v2.QueryWriteStatusResponse; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; +import com.google.storage.v2.RestoreObjectRequest; +import com.google.storage.v2.RewriteObjectRequest; +import com.google.storage.v2.RewriteResponse; +import com.google.storage.v2.StartResumableWriteRequest; +import com.google.storage.v2.StartResumableWriteResponse; +import com.google.storage.v2.UpdateBucketRequest; +import com.google.storage.v2.UpdateObjectRequest; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the Storage service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class GrpcStorageStub extends StorageStub { + private static final MethodDescriptor deleteBucketMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/DeleteBucket") + .setRequestMarshaller(ProtoUtils.marshaller(DeleteBucketRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor getBucketMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/GetBucket") + .setRequestMarshaller(ProtoUtils.marshaller(GetBucketRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Bucket.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor createBucketMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/CreateBucket") + .setRequestMarshaller(ProtoUtils.marshaller(CreateBucketRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Bucket.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listBucketsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/ListBuckets") + .setRequestMarshaller(ProtoUtils.marshaller(ListBucketsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListBucketsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + lockBucketRetentionPolicyMethodDescriptor = + MethodDescriptor.newBuilder() + 
.setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/LockBucketRetentionPolicy") + .setRequestMarshaller( + ProtoUtils.marshaller(LockBucketRetentionPolicyRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Bucket.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor getIamPolicyMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/GetIamPolicy") + .setRequestMarshaller(ProtoUtils.marshaller(GetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Policy.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor setIamPolicyMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/SetIamPolicy") + .setRequestMarshaller(ProtoUtils.marshaller(SetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Policy.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + testIamPermissionsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/TestIamPermissions") + .setRequestMarshaller( + ProtoUtils.marshaller(TestIamPermissionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(TestIamPermissionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor updateBucketMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/UpdateBucket") + .setRequestMarshaller(ProtoUtils.marshaller(UpdateBucketRequest.getDefaultInstance())) + 
.setResponseMarshaller(ProtoUtils.marshaller(Bucket.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + composeObjectMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/ComposeObject") + .setRequestMarshaller( + ProtoUtils.marshaller(ComposeObjectRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Object.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor deleteObjectMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/DeleteObject") + .setRequestMarshaller(ProtoUtils.marshaller(DeleteObjectRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + restoreObjectMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/RestoreObject") + .setRequestMarshaller( + ProtoUtils.marshaller(RestoreObjectRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Object.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + cancelResumableWriteMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/CancelResumableWrite") + .setRequestMarshaller( + ProtoUtils.marshaller(CancelResumableWriteRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(CancelResumableWriteResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor getObjectMethodDescriptor = + MethodDescriptor.newBuilder() + 
.setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/GetObject") + .setRequestMarshaller(ProtoUtils.marshaller(GetObjectRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Object.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + readObjectMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName("google.storage.v2.Storage/ReadObject") + .setRequestMarshaller(ProtoUtils.marshaller(ReadObjectRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ReadObjectResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + bidiReadObjectMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName("google.storage.v2.Storage/BidiReadObject") + .setRequestMarshaller( + ProtoUtils.marshaller(BidiReadObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(BidiReadObjectResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor updateObjectMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/UpdateObject") + .setRequestMarshaller(ProtoUtils.marshaller(UpdateObjectRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Object.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + writeObjectMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.CLIENT_STREAMING) + .setFullMethodName("google.storage.v2.Storage/WriteObject") + .setRequestMarshaller(ProtoUtils.marshaller(WriteObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + 
ProtoUtils.marshaller(WriteObjectResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + bidiWriteObjectMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName("google.storage.v2.Storage/BidiWriteObject") + .setRequestMarshaller( + ProtoUtils.marshaller(BidiWriteObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(BidiWriteObjectResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listObjectsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/ListObjects") + .setRequestMarshaller(ProtoUtils.marshaller(ListObjectsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListObjectsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + rewriteObjectMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/RewriteObject") + .setRequestMarshaller( + ProtoUtils.marshaller(RewriteObjectRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(RewriteResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + startResumableWriteMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/StartResumableWrite") + .setRequestMarshaller( + ProtoUtils.marshaller(StartResumableWriteRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(StartResumableWriteResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + 
queryWriteStatusMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/QueryWriteStatus") + .setRequestMarshaller( + ProtoUtils.marshaller(QueryWriteStatusRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(QueryWriteStatusResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor moveObjectMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.v2.Storage/MoveObject") + .setRequestMarshaller(ProtoUtils.marshaller(MoveObjectRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Object.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final UnaryCallable deleteBucketCallable; + private final UnaryCallable getBucketCallable; + private final UnaryCallable createBucketCallable; + private final UnaryCallable listBucketsCallable; + private final UnaryCallable + listBucketsPagedCallable; + private final UnaryCallable + lockBucketRetentionPolicyCallable; + private final UnaryCallable getIamPolicyCallable; + private final UnaryCallable setIamPolicyCallable; + private final UnaryCallable + testIamPermissionsCallable; + private final UnaryCallable updateBucketCallable; + private final UnaryCallable composeObjectCallable; + private final UnaryCallable deleteObjectCallable; + private final UnaryCallable restoreObjectCallable; + private final UnaryCallable + cancelResumableWriteCallable; + private final UnaryCallable getObjectCallable; + private final ServerStreamingCallable readObjectCallable; + private final BidiStreamingCallable + bidiReadObjectCallable; + private final UnaryCallable updateObjectCallable; + private final ClientStreamingCallable + writeObjectCallable; + private final BidiStreamingCallable + bidiWriteObjectCallable; + private final UnaryCallable 
listObjectsCallable; + private final UnaryCallable + listObjectsPagedCallable; + private final UnaryCallable rewriteObjectCallable; + private final UnaryCallable + startResumableWriteCallable; + private final UnaryCallable + queryWriteStatusCallable; + private final UnaryCallable moveObjectCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + private static final PathTemplate DELETE_BUCKET_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate GET_BUCKET_0_PATH_TEMPLATE = PathTemplate.create("{bucket=**}"); + private static final PathTemplate CREATE_BUCKET_0_PATH_TEMPLATE = + PathTemplate.create("{project=**}"); + private static final PathTemplate CREATE_BUCKET_1_PATH_TEMPLATE = + PathTemplate.create("{project=**}"); + private static final PathTemplate LIST_BUCKETS_0_PATH_TEMPLATE = + PathTemplate.create("{project=**}"); + private static final PathTemplate LOCK_BUCKET_RETENTION_POLICY_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate GET_IAM_POLICY_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate GET_IAM_POLICY_1_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate SET_IAM_POLICY_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate SET_IAM_POLICY_1_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate TEST_IAM_PERMISSIONS_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate TEST_IAM_PERMISSIONS_1_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/objects/**"); + private static final PathTemplate TEST_IAM_PERMISSIONS_2_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/managedFolders/**"); + 
private static final PathTemplate UPDATE_BUCKET_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate COMPOSE_OBJECT_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate DELETE_OBJECT_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate RESTORE_OBJECT_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate CANCEL_RESUMABLE_WRITE_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate GET_OBJECT_0_PATH_TEMPLATE = PathTemplate.create("{bucket=**}"); + private static final PathTemplate READ_OBJECT_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate BIDI_READ_OBJECT_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate UPDATE_OBJECT_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate LIST_OBJECTS_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate REWRITE_OBJECT_0_PATH_TEMPLATE = + PathTemplate.create("{source_bucket=**}"); + private static final PathTemplate REWRITE_OBJECT_1_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate START_RESUMABLE_WRITE_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate QUERY_WRITE_STATUS_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate MOVE_OBJECT_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + + public static final GrpcStorageStub create(StorageStubSettings settings) throws IOException { + return new GrpcStorageStub(settings, ClientContext.create(settings)); + } + + public static final GrpcStorageStub create(ClientContext clientContext) throws IOException { + return new GrpcStorageStub(StorageStubSettings.newBuilder().build(), 
clientContext); + } + + public static final GrpcStorageStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcStorageStub( + StorageStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcStorageStub, using the given settings. This is protected so that + * it is easy to make a subclass, but otherwise, the static factory methods should be preferred. + */ + protected GrpcStorageStub(StorageStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcStorageCallableFactory()); + } + + /** + * Constructs an instance of GrpcStorageStub, using the given settings. This is protected so that + * it is easy to make a subclass, but otherwise, the static factory methods should be preferred. + */ + protected GrpcStorageStub( + StorageStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings deleteBucketTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteBucketMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getName(), "bucket", DELETE_BUCKET_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings getBucketTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getBucketMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getName(), "bucket", GET_BUCKET_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings createBucketTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createBucketMethodDescriptor) + 
.setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getParent(), "project", CREATE_BUCKET_0_PATH_TEMPLATE); + if (request.getBucket() != null) { + builder.add( + request.getBucket().getProject(), "project", CREATE_BUCKET_1_PATH_TEMPLATE); + } + return builder.build(); + }) + .build(); + GrpcCallSettings listBucketsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listBucketsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getParent(), "project", LIST_BUCKETS_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings + lockBucketRetentionPolicyTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(lockBucketRetentionPolicyMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + request.getBucket(), + "bucket", + LOCK_BUCKET_RETENTION_POLICY_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings getIamPolicyTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getIamPolicyMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getResource(), "bucket", GET_IAM_POLICY_0_PATH_TEMPLATE); + builder.add(request.getResource(), "bucket", GET_IAM_POLICY_1_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings setIamPolicyTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(setIamPolicyMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getResource(), "bucket", SET_IAM_POLICY_0_PATH_TEMPLATE); + builder.add(request.getResource(), "bucket", SET_IAM_POLICY_1_PATH_TEMPLATE); + return builder.build(); + }) + 
.build(); + GrpcCallSettings + testIamPermissionsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(testIamPermissionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + request.getResource(), "bucket", TEST_IAM_PERMISSIONS_0_PATH_TEMPLATE); + builder.add( + request.getResource(), "bucket", TEST_IAM_PERMISSIONS_1_PATH_TEMPLATE); + builder.add( + request.getResource(), "bucket", TEST_IAM_PERMISSIONS_2_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings updateBucketTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateBucketMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + if (request.getBucket() != null) { + builder.add( + request.getBucket().getName(), "bucket", UPDATE_BUCKET_0_PATH_TEMPLATE); + } + return builder.build(); + }) + .build(); + GrpcCallSettings composeObjectTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(composeObjectMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + if (request.getDestination() != null) { + builder.add( + request.getDestination().getBucket(), + "bucket", + COMPOSE_OBJECT_0_PATH_TEMPLATE); + } + return builder.build(); + }) + .build(); + GrpcCallSettings deleteObjectTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteObjectMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getBucket(), "bucket", DELETE_OBJECT_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings restoreObjectTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(restoreObjectMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = 
RequestParamsBuilder.create(); + builder.add(request.getBucket(), "bucket", RESTORE_OBJECT_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings + cancelResumableWriteTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(cancelResumableWriteMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + request.getUploadId(), "bucket", CANCEL_RESUMABLE_WRITE_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings getObjectTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getObjectMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getBucket(), "bucket", GET_OBJECT_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings readObjectTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(readObjectMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getBucket(), "bucket", READ_OBJECT_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings + bidiReadObjectTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(bidiReadObjectMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + if (request.getReadObjectSpec() != null) { + builder.add( + request.getReadObjectSpec().getBucket(), + "bucket", + BIDI_READ_OBJECT_0_PATH_TEMPLATE); + } + return builder.build(); + }) + .build(); + GrpcCallSettings updateObjectTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateObjectMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + if (request.getObject() != null) { + builder.add( + 
request.getObject().getBucket(), "bucket", UPDATE_OBJECT_0_PATH_TEMPLATE); + } + return builder.build(); + }) + .build(); + GrpcCallSettings writeObjectTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(writeObjectMethodDescriptor) + .build(); + GrpcCallSettings + bidiWriteObjectTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(bidiWriteObjectMethodDescriptor) + .build(); + GrpcCallSettings listObjectsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listObjectsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getParent(), "bucket", LIST_OBJECTS_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings rewriteObjectTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(rewriteObjectMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + request.getSourceBucket(), "source_bucket", REWRITE_OBJECT_0_PATH_TEMPLATE); + builder.add( + request.getDestinationBucket(), "bucket", REWRITE_OBJECT_1_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings + startResumableWriteTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(startResumableWriteMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + if (request.getWriteObjectSpec() != null + && request.getWriteObjectSpec().getResource() != null) { + builder.add( + request.getWriteObjectSpec().getResource().getBucket(), + "bucket", + START_RESUMABLE_WRITE_0_PATH_TEMPLATE); + } + return builder.build(); + }) + .build(); + GrpcCallSettings + queryWriteStatusTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(queryWriteStatusMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = 
RequestParamsBuilder.create(); + builder.add( + request.getUploadId(), "bucket", QUERY_WRITE_STATUS_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings moveObjectTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(moveObjectMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getBucket(), "bucket", MOVE_OBJECT_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + + this.deleteBucketCallable = + callableFactory.createUnaryCallable( + deleteBucketTransportSettings, settings.deleteBucketSettings(), clientContext); + this.getBucketCallable = + callableFactory.createUnaryCallable( + getBucketTransportSettings, settings.getBucketSettings(), clientContext); + this.createBucketCallable = + callableFactory.createUnaryCallable( + createBucketTransportSettings, settings.createBucketSettings(), clientContext); + this.listBucketsCallable = + callableFactory.createUnaryCallable( + listBucketsTransportSettings, settings.listBucketsSettings(), clientContext); + this.listBucketsPagedCallable = + callableFactory.createPagedCallable( + listBucketsTransportSettings, settings.listBucketsSettings(), clientContext); + this.lockBucketRetentionPolicyCallable = + callableFactory.createUnaryCallable( + lockBucketRetentionPolicyTransportSettings, + settings.lockBucketRetentionPolicySettings(), + clientContext); + this.getIamPolicyCallable = + callableFactory.createUnaryCallable( + getIamPolicyTransportSettings, settings.getIamPolicySettings(), clientContext); + this.setIamPolicyCallable = + callableFactory.createUnaryCallable( + setIamPolicyTransportSettings, settings.setIamPolicySettings(), clientContext); + this.testIamPermissionsCallable = + callableFactory.createUnaryCallable( + testIamPermissionsTransportSettings, + settings.testIamPermissionsSettings(), + clientContext); + this.updateBucketCallable = + callableFactory.createUnaryCallable( + 
updateBucketTransportSettings, settings.updateBucketSettings(), clientContext); + this.composeObjectCallable = + callableFactory.createUnaryCallable( + composeObjectTransportSettings, settings.composeObjectSettings(), clientContext); + this.deleteObjectCallable = + callableFactory.createUnaryCallable( + deleteObjectTransportSettings, settings.deleteObjectSettings(), clientContext); + this.restoreObjectCallable = + callableFactory.createUnaryCallable( + restoreObjectTransportSettings, settings.restoreObjectSettings(), clientContext); + this.cancelResumableWriteCallable = + callableFactory.createUnaryCallable( + cancelResumableWriteTransportSettings, + settings.cancelResumableWriteSettings(), + clientContext); + this.getObjectCallable = + callableFactory.createUnaryCallable( + getObjectTransportSettings, settings.getObjectSettings(), clientContext); + this.readObjectCallable = + callableFactory.createServerStreamingCallable( + readObjectTransportSettings, settings.readObjectSettings(), clientContext); + this.bidiReadObjectCallable = + callableFactory.createBidiStreamingCallable( + bidiReadObjectTransportSettings, settings.bidiReadObjectSettings(), clientContext); + this.updateObjectCallable = + callableFactory.createUnaryCallable( + updateObjectTransportSettings, settings.updateObjectSettings(), clientContext); + this.writeObjectCallable = + callableFactory.createClientStreamingCallable( + writeObjectTransportSettings, settings.writeObjectSettings(), clientContext); + this.bidiWriteObjectCallable = + callableFactory.createBidiStreamingCallable( + bidiWriteObjectTransportSettings, settings.bidiWriteObjectSettings(), clientContext); + this.listObjectsCallable = + callableFactory.createUnaryCallable( + listObjectsTransportSettings, settings.listObjectsSettings(), clientContext); + this.listObjectsPagedCallable = + callableFactory.createPagedCallable( + listObjectsTransportSettings, settings.listObjectsSettings(), clientContext); + this.rewriteObjectCallable = + 
callableFactory.createUnaryCallable( + rewriteObjectTransportSettings, settings.rewriteObjectSettings(), clientContext); + this.startResumableWriteCallable = + callableFactory.createUnaryCallable( + startResumableWriteTransportSettings, + settings.startResumableWriteSettings(), + clientContext); + this.queryWriteStatusCallable = + callableFactory.createUnaryCallable( + queryWriteStatusTransportSettings, settings.queryWriteStatusSettings(), clientContext); + this.moveObjectCallable = + callableFactory.createUnaryCallable( + moveObjectTransportSettings, settings.moveObjectSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable deleteBucketCallable() { + return deleteBucketCallable; + } + + @Override + public UnaryCallable getBucketCallable() { + return getBucketCallable; + } + + @Override + public UnaryCallable createBucketCallable() { + return createBucketCallable; + } + + @Override + public UnaryCallable listBucketsCallable() { + return listBucketsCallable; + } + + @Override + public UnaryCallable listBucketsPagedCallable() { + return listBucketsPagedCallable; + } + + @Override + public UnaryCallable + lockBucketRetentionPolicyCallable() { + return lockBucketRetentionPolicyCallable; + } + + @Override + public UnaryCallable getIamPolicyCallable() { + return getIamPolicyCallable; + } + + @Override + public UnaryCallable setIamPolicyCallable() { + return setIamPolicyCallable; + } + + @Override + public UnaryCallable + testIamPermissionsCallable() { + return testIamPermissionsCallable; + } + + @Override + public UnaryCallable updateBucketCallable() { + return updateBucketCallable; + } + + @Override + public UnaryCallable composeObjectCallable() { + return composeObjectCallable; + } + + @Override + public UnaryCallable deleteObjectCallable() { + return 
deleteObjectCallable; + } + + @Override + public UnaryCallable restoreObjectCallable() { + return restoreObjectCallable; + } + + @Override + public UnaryCallable + cancelResumableWriteCallable() { + return cancelResumableWriteCallable; + } + + @Override + public UnaryCallable getObjectCallable() { + return getObjectCallable; + } + + @Override + public ServerStreamingCallable readObjectCallable() { + return readObjectCallable; + } + + @Override + public BidiStreamingCallable + bidiReadObjectCallable() { + return bidiReadObjectCallable; + } + + @Override + public UnaryCallable updateObjectCallable() { + return updateObjectCallable; + } + + @Override + public ClientStreamingCallable writeObjectCallable() { + return writeObjectCallable; + } + + @Override + public BidiStreamingCallable + bidiWriteObjectCallable() { + return bidiWriteObjectCallable; + } + + @Override + public UnaryCallable listObjectsCallable() { + return listObjectsCallable; + } + + @Override + public UnaryCallable listObjectsPagedCallable() { + return listObjectsPagedCallable; + } + + @Override + public UnaryCallable rewriteObjectCallable() { + return rewriteObjectCallable; + } + + @Override + public UnaryCallable + startResumableWriteCallable() { + return startResumableWriteCallable; + } + + @Override + public UnaryCallable + queryWriteStatusCallable() { + return queryWriteStatusCallable; + } + + @Override + public UnaryCallable moveObjectCallable() { + return moveObjectCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void 
shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/StorageStub.java b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/StorageStub.java new file mode 100644 index 000000000000..2def7e8670a3 --- /dev/null +++ b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/StorageStub.java @@ -0,0 +1,190 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.v2.stub; + +import static com.google.storage.v2.StorageClient.ListBucketsPagedResponse; +import static com.google.storage.v2.StorageClient.ListObjectsPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.protobuf.Empty; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.BidiReadObjectResponse; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.Bucket; +import com.google.storage.v2.CancelResumableWriteRequest; +import com.google.storage.v2.CancelResumableWriteResponse; +import com.google.storage.v2.ComposeObjectRequest; +import com.google.storage.v2.CreateBucketRequest; +import com.google.storage.v2.DeleteBucketRequest; +import com.google.storage.v2.DeleteObjectRequest; +import com.google.storage.v2.GetBucketRequest; +import com.google.storage.v2.GetObjectRequest; +import com.google.storage.v2.ListBucketsRequest; +import com.google.storage.v2.ListBucketsResponse; +import com.google.storage.v2.ListObjectsRequest; +import com.google.storage.v2.ListObjectsResponse; +import com.google.storage.v2.LockBucketRetentionPolicyRequest; +import com.google.storage.v2.MoveObjectRequest; +import com.google.storage.v2.Object; +import com.google.storage.v2.QueryWriteStatusRequest; +import com.google.storage.v2.QueryWriteStatusResponse; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; +import 
com.google.storage.v2.RestoreObjectRequest; +import com.google.storage.v2.RewriteObjectRequest; +import com.google.storage.v2.RewriteResponse; +import com.google.storage.v2.StartResumableWriteRequest; +import com.google.storage.v2.StartResumableWriteResponse; +import com.google.storage.v2.UpdateBucketRequest; +import com.google.storage.v2.UpdateObjectRequest; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the Storage service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public abstract class StorageStub implements BackgroundResource { + + public UnaryCallable deleteBucketCallable() { + throw new UnsupportedOperationException("Not implemented: deleteBucketCallable()"); + } + + public UnaryCallable getBucketCallable() { + throw new UnsupportedOperationException("Not implemented: getBucketCallable()"); + } + + public UnaryCallable createBucketCallable() { + throw new UnsupportedOperationException("Not implemented: createBucketCallable()"); + } + + public UnaryCallable listBucketsPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listBucketsPagedCallable()"); + } + + public UnaryCallable listBucketsCallable() { + throw new UnsupportedOperationException("Not implemented: listBucketsCallable()"); + } + + public UnaryCallable + lockBucketRetentionPolicyCallable() { + throw new UnsupportedOperationException("Not implemented: lockBucketRetentionPolicyCallable()"); + } + + public UnaryCallable getIamPolicyCallable() { + throw new UnsupportedOperationException("Not implemented: getIamPolicyCallable()"); + } + + public UnaryCallable setIamPolicyCallable() { + throw new UnsupportedOperationException("Not implemented: setIamPolicyCallable()"); + } + + public UnaryCallable + testIamPermissionsCallable() { + throw new UnsupportedOperationException("Not implemented: testIamPermissionsCallable()"); + } + + public UnaryCallable updateBucketCallable() { + throw new UnsupportedOperationException("Not implemented: updateBucketCallable()"); + } + + public UnaryCallable composeObjectCallable() { + throw new UnsupportedOperationException("Not implemented: composeObjectCallable()"); + } + + public UnaryCallable deleteObjectCallable() { + throw new UnsupportedOperationException("Not implemented: deleteObjectCallable()"); + } + + public UnaryCallable restoreObjectCallable() { + throw new 
UnsupportedOperationException("Not implemented: restoreObjectCallable()"); + } + + public UnaryCallable + cancelResumableWriteCallable() { + throw new UnsupportedOperationException("Not implemented: cancelResumableWriteCallable()"); + } + + public UnaryCallable getObjectCallable() { + throw new UnsupportedOperationException("Not implemented: getObjectCallable()"); + } + + public ServerStreamingCallable readObjectCallable() { + throw new UnsupportedOperationException("Not implemented: readObjectCallable()"); + } + + public BidiStreamingCallable + bidiReadObjectCallable() { + throw new UnsupportedOperationException("Not implemented: bidiReadObjectCallable()"); + } + + public UnaryCallable updateObjectCallable() { + throw new UnsupportedOperationException("Not implemented: updateObjectCallable()"); + } + + public ClientStreamingCallable writeObjectCallable() { + throw new UnsupportedOperationException("Not implemented: writeObjectCallable()"); + } + + public BidiStreamingCallable + bidiWriteObjectCallable() { + throw new UnsupportedOperationException("Not implemented: bidiWriteObjectCallable()"); + } + + public UnaryCallable listObjectsPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listObjectsPagedCallable()"); + } + + public UnaryCallable listObjectsCallable() { + throw new UnsupportedOperationException("Not implemented: listObjectsCallable()"); + } + + public UnaryCallable rewriteObjectCallable() { + throw new UnsupportedOperationException("Not implemented: rewriteObjectCallable()"); + } + + public UnaryCallable + startResumableWriteCallable() { + throw new UnsupportedOperationException("Not implemented: startResumableWriteCallable()"); + } + + public UnaryCallable + queryWriteStatusCallable() { + throw new UnsupportedOperationException("Not implemented: queryWriteStatusCallable()"); + } + + public UnaryCallable moveObjectCallable() { + throw new UnsupportedOperationException("Not implemented: moveObjectCallable()"); + } + + @Override 
+ public abstract void close(); +} diff --git a/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/StorageStubSettings.java b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/StorageStubSettings.java new file mode 100644 index 000000000000..ff0eaa44ebf2 --- /dev/null +++ b/java-storage/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/StorageStubSettings.java @@ -0,0 +1,986 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.v2.stub; + +import static com.google.storage.v2.StorageClient.ListBucketsPagedResponse; +import static com.google.storage.v2.StorageClient.ListObjectsPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.PagedListDescriptor; +import com.google.api.gax.rpc.PagedListResponseFactory; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.protobuf.Empty; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.BidiReadObjectResponse; +import 
com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.Bucket; +import com.google.storage.v2.CancelResumableWriteRequest; +import com.google.storage.v2.CancelResumableWriteResponse; +import com.google.storage.v2.ComposeObjectRequest; +import com.google.storage.v2.CreateBucketRequest; +import com.google.storage.v2.DeleteBucketRequest; +import com.google.storage.v2.DeleteObjectRequest; +import com.google.storage.v2.GetBucketRequest; +import com.google.storage.v2.GetObjectRequest; +import com.google.storage.v2.ListBucketsRequest; +import com.google.storage.v2.ListBucketsResponse; +import com.google.storage.v2.ListObjectsRequest; +import com.google.storage.v2.ListObjectsResponse; +import com.google.storage.v2.LockBucketRetentionPolicyRequest; +import com.google.storage.v2.MoveObjectRequest; +import com.google.storage.v2.Object; +import com.google.storage.v2.QueryWriteStatusRequest; +import com.google.storage.v2.QueryWriteStatusResponse; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; +import com.google.storage.v2.RestoreObjectRequest; +import com.google.storage.v2.RewriteObjectRequest; +import com.google.storage.v2.RewriteResponse; +import com.google.storage.v2.StartResumableWriteRequest; +import com.google.storage.v2.StartResumableWriteResponse; +import com.google.storage.v2.UpdateBucketRequest; +import com.google.storage.v2.UpdateObjectRequest; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link StorageStub}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (storage.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of deleteBucket: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * StorageStubSettings.Builder storageSettingsBuilder = StorageStubSettings.newBuilder();
+ * storageSettingsBuilder
+ *     .deleteBucketSettings()
+ *     .setRetrySettings(
+ *         storageSettingsBuilder
+ *             .deleteBucketSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * StorageStubSettings storageSettings = storageSettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@Generated("by gapic-generator-java") +public class StorageStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/cloud-platform") + .add("https://www.googleapis.com/auth/cloud-platform.read-only") + .add("https://www.googleapis.com/auth/devstorage.full_control") + .add("https://www.googleapis.com/auth/devstorage.read_only") + .add("https://www.googleapis.com/auth/devstorage.read_write") + .build(); + + private final UnaryCallSettings deleteBucketSettings; + private final UnaryCallSettings getBucketSettings; + private final UnaryCallSettings createBucketSettings; + private final PagedCallSettings + listBucketsSettings; + private final UnaryCallSettings + lockBucketRetentionPolicySettings; + private final UnaryCallSettings getIamPolicySettings; + private final UnaryCallSettings setIamPolicySettings; + private final UnaryCallSettings + testIamPermissionsSettings; + private final UnaryCallSettings updateBucketSettings; + private final UnaryCallSettings composeObjectSettings; + private final UnaryCallSettings deleteObjectSettings; + private final UnaryCallSettings restoreObjectSettings; + private final UnaryCallSettings + cancelResumableWriteSettings; + private final UnaryCallSettings getObjectSettings; + private final ServerStreamingCallSettings + readObjectSettings; + private final StreamingCallSettings + bidiReadObjectSettings; + private final UnaryCallSettings updateObjectSettings; + private final StreamingCallSettings writeObjectSettings; + private final StreamingCallSettings + bidiWriteObjectSettings; + private final PagedCallSettings + listObjectsSettings; + private final UnaryCallSettings rewriteObjectSettings; + private final UnaryCallSettings + 
startResumableWriteSettings; + private final UnaryCallSettings + queryWriteStatusSettings; + private final UnaryCallSettings moveObjectSettings; + + private static final PagedListDescriptor + LIST_BUCKETS_PAGE_STR_DESC = + new PagedListDescriptor() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListBucketsRequest injectToken(ListBucketsRequest payload, String token) { + return ListBucketsRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListBucketsRequest injectPageSize(ListBucketsRequest payload, int pageSize) { + return ListBucketsRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListBucketsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListBucketsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListBucketsResponse payload) { + return payload.getBucketsList(); + } + }; + + private static final PagedListDescriptor + LIST_OBJECTS_PAGE_STR_DESC = + new PagedListDescriptor() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListObjectsRequest injectToken(ListObjectsRequest payload, String token) { + return ListObjectsRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListObjectsRequest injectPageSize(ListObjectsRequest payload, int pageSize) { + return ListObjectsRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListObjectsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListObjectsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListObjectsResponse payload) { + return payload.getObjectsList(); + } + }; + + private static final PagedListResponseFactory< + ListBucketsRequest, 
ListBucketsResponse, ListBucketsPagedResponse> + LIST_BUCKETS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListBucketsRequest, ListBucketsResponse, ListBucketsPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListBucketsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext pageContext = + PageContext.create(callable, LIST_BUCKETS_PAGE_STR_DESC, request, context); + return ListBucketsPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListObjectsRequest, ListObjectsResponse, ListObjectsPagedResponse> + LIST_OBJECTS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListObjectsRequest, ListObjectsResponse, ListObjectsPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListObjectsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext pageContext = + PageContext.create(callable, LIST_OBJECTS_PAGE_STR_DESC, request, context); + return ListObjectsPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + /** Returns the object with the settings used for calls to deleteBucket. */ + public UnaryCallSettings deleteBucketSettings() { + return deleteBucketSettings; + } + + /** Returns the object with the settings used for calls to getBucket. */ + public UnaryCallSettings getBucketSettings() { + return getBucketSettings; + } + + /** Returns the object with the settings used for calls to createBucket. */ + public UnaryCallSettings createBucketSettings() { + return createBucketSettings; + } + + /** Returns the object with the settings used for calls to listBuckets. */ + public PagedCallSettings + listBucketsSettings() { + return listBucketsSettings; + } + + /** Returns the object with the settings used for calls to lockBucketRetentionPolicy. 
*/ + public UnaryCallSettings + lockBucketRetentionPolicySettings() { + return lockBucketRetentionPolicySettings; + } + + /** Returns the object with the settings used for calls to getIamPolicy. */ + public UnaryCallSettings getIamPolicySettings() { + return getIamPolicySettings; + } + + /** Returns the object with the settings used for calls to setIamPolicy. */ + public UnaryCallSettings setIamPolicySettings() { + return setIamPolicySettings; + } + + /** Returns the object with the settings used for calls to testIamPermissions. */ + public UnaryCallSettings + testIamPermissionsSettings() { + return testIamPermissionsSettings; + } + + /** Returns the object with the settings used for calls to updateBucket. */ + public UnaryCallSettings updateBucketSettings() { + return updateBucketSettings; + } + + /** Returns the object with the settings used for calls to composeObject. */ + public UnaryCallSettings composeObjectSettings() { + return composeObjectSettings; + } + + /** Returns the object with the settings used for calls to deleteObject. */ + public UnaryCallSettings deleteObjectSettings() { + return deleteObjectSettings; + } + + /** Returns the object with the settings used for calls to restoreObject. */ + public UnaryCallSettings restoreObjectSettings() { + return restoreObjectSettings; + } + + /** Returns the object with the settings used for calls to cancelResumableWrite. */ + public UnaryCallSettings + cancelResumableWriteSettings() { + return cancelResumableWriteSettings; + } + + /** Returns the object with the settings used for calls to getObject. */ + public UnaryCallSettings getObjectSettings() { + return getObjectSettings; + } + + /** Returns the object with the settings used for calls to readObject. */ + public ServerStreamingCallSettings readObjectSettings() { + return readObjectSettings; + } + + /** Returns the object with the settings used for calls to bidiReadObject. 
*/ + public StreamingCallSettings + bidiReadObjectSettings() { + return bidiReadObjectSettings; + } + + /** Returns the object with the settings used for calls to updateObject. */ + public UnaryCallSettings updateObjectSettings() { + return updateObjectSettings; + } + + /** Returns the object with the settings used for calls to writeObject. */ + public StreamingCallSettings writeObjectSettings() { + return writeObjectSettings; + } + + /** Returns the object with the settings used for calls to bidiWriteObject. */ + public StreamingCallSettings + bidiWriteObjectSettings() { + return bidiWriteObjectSettings; + } + + /** Returns the object with the settings used for calls to listObjects. */ + public PagedCallSettings + listObjectsSettings() { + return listObjectsSettings; + } + + /** Returns the object with the settings used for calls to rewriteObject. */ + public UnaryCallSettings rewriteObjectSettings() { + return rewriteObjectSettings; + } + + /** Returns the object with the settings used for calls to startResumableWrite. */ + public UnaryCallSettings + startResumableWriteSettings() { + return startResumableWriteSettings; + } + + /** Returns the object with the settings used for calls to queryWriteStatus. */ + public UnaryCallSettings + queryWriteStatusSettings() { + return queryWriteStatusSettings; + } + + /** Returns the object with the settings used for calls to moveObject. */ + public UnaryCallSettings moveObjectSettings() { + return moveObjectSettings; + } + + public StorageStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcStorageStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. 
*/ + @Override + public String getServiceName() { + return "storage"; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "storage.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "storage.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken("gapic", GaxProperties.getLibraryVersion(StorageStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. 
*/ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected StorageStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + deleteBucketSettings = settingsBuilder.deleteBucketSettings().build(); + getBucketSettings = settingsBuilder.getBucketSettings().build(); + createBucketSettings = settingsBuilder.createBucketSettings().build(); + listBucketsSettings = settingsBuilder.listBucketsSettings().build(); + lockBucketRetentionPolicySettings = settingsBuilder.lockBucketRetentionPolicySettings().build(); + getIamPolicySettings = settingsBuilder.getIamPolicySettings().build(); + setIamPolicySettings = settingsBuilder.setIamPolicySettings().build(); + testIamPermissionsSettings = settingsBuilder.testIamPermissionsSettings().build(); + updateBucketSettings = settingsBuilder.updateBucketSettings().build(); + composeObjectSettings = settingsBuilder.composeObjectSettings().build(); + deleteObjectSettings = settingsBuilder.deleteObjectSettings().build(); + restoreObjectSettings = settingsBuilder.restoreObjectSettings().build(); + cancelResumableWriteSettings = settingsBuilder.cancelResumableWriteSettings().build(); + getObjectSettings = settingsBuilder.getObjectSettings().build(); + readObjectSettings = settingsBuilder.readObjectSettings().build(); + bidiReadObjectSettings = settingsBuilder.bidiReadObjectSettings().build(); + updateObjectSettings = settingsBuilder.updateObjectSettings().build(); + writeObjectSettings = settingsBuilder.writeObjectSettings().build(); + bidiWriteObjectSettings = settingsBuilder.bidiWriteObjectSettings().build(); + listObjectsSettings = settingsBuilder.listObjectsSettings().build(); + rewriteObjectSettings = settingsBuilder.rewriteObjectSettings().build(); + startResumableWriteSettings = 
settingsBuilder.startResumableWriteSettings().build(); + queryWriteStatusSettings = settingsBuilder.queryWriteStatusSettings().build(); + moveObjectSettings = settingsBuilder.moveObjectSettings().build(); + } + + /** Builder for StorageStubSettings. */ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final UnaryCallSettings.Builder deleteBucketSettings; + private final UnaryCallSettings.Builder getBucketSettings; + private final UnaryCallSettings.Builder createBucketSettings; + private final PagedCallSettings.Builder< + ListBucketsRequest, ListBucketsResponse, ListBucketsPagedResponse> + listBucketsSettings; + private final UnaryCallSettings.Builder + lockBucketRetentionPolicySettings; + private final UnaryCallSettings.Builder getIamPolicySettings; + private final UnaryCallSettings.Builder setIamPolicySettings; + private final UnaryCallSettings.Builder + testIamPermissionsSettings; + private final UnaryCallSettings.Builder updateBucketSettings; + private final UnaryCallSettings.Builder composeObjectSettings; + private final UnaryCallSettings.Builder deleteObjectSettings; + private final UnaryCallSettings.Builder restoreObjectSettings; + private final UnaryCallSettings.Builder< + CancelResumableWriteRequest, CancelResumableWriteResponse> + cancelResumableWriteSettings; + private final UnaryCallSettings.Builder getObjectSettings; + private final ServerStreamingCallSettings.Builder + readObjectSettings; + private final StreamingCallSettings.Builder + bidiReadObjectSettings; + private final UnaryCallSettings.Builder updateObjectSettings; + private final StreamingCallSettings.Builder + writeObjectSettings; + private final StreamingCallSettings.Builder + bidiWriteObjectSettings; + private final PagedCallSettings.Builder< + ListObjectsRequest, ListObjectsResponse, ListObjectsPagedResponse> + listObjectsSettings; + private final UnaryCallSettings.Builder + rewriteObjectSettings; + private 
final UnaryCallSettings.Builder + startResumableWriteSettings; + private final UnaryCallSettings.Builder + queryWriteStatusSettings; + private final UnaryCallSettings.Builder moveObjectSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_0_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setTotalTimeoutDuration(Duration.ofMillis(60000L)) + .build(); + definitions.put("retry_policy_0_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + deleteBucketSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getBucketSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createBucketSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listBucketsSettings = PagedCallSettings.newBuilder(LIST_BUCKETS_PAGE_STR_FACT); + lockBucketRetentionPolicySettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getIamPolicySettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + setIamPolicySettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + testIamPermissionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + 
updateBucketSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + composeObjectSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + deleteObjectSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + restoreObjectSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + cancelResumableWriteSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getObjectSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + readObjectSettings = ServerStreamingCallSettings.newBuilder(); + bidiReadObjectSettings = StreamingCallSettings.newBuilder(); + updateObjectSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + writeObjectSettings = StreamingCallSettings.newBuilder(); + bidiWriteObjectSettings = StreamingCallSettings.newBuilder(); + listObjectsSettings = PagedCallSettings.newBuilder(LIST_OBJECTS_PAGE_STR_FACT); + rewriteObjectSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + startResumableWriteSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + queryWriteStatusSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + moveObjectSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + deleteBucketSettings, + getBucketSettings, + createBucketSettings, + listBucketsSettings, + lockBucketRetentionPolicySettings, + getIamPolicySettings, + setIamPolicySettings, + testIamPermissionsSettings, + updateBucketSettings, + composeObjectSettings, + deleteObjectSettings, + restoreObjectSettings, + cancelResumableWriteSettings, + getObjectSettings, + updateObjectSettings, + listObjectsSettings, + rewriteObjectSettings, + startResumableWriteSettings, + queryWriteStatusSettings, + moveObjectSettings); + initDefaults(this); + } + + protected Builder(StorageStubSettings settings) { + super(settings); + + deleteBucketSettings = settings.deleteBucketSettings.toBuilder(); + getBucketSettings = settings.getBucketSettings.toBuilder(); + createBucketSettings = 
settings.createBucketSettings.toBuilder(); + listBucketsSettings = settings.listBucketsSettings.toBuilder(); + lockBucketRetentionPolicySettings = settings.lockBucketRetentionPolicySettings.toBuilder(); + getIamPolicySettings = settings.getIamPolicySettings.toBuilder(); + setIamPolicySettings = settings.setIamPolicySettings.toBuilder(); + testIamPermissionsSettings = settings.testIamPermissionsSettings.toBuilder(); + updateBucketSettings = settings.updateBucketSettings.toBuilder(); + composeObjectSettings = settings.composeObjectSettings.toBuilder(); + deleteObjectSettings = settings.deleteObjectSettings.toBuilder(); + restoreObjectSettings = settings.restoreObjectSettings.toBuilder(); + cancelResumableWriteSettings = settings.cancelResumableWriteSettings.toBuilder(); + getObjectSettings = settings.getObjectSettings.toBuilder(); + readObjectSettings = settings.readObjectSettings.toBuilder(); + bidiReadObjectSettings = settings.bidiReadObjectSettings.toBuilder(); + updateObjectSettings = settings.updateObjectSettings.toBuilder(); + writeObjectSettings = settings.writeObjectSettings.toBuilder(); + bidiWriteObjectSettings = settings.bidiWriteObjectSettings.toBuilder(); + listObjectsSettings = settings.listObjectsSettings.toBuilder(); + rewriteObjectSettings = settings.rewriteObjectSettings.toBuilder(); + startResumableWriteSettings = settings.startResumableWriteSettings.toBuilder(); + queryWriteStatusSettings = settings.queryWriteStatusSettings.toBuilder(); + moveObjectSettings = settings.moveObjectSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + deleteBucketSettings, + getBucketSettings, + createBucketSettings, + listBucketsSettings, + lockBucketRetentionPolicySettings, + getIamPolicySettings, + setIamPolicySettings, + testIamPermissionsSettings, + updateBucketSettings, + composeObjectSettings, + deleteObjectSettings, + restoreObjectSettings, + cancelResumableWriteSettings, + getObjectSettings, + updateObjectSettings, + 
listObjectsSettings, + rewriteObjectSettings, + startResumableWriteSettings, + queryWriteStatusSettings, + moveObjectSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .deleteBucketSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getBucketSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .createBucketSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listBucketsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .lockBucketRetentionPolicySettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getIamPolicySettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .setIamPolicySettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + 
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .testIamPermissionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateBucketSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .composeObjectSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .deleteObjectSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .restoreObjectSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .cancelResumableWriteSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getObjectSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .readObjectSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateObjectSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listObjectsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + 
.rewriteObjectSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .startResumableWriteSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .queryWriteStatusSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .moveObjectSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to deleteBucket. */ + public UnaryCallSettings.Builder deleteBucketSettings() { + return deleteBucketSettings; + } + + /** Returns the builder for the settings used for calls to getBucket. */ + public UnaryCallSettings.Builder getBucketSettings() { + return getBucketSettings; + } + + /** Returns the builder for the settings used for calls to createBucket. */ + public UnaryCallSettings.Builder createBucketSettings() { + return createBucketSettings; + } + + /** Returns the builder for the settings used for calls to listBuckets. */ + public PagedCallSettings.Builder< + ListBucketsRequest, ListBucketsResponse, ListBucketsPagedResponse> + listBucketsSettings() { + return listBucketsSettings; + } + + /** Returns the builder for the settings used for calls to lockBucketRetentionPolicy. */ + public UnaryCallSettings.Builder + lockBucketRetentionPolicySettings() { + return lockBucketRetentionPolicySettings; + } + + /** Returns the builder for the settings used for calls to getIamPolicy. */ + public UnaryCallSettings.Builder getIamPolicySettings() { + return getIamPolicySettings; + } + + /** Returns the builder for the settings used for calls to setIamPolicy. */ + public UnaryCallSettings.Builder setIamPolicySettings() { + return setIamPolicySettings; + } + + /** Returns the builder for the settings used for calls to testIamPermissions. */ + public UnaryCallSettings.Builder + testIamPermissionsSettings() { + return testIamPermissionsSettings; + } + + /** Returns the builder for the settings used for calls to updateBucket. 
*/ + public UnaryCallSettings.Builder updateBucketSettings() { + return updateBucketSettings; + } + + /** Returns the builder for the settings used for calls to composeObject. */ + public UnaryCallSettings.Builder composeObjectSettings() { + return composeObjectSettings; + } + + /** Returns the builder for the settings used for calls to deleteObject. */ + public UnaryCallSettings.Builder deleteObjectSettings() { + return deleteObjectSettings; + } + + /** Returns the builder for the settings used for calls to restoreObject. */ + public UnaryCallSettings.Builder restoreObjectSettings() { + return restoreObjectSettings; + } + + /** Returns the builder for the settings used for calls to cancelResumableWrite. */ + public UnaryCallSettings.Builder + cancelResumableWriteSettings() { + return cancelResumableWriteSettings; + } + + /** Returns the builder for the settings used for calls to getObject. */ + public UnaryCallSettings.Builder getObjectSettings() { + return getObjectSettings; + } + + /** Returns the builder for the settings used for calls to readObject. */ + public ServerStreamingCallSettings.Builder + readObjectSettings() { + return readObjectSettings; + } + + /** Returns the builder for the settings used for calls to bidiReadObject. */ + public StreamingCallSettings.Builder + bidiReadObjectSettings() { + return bidiReadObjectSettings; + } + + /** Returns the builder for the settings used for calls to updateObject. */ + public UnaryCallSettings.Builder updateObjectSettings() { + return updateObjectSettings; + } + + /** Returns the builder for the settings used for calls to writeObject. */ + public StreamingCallSettings.Builder + writeObjectSettings() { + return writeObjectSettings; + } + + /** Returns the builder for the settings used for calls to bidiWriteObject. */ + public StreamingCallSettings.Builder + bidiWriteObjectSettings() { + return bidiWriteObjectSettings; + } + + /** Returns the builder for the settings used for calls to listObjects. 
*/ + public PagedCallSettings.Builder< + ListObjectsRequest, ListObjectsResponse, ListObjectsPagedResponse> + listObjectsSettings() { + return listObjectsSettings; + } + + /** Returns the builder for the settings used for calls to rewriteObject. */ + public UnaryCallSettings.Builder + rewriteObjectSettings() { + return rewriteObjectSettings; + } + + /** Returns the builder for the settings used for calls to startResumableWrite. */ + public UnaryCallSettings.Builder + startResumableWriteSettings() { + return startResumableWriteSettings; + } + + /** Returns the builder for the settings used for calls to queryWriteStatus. */ + public UnaryCallSettings.Builder + queryWriteStatusSettings() { + return queryWriteStatusSettings; + } + + /** Returns the builder for the settings used for calls to moveObject. */ + public UnaryCallSettings.Builder moveObjectSettings() { + return moveObjectSettings; + } + + @Override + public StorageStubSettings build() throws IOException { + return new StorageStubSettings(this); + } + } +} diff --git a/java-storage/gapic-google-cloud-storage-v2/src/main/resources/META-INF/native-image/com.google.storage.v2/reflect-config.json b/java-storage/gapic-google-cloud-storage-v2/src/main/resources/META-INF/native-image/com.google.storage.v2/reflect-config.json new file mode 100644 index 000000000000..6d583a1c3b6d --- /dev/null +++ b/java-storage/gapic-google-cloud-storage-v2/src/main/resources/META-INF/native-image/com.google.storage.v2/reflect-config.json @@ -0,0 +1,3143 @@ +[ + { + "name": "com.google.api.ClientLibraryDestination", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibraryOrganization", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": 
true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldBehavior", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.LaunchStage", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning$Builder", + "queryAllDeclaredConstructors": 
true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$History", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Style", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RoutingParameter", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RoutingParameter$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RoutingRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RoutingRule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta$Action", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig$LogType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Binding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Binding$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta$Action", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetIamPolicyRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.iam.v1.GetIamPolicyRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetPolicyOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetPolicyOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Policy", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Policy$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.PolicyDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.PolicyDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.SetIamPolicyRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.SetIamPolicyRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto", + "queryAllDeclaredConstructors": 
true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, 
+ "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$VerificationState", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnforceNamingStyle", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$FieldPresence", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$JsonFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$MessageEncoding", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$RepeatedFieldEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Utf8Validation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$DefaultSymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Label", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$FieldOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$CType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$FieldOptions$JSType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionRetention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionTargetType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$FileOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$OptimizeMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Semantic", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$MethodOptions$IdempotencyLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$ServiceOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$UninterpretedOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.AppendObjectSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.AppendObjectSpec$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiReadHandle", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiReadHandle$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiReadObjectError", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiReadObjectError$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiReadObjectRedirectedError", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiReadObjectRedirectedError$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiReadObjectRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiReadObjectRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiReadObjectResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiReadObjectResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiReadObjectSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiReadObjectSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiWriteHandle", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiWriteHandle$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.v2.BidiWriteObjectRedirectedError", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiWriteObjectRedirectedError$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiWriteObjectRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiWriteObjectRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiWriteObjectResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BidiWriteObjectResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Autoclass", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": 
true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Autoclass$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Billing", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Billing$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Cors", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Cors$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$CustomPlacementConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.v2.Bucket$CustomPlacementConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Encryption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Encryption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Encryption$CustomerManagedEncryptionEnforcementConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Encryption$CustomerManagedEncryptionEnforcementConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Encryption$CustomerSuppliedEncryptionEnforcementConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Encryption$CustomerSuppliedEncryptionEnforcementConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Encryption$GoogleManagedEncryptionEnforcementConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Encryption$GoogleManagedEncryptionEnforcementConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$HierarchicalNamespace", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$HierarchicalNamespace$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$IamConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$IamConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$IamConfig$UniformBucketLevelAccess", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, 
+ { + "name": "com.google.storage.v2.Bucket$IamConfig$UniformBucketLevelAccess$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$IpFilter", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$IpFilter$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$IpFilter$PublicNetworkSource", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$IpFilter$PublicNetworkSource$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$IpFilter$VpcNetworkSource", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$IpFilter$VpcNetworkSource$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Lifecycle", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Lifecycle$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Lifecycle$Rule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Lifecycle$Rule$Action", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Lifecycle$Rule$Action$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Lifecycle$Rule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Lifecycle$Rule$Condition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Lifecycle$Rule$Condition$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Logging", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Logging$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$ObjectRetention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$ObjectRetention$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$RetentionPolicy", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$RetentionPolicy$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$SoftDeletePolicy", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.storage.v2.Bucket$SoftDeletePolicy$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Versioning", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Versioning$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Website", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Bucket$Website$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BucketAccessControl", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.BucketAccessControl$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.CancelResumableWriteRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.CancelResumableWriteRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.CancelResumableWriteResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.CancelResumableWriteResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ChecksummedData", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ChecksummedData$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.CommonObjectRequestParams", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.CommonObjectRequestParams$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ComposeObjectRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ComposeObjectRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ComposeObjectRequest$SourceObject", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ComposeObjectRequest$SourceObject$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ComposeObjectRequest$SourceObject$ObjectPreconditions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ComposeObjectRequest$SourceObject$ObjectPreconditions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ContentRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.v2.ContentRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.CreateBucketRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.CreateBucketRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.CustomerEncryption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.CustomerEncryption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.DeleteBucketRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.DeleteBucketRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.DeleteObjectRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.DeleteObjectRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.GetBucketRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.GetBucketRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.GetObjectRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.GetObjectRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ListBucketsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ListBucketsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.v2.ListBucketsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ListBucketsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ListObjectsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ListObjectsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ListObjectsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ListObjectsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.LockBucketRetentionPolicyRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.LockBucketRetentionPolicyRequest$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.MoveObjectRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.MoveObjectRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Object", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Object$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Object$Retention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Object$Retention$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Object$Retention$Mode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.v2.ObjectAccessControl", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ObjectAccessControl$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ObjectChecksums", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ObjectChecksums$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ObjectContexts", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ObjectContexts$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ObjectCustomContextPayload", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ObjectCustomContextPayload$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ObjectRangeData", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ObjectRangeData$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Owner", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.Owner$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ProjectTeam", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ProjectTeam$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.QueryWriteStatusRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.v2.QueryWriteStatusRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.QueryWriteStatusResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.QueryWriteStatusResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ReadObjectRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ReadObjectRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ReadObjectResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ReadObjectResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ReadRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ReadRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ReadRangeError", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ReadRangeError$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.RestoreObjectRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.RestoreObjectRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.RewriteObjectRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.RewriteObjectRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.v2.RewriteResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.RewriteResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ServiceConstants", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ServiceConstants$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.ServiceConstants$Values", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.StartResumableWriteRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.StartResumableWriteRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.StartResumableWriteResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.StartResumableWriteResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.UpdateBucketRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.UpdateBucketRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.UpdateObjectRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.UpdateObjectRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.WriteObjectRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.WriteObjectRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { 
+ "name": "com.google.storage.v2.WriteObjectResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.WriteObjectResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.WriteObjectSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.v2.WriteObjectSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.type.Date", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.type.Date$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.type.Expr", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.type.Expr$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, 
+ "allPublicClasses": true + } +] \ No newline at end of file diff --git a/java-storage/gapic-google-cloud-storage-v2/src/main/resources/com/google/storage/v2/gapic_metadata.json b/java-storage/gapic-google-cloud-storage-v2/src/main/resources/com/google/storage/v2/gapic_metadata.json new file mode 100644 index 000000000000..78b2d9c7ebb2 --- /dev/null +++ b/java-storage/gapic-google-cloud-storage-v2/src/main/resources/com/google/storage/v2/gapic_metadata.json @@ -0,0 +1,90 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "java", + "protoPackage": "google.storage.v2", + "libraryPackage": "com.google.storage.v2", + "services": { + "Storage": { + "clients": { + "grpc": { + "libraryClient": "StorageClient", + "rpcs": { + "BidiReadObject": { + "methods": ["bidiReadObjectCallable"] + }, + "BidiWriteObject": { + "methods": ["bidiWriteObjectCallable"] + }, + "CancelResumableWrite": { + "methods": ["cancelResumableWrite", "cancelResumableWrite", "cancelResumableWriteCallable"] + }, + "ComposeObject": { + "methods": ["composeObject", "composeObjectCallable"] + }, + "CreateBucket": { + "methods": ["createBucket", "createBucket", "createBucket", "createBucketCallable"] + }, + "DeleteBucket": { + "methods": ["deleteBucket", "deleteBucket", "deleteBucket", "deleteBucketCallable"] + }, + "DeleteObject": { + "methods": ["deleteObject", "deleteObject", "deleteObject", "deleteObject", "deleteObject", "deleteObjectCallable"] + }, + "GetBucket": { + "methods": ["getBucket", "getBucket", "getBucket", "getBucketCallable"] + }, + "GetIamPolicy": { + "methods": ["getIamPolicy", "getIamPolicy", "getIamPolicy", "getIamPolicyCallable"] + }, + "GetObject": { + "methods": ["getObject", "getObject", "getObject", "getObject", "getObject", "getObjectCallable"] + }, + "ListBuckets": { + "methods": ["listBuckets", "listBuckets", "listBuckets", "listBucketsPagedCallable", "listBucketsCallable"] + }, + "ListObjects": 
{ + "methods": ["listObjects", "listObjects", "listObjects", "listObjectsPagedCallable", "listObjectsCallable"] + }, + "LockBucketRetentionPolicy": { + "methods": ["lockBucketRetentionPolicy", "lockBucketRetentionPolicy", "lockBucketRetentionPolicy", "lockBucketRetentionPolicyCallable"] + }, + "MoveObject": { + "methods": ["moveObject", "moveObject", "moveObject", "moveObjectCallable"] + }, + "QueryWriteStatus": { + "methods": ["queryWriteStatus", "queryWriteStatus", "queryWriteStatusCallable"] + }, + "ReadObject": { + "methods": ["readObjectCallable"] + }, + "RestoreObject": { + "methods": ["restoreObject", "restoreObject", "restoreObject", "restoreObjectCallable"] + }, + "RewriteObject": { + "methods": ["rewriteObject", "rewriteObjectCallable"] + }, + "SetIamPolicy": { + "methods": ["setIamPolicy", "setIamPolicy", "setIamPolicy", "setIamPolicyCallable"] + }, + "StartResumableWrite": { + "methods": ["startResumableWrite", "startResumableWriteCallable"] + }, + "TestIamPermissions": { + "methods": ["testIamPermissions", "testIamPermissions", "testIamPermissions", "testIamPermissionsCallable"] + }, + "UpdateBucket": { + "methods": ["updateBucket", "updateBucket", "updateBucketCallable"] + }, + "UpdateObject": { + "methods": ["updateObject", "updateObject", "updateObjectCallable"] + }, + "WriteObject": { + "methods": ["writeObjectCallable"] + } + } + } + } + } + } +} \ No newline at end of file diff --git a/java-storage/gapic-google-cloud-storage-v2/src/test/java/com/google/storage/v2/MockStorage.java b/java-storage/gapic-google-cloud-storage-v2/src/test/java/com/google/storage/v2/MockStorage.java new file mode 100644 index 000000000000..37179e587035 --- /dev/null +++ b/java-storage/gapic-google-cloud-storage-v2/src/test/java/com/google/storage/v2/MockStorage.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.v2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockStorage implements MockGrpcService { + private final MockStorageImpl serviceImpl; + + public MockStorage() { + serviceImpl = new MockStorageImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/java-storage/gapic-google-cloud-storage-v2/src/test/java/com/google/storage/v2/MockStorageImpl.java b/java-storage/gapic-google-cloud-storage-v2/src/test/java/com/google/storage/v2/MockStorageImpl.java new file mode 100644 index 000000000000..fd6c74ad0fdd --- /dev/null +++ b/java-storage/gapic-google-cloud-storage-v2/src/test/java/com/google/storage/v2/MockStorageImpl.java @@ -0,0 +1,613 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.v2; + +import com.google.api.core.BetaApi; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Empty; +import com.google.storage.v2.StorageGrpc.StorageImplBase; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockStorageImpl extends StorageImplBase { + private List requests; + private Queue responses; + + public MockStorageImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void deleteBucket(DeleteBucketRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + 
responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteBucket, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getBucket(GetBucketRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof Bucket) { + requests.add(request); + responseObserver.onNext(((Bucket) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetBucket, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Bucket.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void createBucket(CreateBucketRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof Bucket) { + requests.add(request); + responseObserver.onNext(((Bucket) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateBucket, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Bucket.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listBuckets( + ListBucketsRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof ListBucketsResponse) { + requests.add(request); + responseObserver.onNext(((ListBucketsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListBuckets, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ListBucketsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void lockBucketRetentionPolicy( + LockBucketRetentionPolicyRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof Bucket) { + requests.add(request); + responseObserver.onNext(((Bucket) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method LockBucketRetentionPolicy, expected %s" + + " or %s", + response == null ? 
"null" : response.getClass().getName(), + Bucket.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getIamPolicy(GetIamPolicyRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof Policy) { + requests.add(request); + responseObserver.onNext(((Policy) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetIamPolicy, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Policy.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void setIamPolicy(SetIamPolicyRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof Policy) { + requests.add(request); + responseObserver.onNext(((Policy) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method SetIamPolicy, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Policy.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void testIamPermissions( + TestIamPermissionsRequest request, + StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof TestIamPermissionsResponse) { + requests.add(request); + responseObserver.onNext(((TestIamPermissionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method TestIamPermissions, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + TestIamPermissionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateBucket(UpdateBucketRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof Bucket) { + requests.add(request); + responseObserver.onNext(((Bucket) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateBucket, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Bucket.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void composeObject(ComposeObjectRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof Object) { + requests.add(request); + responseObserver.onNext(((Object) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ComposeObject, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Object.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteObject(DeleteObjectRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteObject, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void restoreObject(RestoreObjectRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof Object) { + requests.add(request); + responseObserver.onNext(((Object) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method RestoreObject, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Object.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void cancelResumableWrite( + CancelResumableWriteRequest request, + StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof CancelResumableWriteResponse) { + requests.add(request); + responseObserver.onNext(((CancelResumableWriteResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CancelResumableWrite, expected %s or" + + " %s", + response == null ? 
"null" : response.getClass().getName(), + CancelResumableWriteResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getObject(GetObjectRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof Object) { + requests.add(request); + responseObserver.onNext(((Object) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetObject, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Object.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void readObject( + ReadObjectRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof ReadObjectResponse) { + requests.add(request); + responseObserver.onNext(((ReadObjectResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ReadObject, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ReadObjectResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public StreamObserver bidiReadObject( + final StreamObserver responseObserver) { + StreamObserver requestObserver = + new StreamObserver() { + @Override + public void onNext(BidiReadObjectRequest value) { + requests.add(value); + final java.lang.Object response = responses.remove(); + if (response instanceof BidiReadObjectResponse) { + responseObserver.onNext(((BidiReadObjectResponse) response)); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BidiReadObject, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + BidiReadObjectResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void onError(Throwable t) { + responseObserver.onError(t); + } + + @Override + public void onCompleted() { + responseObserver.onCompleted(); + } + }; + return requestObserver; + } + + @Override + public void updateObject(UpdateObjectRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof Object) { + requests.add(request); + responseObserver.onNext(((Object) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateObject, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Object.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public StreamObserver writeObject( + final StreamObserver responseObserver) { + StreamObserver requestObserver = + new StreamObserver() { + @Override + public void onNext(WriteObjectRequest value) { + requests.add(value); + final java.lang.Object response = responses.remove(); + if (response instanceof WriteObjectResponse) { + responseObserver.onNext(((WriteObjectResponse) response)); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method WriteObject, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + WriteObjectResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void onError(Throwable t) { + responseObserver.onError(t); + } + + @Override + public void onCompleted() { + responseObserver.onCompleted(); + } + }; + return requestObserver; + } + + @Override + public StreamObserver bidiWriteObject( + final StreamObserver responseObserver) { + StreamObserver requestObserver = + new StreamObserver() { + @Override + public void onNext(BidiWriteObjectRequest value) { + requests.add(value); + final java.lang.Object response = responses.remove(); + if (response instanceof BidiWriteObjectResponse) { + responseObserver.onNext(((BidiWriteObjectResponse) response)); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BidiWriteObject, expected %s or" + + " %s", + response == null ? 
"null" : response.getClass().getName(), + BidiWriteObjectResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void onError(Throwable t) { + responseObserver.onError(t); + } + + @Override + public void onCompleted() { + responseObserver.onCompleted(); + } + }; + return requestObserver; + } + + @Override + public void listObjects( + ListObjectsRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof ListObjectsResponse) { + requests.add(request); + responseObserver.onNext(((ListObjectsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListObjects, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ListObjectsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void rewriteObject( + RewriteObjectRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof RewriteResponse) { + requests.add(request); + responseObserver.onNext(((RewriteResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method RewriteObject, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + RewriteResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void startResumableWrite( + StartResumableWriteRequest request, + StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof StartResumableWriteResponse) { + requests.add(request); + responseObserver.onNext(((StartResumableWriteResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method StartResumableWrite, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + StartResumableWriteResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void queryWriteStatus( + QueryWriteStatusRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof QueryWriteStatusResponse) { + requests.add(request); + responseObserver.onNext(((QueryWriteStatusResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method QueryWriteStatus, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + QueryWriteStatusResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void moveObject(MoveObjectRequest request, StreamObserver responseObserver) { + java.lang.Object response = responses.poll(); + if (response instanceof Object) { + requests.add(request); + responseObserver.onNext(((Object) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method MoveObject, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Object.class.getName(), + Exception.class.getName()))); + } + } +} diff --git a/java-storage/gapic-google-cloud-storage-v2/src/test/java/com/google/storage/v2/StorageClientTest.java b/java-storage/gapic-google-cloud-storage-v2/src/test/java/com/google/storage/v2/StorageClientTest.java new file mode 100644 index 000000000000..f59fe20a0ed3 --- /dev/null +++ b/java-storage/gapic-google-cloud-storage-v2/src/test/java/com/google/storage/v2/StorageClientTest.java @@ -0,0 +1,2563 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.v2; + +import static com.google.storage.v2.StorageClient.ListBucketsPagedResponse; +import static com.google.storage.v2.StorageClient.ListObjectsPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiStreamObserver; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.resourcenames.ResourceName; +import com.google.common.collect.Lists; +import com.google.iam.v1.AuditConfig; +import com.google.iam.v1.Binding; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ByteString; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Timestamp; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") 
+public class StorageClientTest { + private static MockServiceHelper mockServiceHelper; + private static MockStorage mockStorage; + private LocalChannelProvider channelProvider; + private StorageClient client; + + @BeforeClass + public static void startStaticServer() { + mockStorage = new MockStorage(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockStorage)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); + StorageSettings settings = + StorageSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = StorageClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void deleteBucketTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockStorage.addResponse(expectedResponse); + + BucketName name = BucketName.of("[PROJECT]", "[BUCKET]"); + + client.deleteBucket(name); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteBucketRequest actualRequest = ((DeleteBucketRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteBucketExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + BucketName name = BucketName.of("[PROJECT]", "[BUCKET]"); + client.deleteBucket(name); + Assert.fail("No exception 
raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteBucketTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockStorage.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteBucket(name); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteBucketRequest actualRequest = ((DeleteBucketRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteBucketExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String name = "name3373707"; + client.deleteBucket(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getBucketTest() throws Exception { + Bucket expectedResponse = + Bucket.newBuilder() + .setName(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setBucketId("bucketId-1603305307") + .setEtag("etag3123477") + .setProject(ProjectName.of("[PROJECT]").toString()) + .setMetageneration(1048558813) + .setLocation("location1901043637") + .setLocationType("locationType-58277745") + .setStorageClass("storageClass871353277") + .setRpo("rpo113137") + .addAllAcl(new ArrayList()) + .addAllDefaultObjectAcl(new ArrayList()) + .setLifecycle(Bucket.Lifecycle.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllCors(new ArrayList()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setDefaultEventBasedHold(true) + .putAllLabels(new HashMap()) + .setWebsite(Bucket.Website.newBuilder().build()) + .setVersioning(Bucket.Versioning.newBuilder().build()) + .setLogging(Bucket.Logging.newBuilder().build()) + .setOwner(Owner.newBuilder().build()) + .setEncryption(Bucket.Encryption.newBuilder().build()) + .setBilling(Bucket.Billing.newBuilder().build()) + .setRetentionPolicy(Bucket.RetentionPolicy.newBuilder().build()) + .setIamConfig(Bucket.IamConfig.newBuilder().build()) + .setSatisfiesPzs(true) + .setCustomPlacementConfig(Bucket.CustomPlacementConfig.newBuilder().build()) + .setAutoclass(Bucket.Autoclass.newBuilder().build()) + .setHierarchicalNamespace(Bucket.HierarchicalNamespace.newBuilder().build()) + .setSoftDeletePolicy(Bucket.SoftDeletePolicy.newBuilder().build()) + .setObjectRetention(Bucket.ObjectRetention.newBuilder().build()) + .setIpFilter(Bucket.IpFilter.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + BucketName name = BucketName.of("[PROJECT]", "[BUCKET]"); + + Bucket actualResponse = client.getBucket(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + 
GetBucketRequest actualRequest = ((GetBucketRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getBucketExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + BucketName name = BucketName.of("[PROJECT]", "[BUCKET]"); + client.getBucket(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getBucketTest2() throws Exception { + Bucket expectedResponse = + Bucket.newBuilder() + .setName(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setBucketId("bucketId-1603305307") + .setEtag("etag3123477") + .setProject(ProjectName.of("[PROJECT]").toString()) + .setMetageneration(1048558813) + .setLocation("location1901043637") + .setLocationType("locationType-58277745") + .setStorageClass("storageClass871353277") + .setRpo("rpo113137") + .addAllAcl(new ArrayList()) + .addAllDefaultObjectAcl(new ArrayList()) + .setLifecycle(Bucket.Lifecycle.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllCors(new ArrayList()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setDefaultEventBasedHold(true) + .putAllLabels(new HashMap()) + .setWebsite(Bucket.Website.newBuilder().build()) + .setVersioning(Bucket.Versioning.newBuilder().build()) + .setLogging(Bucket.Logging.newBuilder().build()) + .setOwner(Owner.newBuilder().build()) + .setEncryption(Bucket.Encryption.newBuilder().build()) + .setBilling(Bucket.Billing.newBuilder().build()) + .setRetentionPolicy(Bucket.RetentionPolicy.newBuilder().build()) + .setIamConfig(Bucket.IamConfig.newBuilder().build()) + .setSatisfiesPzs(true) + 
.setCustomPlacementConfig(Bucket.CustomPlacementConfig.newBuilder().build()) + .setAutoclass(Bucket.Autoclass.newBuilder().build()) + .setHierarchicalNamespace(Bucket.HierarchicalNamespace.newBuilder().build()) + .setSoftDeletePolicy(Bucket.SoftDeletePolicy.newBuilder().build()) + .setObjectRetention(Bucket.ObjectRetention.newBuilder().build()) + .setIpFilter(Bucket.IpFilter.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + String name = "name3373707"; + + Bucket actualResponse = client.getBucket(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetBucketRequest actualRequest = ((GetBucketRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getBucketExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String name = "name3373707"; + client.getBucket(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createBucketTest() throws Exception { + Bucket expectedResponse = + Bucket.newBuilder() + .setName(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setBucketId("bucketId-1603305307") + .setEtag("etag3123477") + .setProject(ProjectName.of("[PROJECT]").toString()) + .setMetageneration(1048558813) + .setLocation("location1901043637") + .setLocationType("locationType-58277745") + .setStorageClass("storageClass871353277") + .setRpo("rpo113137") + .addAllAcl(new ArrayList()) + .addAllDefaultObjectAcl(new ArrayList()) + .setLifecycle(Bucket.Lifecycle.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllCors(new ArrayList()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setDefaultEventBasedHold(true) + .putAllLabels(new HashMap()) + .setWebsite(Bucket.Website.newBuilder().build()) + .setVersioning(Bucket.Versioning.newBuilder().build()) + .setLogging(Bucket.Logging.newBuilder().build()) + .setOwner(Owner.newBuilder().build()) + .setEncryption(Bucket.Encryption.newBuilder().build()) + .setBilling(Bucket.Billing.newBuilder().build()) + .setRetentionPolicy(Bucket.RetentionPolicy.newBuilder().build()) + .setIamConfig(Bucket.IamConfig.newBuilder().build()) + .setSatisfiesPzs(true) + .setCustomPlacementConfig(Bucket.CustomPlacementConfig.newBuilder().build()) + .setAutoclass(Bucket.Autoclass.newBuilder().build()) + .setHierarchicalNamespace(Bucket.HierarchicalNamespace.newBuilder().build()) + .setSoftDeletePolicy(Bucket.SoftDeletePolicy.newBuilder().build()) + .setObjectRetention(Bucket.ObjectRetention.newBuilder().build()) + .setIpFilter(Bucket.IpFilter.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + Bucket bucket = Bucket.newBuilder().build(); + String bucketId = "bucketId-1603305307"; + + Bucket actualResponse = client.createBucket(parent, bucket, bucketId); + Assert.assertEquals(expectedResponse, actualResponse); + + 
List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateBucketRequest actualRequest = ((CreateBucketRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(bucket, actualRequest.getBucket()); + Assert.assertEquals(bucketId, actualRequest.getBucketId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createBucketExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + Bucket bucket = Bucket.newBuilder().build(); + String bucketId = "bucketId-1603305307"; + client.createBucket(parent, bucket, bucketId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createBucketTest2() throws Exception { + Bucket expectedResponse = + Bucket.newBuilder() + .setName(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setBucketId("bucketId-1603305307") + .setEtag("etag3123477") + .setProject(ProjectName.of("[PROJECT]").toString()) + .setMetageneration(1048558813) + .setLocation("location1901043637") + .setLocationType("locationType-58277745") + .setStorageClass("storageClass871353277") + .setRpo("rpo113137") + .addAllAcl(new ArrayList()) + .addAllDefaultObjectAcl(new ArrayList()) + .setLifecycle(Bucket.Lifecycle.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllCors(new ArrayList()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setDefaultEventBasedHold(true) + .putAllLabels(new HashMap()) + .setWebsite(Bucket.Website.newBuilder().build()) + .setVersioning(Bucket.Versioning.newBuilder().build()) + .setLogging(Bucket.Logging.newBuilder().build()) + .setOwner(Owner.newBuilder().build()) + .setEncryption(Bucket.Encryption.newBuilder().build()) + .setBilling(Bucket.Billing.newBuilder().build()) + .setRetentionPolicy(Bucket.RetentionPolicy.newBuilder().build()) + .setIamConfig(Bucket.IamConfig.newBuilder().build()) + .setSatisfiesPzs(true) + .setCustomPlacementConfig(Bucket.CustomPlacementConfig.newBuilder().build()) + .setAutoclass(Bucket.Autoclass.newBuilder().build()) + .setHierarchicalNamespace(Bucket.HierarchicalNamespace.newBuilder().build()) + .setSoftDeletePolicy(Bucket.SoftDeletePolicy.newBuilder().build()) + .setObjectRetention(Bucket.ObjectRetention.newBuilder().build()) + .setIpFilter(Bucket.IpFilter.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + String parent = "parent-995424086"; + Bucket bucket = Bucket.newBuilder().build(); + String bucketId = "bucketId-1603305307"; + + Bucket actualResponse = client.createBucket(parent, bucket, bucketId); + Assert.assertEquals(expectedResponse, actualResponse); + + List 
actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateBucketRequest actualRequest = ((CreateBucketRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(bucket, actualRequest.getBucket()); + Assert.assertEquals(bucketId, actualRequest.getBucketId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createBucketExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String parent = "parent-995424086"; + Bucket bucket = Bucket.newBuilder().build(); + String bucketId = "bucketId-1603305307"; + client.createBucket(parent, bucket, bucketId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBucketsTest() throws Exception { + Bucket responsesElement = Bucket.newBuilder().build(); + ListBucketsResponse expectedResponse = + ListBucketsResponse.newBuilder() + .setNextPageToken("") + .addAllBuckets(Arrays.asList(responsesElement)) + .build(); + mockStorage.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + + ListBucketsPagedResponse pagedListResponse = client.listBuckets(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBucketsList().get(0), resources.get(0)); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListBucketsRequest actualRequest = ((ListBucketsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listBucketsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + client.listBuckets(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBucketsTest2() throws Exception { + Bucket responsesElement = Bucket.newBuilder().build(); + ListBucketsResponse expectedResponse = + ListBucketsResponse.newBuilder() + .setNextPageToken("") + .addAllBuckets(Arrays.asList(responsesElement)) + .build(); + mockStorage.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListBucketsPagedResponse pagedListResponse = client.listBuckets(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBucketsList().get(0), resources.get(0)); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListBucketsRequest actualRequest = ((ListBucketsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listBucketsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String parent = "parent-995424086"; + client.listBuckets(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void lockBucketRetentionPolicyTest() throws Exception { + Bucket expectedResponse = + Bucket.newBuilder() + .setName(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setBucketId("bucketId-1603305307") + .setEtag("etag3123477") + .setProject(ProjectName.of("[PROJECT]").toString()) + .setMetageneration(1048558813) + .setLocation("location1901043637") + .setLocationType("locationType-58277745") + .setStorageClass("storageClass871353277") + .setRpo("rpo113137") + .addAllAcl(new ArrayList()) + .addAllDefaultObjectAcl(new ArrayList()) + .setLifecycle(Bucket.Lifecycle.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllCors(new ArrayList()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setDefaultEventBasedHold(true) + .putAllLabels(new HashMap()) + .setWebsite(Bucket.Website.newBuilder().build()) + .setVersioning(Bucket.Versioning.newBuilder().build()) + .setLogging(Bucket.Logging.newBuilder().build()) + .setOwner(Owner.newBuilder().build()) + .setEncryption(Bucket.Encryption.newBuilder().build()) + .setBilling(Bucket.Billing.newBuilder().build()) + .setRetentionPolicy(Bucket.RetentionPolicy.newBuilder().build()) + .setIamConfig(Bucket.IamConfig.newBuilder().build()) + .setSatisfiesPzs(true) + .setCustomPlacementConfig(Bucket.CustomPlacementConfig.newBuilder().build()) + .setAutoclass(Bucket.Autoclass.newBuilder().build()) + .setHierarchicalNamespace(Bucket.HierarchicalNamespace.newBuilder().build()) + .setSoftDeletePolicy(Bucket.SoftDeletePolicy.newBuilder().build()) + .setObjectRetention(Bucket.ObjectRetention.newBuilder().build()) + .setIpFilter(Bucket.IpFilter.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + + Bucket actualResponse = client.lockBucketRetentionPolicy(bucket); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, 
actualRequests.size()); + LockBucketRetentionPolicyRequest actualRequest = + ((LockBucketRetentionPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket.toString(), actualRequest.getBucket()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void lockBucketRetentionPolicyExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + client.lockBucketRetentionPolicy(bucket); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void lockBucketRetentionPolicyTest2() throws Exception { + Bucket expectedResponse = + Bucket.newBuilder() + .setName(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setBucketId("bucketId-1603305307") + .setEtag("etag3123477") + .setProject(ProjectName.of("[PROJECT]").toString()) + .setMetageneration(1048558813) + .setLocation("location1901043637") + .setLocationType("locationType-58277745") + .setStorageClass("storageClass871353277") + .setRpo("rpo113137") + .addAllAcl(new ArrayList()) + .addAllDefaultObjectAcl(new ArrayList()) + .setLifecycle(Bucket.Lifecycle.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllCors(new ArrayList()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setDefaultEventBasedHold(true) + .putAllLabels(new HashMap()) + .setWebsite(Bucket.Website.newBuilder().build()) + .setVersioning(Bucket.Versioning.newBuilder().build()) + .setLogging(Bucket.Logging.newBuilder().build()) + .setOwner(Owner.newBuilder().build()) + .setEncryption(Bucket.Encryption.newBuilder().build()) + .setBilling(Bucket.Billing.newBuilder().build()) + 
.setRetentionPolicy(Bucket.RetentionPolicy.newBuilder().build()) + .setIamConfig(Bucket.IamConfig.newBuilder().build()) + .setSatisfiesPzs(true) + .setCustomPlacementConfig(Bucket.CustomPlacementConfig.newBuilder().build()) + .setAutoclass(Bucket.Autoclass.newBuilder().build()) + .setHierarchicalNamespace(Bucket.HierarchicalNamespace.newBuilder().build()) + .setSoftDeletePolicy(Bucket.SoftDeletePolicy.newBuilder().build()) + .setObjectRetention(Bucket.ObjectRetention.newBuilder().build()) + .setIpFilter(Bucket.IpFilter.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + String bucket = "bucket-1378203158"; + + Bucket actualResponse = client.lockBucketRetentionPolicy(bucket); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + LockBucketRetentionPolicyRequest actualRequest = + ((LockBucketRetentionPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket, actualRequest.getBucket()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void lockBucketRetentionPolicyExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String bucket = "bucket-1378203158"; + client.lockBucketRetentionPolicy(bucket); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getIamPolicyTest() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockStorage.addResponse(expectedResponse); + + ResourceName resource = + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]"); + + Policy actualResponse = client.getIamPolicy(resource); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetIamPolicyRequest actualRequest = ((GetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource.toString(), actualRequest.getResource()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getIamPolicyExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + ResourceName resource = + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]"); + client.getIamPolicy(resource); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getIamPolicyTest2() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockStorage.addResponse(expectedResponse); + + String resource = "resource-341064690"; + + Policy actualResponse = client.getIamPolicy(resource); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetIamPolicyRequest actualRequest = ((GetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource, actualRequest.getResource()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getIamPolicyExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String resource = "resource-341064690"; + client.getIamPolicy(resource); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void setIamPolicyTest() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockStorage.addResponse(expectedResponse); + + ResourceName resource = + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]"); + Policy policy = Policy.newBuilder().build(); + + Policy actualResponse = client.setIamPolicy(resource, policy); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + SetIamPolicyRequest actualRequest = ((SetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource.toString(), actualRequest.getResource()); + Assert.assertEquals(policy, actualRequest.getPolicy()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void setIamPolicyExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + ResourceName resource = + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]"); + Policy policy = Policy.newBuilder().build(); + client.setIamPolicy(resource, policy); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void setIamPolicyTest2() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockStorage.addResponse(expectedResponse); + + String resource = "resource-341064690"; + Policy policy = Policy.newBuilder().build(); + + Policy actualResponse = client.setIamPolicy(resource, policy); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + SetIamPolicyRequest actualRequest = ((SetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource, actualRequest.getResource()); + Assert.assertEquals(policy, actualRequest.getPolicy()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void setIamPolicyExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String resource = "resource-341064690"; + Policy policy = Policy.newBuilder().build(); + client.setIamPolicy(resource, policy); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void testIamPermissionsTest() throws Exception { + TestIamPermissionsResponse expectedResponse = + TestIamPermissionsResponse.newBuilder().addAllPermissions(new ArrayList()).build(); + mockStorage.addResponse(expectedResponse); + + ResourceName resource = + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]"); + List permissions = new ArrayList<>(); + + TestIamPermissionsResponse actualResponse = client.testIamPermissions(resource, permissions); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + TestIamPermissionsRequest actualRequest = ((TestIamPermissionsRequest) actualRequests.get(0)); + + Assert.assertEquals(resource.toString(), actualRequest.getResource()); + Assert.assertEquals(permissions, actualRequest.getPermissionsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void testIamPermissionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + ResourceName resource = + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]"); + List permissions = new ArrayList<>(); + client.testIamPermissions(resource, permissions); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void testIamPermissionsTest2() throws Exception { + TestIamPermissionsResponse expectedResponse = + TestIamPermissionsResponse.newBuilder().addAllPermissions(new ArrayList()).build(); + mockStorage.addResponse(expectedResponse); + + String resource = "resource-341064690"; + List permissions = new ArrayList<>(); + + TestIamPermissionsResponse actualResponse = client.testIamPermissions(resource, permissions); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + TestIamPermissionsRequest actualRequest = ((TestIamPermissionsRequest) actualRequests.get(0)); + + Assert.assertEquals(resource, actualRequest.getResource()); + Assert.assertEquals(permissions, actualRequest.getPermissionsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void testIamPermissionsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String resource = "resource-341064690"; + List permissions = new ArrayList<>(); + client.testIamPermissions(resource, permissions); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateBucketTest() throws Exception { + Bucket expectedResponse = + Bucket.newBuilder() + .setName(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setBucketId("bucketId-1603305307") + .setEtag("etag3123477") + .setProject(ProjectName.of("[PROJECT]").toString()) + .setMetageneration(1048558813) + .setLocation("location1901043637") + .setLocationType("locationType-58277745") + .setStorageClass("storageClass871353277") + .setRpo("rpo113137") + .addAllAcl(new ArrayList()) + .addAllDefaultObjectAcl(new ArrayList()) + .setLifecycle(Bucket.Lifecycle.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .addAllCors(new ArrayList()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setDefaultEventBasedHold(true) + .putAllLabels(new HashMap()) + .setWebsite(Bucket.Website.newBuilder().build()) + .setVersioning(Bucket.Versioning.newBuilder().build()) + .setLogging(Bucket.Logging.newBuilder().build()) + .setOwner(Owner.newBuilder().build()) + .setEncryption(Bucket.Encryption.newBuilder().build()) + .setBilling(Bucket.Billing.newBuilder().build()) + .setRetentionPolicy(Bucket.RetentionPolicy.newBuilder().build()) + .setIamConfig(Bucket.IamConfig.newBuilder().build()) + .setSatisfiesPzs(true) + .setCustomPlacementConfig(Bucket.CustomPlacementConfig.newBuilder().build()) + .setAutoclass(Bucket.Autoclass.newBuilder().build()) + .setHierarchicalNamespace(Bucket.HierarchicalNamespace.newBuilder().build()) + .setSoftDeletePolicy(Bucket.SoftDeletePolicy.newBuilder().build()) + .setObjectRetention(Bucket.ObjectRetention.newBuilder().build()) + .setIpFilter(Bucket.IpFilter.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + Bucket bucket = Bucket.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + Bucket actualResponse = client.updateBucket(bucket, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = 
mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateBucketRequest actualRequest = ((UpdateBucketRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket, actualRequest.getBucket()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateBucketExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + Bucket bucket = Bucket.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateBucket(bucket, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void composeObjectTest() throws Exception { + Object expectedResponse = + Object.newBuilder() + .setName("name3373707") + .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setEtag("etag3123477") + .setGeneration(305703192) + .setRestoreToken("restoreToken1638686731") + .setMetageneration(1048558813) + .setStorageClass("storageClass871353277") + .setSize(3530753) + .setContentEncoding("contentEncoding-160088852") + .setContentDisposition("contentDisposition1034341758") + .setCacheControl("cacheControl-1336592517") + .addAllAcl(new ArrayList()) + .setContentLanguage("contentLanguage810066673") + .setDeleteTime(Timestamp.newBuilder().build()) + .setFinalizeTime(Timestamp.newBuilder().build()) + .setContentType("contentType-389131437") + .setCreateTime(Timestamp.newBuilder().build()) + .setComponentCount(-485073075) + .setChecksums(ObjectChecksums.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + 
.toString()) + .setUpdateStorageClassTime(Timestamp.newBuilder().build()) + .setTemporaryHold(true) + .setRetentionExpireTime(Timestamp.newBuilder().build()) + .putAllMetadata(new HashMap()) + .setContexts(ObjectContexts.newBuilder().build()) + .setEventBasedHold(true) + .setOwner(Owner.newBuilder().build()) + .setCustomerEncryption(CustomerEncryption.newBuilder().build()) + .setCustomTime(Timestamp.newBuilder().build()) + .setSoftDeleteTime(Timestamp.newBuilder().build()) + .setHardDeleteTime(Timestamp.newBuilder().build()) + .setRetention(Object.Retention.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + ComposeObjectRequest request = + ComposeObjectRequest.newBuilder() + .setDestination(Object.newBuilder().build()) + .addAllSourceObjects(new ArrayList()) + .setDestinationPredefinedAcl("destinationPredefinedAcl1111125814") + .setIfGenerationMatch(-1086241088) + .setIfMetagenerationMatch(1043427781) + .setKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + .toString()) + .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build()) + .setObjectChecksums(ObjectChecksums.newBuilder().build()) + .setDeleteSourceObjects(true) + .build(); + + Object actualResponse = client.composeObject(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ComposeObjectRequest actualRequest = ((ComposeObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getDestination(), actualRequest.getDestination()); + Assert.assertEquals(request.getSourceObjectsList(), actualRequest.getSourceObjectsList()); + Assert.assertEquals( + request.getDestinationPredefinedAcl(), actualRequest.getDestinationPredefinedAcl()); + Assert.assertEquals(request.getIfGenerationMatch(), actualRequest.getIfGenerationMatch()); + Assert.assertEquals( + request.getIfMetagenerationMatch(), 
actualRequest.getIfMetagenerationMatch()); + Assert.assertEquals(request.getKmsKey(), actualRequest.getKmsKey()); + Assert.assertEquals( + request.getCommonObjectRequestParams(), actualRequest.getCommonObjectRequestParams()); + Assert.assertEquals(request.getObjectChecksums(), actualRequest.getObjectChecksums()); + Assert.assertEquals(request.getDeleteSourceObjects(), actualRequest.getDeleteSourceObjects()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void composeObjectExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + ComposeObjectRequest request = + ComposeObjectRequest.newBuilder() + .setDestination(Object.newBuilder().build()) + .addAllSourceObjects(new ArrayList()) + .setDestinationPredefinedAcl("destinationPredefinedAcl1111125814") + .setIfGenerationMatch(-1086241088) + .setIfMetagenerationMatch(1043427781) + .setKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + .toString()) + .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build()) + .setObjectChecksums(ObjectChecksums.newBuilder().build()) + .setDeleteSourceObjects(true) + .build(); + client.composeObject(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteObjectTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockStorage.addResponse(expectedResponse); + + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + String object = "object-1023368385"; + + client.deleteObject(bucket, object); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteObjectRequest actualRequest = ((DeleteObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket.toString(), actualRequest.getBucket()); + Assert.assertEquals(object, actualRequest.getObject()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteObjectExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + String object = "object-1023368385"; + client.deleteObject(bucket, object); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteObjectTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockStorage.addResponse(expectedResponse); + + String bucket = "bucket-1378203158"; + String object = "object-1023368385"; + + client.deleteObject(bucket, object); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteObjectRequest actualRequest = ((DeleteObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket, actualRequest.getBucket()); + Assert.assertEquals(object, actualRequest.getObject()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteObjectExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String bucket = "bucket-1378203158"; + String object = "object-1023368385"; + client.deleteObject(bucket, object); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteObjectTest3() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockStorage.addResponse(expectedResponse); + + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + String object = "object-1023368385"; + long generation = 305703192; + + client.deleteObject(bucket, object, generation); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteObjectRequest actualRequest = ((DeleteObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket.toString(), actualRequest.getBucket()); + Assert.assertEquals(object, actualRequest.getObject()); + Assert.assertEquals(generation, actualRequest.getGeneration()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteObjectExceptionTest3() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + String object = "object-1023368385"; + long generation = 305703192; + client.deleteObject(bucket, object, generation); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteObjectTest4() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockStorage.addResponse(expectedResponse); + + String bucket = "bucket-1378203158"; + String object = "object-1023368385"; + long generation = 305703192; + + client.deleteObject(bucket, object, generation); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteObjectRequest actualRequest = ((DeleteObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket, actualRequest.getBucket()); + Assert.assertEquals(object, actualRequest.getObject()); + Assert.assertEquals(generation, actualRequest.getGeneration()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteObjectExceptionTest4() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String bucket = "bucket-1378203158"; + String object = "object-1023368385"; + long generation = 305703192; + client.deleteObject(bucket, object, generation); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void restoreObjectTest() throws Exception { + Object expectedResponse = + Object.newBuilder() + .setName("name3373707") + .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setEtag("etag3123477") + .setGeneration(305703192) + .setRestoreToken("restoreToken1638686731") + .setMetageneration(1048558813) + .setStorageClass("storageClass871353277") + .setSize(3530753) + .setContentEncoding("contentEncoding-160088852") + .setContentDisposition("contentDisposition1034341758") + .setCacheControl("cacheControl-1336592517") + .addAllAcl(new ArrayList()) + .setContentLanguage("contentLanguage810066673") + .setDeleteTime(Timestamp.newBuilder().build()) + .setFinalizeTime(Timestamp.newBuilder().build()) + .setContentType("contentType-389131437") + .setCreateTime(Timestamp.newBuilder().build()) + .setComponentCount(-485073075) + .setChecksums(ObjectChecksums.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + .toString()) + .setUpdateStorageClassTime(Timestamp.newBuilder().build()) + .setTemporaryHold(true) + .setRetentionExpireTime(Timestamp.newBuilder().build()) + .putAllMetadata(new HashMap()) + .setContexts(ObjectContexts.newBuilder().build()) + .setEventBasedHold(true) + .setOwner(Owner.newBuilder().build()) + .setCustomerEncryption(CustomerEncryption.newBuilder().build()) + .setCustomTime(Timestamp.newBuilder().build()) + .setSoftDeleteTime(Timestamp.newBuilder().build()) + .setHardDeleteTime(Timestamp.newBuilder().build()) + .setRetention(Object.Retention.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + String object = "object-1023368385"; + long generation = 305703192; + + Object actualResponse = client.restoreObject(bucket, object, generation); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = 
mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + RestoreObjectRequest actualRequest = ((RestoreObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket.toString(), actualRequest.getBucket()); + Assert.assertEquals(object, actualRequest.getObject()); + Assert.assertEquals(generation, actualRequest.getGeneration()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void restoreObjectExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + String object = "object-1023368385"; + long generation = 305703192; + client.restoreObject(bucket, object, generation); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void restoreObjectTest2() throws Exception { + Object expectedResponse = + Object.newBuilder() + .setName("name3373707") + .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setEtag("etag3123477") + .setGeneration(305703192) + .setRestoreToken("restoreToken1638686731") + .setMetageneration(1048558813) + .setStorageClass("storageClass871353277") + .setSize(3530753) + .setContentEncoding("contentEncoding-160088852") + .setContentDisposition("contentDisposition1034341758") + .setCacheControl("cacheControl-1336592517") + .addAllAcl(new ArrayList()) + .setContentLanguage("contentLanguage810066673") + .setDeleteTime(Timestamp.newBuilder().build()) + .setFinalizeTime(Timestamp.newBuilder().build()) + .setContentType("contentType-389131437") + .setCreateTime(Timestamp.newBuilder().build()) + .setComponentCount(-485073075) + .setChecksums(ObjectChecksums.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + .toString()) + .setUpdateStorageClassTime(Timestamp.newBuilder().build()) + .setTemporaryHold(true) + .setRetentionExpireTime(Timestamp.newBuilder().build()) + .putAllMetadata(new HashMap()) + .setContexts(ObjectContexts.newBuilder().build()) + .setEventBasedHold(true) + .setOwner(Owner.newBuilder().build()) + .setCustomerEncryption(CustomerEncryption.newBuilder().build()) + .setCustomTime(Timestamp.newBuilder().build()) + .setSoftDeleteTime(Timestamp.newBuilder().build()) + .setHardDeleteTime(Timestamp.newBuilder().build()) + .setRetention(Object.Retention.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + String bucket = "bucket-1378203158"; + String object = "object-1023368385"; + long generation = 305703192; + + Object actualResponse = client.restoreObject(bucket, object, generation); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = 
mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + RestoreObjectRequest actualRequest = ((RestoreObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket, actualRequest.getBucket()); + Assert.assertEquals(object, actualRequest.getObject()); + Assert.assertEquals(generation, actualRequest.getGeneration()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void restoreObjectExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String bucket = "bucket-1378203158"; + String object = "object-1023368385"; + long generation = 305703192; + client.restoreObject(bucket, object, generation); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void cancelResumableWriteTest() throws Exception { + CancelResumableWriteResponse expectedResponse = + CancelResumableWriteResponse.newBuilder().build(); + mockStorage.addResponse(expectedResponse); + + String uploadId = "uploadId1563990780"; + + CancelResumableWriteResponse actualResponse = client.cancelResumableWrite(uploadId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CancelResumableWriteRequest actualRequest = + ((CancelResumableWriteRequest) actualRequests.get(0)); + + Assert.assertEquals(uploadId, actualRequest.getUploadId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void cancelResumableWriteExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String uploadId = "uploadId1563990780"; + client.cancelResumableWrite(uploadId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getObjectTest() throws Exception { + Object expectedResponse = + Object.newBuilder() + .setName("name3373707") + .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setEtag("etag3123477") + .setGeneration(305703192) + .setRestoreToken("restoreToken1638686731") + .setMetageneration(1048558813) + .setStorageClass("storageClass871353277") + .setSize(3530753) + .setContentEncoding("contentEncoding-160088852") + .setContentDisposition("contentDisposition1034341758") + .setCacheControl("cacheControl-1336592517") + .addAllAcl(new ArrayList()) + .setContentLanguage("contentLanguage810066673") + .setDeleteTime(Timestamp.newBuilder().build()) + .setFinalizeTime(Timestamp.newBuilder().build()) + .setContentType("contentType-389131437") + .setCreateTime(Timestamp.newBuilder().build()) + .setComponentCount(-485073075) + .setChecksums(ObjectChecksums.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + .toString()) + .setUpdateStorageClassTime(Timestamp.newBuilder().build()) + .setTemporaryHold(true) + .setRetentionExpireTime(Timestamp.newBuilder().build()) + .putAllMetadata(new HashMap()) + .setContexts(ObjectContexts.newBuilder().build()) + .setEventBasedHold(true) + .setOwner(Owner.newBuilder().build()) + .setCustomerEncryption(CustomerEncryption.newBuilder().build()) + .setCustomTime(Timestamp.newBuilder().build()) + .setSoftDeleteTime(Timestamp.newBuilder().build()) + .setHardDeleteTime(Timestamp.newBuilder().build()) + .setRetention(Object.Retention.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + String object = "object-1023368385"; + + Object actualResponse = client.getObject(bucket, object); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, 
actualRequests.size()); + GetObjectRequest actualRequest = ((GetObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket.toString(), actualRequest.getBucket()); + Assert.assertEquals(object, actualRequest.getObject()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getObjectExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + String object = "object-1023368385"; + client.getObject(bucket, object); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getObjectTest2() throws Exception { + Object expectedResponse = + Object.newBuilder() + .setName("name3373707") + .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setEtag("etag3123477") + .setGeneration(305703192) + .setRestoreToken("restoreToken1638686731") + .setMetageneration(1048558813) + .setStorageClass("storageClass871353277") + .setSize(3530753) + .setContentEncoding("contentEncoding-160088852") + .setContentDisposition("contentDisposition1034341758") + .setCacheControl("cacheControl-1336592517") + .addAllAcl(new ArrayList()) + .setContentLanguage("contentLanguage810066673") + .setDeleteTime(Timestamp.newBuilder().build()) + .setFinalizeTime(Timestamp.newBuilder().build()) + .setContentType("contentType-389131437") + .setCreateTime(Timestamp.newBuilder().build()) + .setComponentCount(-485073075) + .setChecksums(ObjectChecksums.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + .toString()) + .setUpdateStorageClassTime(Timestamp.newBuilder().build()) + 
.setTemporaryHold(true) + .setRetentionExpireTime(Timestamp.newBuilder().build()) + .putAllMetadata(new HashMap()) + .setContexts(ObjectContexts.newBuilder().build()) + .setEventBasedHold(true) + .setOwner(Owner.newBuilder().build()) + .setCustomerEncryption(CustomerEncryption.newBuilder().build()) + .setCustomTime(Timestamp.newBuilder().build()) + .setSoftDeleteTime(Timestamp.newBuilder().build()) + .setHardDeleteTime(Timestamp.newBuilder().build()) + .setRetention(Object.Retention.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + String bucket = "bucket-1378203158"; + String object = "object-1023368385"; + + Object actualResponse = client.getObject(bucket, object); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetObjectRequest actualRequest = ((GetObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket, actualRequest.getBucket()); + Assert.assertEquals(object, actualRequest.getObject()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getObjectExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String bucket = "bucket-1378203158"; + String object = "object-1023368385"; + client.getObject(bucket, object); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getObjectTest3() throws Exception { + Object expectedResponse = + Object.newBuilder() + .setName("name3373707") + .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setEtag("etag3123477") + .setGeneration(305703192) + .setRestoreToken("restoreToken1638686731") + .setMetageneration(1048558813) + .setStorageClass("storageClass871353277") + .setSize(3530753) + .setContentEncoding("contentEncoding-160088852") + .setContentDisposition("contentDisposition1034341758") + .setCacheControl("cacheControl-1336592517") + .addAllAcl(new ArrayList()) + .setContentLanguage("contentLanguage810066673") + .setDeleteTime(Timestamp.newBuilder().build()) + .setFinalizeTime(Timestamp.newBuilder().build()) + .setContentType("contentType-389131437") + .setCreateTime(Timestamp.newBuilder().build()) + .setComponentCount(-485073075) + .setChecksums(ObjectChecksums.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + .toString()) + .setUpdateStorageClassTime(Timestamp.newBuilder().build()) + .setTemporaryHold(true) + .setRetentionExpireTime(Timestamp.newBuilder().build()) + .putAllMetadata(new HashMap()) + .setContexts(ObjectContexts.newBuilder().build()) + .setEventBasedHold(true) + .setOwner(Owner.newBuilder().build()) + .setCustomerEncryption(CustomerEncryption.newBuilder().build()) + .setCustomTime(Timestamp.newBuilder().build()) + .setSoftDeleteTime(Timestamp.newBuilder().build()) + .setHardDeleteTime(Timestamp.newBuilder().build()) + .setRetention(Object.Retention.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + String object = "object-1023368385"; + long generation = 305703192; + + Object actualResponse = client.getObject(bucket, object, generation); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = 
mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetObjectRequest actualRequest = ((GetObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket.toString(), actualRequest.getBucket()); + Assert.assertEquals(object, actualRequest.getObject()); + Assert.assertEquals(generation, actualRequest.getGeneration()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getObjectExceptionTest3() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + String object = "object-1023368385"; + long generation = 305703192; + client.getObject(bucket, object, generation); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getObjectTest4() throws Exception { + Object expectedResponse = + Object.newBuilder() + .setName("name3373707") + .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setEtag("etag3123477") + .setGeneration(305703192) + .setRestoreToken("restoreToken1638686731") + .setMetageneration(1048558813) + .setStorageClass("storageClass871353277") + .setSize(3530753) + .setContentEncoding("contentEncoding-160088852") + .setContentDisposition("contentDisposition1034341758") + .setCacheControl("cacheControl-1336592517") + .addAllAcl(new ArrayList()) + .setContentLanguage("contentLanguage810066673") + .setDeleteTime(Timestamp.newBuilder().build()) + .setFinalizeTime(Timestamp.newBuilder().build()) + .setContentType("contentType-389131437") + .setCreateTime(Timestamp.newBuilder().build()) + .setComponentCount(-485073075) + .setChecksums(ObjectChecksums.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + .toString()) + .setUpdateStorageClassTime(Timestamp.newBuilder().build()) + .setTemporaryHold(true) + .setRetentionExpireTime(Timestamp.newBuilder().build()) + .putAllMetadata(new HashMap()) + .setContexts(ObjectContexts.newBuilder().build()) + .setEventBasedHold(true) + .setOwner(Owner.newBuilder().build()) + .setCustomerEncryption(CustomerEncryption.newBuilder().build()) + .setCustomTime(Timestamp.newBuilder().build()) + .setSoftDeleteTime(Timestamp.newBuilder().build()) + .setHardDeleteTime(Timestamp.newBuilder().build()) + .setRetention(Object.Retention.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + String bucket = "bucket-1378203158"; + String object = "object-1023368385"; + long generation = 305703192; + + Object actualResponse = client.getObject(bucket, object, generation); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + 
Assert.assertEquals(1, actualRequests.size()); + GetObjectRequest actualRequest = ((GetObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket, actualRequest.getBucket()); + Assert.assertEquals(object, actualRequest.getObject()); + Assert.assertEquals(generation, actualRequest.getGeneration()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getObjectExceptionTest4() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String bucket = "bucket-1378203158"; + String object = "object-1023368385"; + long generation = 305703192; + client.getObject(bucket, object, generation); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void readObjectTest() throws Exception { + ReadObjectResponse expectedResponse = + ReadObjectResponse.newBuilder() + .setChecksummedData(ChecksummedData.newBuilder().build()) + .setObjectChecksums(ObjectChecksums.newBuilder().build()) + .setContentRange(ContentRange.newBuilder().build()) + .setMetadata(Object.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + ReadObjectRequest request = + ReadObjectRequest.newBuilder() + .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setObject("object-1023368385") + .setGeneration(305703192) + .setReadOffset(-715377828) + .setReadLimit(-164298798) + .setIfGenerationMatch(-1086241088) + .setIfGenerationNotMatch(1475720404) + .setIfMetagenerationMatch(1043427781) + .setIfMetagenerationNotMatch(1025430873) + .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build()) + .setReadMask(FieldMask.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + 
ServerStreamingCallable callable = + client.readObjectCallable(); + callable.serverStreamingCall(request, responseObserver); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void readObjectExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + ReadObjectRequest request = + ReadObjectRequest.newBuilder() + .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setObject("object-1023368385") + .setGeneration(305703192) + .setReadOffset(-715377828) + .setReadLimit(-164298798) + .setIfGenerationMatch(-1086241088) + .setIfGenerationNotMatch(1475720404) + .setIfMetagenerationMatch(1043427781) + .setIfMetagenerationNotMatch(1025430873) + .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build()) + .setReadMask(FieldMask.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = + client.readObjectCallable(); + callable.serverStreamingCall(request, responseObserver); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void bidiReadObjectTest() throws Exception { + BidiReadObjectResponse expectedResponse = + BidiReadObjectResponse.newBuilder() + .addAllObjectDataRanges(new ArrayList()) + .setMetadata(Object.newBuilder().build()) + .setReadHandle(BidiReadHandle.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); 
+ BidiReadObjectRequest request = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec(BidiReadObjectSpec.newBuilder().build()) + .addAllReadRanges(new ArrayList()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.bidiReadObjectCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + requestObserver.onCompleted(); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void bidiReadObjectExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + BidiReadObjectRequest request = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec(BidiReadObjectSpec.newBuilder().build()) + .addAllReadRanges(new ArrayList()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.bidiReadObjectCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void updateObjectTest() throws Exception { + Object expectedResponse = + Object.newBuilder() + .setName("name3373707") + .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setEtag("etag3123477") + .setGeneration(305703192) + 
.setRestoreToken("restoreToken1638686731") + .setMetageneration(1048558813) + .setStorageClass("storageClass871353277") + .setSize(3530753) + .setContentEncoding("contentEncoding-160088852") + .setContentDisposition("contentDisposition1034341758") + .setCacheControl("cacheControl-1336592517") + .addAllAcl(new ArrayList()) + .setContentLanguage("contentLanguage810066673") + .setDeleteTime(Timestamp.newBuilder().build()) + .setFinalizeTime(Timestamp.newBuilder().build()) + .setContentType("contentType-389131437") + .setCreateTime(Timestamp.newBuilder().build()) + .setComponentCount(-485073075) + .setChecksums(ObjectChecksums.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + .toString()) + .setUpdateStorageClassTime(Timestamp.newBuilder().build()) + .setTemporaryHold(true) + .setRetentionExpireTime(Timestamp.newBuilder().build()) + .putAllMetadata(new HashMap()) + .setContexts(ObjectContexts.newBuilder().build()) + .setEventBasedHold(true) + .setOwner(Owner.newBuilder().build()) + .setCustomerEncryption(CustomerEncryption.newBuilder().build()) + .setCustomTime(Timestamp.newBuilder().build()) + .setSoftDeleteTime(Timestamp.newBuilder().build()) + .setHardDeleteTime(Timestamp.newBuilder().build()) + .setRetention(Object.Retention.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + Object object = Object.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + Object actualResponse = client.updateObject(object, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateObjectRequest actualRequest = ((UpdateObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(object, actualRequest.getObject()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + 
Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateObjectExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + Object object = Object.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateObject(object, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void writeObjectTest() throws Exception { + WriteObjectResponse expectedResponse = WriteObjectResponse.newBuilder().build(); + mockStorage.addResponse(expectedResponse); + WriteObjectRequest request = + WriteObjectRequest.newBuilder() + .setWriteOffset(-1559543565) + .setObjectChecksums(ObjectChecksums.newBuilder().build()) + .setFinishWrite(true) + .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ClientStreamingCallable callable = + client.writeObjectCallable(); + ApiStreamObserver requestObserver = + callable.clientStreamingCall(responseObserver); + + requestObserver.onNext(request); + requestObserver.onCompleted(); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void writeObjectExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + WriteObjectRequest request = + WriteObjectRequest.newBuilder() + .setWriteOffset(-1559543565) + .setObjectChecksums(ObjectChecksums.newBuilder().build()) + .setFinishWrite(true) + 
.setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ClientStreamingCallable callable = + client.writeObjectCallable(); + ApiStreamObserver requestObserver = + callable.clientStreamingCall(responseObserver); + + requestObserver.onNext(request); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void bidiWriteObjectTest() throws Exception { + BidiWriteObjectResponse expectedResponse = + BidiWriteObjectResponse.newBuilder() + .setWriteHandle(BidiWriteHandle.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + BidiWriteObjectRequest request = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(-1559543565) + .setObjectChecksums(ObjectChecksums.newBuilder().build()) + .setStateLookup(true) + .setFlush(true) + .setFinishWrite(true) + .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.bidiWriteObjectCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + requestObserver.onCompleted(); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void bidiWriteObjectExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + 
mockStorage.addException(exception); + BidiWriteObjectRequest request = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(-1559543565) + .setObjectChecksums(ObjectChecksums.newBuilder().build()) + .setStateLookup(true) + .setFlush(true) + .setFinishWrite(true) + .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.bidiWriteObjectCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void listObjectsTest() throws Exception { + Object responsesElement = Object.newBuilder().build(); + ListObjectsResponse expectedResponse = + ListObjectsResponse.newBuilder() + .setNextPageToken("") + .addAllObjects(Arrays.asList(responsesElement)) + .build(); + mockStorage.addResponse(expectedResponse); + + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + + ListObjectsPagedResponse pagedListResponse = client.listObjects(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getObjectsList().get(0), resources.get(0)); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListObjectsRequest actualRequest = ((ListObjectsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + 
channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listObjectsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + client.listObjects(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void listObjectsTest2() throws Exception { + Object responsesElement = Object.newBuilder().build(); + ListObjectsResponse expectedResponse = + ListObjectsResponse.newBuilder() + .setNextPageToken("") + .addAllObjects(Arrays.asList(responsesElement)) + .build(); + mockStorage.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListObjectsPagedResponse pagedListResponse = client.listObjects(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getObjectsList().get(0), resources.get(0)); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListObjectsRequest actualRequest = ((ListObjectsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listObjectsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String parent = "parent-995424086"; + client.listObjects(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { 
+ // Expected exception. + } + } + + @Test + public void rewriteObjectTest() throws Exception { + RewriteResponse expectedResponse = + RewriteResponse.newBuilder() + .setTotalBytesRewritten(-1109205579) + .setObjectSize(-1277221631) + .setDone(true) + .setRewriteToken("rewriteToken80654285") + .setResource(Object.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + RewriteObjectRequest request = + RewriteObjectRequest.newBuilder() + .setDestinationName("destinationName-1762755655") + .setDestinationBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setDestinationKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + .toString()) + .setDestination(Object.newBuilder().build()) + .setSourceBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setSourceObject("sourceObject1196439354") + .setSourceGeneration(1232209852) + .setRewriteToken("rewriteToken80654285") + .setDestinationPredefinedAcl("destinationPredefinedAcl1111125814") + .setIfGenerationMatch(-1086241088) + .setIfGenerationNotMatch(1475720404) + .setIfMetagenerationMatch(1043427781) + .setIfMetagenerationNotMatch(1025430873) + .setIfSourceGenerationMatch(-1427877280) + .setIfSourceGenerationNotMatch(1575612532) + .setIfSourceMetagenerationMatch(1143319909) + .setIfSourceMetagenerationNotMatch(1900822777) + .setMaxBytesRewrittenPerCall(1178170730) + .setCopySourceEncryptionAlgorithm("copySourceEncryptionAlgorithm-1524952548") + .setCopySourceEncryptionKeyBytes(ByteString.EMPTY) + .setCopySourceEncryptionKeySha256Bytes(ByteString.EMPTY) + .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build()) + .setObjectChecksums(ObjectChecksums.newBuilder().build()) + .build(); + + RewriteResponse actualResponse = client.rewriteObject(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + RewriteObjectRequest 
actualRequest = ((RewriteObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getDestinationName(), actualRequest.getDestinationName()); + Assert.assertEquals(request.getDestinationBucket(), actualRequest.getDestinationBucket()); + Assert.assertEquals(request.getDestinationKmsKey(), actualRequest.getDestinationKmsKey()); + Assert.assertEquals(request.getDestination(), actualRequest.getDestination()); + Assert.assertEquals(request.getSourceBucket(), actualRequest.getSourceBucket()); + Assert.assertEquals(request.getSourceObject(), actualRequest.getSourceObject()); + Assert.assertEquals(request.getSourceGeneration(), actualRequest.getSourceGeneration()); + Assert.assertEquals(request.getRewriteToken(), actualRequest.getRewriteToken()); + Assert.assertEquals( + request.getDestinationPredefinedAcl(), actualRequest.getDestinationPredefinedAcl()); + Assert.assertEquals(request.getIfGenerationMatch(), actualRequest.getIfGenerationMatch()); + Assert.assertEquals(request.getIfGenerationNotMatch(), actualRequest.getIfGenerationNotMatch()); + Assert.assertEquals( + request.getIfMetagenerationMatch(), actualRequest.getIfMetagenerationMatch()); + Assert.assertEquals( + request.getIfMetagenerationNotMatch(), actualRequest.getIfMetagenerationNotMatch()); + Assert.assertEquals( + request.getIfSourceGenerationMatch(), actualRequest.getIfSourceGenerationMatch()); + Assert.assertEquals( + request.getIfSourceGenerationNotMatch(), actualRequest.getIfSourceGenerationNotMatch()); + Assert.assertEquals( + request.getIfSourceMetagenerationMatch(), actualRequest.getIfSourceMetagenerationMatch()); + Assert.assertEquals( + request.getIfSourceMetagenerationNotMatch(), + actualRequest.getIfSourceMetagenerationNotMatch()); + Assert.assertEquals( + request.getMaxBytesRewrittenPerCall(), actualRequest.getMaxBytesRewrittenPerCall()); + Assert.assertEquals( + request.getCopySourceEncryptionAlgorithm(), + actualRequest.getCopySourceEncryptionAlgorithm()); + Assert.assertEquals( + 
request.getCopySourceEncryptionKeyBytes(), actualRequest.getCopySourceEncryptionKeyBytes()); + Assert.assertEquals( + request.getCopySourceEncryptionKeySha256Bytes(), + actualRequest.getCopySourceEncryptionKeySha256Bytes()); + Assert.assertEquals( + request.getCommonObjectRequestParams(), actualRequest.getCommonObjectRequestParams()); + Assert.assertEquals(request.getObjectChecksums(), actualRequest.getObjectChecksums()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void rewriteObjectExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + RewriteObjectRequest request = + RewriteObjectRequest.newBuilder() + .setDestinationName("destinationName-1762755655") + .setDestinationBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setDestinationKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + .toString()) + .setDestination(Object.newBuilder().build()) + .setSourceBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setSourceObject("sourceObject1196439354") + .setSourceGeneration(1232209852) + .setRewriteToken("rewriteToken80654285") + .setDestinationPredefinedAcl("destinationPredefinedAcl1111125814") + .setIfGenerationMatch(-1086241088) + .setIfGenerationNotMatch(1475720404) + .setIfMetagenerationMatch(1043427781) + .setIfMetagenerationNotMatch(1025430873) + .setIfSourceGenerationMatch(-1427877280) + .setIfSourceGenerationNotMatch(1575612532) + .setIfSourceMetagenerationMatch(1143319909) + .setIfSourceMetagenerationNotMatch(1900822777) + .setMaxBytesRewrittenPerCall(1178170730) + .setCopySourceEncryptionAlgorithm("copySourceEncryptionAlgorithm-1524952548") + .setCopySourceEncryptionKeyBytes(ByteString.EMPTY) + 
.setCopySourceEncryptionKeySha256Bytes(ByteString.EMPTY) + .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build()) + .setObjectChecksums(ObjectChecksums.newBuilder().build()) + .build(); + client.rewriteObject(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void startResumableWriteTest() throws Exception { + StartResumableWriteResponse expectedResponse = + StartResumableWriteResponse.newBuilder().setUploadId("uploadId1563990780").build(); + mockStorage.addResponse(expectedResponse); + + StartResumableWriteRequest request = + StartResumableWriteRequest.newBuilder() + .setWriteObjectSpec(WriteObjectSpec.newBuilder().build()) + .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build()) + .setObjectChecksums(ObjectChecksums.newBuilder().build()) + .build(); + + StartResumableWriteResponse actualResponse = client.startResumableWrite(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + StartResumableWriteRequest actualRequest = ((StartResumableWriteRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getWriteObjectSpec(), actualRequest.getWriteObjectSpec()); + Assert.assertEquals( + request.getCommonObjectRequestParams(), actualRequest.getCommonObjectRequestParams()); + Assert.assertEquals(request.getObjectChecksums(), actualRequest.getObjectChecksums()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void startResumableWriteExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + StartResumableWriteRequest request = + 
StartResumableWriteRequest.newBuilder() + .setWriteObjectSpec(WriteObjectSpec.newBuilder().build()) + .setCommonObjectRequestParams(CommonObjectRequestParams.newBuilder().build()) + .setObjectChecksums(ObjectChecksums.newBuilder().build()) + .build(); + client.startResumableWrite(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void queryWriteStatusTest() throws Exception { + QueryWriteStatusResponse expectedResponse = QueryWriteStatusResponse.newBuilder().build(); + mockStorage.addResponse(expectedResponse); + + String uploadId = "uploadId1563990780"; + + QueryWriteStatusResponse actualResponse = client.queryWriteStatus(uploadId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + QueryWriteStatusRequest actualRequest = ((QueryWriteStatusRequest) actualRequests.get(0)); + + Assert.assertEquals(uploadId, actualRequest.getUploadId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void queryWriteStatusExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String uploadId = "uploadId1563990780"; + client.queryWriteStatus(uploadId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void moveObjectTest() throws Exception { + Object expectedResponse = + Object.newBuilder() + .setName("name3373707") + .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setEtag("etag3123477") + .setGeneration(305703192) + .setRestoreToken("restoreToken1638686731") + .setMetageneration(1048558813) + .setStorageClass("storageClass871353277") + .setSize(3530753) + .setContentEncoding("contentEncoding-160088852") + .setContentDisposition("contentDisposition1034341758") + .setCacheControl("cacheControl-1336592517") + .addAllAcl(new ArrayList()) + .setContentLanguage("contentLanguage810066673") + .setDeleteTime(Timestamp.newBuilder().build()) + .setFinalizeTime(Timestamp.newBuilder().build()) + .setContentType("contentType-389131437") + .setCreateTime(Timestamp.newBuilder().build()) + .setComponentCount(-485073075) + .setChecksums(ObjectChecksums.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + .toString()) + .setUpdateStorageClassTime(Timestamp.newBuilder().build()) + .setTemporaryHold(true) + .setRetentionExpireTime(Timestamp.newBuilder().build()) + .putAllMetadata(new HashMap()) + .setContexts(ObjectContexts.newBuilder().build()) + .setEventBasedHold(true) + .setOwner(Owner.newBuilder().build()) + .setCustomerEncryption(CustomerEncryption.newBuilder().build()) + .setCustomTime(Timestamp.newBuilder().build()) + .setSoftDeleteTime(Timestamp.newBuilder().build()) + .setHardDeleteTime(Timestamp.newBuilder().build()) + .setRetention(Object.Retention.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + String sourceObject = "sourceObject1196439354"; + String destinationObject = "destinationObject-1761603347"; + + Object actualResponse = client.moveObject(bucket, sourceObject, destinationObject); + 
Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + MoveObjectRequest actualRequest = ((MoveObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket.toString(), actualRequest.getBucket()); + Assert.assertEquals(sourceObject, actualRequest.getSourceObject()); + Assert.assertEquals(destinationObject, actualRequest.getDestinationObject()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void moveObjectExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + BucketName bucket = BucketName.of("[PROJECT]", "[BUCKET]"); + String sourceObject = "sourceObject1196439354"; + String destinationObject = "destinationObject-1761603347"; + client.moveObject(bucket, sourceObject, destinationObject); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void moveObjectTest2() throws Exception { + Object expectedResponse = + Object.newBuilder() + .setName("name3373707") + .setBucket(BucketName.of("[PROJECT]", "[BUCKET]").toString()) + .setEtag("etag3123477") + .setGeneration(305703192) + .setRestoreToken("restoreToken1638686731") + .setMetageneration(1048558813) + .setStorageClass("storageClass871353277") + .setSize(3530753) + .setContentEncoding("contentEncoding-160088852") + .setContentDisposition("contentDisposition1034341758") + .setCacheControl("cacheControl-1336592517") + .addAllAcl(new ArrayList()) + .setContentLanguage("contentLanguage810066673") + .setDeleteTime(Timestamp.newBuilder().build()) + .setFinalizeTime(Timestamp.newBuilder().build()) + .setContentType("contentType-389131437") + .setCreateTime(Timestamp.newBuilder().build()) + .setComponentCount(-485073075) + .setChecksums(ObjectChecksums.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setKmsKey( + CryptoKeyName.of("[PROJECT]", "[LOCATION]", "[KEY_RING]", "[CRYPTO_KEY]") + .toString()) + .setUpdateStorageClassTime(Timestamp.newBuilder().build()) + .setTemporaryHold(true) + .setRetentionExpireTime(Timestamp.newBuilder().build()) + .putAllMetadata(new HashMap()) + .setContexts(ObjectContexts.newBuilder().build()) + .setEventBasedHold(true) + .setOwner(Owner.newBuilder().build()) + .setCustomerEncryption(CustomerEncryption.newBuilder().build()) + .setCustomTime(Timestamp.newBuilder().build()) + .setSoftDeleteTime(Timestamp.newBuilder().build()) + .setHardDeleteTime(Timestamp.newBuilder().build()) + .setRetention(Object.Retention.newBuilder().build()) + .build(); + mockStorage.addResponse(expectedResponse); + + String bucket = "bucket-1378203158"; + String sourceObject = "sourceObject1196439354"; + String destinationObject = "destinationObject-1761603347"; + + Object actualResponse = client.moveObject(bucket, sourceObject, destinationObject); + Assert.assertEquals(expectedResponse, actualResponse); 
+ + List actualRequests = mockStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + MoveObjectRequest actualRequest = ((MoveObjectRequest) actualRequests.get(0)); + + Assert.assertEquals(bucket, actualRequest.getBucket()); + Assert.assertEquals(sourceObject, actualRequest.getSourceObject()); + Assert.assertEquals(destinationObject, actualRequest.getDestinationObject()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void moveObjectExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorage.addException(exception); + + try { + String bucket = "bucket-1378203158"; + String sourceObject = "sourceObject1196439354"; + String destinationObject = "destinationObject-1761603347"; + client.moveObject(bucket, sourceObject, destinationObject); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } +} diff --git a/java-storage/google-cloud-storage-bom/pom.xml b/java-storage/google-cloud-storage-bom/pom.xml new file mode 100644 index 000000000000..45997b5652bc --- /dev/null +++ b/java-storage/google-cloud-storage-bom/pom.xml @@ -0,0 +1,118 @@ + + + + + 4.0.0 + com.google.cloud + google-cloud-storage-bom + 2.64.1-SNAPSHOT + pom + + com.google.cloud + google-cloud-pom-parent + 1.82.0-SNAPSHOT + ../../google-cloud-pom-parent/pom.xml + + + Google Cloud Storage BOM + https://github.com/googleapis/google-cloud-java + + BOM for Google Cloud Storage + + + + Google LLC + + + + + BenWhitehead + Ben Whitehead + benwhitehead [at] google.com + Google LLC + + Developer + + + + + + scm:git:https://github.com/googleapis/google-cloud-java.git + scm:git:git@github.com:googleapis/google-cloud-java.git + https://github.com/googleapis/google-cloud-java + + + + + The Apache Software License, Version 2.0 + http://www.apache.org/licenses/LICENSE-2.0.txt + repo + + + + + + + com.google.cloud + google-cloud-storage + 2.64.1-SNAPSHOT + + + com.google.api.grpc + gapic-google-cloud-storage-v2 + 2.64.1-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-storage-v2 + 2.64.1-SNAPSHOT + + + com.google.api.grpc + proto-google-cloud-storage-v2 + 2.64.1-SNAPSHOT + + + com.google.cloud + google-cloud-storage-control + 2.64.1-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-storage-control-v2 + 2.64.1-SNAPSHOT + + + com.google.api.grpc + proto-google-cloud-storage-control-v2 + 2.64.1-SNAPSHOT + + + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + + diff --git a/java-storage/google-cloud-storage-control/pom.xml b/java-storage/google-cloud-storage-control/pom.xml new file mode 100644 index 000000000000..d157fda38a8e --- /dev/null +++ b/java-storage/google-cloud-storage-control/pom.xml @@ -0,0 +1,121 @@ + + + 4.0.0 + com.google.cloud + google-cloud-storage-control + 2.64.1-SNAPSHOT + google-cloud-storage-control + GRPC library for 
google-cloud-storage-control + + com.google.cloud + google-cloud-storage-parent + 2.64.1-SNAPSHOT + + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-cloud-storage-control-v2 + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api.grpc + proto-google-iam-v1 + + + com.google.api + api-common + + + com.google.api + gax + + + com.google.api + gax + test + testlib + + + + com.google.api + gax-grpc + + + com.google.api + gax-httpjson + + + com.google.api + gax-httpjson + test + testlib + + + com.google.guava + guava + + + junit + junit + + + + com.google.api + gax-grpc + testlib + test + + + com.google.api.grpc + grpc-google-cloud-storage-control-v2 + + + com.google.http-client + google-http-client + test + + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + + 8 + 8 + UTF-8 + + + diff --git a/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/StorageControlClient.java b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/StorageControlClient.java new file mode 100644 index 000000000000..cd59599f9ffe --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/StorageControlClient.java @@ -0,0 +1,4579 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.control.v2; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.httpjson.longrunning.OperationsClient; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.AbstractFixedSizeCollection; +import com.google.api.gax.paging.AbstractPage; +import com.google.api.gax.paging.AbstractPagedListResponse; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.api.resourcenames.ResourceName; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.storage.control.v2.stub.StorageControlStub; +import com.google.storage.control.v2.stub.StorageControlStubSettings; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: StorageControl service includes selected control plane operations. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+ *   BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]");
+ *   Folder folder = Folder.newBuilder().build();
+ *   String folderId = "folderId294109737";
+ *   Folder response = storageControlClient.createFolder(parent, folder, folderId);
+ * }
+ * }
+ * + *

Note: close() needs to be called on the StorageControlClient object to clean up resources such + * as threads. In the example above, try-with-resources is used, which automatically calls close(). + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Methods
MethodDescriptionMethod Variants

CreateFolder

Creates a new folder. This operation is only applicable to a hierarchical namespace enabled bucket.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createFolder(CreateFolderRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • createFolder(BucketName parent, Folder folder, String folderId) + *

  • createFolder(String parent, Folder folder, String folderId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createFolderCallable() + *

+ *

DeleteFolder

Permanently deletes an empty folder. This operation is only applicable to a hierarchical namespace enabled bucket.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteFolder(DeleteFolderRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • deleteFolder(FolderName name) + *

  • deleteFolder(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteFolderCallable() + *

+ *

GetFolder

Returns metadata for the specified folder. This operation is only applicable to a hierarchical namespace enabled bucket.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getFolder(GetFolderRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getFolder(FolderName name) + *

  • getFolder(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getFolderCallable() + *

+ *

ListFolders

Retrieves a list of folders. This operation is only applicable to a hierarchical namespace enabled bucket.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listFolders(ListFoldersRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listFolders(BucketName parent) + *

  • listFolders(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listFoldersPagedCallable() + *

  • listFoldersCallable() + *

+ *

RenameFolder

Renames a source folder to a destination folder. This operation is only applicable to a hierarchical namespace enabled bucket. During a rename, the source and destination folders are locked until the long running operation completes.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • renameFolderAsync(RenameFolderRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • renameFolderAsync(FolderName name, String destinationFolderId) + *

  • renameFolderAsync(String name, String destinationFolderId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • renameFolderOperationCallable() + *

  • renameFolderCallable() + *

+ *

DeleteFolderRecursive

Deletes a folder recursively. This operation is only applicable to a hierarchical namespace enabled bucket.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteFolderRecursiveAsync(DeleteFolderRecursiveRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • deleteFolderRecursiveAsync(FolderName name) + *

  • deleteFolderRecursiveAsync(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteFolderRecursiveOperationCallable() + *

  • deleteFolderRecursiveCallable() + *

+ *

GetStorageLayout

Returns the storage layout configuration for a given bucket.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getStorageLayout(GetStorageLayoutRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getStorageLayout(StorageLayoutName name) + *

  • getStorageLayout(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getStorageLayoutCallable() + *

+ *

CreateManagedFolder

Creates a new managed folder.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createManagedFolder(CreateManagedFolderRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • createManagedFolder(BucketName parent, ManagedFolder managedFolder, String managedFolderId) + *

  • createManagedFolder(String parent, ManagedFolder managedFolder, String managedFolderId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createManagedFolderCallable() + *

+ *

DeleteManagedFolder

Permanently deletes an empty managed folder.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteManagedFolder(DeleteManagedFolderRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • deleteManagedFolder(ManagedFolderName name) + *

  • deleteManagedFolder(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteManagedFolderCallable() + *

+ *

GetManagedFolder

Returns metadata for the specified managed folder.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getManagedFolder(GetManagedFolderRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getManagedFolder(ManagedFolderName name) + *

  • getManagedFolder(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getManagedFolderCallable() + *

+ *

ListManagedFolders

Retrieves a list of managed folders for a given bucket.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listManagedFolders(ListManagedFoldersRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listManagedFolders(BucketName parent) + *

  • listManagedFolders(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listManagedFoldersPagedCallable() + *

  • listManagedFoldersCallable() + *

+ *

CreateAnywhereCache

Creates an Anywhere Cache instance.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createAnywhereCacheAsync(CreateAnywhereCacheRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • createAnywhereCacheAsync(BucketName parent, AnywhereCache anywhereCache) + *

  • createAnywhereCacheAsync(String parent, AnywhereCache anywhereCache) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createAnywhereCacheOperationCallable() + *

  • createAnywhereCacheCallable() + *

+ *

UpdateAnywhereCache

Updates an Anywhere Cache instance. Mutable fields include `ttl` and `admission_policy`.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateAnywhereCacheAsync(UpdateAnywhereCacheRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • updateAnywhereCacheAsync(AnywhereCache anywhereCache, FieldMask updateMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateAnywhereCacheOperationCallable() + *

  • updateAnywhereCacheCallable() + *

+ *

DisableAnywhereCache

Disables an Anywhere Cache instance. A disabled instance is read-only. The disablement could be revoked by calling ResumeAnywhereCache. The cache instance will be deleted automatically if it remains in the disabled state for at least one hour.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • disableAnywhereCache(DisableAnywhereCacheRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • disableAnywhereCache(AnywhereCacheName name) + *

  • disableAnywhereCache(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • disableAnywhereCacheCallable() + *

+ *

PauseAnywhereCache

Pauses an Anywhere Cache instance.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • pauseAnywhereCache(PauseAnywhereCacheRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • pauseAnywhereCache(AnywhereCacheName name) + *

  • pauseAnywhereCache(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • pauseAnywhereCacheCallable() + *

+ *

ResumeAnywhereCache

Resumes a disabled or paused Anywhere Cache instance.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • resumeAnywhereCache(ResumeAnywhereCacheRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • resumeAnywhereCache(AnywhereCacheName name) + *

  • resumeAnywhereCache(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • resumeAnywhereCacheCallable() + *

+ *

GetAnywhereCache

Gets an Anywhere Cache instance.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getAnywhereCache(GetAnywhereCacheRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getAnywhereCache(AnywhereCacheName name) + *

  • getAnywhereCache(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getAnywhereCacheCallable() + *

+ *

ListAnywhereCaches

Lists Anywhere Cache instances for a given bucket.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listAnywhereCaches(ListAnywhereCachesRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listAnywhereCaches(BucketName parent) + *

  • listAnywhereCaches(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listAnywhereCachesPagedCallable() + *

  • listAnywhereCachesCallable() + *

+ *

GetProjectIntelligenceConfig

Returns the Project scoped singleton IntelligenceConfig resource.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getProjectIntelligenceConfig(GetProjectIntelligenceConfigRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getProjectIntelligenceConfig(IntelligenceConfigName name) + *

  • getProjectIntelligenceConfig(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getProjectIntelligenceConfigCallable() + *

+ *

UpdateProjectIntelligenceConfig

Updates the Project scoped singleton IntelligenceConfig resource.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateProjectIntelligenceConfig(UpdateProjectIntelligenceConfigRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • updateProjectIntelligenceConfig(IntelligenceConfig intelligenceConfig, FieldMask updateMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateProjectIntelligenceConfigCallable() + *

+ *

GetFolderIntelligenceConfig

Returns the Folder scoped singleton IntelligenceConfig resource.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getFolderIntelligenceConfig(GetFolderIntelligenceConfigRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getFolderIntelligenceConfig(IntelligenceConfigName name) + *

  • getFolderIntelligenceConfig(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getFolderIntelligenceConfigCallable() + *

+ *

UpdateFolderIntelligenceConfig

Updates the Folder scoped singleton IntelligenceConfig resource.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateFolderIntelligenceConfig(UpdateFolderIntelligenceConfigRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • updateFolderIntelligenceConfig(IntelligenceConfig intelligenceConfig, FieldMask updateMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateFolderIntelligenceConfigCallable() + *

+ *

GetOrganizationIntelligenceConfig

Returns the Organization scoped singleton IntelligenceConfig resource.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getOrganizationIntelligenceConfig(GetOrganizationIntelligenceConfigRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getOrganizationIntelligenceConfig(IntelligenceConfigName name) + *

  • getOrganizationIntelligenceConfig(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getOrganizationIntelligenceConfigCallable() + *

+ *

UpdateOrganizationIntelligenceConfig

Updates the Organization scoped singleton IntelligenceConfig resource.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateOrganizationIntelligenceConfig(UpdateOrganizationIntelligenceConfigRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • updateOrganizationIntelligenceConfig(IntelligenceConfig intelligenceConfig, FieldMask updateMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateOrganizationIntelligenceConfigCallable() + *

+ *

GetIamPolicy

Gets the IAM policy for a specified bucket. The `resource` field in the request should be `projects/_/buckets/{bucket}` for a bucket, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getIamPolicy(GetIamPolicyRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getIamPolicy(ResourceName resource) + *

  • getIamPolicy(String resource) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getIamPolicyCallable() + *

+ *

SetIamPolicy

Updates an IAM policy for the specified bucket. The `resource` field in the request should be `projects/_/buckets/{bucket}` for a bucket, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • setIamPolicy(SetIamPolicyRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • setIamPolicy(ResourceName resource, Policy policy) + *

  • setIamPolicy(String resource, Policy policy) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • setIamPolicyCallable() + *

+ *

TestIamPermissions

Tests a set of permissions on the given bucket, object, or managed folder to see which, if any, are held by the caller. The `resource` field in the request should be `projects/_/buckets/{bucket}` for a bucket, `projects/_/buckets/{bucket}/objects/{object}` for an object, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • testIamPermissions(TestIamPermissionsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • testIamPermissions(ResourceName resource, List<String> permissions) + *

  • testIamPermissions(String resource, List<String> permissions) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • testIamPermissionsCallable() + *

+ *
 *
 * <p>See the individual methods for example code.
 *

 * <p>Many parameters require resource names to be formatted in a particular way. To assist with
 * these names, this class includes a format method for each type of name, and additionally a parse
 * method to extract the individual identifiers contained within names that are returned.
 *

 * <p>This class can be customized by passing in a custom instance of StorageControlSettings to
 * create(). For example:
 *

 * <p>To customize credentials:
 *
 * <pre>{@code
 * // This snippet has been automatically generated and should be regarded as a code template only.
 * // It will require modifications to work:
 * // - It may require correct/in-range values for request initialization.
 * // - It may require specifying regional endpoints when creating the service client as shown in
 * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
 * StorageControlSettings storageControlSettings =
 *     StorageControlSettings.newBuilder()
 *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
 *         .build();
 * StorageControlClient storageControlClient = StorageControlClient.create(storageControlSettings);
 * }</pre>
 *

 * <p>To customize the endpoint:
 *
 * <pre>{@code
 * // This snippet has been automatically generated and should be regarded as a code template only.
 * // It will require modifications to work:
 * // - It may require correct/in-range values for request initialization.
 * // - It may require specifying regional endpoints when creating the service client as shown in
 * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
 * StorageControlSettings storageControlSettings =
 *     StorageControlSettings.newBuilder().setEndpoint(myEndpoint).build();
 * StorageControlClient storageControlClient = StorageControlClient.create(storageControlSettings);
 * }</pre>
 *

 * <p>To use REST (HTTP1.1/JSON) transport (instead of gRPC) for sending and receiving requests
 * over the wire:
 *
 * <pre>{@code
 * // This snippet has been automatically generated and should be regarded as a code template only.
 * // It will require modifications to work:
 * // - It may require correct/in-range values for request initialization.
 * // - It may require specifying regional endpoints when creating the service client as shown in
 * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
 * StorageControlSettings storageControlSettings =
 *     StorageControlSettings.newHttpJsonBuilder().build();
 * StorageControlClient storageControlClient = StorageControlClient.create(storageControlSettings);
 * }</pre>
 *

 * <p>Please refer to the GitHub repository's samples for more quickstart code snippets.
 */
@Generated("by gapic-generator-java")
public class StorageControlClient implements BackgroundResource {
  // Settings used to construct this client; null when the client was built from a raw stub.
  private final StorageControlSettings settings;
  // Transport-layer stub that performs the actual RPCs.
  private final StorageControlStub stub;
  // Long-running-operations client over the HTTP/JSON transport.
  private final OperationsClient httpJsonOperationsClient;
  // Long-running-operations client over the gRPC transport.
  private final com.google.longrunning.OperationsClient operationsClient;

  /** Constructs an instance of StorageControlClient with default settings. */
  public static final StorageControlClient create() throws IOException {
    return create(StorageControlSettings.newBuilder().build());
  }

  /**
   * Constructs an instance of StorageControlClient, using the given settings. The channels are
   * created based on the settings passed in, or defaults for any settings that are not set.
   */
  public static final StorageControlClient create(StorageControlSettings settings)
      throws IOException {
    return new StorageControlClient(settings);
  }

  /**
   * Constructs an instance of StorageControlClient, using the given stub for making calls. This is
   * for advanced usage - prefer using create(StorageControlSettings).
   */
  public static final StorageControlClient create(StorageControlStub stub) {
    return new StorageControlClient(stub);
  }

  /**
   * Constructs an instance of StorageControlClient, using the given settings. This is protected so
   * that it is easy to make a subclass, but otherwise, the static factory methods should be
   * preferred.
   */
  protected StorageControlClient(StorageControlSettings settings) throws IOException {
    this.settings = settings;
    this.stub = ((StorageControlStubSettings) settings.getStubSettings()).createStub();
    this.operationsClient =
        com.google.longrunning.OperationsClient.create(this.stub.getOperationsStub());
    this.httpJsonOperationsClient = OperationsClient.create(this.stub.getHttpJsonOperationsStub());
  }

  protected StorageControlClient(StorageControlStub stub) {
    this.settings = null;
    this.stub = stub;
    this.operationsClient =
        com.google.longrunning.OperationsClient.create(this.stub.getOperationsStub());
    this.httpJsonOperationsClient = OperationsClient.create(this.stub.getHttpJsonOperationsStub());
  }

  public final StorageControlSettings getSettings() {
    return settings;
  }

  public StorageControlStub getStub() {
    return stub;
  }

  /**
   * Returns the OperationsClient that can be used to query the status of a long-running operation
   * returned by another API method call.
   */
  public final com.google.longrunning.OperationsClient getOperationsClient() {
    return operationsClient;
  }

  /**
   * Returns the OperationsClient that can be used to query the status of a long-running operation
   * returned by another API method call.
   */
  @BetaApi
  public final OperationsClient getHttpJsonOperationsClient() {
    return httpJsonOperationsClient;
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Creates a new folder. This operation is only applicable to a hierarchical namespace enabled
   * bucket.
   *

   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
   *   BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]");
   *   Folder folder = Folder.newBuilder().build();
   *   String folderId = "folderId294109737";
   *   Folder response = storageControlClient.createFolder(parent, folder, folderId);
   * }
   * }</pre>
   *
   * @param parent Required. Name of the bucket in which the folder will reside. The bucket must be
   *     a hierarchical namespace enabled bucket.
   * @param folder Required. Properties of the new folder being created. The bucket and name of the
   *     folder are specified in the parent and folder_id fields, respectively. Populating those
   *     fields in `folder` will result in an error.
   * @param folderId Required. The full name of a folder, including all its parent folders. Folders
   *     use single '/' characters as a delimiter. The folder_id must end with a slash. For example,
   *     the folder_id of "books/biographies/" would create a new "biographies/" folder under the
   *     "books/" folder.
   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
   */
  public final Folder createFolder(BucketName parent, Folder folder, String folderId) {
    CreateFolderRequest request =
        CreateFolderRequest.newBuilder()
            .setParent(parent == null ? null : parent.toString())
            .setFolder(folder)
            .setFolderId(folderId)
            .build();
    return createFolder(request);
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Creates a new folder. This operation is only applicable to a hierarchical namespace enabled
   * bucket.
   *

   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
   *   String parent = BucketName.of("[PROJECT]", "[BUCKET]").toString();
   *   Folder folder = Folder.newBuilder().build();
   *   String folderId = "folderId294109737";
   *   Folder response = storageControlClient.createFolder(parent, folder, folderId);
   * }
   * }</pre>
   *
   * @param parent Required. Name of the bucket in which the folder will reside. The bucket must be
   *     a hierarchical namespace enabled bucket.
   * @param folder Required. Properties of the new folder being created. The bucket and name of the
   *     folder are specified in the parent and folder_id fields, respectively. Populating those
   *     fields in `folder` will result in an error.
   * @param folderId Required. The full name of a folder, including all its parent folders. Folders
   *     use single '/' characters as a delimiter. The folder_id must end with a slash. For example,
   *     the folder_id of "books/biographies/" would create a new "biographies/" folder under the
   *     "books/" folder.
   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
   */
  public final Folder createFolder(String parent, Folder folder, String folderId) {
    CreateFolderRequest request =
        CreateFolderRequest.newBuilder()
            .setParent(parent)
            .setFolder(folder)
            .setFolderId(folderId)
            .build();
    return createFolder(request);
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Creates a new folder. This operation is only applicable to a hierarchical namespace enabled
   * bucket.
   *

   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
   *   CreateFolderRequest request =
   *       CreateFolderRequest.newBuilder()
   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
   *           .setFolder(Folder.newBuilder().build())
   *           .setFolderId("folderId294109737")
   *           .setRecursive(true)
   *           .setRequestId("requestId693933066")
   *           .build();
   *   Folder response = storageControlClient.createFolder(request);
   * }
   * }</pre>
   *
   * @param request The request object containing all of the parameters for the API call.
   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
   */
  public final Folder createFolder(CreateFolderRequest request) {
    // Delegates to the transport-level callable; blocks until the RPC completes.
    return createFolderCallable().call(request);
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Creates a new folder. This operation is only applicable to a hierarchical namespace enabled
   * bucket.
   *

   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
   *   CreateFolderRequest request =
   *       CreateFolderRequest.newBuilder()
   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
   *           .setFolder(Folder.newBuilder().build())
   *           .setFolderId("folderId294109737")
   *           .setRecursive(true)
   *           .setRequestId("requestId693933066")
   *           .build();
   *   ApiFuture<Folder> future = storageControlClient.createFolderCallable().futureCall(request);
   *   // Do something.
   *   Folder response = future.get();
   * }
   * }</pre>
   */
  public final UnaryCallable<CreateFolderRequest, Folder> createFolderCallable() {
    return stub.createFolderCallable();
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Permanently deletes an empty folder. This operation is only applicable to a hierarchical
   * namespace enabled bucket.
   *

   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
   *   FolderName name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]");
   *   storageControlClient.deleteFolder(name);
   * }
   * }</pre>
   *
   * @param name Required. Name of the folder. Format:
   *     `projects/{project}/buckets/{bucket}/folders/{folder}`
   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
   */
  public final void deleteFolder(FolderName name) {
    DeleteFolderRequest request =
        DeleteFolderRequest.newBuilder().setName(name == null ? null : name.toString()).build();
    deleteFolder(request);
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Permanently deletes an empty folder. This operation is only applicable to a hierarchical
   * namespace enabled bucket.
   *

   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
   *   String name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString();
   *   storageControlClient.deleteFolder(name);
   * }
   * }</pre>
   *
   * @param name Required. Name of the folder. Format:
   *     `projects/{project}/buckets/{bucket}/folders/{folder}`
   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
   */
  public final void deleteFolder(String name) {
    DeleteFolderRequest request = DeleteFolderRequest.newBuilder().setName(name).build();
    deleteFolder(request);
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Permanently deletes an empty folder. This operation is only applicable to a hierarchical
   * namespace enabled bucket.
   *

   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
   *   DeleteFolderRequest request =
   *       DeleteFolderRequest.newBuilder()
   *           .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString())
   *           .setIfMetagenerationMatch(1043427781)
   *           .setIfMetagenerationNotMatch(1025430873)
   *           .setRequestId("requestId693933066")
   *           .build();
   *   storageControlClient.deleteFolder(request);
   * }
   * }</pre>
   *
   * @param request The request object containing all of the parameters for the API call.
   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
   */
  public final void deleteFolder(DeleteFolderRequest request) {
    // Delegates to the transport-level callable; blocks until the RPC completes.
    deleteFolderCallable().call(request);
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Permanently deletes an empty folder. This operation is only applicable to a hierarchical
   * namespace enabled bucket.
   *

   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
   *   DeleteFolderRequest request =
   *       DeleteFolderRequest.newBuilder()
   *           .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString())
   *           .setIfMetagenerationMatch(1043427781)
   *           .setIfMetagenerationNotMatch(1025430873)
   *           .setRequestId("requestId693933066")
   *           .build();
   *   ApiFuture<Empty> future = storageControlClient.deleteFolderCallable().futureCall(request);
   *   // Do something.
   *   future.get();
   * }
   * }</pre>
   */
  public final UnaryCallable<DeleteFolderRequest, Empty> deleteFolderCallable() {
    return stub.deleteFolderCallable();
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Returns metadata for the specified folder. This operation is only applicable to a hierarchical
   * namespace enabled bucket.
   *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   FolderName name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]");
+   *   Folder response = storageControlClient.getFolder(name);
+   * }
+   * }
+ * + * @param name Required. Name of the folder. Format: + * `projects/{project}/buckets/{bucket}/folders/{folder}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Folder getFolder(FolderName name) { + GetFolderRequest request = + GetFolderRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getFolder(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns metadata for the specified folder. This operation is only applicable to a hierarchical + * namespace enabled bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString();
+   *   Folder response = storageControlClient.getFolder(name);
+   * }
+   * }
+ * + * @param name Required. Name of the folder. Format: + * `projects/{project}/buckets/{bucket}/folders/{folder}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Folder getFolder(String name) { + GetFolderRequest request = GetFolderRequest.newBuilder().setName(name).build(); + return getFolder(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns metadata for the specified folder. This operation is only applicable to a hierarchical + * namespace enabled bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetFolderRequest request =
+   *       GetFolderRequest.newBuilder()
+   *           .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   Folder response = storageControlClient.getFolder(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Folder getFolder(GetFolderRequest request) { + return getFolderCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns metadata for the specified folder. This operation is only applicable to a hierarchical + * namespace enabled bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetFolderRequest request =
+   *       GetFolderRequest.newBuilder()
+   *           .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future = storageControlClient.getFolderCallable().futureCall(request);
+   *   // Do something.
+   *   Folder response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getFolderCallable() { + return stub.getFolderCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of folders. This operation is only applicable to a hierarchical namespace + * enabled bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   for (Folder element : storageControlClient.listFolders(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. Name of the bucket in which to look for folders. The bucket must be a + * hierarchical namespace enabled bucket. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListFoldersPagedResponse listFolders(BucketName parent) { + ListFoldersRequest request = + ListFoldersRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listFolders(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of folders. This operation is only applicable to a hierarchical namespace + * enabled bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String parent = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   for (Folder element : storageControlClient.listFolders(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. Name of the bucket in which to look for folders. The bucket must be a + * hierarchical namespace enabled bucket. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListFoldersPagedResponse listFolders(String parent) { + ListFoldersRequest request = ListFoldersRequest.newBuilder().setParent(parent).build(); + return listFolders(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of folders. This operation is only applicable to a hierarchical namespace + * enabled bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ListFoldersRequest request =
+   *       ListFoldersRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setPrefix("prefix-980110702")
+   *           .setDelimiter("delimiter-250518009")
+   *           .setLexicographicStart("lexicographicStart-2093413008")
+   *           .setLexicographicEnd("lexicographicEnd1646968169")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   for (Folder element : storageControlClient.listFolders(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListFoldersPagedResponse listFolders(ListFoldersRequest request) { + return listFoldersPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of folders. This operation is only applicable to a hierarchical namespace + * enabled bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ListFoldersRequest request =
+   *       ListFoldersRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setPrefix("prefix-980110702")
+   *           .setDelimiter("delimiter-250518009")
+   *           .setLexicographicStart("lexicographicStart-2093413008")
+   *           .setLexicographicEnd("lexicographicEnd1646968169")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.listFoldersPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Folder element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listFoldersPagedCallable() { + return stub.listFoldersPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of folders. This operation is only applicable to a hierarchical namespace + * enabled bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ListFoldersRequest request =
+   *       ListFoldersRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setPrefix("prefix-980110702")
+   *           .setDelimiter("delimiter-250518009")
+   *           .setLexicographicStart("lexicographicStart-2093413008")
+   *           .setLexicographicEnd("lexicographicEnd1646968169")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   while (true) {
+   *     ListFoldersResponse response = storageControlClient.listFoldersCallable().call(request);
+   *     for (Folder element : response.getFoldersList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable listFoldersCallable() { + return stub.listFoldersCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Renames a source folder to a destination folder. This operation is only applicable to a + * hierarchical namespace enabled bucket. During a rename, the source and destination folders are + * locked until the long running operation completes. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   FolderName name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]");
+   *   String destinationFolderId = "destinationFolderId-480084905";
+   *   Folder response = storageControlClient.renameFolderAsync(name, destinationFolderId).get();
+   * }
+   * }
+ * + * @param name Required. Name of the source folder being renamed. Format: + * `projects/{project}/buckets/{bucket}/folders/{folder}` + * @param destinationFolderId Required. The destination folder ID, e.g. `foo/bar/`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture renameFolderAsync( + FolderName name, String destinationFolderId) { + RenameFolderRequest request = + RenameFolderRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .setDestinationFolderId(destinationFolderId) + .build(); + return renameFolderAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Renames a source folder to a destination folder. This operation is only applicable to a + * hierarchical namespace enabled bucket. During a rename, the source and destination folders are + * locked until the long running operation completes. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString();
+   *   String destinationFolderId = "destinationFolderId-480084905";
+   *   Folder response = storageControlClient.renameFolderAsync(name, destinationFolderId).get();
+   * }
+   * }
+ * + * @param name Required. Name of the source folder being renamed. Format: + * `projects/{project}/buckets/{bucket}/folders/{folder}` + * @param destinationFolderId Required. The destination folder ID, e.g. `foo/bar/`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture renameFolderAsync( + String name, String destinationFolderId) { + RenameFolderRequest request = + RenameFolderRequest.newBuilder() + .setName(name) + .setDestinationFolderId(destinationFolderId) + .build(); + return renameFolderAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Renames a source folder to a destination folder. This operation is only applicable to a + * hierarchical namespace enabled bucket. During a rename, the source and destination folders are + * locked until the long running operation completes. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   RenameFolderRequest request =
+   *       RenameFolderRequest.newBuilder()
+   *           .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString())
+   *           .setDestinationFolderId("destinationFolderId-480084905")
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   Folder response = storageControlClient.renameFolderAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture renameFolderAsync( + RenameFolderRequest request) { + return renameFolderOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Renames a source folder to a destination folder. This operation is only applicable to a + * hierarchical namespace enabled bucket. During a rename, the source and destination folders are + * locked until the long running operation completes. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   RenameFolderRequest request =
+   *       RenameFolderRequest.newBuilder()
+   *           .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString())
+   *           .setDestinationFolderId("destinationFolderId-480084905")
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   OperationFuture future =
+   *       storageControlClient.renameFolderOperationCallable().futureCall(request);
+   *   // Do something.
+   *   Folder response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable + renameFolderOperationCallable() { + return stub.renameFolderOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Renames a source folder to a destination folder. This operation is only applicable to a + * hierarchical namespace enabled bucket. During a rename, the source and destination folders are + * locked until the long running operation completes. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   RenameFolderRequest request =
+   *       RenameFolderRequest.newBuilder()
+   *           .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString())
+   *           .setDestinationFolderId("destinationFolderId-480084905")
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future = storageControlClient.renameFolderCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable renameFolderCallable() { + return stub.renameFolderCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a folder recursively. This operation is only applicable to a hierarchical namespace + * enabled bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   FolderName name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]");
+   *   storageControlClient.deleteFolderRecursiveAsync(name).get();
+   * }
+   * }
+ * + * @param name Required. Name of the folder being deleted, however all of its contents will be + * deleted too. Format: `projects/{project}/buckets/{bucket}/folders/{folder}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture deleteFolderRecursiveAsync( + FolderName name) { + DeleteFolderRecursiveRequest request = + DeleteFolderRecursiveRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return deleteFolderRecursiveAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a folder recursively. This operation is only applicable to a hierarchical namespace + * enabled bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString();
+   *   storageControlClient.deleteFolderRecursiveAsync(name).get();
+   * }
+   * }
+ * + * @param name Required. Name of the folder being deleted, however all of its contents will be + * deleted too. Format: `projects/{project}/buckets/{bucket}/folders/{folder}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture deleteFolderRecursiveAsync( + String name) { + DeleteFolderRecursiveRequest request = + DeleteFolderRecursiveRequest.newBuilder().setName(name).build(); + return deleteFolderRecursiveAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a folder recursively. This operation is only applicable to a hierarchical namespace + * enabled bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   DeleteFolderRecursiveRequest request =
+   *       DeleteFolderRecursiveRequest.newBuilder()
+   *           .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   storageControlClient.deleteFolderRecursiveAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture deleteFolderRecursiveAsync( + DeleteFolderRecursiveRequest request) { + return deleteFolderRecursiveOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a folder recursively. This operation is only applicable to a hierarchical namespace + * enabled bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   DeleteFolderRecursiveRequest request =
+   *       DeleteFolderRecursiveRequest.newBuilder()
+   *           .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   OperationFuture future =
+   *       storageControlClient.deleteFolderRecursiveOperationCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final OperationCallable + deleteFolderRecursiveOperationCallable() { + return stub.deleteFolderRecursiveOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a folder recursively. This operation is only applicable to a hierarchical namespace + * enabled bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   DeleteFolderRecursiveRequest request =
+   *       DeleteFolderRecursiveRequest.newBuilder()
+   *           .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.deleteFolderRecursiveCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + deleteFolderRecursiveCallable() { + return stub.deleteFolderRecursiveCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the storage layout configuration for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   StorageLayoutName name = StorageLayoutName.of("[PROJECT]", "[BUCKET]");
+   *   StorageLayout response = storageControlClient.getStorageLayout(name);
+   * }
+   * }
+ * + * @param name Required. The name of the StorageLayout resource. Format: + * `projects/{project}/buckets/{bucket}/storageLayout` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final StorageLayout getStorageLayout(StorageLayoutName name) { + GetStorageLayoutRequest request = + GetStorageLayoutRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getStorageLayout(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the storage layout configuration for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String name = StorageLayoutName.of("[PROJECT]", "[BUCKET]").toString();
+   *   StorageLayout response = storageControlClient.getStorageLayout(name);
+   * }
+   * }
+ * + * @param name Required. The name of the StorageLayout resource. Format: + * `projects/{project}/buckets/{bucket}/storageLayout` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final StorageLayout getStorageLayout(String name) { + GetStorageLayoutRequest request = GetStorageLayoutRequest.newBuilder().setName(name).build(); + return getStorageLayout(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the storage layout configuration for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetStorageLayoutRequest request =
+   *       GetStorageLayoutRequest.newBuilder()
+   *           .setName(StorageLayoutName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPrefix("prefix-980110702")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   StorageLayout response = storageControlClient.getStorageLayout(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final StorageLayout getStorageLayout(GetStorageLayoutRequest request) { + return getStorageLayoutCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the storage layout configuration for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetStorageLayoutRequest request =
+   *       GetStorageLayoutRequest.newBuilder()
+   *           .setName(StorageLayoutName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPrefix("prefix-980110702")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.getStorageLayoutCallable().futureCall(request);
+   *   // Do something.
+   *   StorageLayout response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getStorageLayoutCallable() { + return stub.getStorageLayoutCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   ManagedFolder managedFolder = ManagedFolder.newBuilder().build();
+   *   String managedFolderId = "managedFolderId-2027084056";
+   *   ManagedFolder response =
+   *       storageControlClient.createManagedFolder(parent, managedFolder, managedFolderId);
+   * }
+   * }
+ * + * @param parent Required. Name of the bucket this managed folder belongs to. + * @param managedFolder Required. Properties of the managed folder being created. The bucket and + * managed folder names are specified in the `parent` and `managed_folder_id` fields. + * Populating these fields in `managed_folder` will result in an error. + * @param managedFolderId Required. The name of the managed folder. It uses a single `/` as + * delimiter and leading and trailing `/` are allowed. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ManagedFolder createManagedFolder( + BucketName parent, ManagedFolder managedFolder, String managedFolderId) { + CreateManagedFolderRequest request = + CreateManagedFolderRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setManagedFolder(managedFolder) + .setManagedFolderId(managedFolderId) + .build(); + return createManagedFolder(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String parent = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   ManagedFolder managedFolder = ManagedFolder.newBuilder().build();
+   *   String managedFolderId = "managedFolderId-2027084056";
+   *   ManagedFolder response =
+   *       storageControlClient.createManagedFolder(parent, managedFolder, managedFolderId);
+   * }
+   * }
+ * + * @param parent Required. Name of the bucket this managed folder belongs to. + * @param managedFolder Required. Properties of the managed folder being created. The bucket and + * managed folder names are specified in the `parent` and `managed_folder_id` fields. + * Populating these fields in `managed_folder` will result in an error. + * @param managedFolderId Required. The name of the managed folder. It uses a single `/` as + * delimiter and leading and trailing `/` are allowed. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ManagedFolder createManagedFolder( + String parent, ManagedFolder managedFolder, String managedFolderId) { + CreateManagedFolderRequest request = + CreateManagedFolderRequest.newBuilder() + .setParent(parent) + .setManagedFolder(managedFolder) + .setManagedFolderId(managedFolderId) + .build(); + return createManagedFolder(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   CreateManagedFolderRequest request =
+   *       CreateManagedFolderRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setManagedFolder(ManagedFolder.newBuilder().build())
+   *           .setManagedFolderId("managedFolderId-2027084056")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ManagedFolder response = storageControlClient.createManagedFolder(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ManagedFolder createManagedFolder(CreateManagedFolderRequest request) { + return createManagedFolderCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   CreateManagedFolderRequest request =
+   *       CreateManagedFolderRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setManagedFolder(ManagedFolder.newBuilder().build())
+   *           .setManagedFolderId("managedFolderId-2027084056")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.createManagedFolderCallable().futureCall(request);
+   *   // Do something.
+   *   ManagedFolder response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + createManagedFolderCallable() { + return stub.createManagedFolderCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Permanently deletes an empty managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ManagedFolderName name = ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]");
+   *   storageControlClient.deleteManagedFolder(name);
+   * }
+   * }
+ * + * @param name Required. Name of the managed folder. Format: + * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteManagedFolder(ManagedFolderName name) { + DeleteManagedFolderRequest request = + DeleteManagedFolderRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + deleteManagedFolder(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Permanently deletes an empty managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String name = ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]").toString();
+   *   storageControlClient.deleteManagedFolder(name);
+   * }
+   * }
+ * + * @param name Required. Name of the managed folder. Format: + * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteManagedFolder(String name) { + DeleteManagedFolderRequest request = + DeleteManagedFolderRequest.newBuilder().setName(name).build(); + deleteManagedFolder(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Permanently deletes an empty managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   DeleteManagedFolderRequest request =
+   *       DeleteManagedFolderRequest.newBuilder()
+   *           .setName(ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setAllowNonEmpty(true)
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   storageControlClient.deleteManagedFolder(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteManagedFolder(DeleteManagedFolderRequest request) { + deleteManagedFolderCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Permanently deletes an empty managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   DeleteManagedFolderRequest request =
+   *       DeleteManagedFolderRequest.newBuilder()
+   *           .setName(ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setAllowNonEmpty(true)
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.deleteManagedFolderCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable deleteManagedFolderCallable() { + return stub.deleteManagedFolderCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns metadata for the specified managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ManagedFolderName name = ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]");
+   *   ManagedFolder response = storageControlClient.getManagedFolder(name);
+   * }
+   * }
+ * + * @param name Required. Name of the managed folder. Format: + * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ManagedFolder getManagedFolder(ManagedFolderName name) { + GetManagedFolderRequest request = + GetManagedFolderRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getManagedFolder(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns metadata for the specified managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String name = ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]").toString();
+   *   ManagedFolder response = storageControlClient.getManagedFolder(name);
+   * }
+   * }
+ * + * @param name Required. Name of the managed folder. Format: + * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ManagedFolder getManagedFolder(String name) { + GetManagedFolderRequest request = GetManagedFolderRequest.newBuilder().setName(name).build(); + return getManagedFolder(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns metadata for the specified managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetManagedFolderRequest request =
+   *       GetManagedFolderRequest.newBuilder()
+   *           .setName(ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ManagedFolder response = storageControlClient.getManagedFolder(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ManagedFolder getManagedFolder(GetManagedFolderRequest request) { + return getManagedFolderCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns metadata for the specified managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetManagedFolderRequest request =
+   *       GetManagedFolderRequest.newBuilder()
+   *           .setName(ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]").toString())
+   *           .setIfMetagenerationMatch(1043427781)
+   *           .setIfMetagenerationNotMatch(1025430873)
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.getManagedFolderCallable().futureCall(request);
+   *   // Do something.
+   *   ManagedFolder response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getManagedFolderCallable() { + return stub.getManagedFolderCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of managed folders for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   for (ManagedFolder element : storageControlClient.listManagedFolders(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. Name of the bucket this managed folder belongs to. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListManagedFoldersPagedResponse listManagedFolders(BucketName parent) { + ListManagedFoldersRequest request = + ListManagedFoldersRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listManagedFolders(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of managed folders for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String parent = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   for (ManagedFolder element : storageControlClient.listManagedFolders(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. Name of the bucket this managed folder belongs to. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListManagedFoldersPagedResponse listManagedFolders(String parent) { + ListManagedFoldersRequest request = + ListManagedFoldersRequest.newBuilder().setParent(parent).build(); + return listManagedFolders(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of managed folders for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ListManagedFoldersRequest request =
+   *       ListManagedFoldersRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setPrefix("prefix-980110702")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   for (ManagedFolder element : storageControlClient.listManagedFolders(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListManagedFoldersPagedResponse listManagedFolders( + ListManagedFoldersRequest request) { + return listManagedFoldersPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of managed folders for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ListManagedFoldersRequest request =
+   *       ListManagedFoldersRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setPrefix("prefix-980110702")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.listManagedFoldersPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (ManagedFolder element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listManagedFoldersPagedCallable() { + return stub.listManagedFoldersPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Retrieves a list of managed folders for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ListManagedFoldersRequest request =
+   *       ListManagedFoldersRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setPrefix("prefix-980110702")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   while (true) {
+   *     ListManagedFoldersResponse response =
+   *         storageControlClient.listManagedFoldersCallable().call(request);
+   *     for (ManagedFolder element : response.getManagedFoldersList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listManagedFoldersCallable() { + return stub.listManagedFoldersCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   AnywhereCache anywhereCache = AnywhereCache.newBuilder().build();
+   *   AnywhereCache response =
+   *       storageControlClient.createAnywhereCacheAsync(parent, anywhereCache).get();
+   * }
+   * }
+ * + * @param parent Required. The bucket to which this cache belongs. Format: + * `projects/{project}/buckets/{bucket}` + * @param anywhereCache Required. Properties of the Anywhere Cache instance being created. The + * parent bucket name is specified in the `parent` field. Server uses the default value of + * `ttl` or `admission_policy` if not specified in request. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createAnywhereCacheAsync( + BucketName parent, AnywhereCache anywhereCache) { + CreateAnywhereCacheRequest request = + CreateAnywhereCacheRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setAnywhereCache(anywhereCache) + .build(); + return createAnywhereCacheAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String parent = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   AnywhereCache anywhereCache = AnywhereCache.newBuilder().build();
+   *   AnywhereCache response =
+   *       storageControlClient.createAnywhereCacheAsync(parent, anywhereCache).get();
+   * }
+   * }
+ * + * @param parent Required. The bucket to which this cache belongs. Format: + * `projects/{project}/buckets/{bucket}` + * @param anywhereCache Required. Properties of the Anywhere Cache instance being created. The + * parent bucket name is specified in the `parent` field. Server uses the default value of + * `ttl` or `admission_policy` if not specified in request. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createAnywhereCacheAsync( + String parent, AnywhereCache anywhereCache) { + CreateAnywhereCacheRequest request = + CreateAnywhereCacheRequest.newBuilder() + .setParent(parent) + .setAnywhereCache(anywhereCache) + .build(); + return createAnywhereCacheAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   CreateAnywhereCacheRequest request =
+   *       CreateAnywhereCacheRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setAnywhereCache(AnywhereCache.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   AnywhereCache response = storageControlClient.createAnywhereCacheAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createAnywhereCacheAsync( + CreateAnywhereCacheRequest request) { + return createAnywhereCacheOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   CreateAnywhereCacheRequest request =
+   *       CreateAnywhereCacheRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setAnywhereCache(AnywhereCache.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   OperationFuture future =
+   *       storageControlClient.createAnywhereCacheOperationCallable().futureCall(request);
+   *   // Do something.
+   *   AnywhereCache response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable< + CreateAnywhereCacheRequest, AnywhereCache, CreateAnywhereCacheMetadata> + createAnywhereCacheOperationCallable() { + return stub.createAnywhereCacheOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   CreateAnywhereCacheRequest request =
+   *       CreateAnywhereCacheRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setAnywhereCache(AnywhereCache.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.createAnywhereCacheCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable createAnywhereCacheCallable() { + return stub.createAnywhereCacheCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an Anywhere Cache instance. Mutable fields include `ttl` and `admission_policy`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   AnywhereCache anywhereCache = AnywhereCache.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   AnywhereCache response =
+   *       storageControlClient.updateAnywhereCacheAsync(anywhereCache, updateMask).get();
+   * }
+   * }
+ * + * @param anywhereCache Required. The Anywhere Cache instance to be updated. + * @param updateMask Required. List of fields to be updated. Mutable fields of AnywhereCache + * include `ttl` and `admission_policy`. + *

To specify ALL fields, specify a single field with the value `*`. Note: We recommend + * against doing this. If a new field is introduced at a later time, an older client updating + * with the `*` may accidentally reset the new field's value. + *

Not specifying any fields is an error. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture updateAnywhereCacheAsync( + AnywhereCache anywhereCache, FieldMask updateMask) { + UpdateAnywhereCacheRequest request = + UpdateAnywhereCacheRequest.newBuilder() + .setAnywhereCache(anywhereCache) + .setUpdateMask(updateMask) + .build(); + return updateAnywhereCacheAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an Anywhere Cache instance. Mutable fields include `ttl` and `admission_policy`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   UpdateAnywhereCacheRequest request =
+   *       UpdateAnywhereCacheRequest.newBuilder()
+   *           .setAnywhereCache(AnywhereCache.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   AnywhereCache response = storageControlClient.updateAnywhereCacheAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture updateAnywhereCacheAsync( + UpdateAnywhereCacheRequest request) { + return updateAnywhereCacheOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an Anywhere Cache instance. Mutable fields include `ttl` and `admission_policy`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   UpdateAnywhereCacheRequest request =
+   *       UpdateAnywhereCacheRequest.newBuilder()
+   *           .setAnywhereCache(AnywhereCache.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   OperationFuture future =
+   *       storageControlClient.updateAnywhereCacheOperationCallable().futureCall(request);
+   *   // Do something.
+   *   AnywhereCache response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable< + UpdateAnywhereCacheRequest, AnywhereCache, UpdateAnywhereCacheMetadata> + updateAnywhereCacheOperationCallable() { + return stub.updateAnywhereCacheOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an Anywhere Cache instance. Mutable fields include `ttl` and `admission_policy`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   UpdateAnywhereCacheRequest request =
+   *       UpdateAnywhereCacheRequest.newBuilder()
+   *           .setAnywhereCache(AnywhereCache.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.updateAnywhereCacheCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable updateAnywhereCacheCallable() { + return stub.updateAnywhereCacheCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Disables an Anywhere Cache instance. A disabled instance is read-only. The disablement could be + * revoked by calling ResumeAnywhereCache. The cache instance will be deleted automatically if it + * remains in the disabled state for at least one hour. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   AnywhereCacheName name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]");
+   *   AnywhereCache response = storageControlClient.disableAnywhereCache(name);
+   * }
+   * }
+ * + * @param name Required. The name field in the request should be: + * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AnywhereCache disableAnywhereCache(AnywhereCacheName name) { + DisableAnywhereCacheRequest request = + DisableAnywhereCacheRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return disableAnywhereCache(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Disables an Anywhere Cache instance. A disabled instance is read-only. The disablement could be + * revoked by calling ResumeAnywhereCache. The cache instance will be deleted automatically if it + * remains in the disabled state for at least one hour. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString();
+   *   AnywhereCache response = storageControlClient.disableAnywhereCache(name);
+   * }
+   * }
+ * + * @param name Required. The name field in the request should be: + * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AnywhereCache disableAnywhereCache(String name) { + DisableAnywhereCacheRequest request = + DisableAnywhereCacheRequest.newBuilder().setName(name).build(); + return disableAnywhereCache(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Disables an Anywhere Cache instance. A disabled instance is read-only. The disablement could be + * revoked by calling ResumeAnywhereCache. The cache instance will be deleted automatically if it + * remains in the disabled state for at least one hour. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   DisableAnywhereCacheRequest request =
+   *       DisableAnywhereCacheRequest.newBuilder()
+   *           .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   AnywhereCache response = storageControlClient.disableAnywhereCache(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AnywhereCache disableAnywhereCache(DisableAnywhereCacheRequest request) { + return disableAnywhereCacheCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Disables an Anywhere Cache instance. A disabled instance is read-only. The disablement could be + * revoked by calling ResumeAnywhereCache. The cache instance will be deleted automatically if it + * remains in the disabled state for at least one hour. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   DisableAnywhereCacheRequest request =
+   *       DisableAnywhereCacheRequest.newBuilder()
+   *           .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.disableAnywhereCacheCallable().futureCall(request);
+   *   // Do something.
+   *   AnywhereCache response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + disableAnywhereCacheCallable() { + return stub.disableAnywhereCacheCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Pauses an Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   AnywhereCacheName name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]");
+   *   AnywhereCache response = storageControlClient.pauseAnywhereCache(name);
+   * }
+   * }
+ * + * @param name Required. The name field in the request should be: + * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AnywhereCache pauseAnywhereCache(AnywhereCacheName name) { + PauseAnywhereCacheRequest request = + PauseAnywhereCacheRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return pauseAnywhereCache(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Pauses an Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString();
+   *   AnywhereCache response = storageControlClient.pauseAnywhereCache(name);
+   * }
+   * }
+ * + * @param name Required. The name field in the request should be: + * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AnywhereCache pauseAnywhereCache(String name) { + PauseAnywhereCacheRequest request = + PauseAnywhereCacheRequest.newBuilder().setName(name).build(); + return pauseAnywhereCache(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Pauses an Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   PauseAnywhereCacheRequest request =
+   *       PauseAnywhereCacheRequest.newBuilder()
+   *           .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   AnywhereCache response = storageControlClient.pauseAnywhereCache(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AnywhereCache pauseAnywhereCache(PauseAnywhereCacheRequest request) { + return pauseAnywhereCacheCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Pauses an Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   PauseAnywhereCacheRequest request =
+   *       PauseAnywhereCacheRequest.newBuilder()
+   *           .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.pauseAnywhereCacheCallable().futureCall(request);
+   *   // Do something.
+   *   AnywhereCache response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + pauseAnywhereCacheCallable() { + return stub.pauseAnywhereCacheCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Resumes a disabled or paused Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   AnywhereCacheName name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]");
+   *   AnywhereCache response = storageControlClient.resumeAnywhereCache(name);
+   * }
+   * }
+ * + * @param name Required. The name field in the request should be: + * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AnywhereCache resumeAnywhereCache(AnywhereCacheName name) { + ResumeAnywhereCacheRequest request = + ResumeAnywhereCacheRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return resumeAnywhereCache(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Resumes a disabled or paused Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString();
+   *   AnywhereCache response = storageControlClient.resumeAnywhereCache(name);
+   * }
+   * }
+ * + * @param name Required. The name field in the request should be: + * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AnywhereCache resumeAnywhereCache(String name) { + ResumeAnywhereCacheRequest request = + ResumeAnywhereCacheRequest.newBuilder().setName(name).build(); + return resumeAnywhereCache(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Resumes a disabled or paused Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ResumeAnywhereCacheRequest request =
+   *       ResumeAnywhereCacheRequest.newBuilder()
+   *           .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   AnywhereCache response = storageControlClient.resumeAnywhereCache(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AnywhereCache resumeAnywhereCache(ResumeAnywhereCacheRequest request) { + return resumeAnywhereCacheCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Resumes a disabled or paused Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ResumeAnywhereCacheRequest request =
+   *       ResumeAnywhereCacheRequest.newBuilder()
+   *           .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.resumeAnywhereCacheCallable().futureCall(request);
+   *   // Do something.
+   *   AnywhereCache response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + resumeAnywhereCacheCallable() { + return stub.resumeAnywhereCacheCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets an Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   AnywhereCacheName name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]");
+   *   AnywhereCache response = storageControlClient.getAnywhereCache(name);
+   * }
+   * }
+ * + * @param name Required. The name field in the request should be: + * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AnywhereCache getAnywhereCache(AnywhereCacheName name) { + GetAnywhereCacheRequest request = + GetAnywhereCacheRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getAnywhereCache(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets an Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString();
+   *   AnywhereCache response = storageControlClient.getAnywhereCache(name);
+   * }
+   * }
+ * + * @param name Required. The name field in the request should be: + * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AnywhereCache getAnywhereCache(String name) { + GetAnywhereCacheRequest request = GetAnywhereCacheRequest.newBuilder().setName(name).build(); + return getAnywhereCache(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets an Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetAnywhereCacheRequest request =
+   *       GetAnywhereCacheRequest.newBuilder()
+   *           .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   AnywhereCache response = storageControlClient.getAnywhereCache(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AnywhereCache getAnywhereCache(GetAnywhereCacheRequest request) { + return getAnywhereCacheCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets an Anywhere Cache instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetAnywhereCacheRequest request =
+   *       GetAnywhereCacheRequest.newBuilder()
+   *           .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.getAnywhereCacheCallable().futureCall(request);
+   *   // Do something.
+   *   AnywhereCache response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getAnywhereCacheCallable() { + return stub.getAnywhereCacheCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Anywhere Cache instances for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]");
+   *   for (AnywhereCache element : storageControlClient.listAnywhereCaches(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The bucket to which this cache belongs. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListAnywhereCachesPagedResponse listAnywhereCaches(BucketName parent) { + ListAnywhereCachesRequest request = + ListAnywhereCachesRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listAnywhereCaches(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Anywhere Cache instances for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String parent = BucketName.of("[PROJECT]", "[BUCKET]").toString();
+   *   for (AnywhereCache element : storageControlClient.listAnywhereCaches(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The bucket to which this cache belongs. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListAnywhereCachesPagedResponse listAnywhereCaches(String parent) { + ListAnywhereCachesRequest request = + ListAnywhereCachesRequest.newBuilder().setParent(parent).build(); + return listAnywhereCaches(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Anywhere Cache instances for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ListAnywhereCachesRequest request =
+   *       ListAnywhereCachesRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   for (AnywhereCache element : storageControlClient.listAnywhereCaches(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListAnywhereCachesPagedResponse listAnywhereCaches( + ListAnywhereCachesRequest request) { + return listAnywhereCachesPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Anywhere Cache instances for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ListAnywhereCachesRequest request =
+   *       ListAnywhereCachesRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.listAnywhereCachesPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (AnywhereCache element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listAnywhereCachesPagedCallable() { + return stub.listAnywhereCachesPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Anywhere Cache instances for a given bucket. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ListAnywhereCachesRequest request =
+   *       ListAnywhereCachesRequest.newBuilder()
+   *           .setParent(BucketName.of("[PROJECT]", "[BUCKET]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   while (true) {
+   *     ListAnywhereCachesResponse response =
+   *         storageControlClient.listAnywhereCachesCallable().call(request);
+   *     for (AnywhereCache element : response.getAnywhereCachesList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listAnywhereCachesCallable() { + return stub.listAnywhereCachesCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the Project scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   IntelligenceConfigName name =
+   *       IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]");
+   *   IntelligenceConfig response = storageControlClient.getProjectIntelligenceConfig(name);
+   * }
+   * }
+ * + * @param name Required. The name of the `IntelligenceConfig` resource associated with your + * project. + *

Format: `projects/{id}/locations/global/intelligenceConfig` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig getProjectIntelligenceConfig(IntelligenceConfigName name) { + GetProjectIntelligenceConfigRequest request = + GetProjectIntelligenceConfigRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return getProjectIntelligenceConfig(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the Project scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String name =
+   *       IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]").toString();
+   *   IntelligenceConfig response = storageControlClient.getProjectIntelligenceConfig(name);
+   * }
+   * }
+ * + * @param name Required. The name of the `IntelligenceConfig` resource associated with your + * project. + *

Format: `projects/{id}/locations/global/intelligenceConfig` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig getProjectIntelligenceConfig(String name) { + GetProjectIntelligenceConfigRequest request = + GetProjectIntelligenceConfigRequest.newBuilder().setName(name).build(); + return getProjectIntelligenceConfig(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the Project scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetProjectIntelligenceConfigRequest request =
+   *       GetProjectIntelligenceConfigRequest.newBuilder()
+   *           .setName(
+   *               IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]")
+   *                   .toString())
+   *           .build();
+   *   IntelligenceConfig response = storageControlClient.getProjectIntelligenceConfig(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig getProjectIntelligenceConfig( + GetProjectIntelligenceConfigRequest request) { + return getProjectIntelligenceConfigCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the Project scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetProjectIntelligenceConfigRequest request =
+   *       GetProjectIntelligenceConfigRequest.newBuilder()
+   *           .setName(
+   *               IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]")
+   *                   .toString())
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.getProjectIntelligenceConfigCallable().futureCall(request);
+   *   // Do something.
+   *   IntelligenceConfig response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + getProjectIntelligenceConfigCallable() { + return stub.getProjectIntelligenceConfigCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the Project scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   IntelligenceConfig intelligenceConfig = IntelligenceConfig.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   IntelligenceConfig response =
+   *       storageControlClient.updateProjectIntelligenceConfig(intelligenceConfig, updateMask);
+   * }
+   * }
+ * + * @param intelligenceConfig Required. The `IntelligenceConfig` resource to be updated. + * @param updateMask Required. The `update_mask` that specifies the fields within the + * `IntelligenceConfig` resource that should be modified by this update. Only the listed + * fields are updated. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig updateProjectIntelligenceConfig( + IntelligenceConfig intelligenceConfig, FieldMask updateMask) { + UpdateProjectIntelligenceConfigRequest request = + UpdateProjectIntelligenceConfigRequest.newBuilder() + .setIntelligenceConfig(intelligenceConfig) + .setUpdateMask(updateMask) + .build(); + return updateProjectIntelligenceConfig(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the Project scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   UpdateProjectIntelligenceConfigRequest request =
+   *       UpdateProjectIntelligenceConfigRequest.newBuilder()
+   *           .setIntelligenceConfig(IntelligenceConfig.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   IntelligenceConfig response = storageControlClient.updateProjectIntelligenceConfig(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig updateProjectIntelligenceConfig( + UpdateProjectIntelligenceConfigRequest request) { + return updateProjectIntelligenceConfigCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the Project scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   UpdateProjectIntelligenceConfigRequest request =
+   *       UpdateProjectIntelligenceConfigRequest.newBuilder()
+   *           .setIntelligenceConfig(IntelligenceConfig.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.updateProjectIntelligenceConfigCallable().futureCall(request);
+   *   // Do something.
+   *   IntelligenceConfig response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + updateProjectIntelligenceConfigCallable() { + return stub.updateProjectIntelligenceConfigCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the Folder scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   IntelligenceConfigName name =
+   *       IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]");
+   *   IntelligenceConfig response = storageControlClient.getFolderIntelligenceConfig(name);
+   * }
+   * }
+ * + * @param name Required. The name of the `IntelligenceConfig` resource associated with your + * folder. + *

Format: `folders/{id}/locations/global/intelligenceConfig` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig getFolderIntelligenceConfig(IntelligenceConfigName name) { + GetFolderIntelligenceConfigRequest request = + GetFolderIntelligenceConfigRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return getFolderIntelligenceConfig(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the Folder scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String name =
+   *       IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]").toString();
+   *   IntelligenceConfig response = storageControlClient.getFolderIntelligenceConfig(name);
+   * }
+   * }
+ * + * @param name Required. The name of the `IntelligenceConfig` resource associated with your + * folder. + *

Format: `folders/{id}/locations/global/intelligenceConfig` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig getFolderIntelligenceConfig(String name) { + GetFolderIntelligenceConfigRequest request = + GetFolderIntelligenceConfigRequest.newBuilder().setName(name).build(); + return getFolderIntelligenceConfig(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the Folder scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetFolderIntelligenceConfigRequest request =
+   *       GetFolderIntelligenceConfigRequest.newBuilder()
+   *           .setName(
+   *               IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]").toString())
+   *           .build();
+   *   IntelligenceConfig response = storageControlClient.getFolderIntelligenceConfig(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig getFolderIntelligenceConfig( + GetFolderIntelligenceConfigRequest request) { + return getFolderIntelligenceConfigCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the Folder scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetFolderIntelligenceConfigRequest request =
+   *       GetFolderIntelligenceConfigRequest.newBuilder()
+   *           .setName(
+   *               IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]").toString())
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.getFolderIntelligenceConfigCallable().futureCall(request);
+   *   // Do something.
+   *   IntelligenceConfig response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + getFolderIntelligenceConfigCallable() { + return stub.getFolderIntelligenceConfigCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the Folder scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   IntelligenceConfig intelligenceConfig = IntelligenceConfig.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   IntelligenceConfig response =
+   *       storageControlClient.updateFolderIntelligenceConfig(intelligenceConfig, updateMask);
+   * }
+   * }
+ * + * @param intelligenceConfig Required. The `IntelligenceConfig` resource to be updated. + * @param updateMask Required. The `update_mask` that specifies the fields within the + * `IntelligenceConfig` resource that should be modified by this update. Only the listed + * fields are updated. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig updateFolderIntelligenceConfig( + IntelligenceConfig intelligenceConfig, FieldMask updateMask) { + UpdateFolderIntelligenceConfigRequest request = + UpdateFolderIntelligenceConfigRequest.newBuilder() + .setIntelligenceConfig(intelligenceConfig) + .setUpdateMask(updateMask) + .build(); + return updateFolderIntelligenceConfig(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the Folder scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   UpdateFolderIntelligenceConfigRequest request =
+   *       UpdateFolderIntelligenceConfigRequest.newBuilder()
+   *           .setIntelligenceConfig(IntelligenceConfig.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   IntelligenceConfig response = storageControlClient.updateFolderIntelligenceConfig(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig updateFolderIntelligenceConfig( + UpdateFolderIntelligenceConfigRequest request) { + return updateFolderIntelligenceConfigCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the Folder scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   UpdateFolderIntelligenceConfigRequest request =
+   *       UpdateFolderIntelligenceConfigRequest.newBuilder()
+   *           .setIntelligenceConfig(IntelligenceConfig.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.updateFolderIntelligenceConfigCallable().futureCall(request);
+   *   // Do something.
+   *   IntelligenceConfig response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + updateFolderIntelligenceConfigCallable() { + return stub.updateFolderIntelligenceConfigCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the Organization scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   IntelligenceConfigName name = IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]");
+   *   IntelligenceConfig response = storageControlClient.getOrganizationIntelligenceConfig(name);
+   * }
+   * }
+ * + * @param name Required. The name of the `IntelligenceConfig` resource associated with your + * organization. + *

Format: `organizations/{org_id}/locations/global/intelligenceConfig` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig getOrganizationIntelligenceConfig(IntelligenceConfigName name) { + GetOrganizationIntelligenceConfigRequest request = + GetOrganizationIntelligenceConfigRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return getOrganizationIntelligenceConfig(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the Organization scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String name =
+   *       IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]").toString();
+   *   IntelligenceConfig response = storageControlClient.getOrganizationIntelligenceConfig(name);
+   * }
+   * }
+ * + * @param name Required. The name of the `IntelligenceConfig` resource associated with your + * organization. + *

Format: `organizations/{org_id}/locations/global/intelligenceConfig` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig getOrganizationIntelligenceConfig(String name) { + GetOrganizationIntelligenceConfigRequest request = + GetOrganizationIntelligenceConfigRequest.newBuilder().setName(name).build(); + return getOrganizationIntelligenceConfig(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the Organization scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetOrganizationIntelligenceConfigRequest request =
+   *       GetOrganizationIntelligenceConfigRequest.newBuilder()
+   *           .setName(IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]").toString())
+   *           .build();
+   *   IntelligenceConfig response = storageControlClient.getOrganizationIntelligenceConfig(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig getOrganizationIntelligenceConfig( + GetOrganizationIntelligenceConfigRequest request) { + return getOrganizationIntelligenceConfigCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the Organization scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetOrganizationIntelligenceConfigRequest request =
+   *       GetOrganizationIntelligenceConfigRequest.newBuilder()
+   *           .setName(IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]").toString())
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.getOrganizationIntelligenceConfigCallable().futureCall(request);
+   *   // Do something.
+   *   IntelligenceConfig response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + getOrganizationIntelligenceConfigCallable() { + return stub.getOrganizationIntelligenceConfigCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the Organization scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   IntelligenceConfig intelligenceConfig = IntelligenceConfig.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   IntelligenceConfig response =
+   *       storageControlClient.updateOrganizationIntelligenceConfig(intelligenceConfig, updateMask);
+   * }
+   * }
+ * + * @param intelligenceConfig Required. The `IntelligenceConfig` resource to be updated. + * @param updateMask Required. The `update_mask` that specifies the fields within the + * `IntelligenceConfig` resource that should be modified by this update. Only the listed + * fields are updated. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig updateOrganizationIntelligenceConfig( + IntelligenceConfig intelligenceConfig, FieldMask updateMask) { + UpdateOrganizationIntelligenceConfigRequest request = + UpdateOrganizationIntelligenceConfigRequest.newBuilder() + .setIntelligenceConfig(intelligenceConfig) + .setUpdateMask(updateMask) + .build(); + return updateOrganizationIntelligenceConfig(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the Organization scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   UpdateOrganizationIntelligenceConfigRequest request =
+   *       UpdateOrganizationIntelligenceConfigRequest.newBuilder()
+   *           .setIntelligenceConfig(IntelligenceConfig.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   IntelligenceConfig response =
+   *       storageControlClient.updateOrganizationIntelligenceConfig(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final IntelligenceConfig updateOrganizationIntelligenceConfig( + UpdateOrganizationIntelligenceConfigRequest request) { + return updateOrganizationIntelligenceConfigCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the Organization scoped singleton IntelligenceConfig resource. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   UpdateOrganizationIntelligenceConfigRequest request =
+   *       UpdateOrganizationIntelligenceConfigRequest.newBuilder()
+   *           .setIntelligenceConfig(IntelligenceConfig.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.updateOrganizationIntelligenceConfigCallable().futureCall(request);
+   *   // Do something.
+   *   IntelligenceConfig response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + updateOrganizationIntelligenceConfigCallable() { + return stub.updateOrganizationIntelligenceConfigCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the IAM policy for a specified bucket. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ResourceName resource = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]");
+   *   Policy response = storageControlClient.getIamPolicy(resource);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being requested. See the + * operation documentation for the appropriate value for this field. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy getIamPolicy(ResourceName resource) { + GetIamPolicyRequest request = + GetIamPolicyRequest.newBuilder() + .setResource(resource == null ? null : resource.toString()) + .build(); + return getIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the IAM policy for a specified bucket. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String resource =
+   *       AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString();
+   *   Policy response = storageControlClient.getIamPolicy(resource);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being requested. See the + * operation documentation for the appropriate value for this field. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy getIamPolicy(String resource) { + GetIamPolicyRequest request = GetIamPolicyRequest.newBuilder().setResource(resource).build(); + return getIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the IAM policy for a specified bucket. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetIamPolicyRequest request =
+   *       GetIamPolicyRequest.newBuilder()
+   *           .setResource(
+   *               AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .setOptions(GetPolicyOptions.newBuilder().build())
+   *           .build();
+   *   Policy response = storageControlClient.getIamPolicy(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy getIamPolicy(GetIamPolicyRequest request) { + return getIamPolicyCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the IAM policy for a specified bucket. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   GetIamPolicyRequest request =
+   *       GetIamPolicyRequest.newBuilder()
+   *           .setResource(
+   *               AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .setOptions(GetPolicyOptions.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = storageControlClient.getIamPolicyCallable().futureCall(request);
+   *   // Do something.
+   *   Policy response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getIamPolicyCallable() { + return stub.getIamPolicyCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an IAM policy for the specified bucket. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ResourceName resource = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]");
+   *   Policy policy = Policy.newBuilder().build();
+   *   Policy response = storageControlClient.setIamPolicy(resource, policy);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being specified. See the + * operation documentation for the appropriate value for this field. + * @param policy REQUIRED: The complete policy to be applied to the `resource`. The size of the + * policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud + * Platform services (such as Projects) might reject them. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy setIamPolicy(ResourceName resource, Policy policy) { + SetIamPolicyRequest request = + SetIamPolicyRequest.newBuilder() + .setResource(resource == null ? null : resource.toString()) + .setPolicy(policy) + .build(); + return setIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an IAM policy for the specified bucket. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String resource =
+   *       AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString();
+   *   Policy policy = Policy.newBuilder().build();
+   *   Policy response = storageControlClient.setIamPolicy(resource, policy);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being specified. See the + * operation documentation for the appropriate value for this field. + * @param policy REQUIRED: The complete policy to be applied to the `resource`. The size of the + * policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud + * Platform services (such as Projects) might reject them. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy setIamPolicy(String resource, Policy policy) { + SetIamPolicyRequest request = + SetIamPolicyRequest.newBuilder().setResource(resource).setPolicy(policy).build(); + return setIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an IAM policy for the specified bucket. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   SetIamPolicyRequest request =
+   *       SetIamPolicyRequest.newBuilder()
+   *           .setResource(
+   *               AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .setPolicy(Policy.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   Policy response = storageControlClient.setIamPolicy(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy setIamPolicy(SetIamPolicyRequest request) { + return setIamPolicyCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an IAM policy for the specified bucket. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, or + * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   SetIamPolicyRequest request =
+   *       SetIamPolicyRequest.newBuilder()
+   *           .setResource(
+   *               AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .setPolicy(Policy.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = storageControlClient.setIamPolicyCallable().futureCall(request);
+   *   // Do something.
+   *   Policy response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable setIamPolicyCallable() { + return stub.setIamPolicyCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Tests a set of permissions on the given bucket, object, or managed folder to see which, if any, + * are held by the caller. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, `projects/_/buckets/{bucket}/objects/{object}` for + * an object, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed + * folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   ResourceName resource = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]");
+   *   List permissions = new ArrayList<>();
+   *   TestIamPermissionsResponse response =
+   *       storageControlClient.testIamPermissions(resource, permissions);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy detail is being requested. See the + * operation documentation for the appropriate value for this field. + * @param permissions The set of permissions to check for the `resource`. Permissions with + * wildcards (such as '*' or 'storage.*') are not allowed. For more information see + * [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final TestIamPermissionsResponse testIamPermissions( + ResourceName resource, List permissions) { + TestIamPermissionsRequest request = + TestIamPermissionsRequest.newBuilder() + .setResource(resource == null ? null : resource.toString()) + .addAllPermissions(permissions) + .build(); + return testIamPermissions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Tests a set of permissions on the given bucket, object, or managed folder to see which, if any, + * are held by the caller. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, `projects/_/buckets/{bucket}/objects/{object}` for + * an object, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed + * folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   String resource =
+   *       AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString();
+   *   List permissions = new ArrayList<>();
+   *   TestIamPermissionsResponse response =
+   *       storageControlClient.testIamPermissions(resource, permissions);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy detail is being requested. See the + * operation documentation for the appropriate value for this field. + * @param permissions The set of permissions to check for the `resource`. Permissions with + * wildcards (such as '*' or 'storage.*') are not allowed. For more information see + * [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final TestIamPermissionsResponse testIamPermissions( + String resource, List permissions) { + TestIamPermissionsRequest request = + TestIamPermissionsRequest.newBuilder() + .setResource(resource) + .addAllPermissions(permissions) + .build(); + return testIamPermissions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Tests a set of permissions on the given bucket, object, or managed folder to see which, if any, + * are held by the caller. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, `projects/_/buckets/{bucket}/objects/{object}` for + * an object, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed + * folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   TestIamPermissionsRequest request =
+   *       TestIamPermissionsRequest.newBuilder()
+   *           .setResource(
+   *               AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .addAllPermissions(new ArrayList())
+   *           .build();
+   *   TestIamPermissionsResponse response = storageControlClient.testIamPermissions(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final TestIamPermissionsResponse testIamPermissions(TestIamPermissionsRequest request) { + return testIamPermissionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Tests a set of permissions on the given bucket, object, or managed folder to see which, if any, + * are held by the caller. The `resource` field in the request should be + * `projects/_/buckets/{bucket}` for a bucket, `projects/_/buckets/{bucket}/objects/{object}` for + * an object, or `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` for a managed + * folder. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+   *   TestIamPermissionsRequest request =
+   *       TestIamPermissionsRequest.newBuilder()
+   *           .setResource(
+   *               AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString())
+   *           .addAllPermissions(new ArrayList())
+   *           .build();
+   *   ApiFuture future =
+   *       storageControlClient.testIamPermissionsCallable().futureCall(request);
+   *   // Do something.
+   *   TestIamPermissionsResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + testIamPermissionsCallable() { + return stub.testIamPermissionsCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } + + public static class ListFoldersPagedResponse + extends AbstractPagedListResponse< + ListFoldersRequest, + ListFoldersResponse, + Folder, + ListFoldersPage, + ListFoldersFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListFoldersPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, input -> new ListFoldersPagedResponse(input), MoreExecutors.directExecutor()); + } + + private ListFoldersPagedResponse(ListFoldersPage page) { + super(page, ListFoldersFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListFoldersPage + extends AbstractPage { + + private ListFoldersPage( + PageContext context, + ListFoldersResponse response) { + super(context, response); + } + + private static ListFoldersPage createEmptyPage() { + return new ListFoldersPage(null, null); + } + + @Override + protected ListFoldersPage createPage( + PageContext context, + ListFoldersResponse response) { + return new ListFoldersPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListFoldersFixedSizeCollection + extends 
AbstractFixedSizeCollection< + ListFoldersRequest, + ListFoldersResponse, + Folder, + ListFoldersPage, + ListFoldersFixedSizeCollection> { + + private ListFoldersFixedSizeCollection(List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListFoldersFixedSizeCollection createEmptyCollection() { + return new ListFoldersFixedSizeCollection(null, 0); + } + + @Override + protected ListFoldersFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListFoldersFixedSizeCollection(pages, collectionSize); + } + } + + public static class ListManagedFoldersPagedResponse + extends AbstractPagedListResponse< + ListManagedFoldersRequest, + ListManagedFoldersResponse, + ManagedFolder, + ListManagedFoldersPage, + ListManagedFoldersFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListManagedFoldersPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListManagedFoldersPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListManagedFoldersPagedResponse(ListManagedFoldersPage page) { + super(page, ListManagedFoldersFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListManagedFoldersPage + extends AbstractPage< + ListManagedFoldersRequest, + ListManagedFoldersResponse, + ManagedFolder, + ListManagedFoldersPage> { + + private ListManagedFoldersPage( + PageContext context, + ListManagedFoldersResponse response) { + super(context, response); + } + + private static ListManagedFoldersPage createEmptyPage() { + return new ListManagedFoldersPage(null, null); + } + + @Override + protected ListManagedFoldersPage createPage( + PageContext context, + ListManagedFoldersResponse response) { + return new ListManagedFoldersPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext context, + 
ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListManagedFoldersFixedSizeCollection + extends AbstractFixedSizeCollection< + ListManagedFoldersRequest, + ListManagedFoldersResponse, + ManagedFolder, + ListManagedFoldersPage, + ListManagedFoldersFixedSizeCollection> { + + private ListManagedFoldersFixedSizeCollection( + List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListManagedFoldersFixedSizeCollection createEmptyCollection() { + return new ListManagedFoldersFixedSizeCollection(null, 0); + } + + @Override + protected ListManagedFoldersFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListManagedFoldersFixedSizeCollection(pages, collectionSize); + } + } + + public static class ListAnywhereCachesPagedResponse + extends AbstractPagedListResponse< + ListAnywhereCachesRequest, + ListAnywhereCachesResponse, + AnywhereCache, + ListAnywhereCachesPage, + ListAnywhereCachesFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListAnywhereCachesPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListAnywhereCachesPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListAnywhereCachesPagedResponse(ListAnywhereCachesPage page) { + super(page, ListAnywhereCachesFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListAnywhereCachesPage + extends AbstractPage< + ListAnywhereCachesRequest, + ListAnywhereCachesResponse, + AnywhereCache, + ListAnywhereCachesPage> { + + private ListAnywhereCachesPage( + PageContext context, + ListAnywhereCachesResponse response) { + super(context, response); + } + + private static ListAnywhereCachesPage createEmptyPage() { + return new ListAnywhereCachesPage(null, null); + } + + @Override + 
protected ListAnywhereCachesPage createPage( + PageContext context, + ListAnywhereCachesResponse response) { + return new ListAnywhereCachesPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListAnywhereCachesFixedSizeCollection + extends AbstractFixedSizeCollection< + ListAnywhereCachesRequest, + ListAnywhereCachesResponse, + AnywhereCache, + ListAnywhereCachesPage, + ListAnywhereCachesFixedSizeCollection> { + + private ListAnywhereCachesFixedSizeCollection( + List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListAnywhereCachesFixedSizeCollection createEmptyCollection() { + return new ListAnywhereCachesFixedSizeCollection(null, 0); + } + + @Override + protected ListAnywhereCachesFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListAnywhereCachesFixedSizeCollection(pages, collectionSize); + } + } +} diff --git a/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/StorageControlSettings.java b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/StorageControlSettings.java new file mode 100644 index 000000000000..46f4dabb26a3 --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/StorageControlSettings.java @@ -0,0 +1,617 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.control.v2; + +import static com.google.storage.control.v2.StorageControlClient.ListAnywhereCachesPagedResponse; +import static com.google.storage.control.v2.StorageControlClient.ListFoldersPagedResponse; +import static com.google.storage.control.v2.StorageControlClient.ListManagedFoldersPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.httpjson.InstantiatingHttpJsonChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.storage.control.v2.stub.StorageControlStubSettings; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+/** + * Settings class to configure an instance of {@link StorageControlClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (storage.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of createFolder: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * StorageControlSettings.Builder storageControlSettingsBuilder =
+ *     StorageControlSettings.newBuilder();
+ * storageControlSettingsBuilder
+ *     .createFolderSettings()
+ *     .setRetrySettings(
+ *         storageControlSettingsBuilder
+ *             .createFolderSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * StorageControlSettings storageControlSettings = storageControlSettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + * + *

To configure the RetrySettings of a Long Running Operation method, create an + * OperationTimedPollAlgorithm object and update the RPC's polling algorithm. For example, to + * configure the RetrySettings for renameFolder: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * StorageControlSettings.Builder storageControlSettingsBuilder =
+ *     StorageControlSettings.newBuilder();
+ * TimedRetryAlgorithm timedRetryAlgorithm =
+ *     OperationalTimedPollAlgorithm.create(
+ *         RetrySettings.newBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofMillis(500))
+ *             .setRetryDelayMultiplier(1.5)
+ *             .setMaxRetryDelayDuration(Duration.ofMillis(5000))
+ *             .setTotalTimeoutDuration(Duration.ofHours(24))
+ *             .build());
+ * storageControlSettingsBuilder
+ *     .createClusterOperationSettings()
+ *     .setPollingAlgorithm(timedRetryAlgorithm)
+ *     .build();
+ * }
+ */ +@Generated("by gapic-generator-java") +public class StorageControlSettings extends ClientSettings { + + /** Returns the object with the settings used for calls to createFolder. */ + public UnaryCallSettings createFolderSettings() { + return ((StorageControlStubSettings) getStubSettings()).createFolderSettings(); + } + + /** Returns the object with the settings used for calls to deleteFolder. */ + public UnaryCallSettings deleteFolderSettings() { + return ((StorageControlStubSettings) getStubSettings()).deleteFolderSettings(); + } + + /** Returns the object with the settings used for calls to getFolder. */ + public UnaryCallSettings getFolderSettings() { + return ((StorageControlStubSettings) getStubSettings()).getFolderSettings(); + } + + /** Returns the object with the settings used for calls to listFolders. */ + public PagedCallSettings + listFoldersSettings() { + return ((StorageControlStubSettings) getStubSettings()).listFoldersSettings(); + } + + /** Returns the object with the settings used for calls to renameFolder. */ + public UnaryCallSettings renameFolderSettings() { + return ((StorageControlStubSettings) getStubSettings()).renameFolderSettings(); + } + + /** Returns the object with the settings used for calls to renameFolder. */ + public OperationCallSettings + renameFolderOperationSettings() { + return ((StorageControlStubSettings) getStubSettings()).renameFolderOperationSettings(); + } + + /** Returns the object with the settings used for calls to deleteFolderRecursive. */ + public UnaryCallSettings + deleteFolderRecursiveSettings() { + return ((StorageControlStubSettings) getStubSettings()).deleteFolderRecursiveSettings(); + } + + /** Returns the object with the settings used for calls to deleteFolderRecursive. 
*/ + public OperationCallSettings + deleteFolderRecursiveOperationSettings() { + return ((StorageControlStubSettings) getStubSettings()) + .deleteFolderRecursiveOperationSettings(); + } + + /** Returns the object with the settings used for calls to getStorageLayout. */ + public UnaryCallSettings getStorageLayoutSettings() { + return ((StorageControlStubSettings) getStubSettings()).getStorageLayoutSettings(); + } + + /** Returns the object with the settings used for calls to createManagedFolder. */ + public UnaryCallSettings + createManagedFolderSettings() { + return ((StorageControlStubSettings) getStubSettings()).createManagedFolderSettings(); + } + + /** Returns the object with the settings used for calls to deleteManagedFolder. */ + public UnaryCallSettings deleteManagedFolderSettings() { + return ((StorageControlStubSettings) getStubSettings()).deleteManagedFolderSettings(); + } + + /** Returns the object with the settings used for calls to getManagedFolder. */ + public UnaryCallSettings getManagedFolderSettings() { + return ((StorageControlStubSettings) getStubSettings()).getManagedFolderSettings(); + } + + /** Returns the object with the settings used for calls to listManagedFolders. */ + public PagedCallSettings< + ListManagedFoldersRequest, ListManagedFoldersResponse, ListManagedFoldersPagedResponse> + listManagedFoldersSettings() { + return ((StorageControlStubSettings) getStubSettings()).listManagedFoldersSettings(); + } + + /** Returns the object with the settings used for calls to createAnywhereCache. */ + public UnaryCallSettings createAnywhereCacheSettings() { + return ((StorageControlStubSettings) getStubSettings()).createAnywhereCacheSettings(); + } + + /** Returns the object with the settings used for calls to createAnywhereCache. 
*/ + public OperationCallSettings< + CreateAnywhereCacheRequest, AnywhereCache, CreateAnywhereCacheMetadata> + createAnywhereCacheOperationSettings() { + return ((StorageControlStubSettings) getStubSettings()).createAnywhereCacheOperationSettings(); + } + + /** Returns the object with the settings used for calls to updateAnywhereCache. */ + public UnaryCallSettings updateAnywhereCacheSettings() { + return ((StorageControlStubSettings) getStubSettings()).updateAnywhereCacheSettings(); + } + + /** Returns the object with the settings used for calls to updateAnywhereCache. */ + public OperationCallSettings< + UpdateAnywhereCacheRequest, AnywhereCache, UpdateAnywhereCacheMetadata> + updateAnywhereCacheOperationSettings() { + return ((StorageControlStubSettings) getStubSettings()).updateAnywhereCacheOperationSettings(); + } + + /** Returns the object with the settings used for calls to disableAnywhereCache. */ + public UnaryCallSettings + disableAnywhereCacheSettings() { + return ((StorageControlStubSettings) getStubSettings()).disableAnywhereCacheSettings(); + } + + /** Returns the object with the settings used for calls to pauseAnywhereCache. */ + public UnaryCallSettings pauseAnywhereCacheSettings() { + return ((StorageControlStubSettings) getStubSettings()).pauseAnywhereCacheSettings(); + } + + /** Returns the object with the settings used for calls to resumeAnywhereCache. */ + public UnaryCallSettings + resumeAnywhereCacheSettings() { + return ((StorageControlStubSettings) getStubSettings()).resumeAnywhereCacheSettings(); + } + + /** Returns the object with the settings used for calls to getAnywhereCache. */ + public UnaryCallSettings getAnywhereCacheSettings() { + return ((StorageControlStubSettings) getStubSettings()).getAnywhereCacheSettings(); + } + + /** Returns the object with the settings used for calls to listAnywhereCaches. 
*/ + public PagedCallSettings< + ListAnywhereCachesRequest, ListAnywhereCachesResponse, ListAnywhereCachesPagedResponse> + listAnywhereCachesSettings() { + return ((StorageControlStubSettings) getStubSettings()).listAnywhereCachesSettings(); + } + + /** Returns the object with the settings used for calls to getProjectIntelligenceConfig. */ + public UnaryCallSettings + getProjectIntelligenceConfigSettings() { + return ((StorageControlStubSettings) getStubSettings()).getProjectIntelligenceConfigSettings(); + } + + /** Returns the object with the settings used for calls to updateProjectIntelligenceConfig. */ + public UnaryCallSettings + updateProjectIntelligenceConfigSettings() { + return ((StorageControlStubSettings) getStubSettings()) + .updateProjectIntelligenceConfigSettings(); + } + + /** Returns the object with the settings used for calls to getFolderIntelligenceConfig. */ + public UnaryCallSettings + getFolderIntelligenceConfigSettings() { + return ((StorageControlStubSettings) getStubSettings()).getFolderIntelligenceConfigSettings(); + } + + /** Returns the object with the settings used for calls to updateFolderIntelligenceConfig. */ + public UnaryCallSettings + updateFolderIntelligenceConfigSettings() { + return ((StorageControlStubSettings) getStubSettings()) + .updateFolderIntelligenceConfigSettings(); + } + + /** Returns the object with the settings used for calls to getOrganizationIntelligenceConfig. */ + public UnaryCallSettings + getOrganizationIntelligenceConfigSettings() { + return ((StorageControlStubSettings) getStubSettings()) + .getOrganizationIntelligenceConfigSettings(); + } + + /** + * Returns the object with the settings used for calls to updateOrganizationIntelligenceConfig. + */ + public UnaryCallSettings + updateOrganizationIntelligenceConfigSettings() { + return ((StorageControlStubSettings) getStubSettings()) + .updateOrganizationIntelligenceConfigSettings(); + } + + /** Returns the object with the settings used for calls to getIamPolicy. 
*/ + public UnaryCallSettings getIamPolicySettings() { + return ((StorageControlStubSettings) getStubSettings()).getIamPolicySettings(); + } + + /** Returns the object with the settings used for calls to setIamPolicy. */ + public UnaryCallSettings setIamPolicySettings() { + return ((StorageControlStubSettings) getStubSettings()).setIamPolicySettings(); + } + + /** Returns the object with the settings used for calls to testIamPermissions. */ + public UnaryCallSettings + testIamPermissionsSettings() { + return ((StorageControlStubSettings) getStubSettings()).testIamPermissionsSettings(); + } + + public static final StorageControlSettings create(StorageControlStubSettings stub) + throws IOException { + return new StorageControlSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return StorageControlStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return StorageControlStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return StorageControlStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return StorageControlStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default gRPC ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return StorageControlStubSettings.defaultGrpcTransportProviderBuilder(); + } + + /** Returns a builder for the default REST ChannelProvider for this service. 
*/ + @BetaApi + public static InstantiatingHttpJsonChannelProvider.Builder + defaultHttpJsonTransportProviderBuilder() { + return StorageControlStubSettings.defaultHttpJsonTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return StorageControlStubSettings.defaultTransportChannelProvider(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return StorageControlStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new gRPC builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new REST builder for this class. */ + public static Builder newHttpJsonBuilder() { + return Builder.createHttpJsonDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected StorageControlSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for StorageControlSettings. 
*/ + public static class Builder extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(StorageControlStubSettings.newBuilder(clientContext)); + } + + protected Builder(StorageControlSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(StorageControlStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(StorageControlStubSettings.newBuilder()); + } + + private static Builder createHttpJsonDefault() { + return new Builder(StorageControlStubSettings.newHttpJsonBuilder()); + } + + public StorageControlStubSettings.Builder getStubSettingsBuilder() { + return ((StorageControlStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to createFolder. */ + public UnaryCallSettings.Builder createFolderSettings() { + return getStubSettingsBuilder().createFolderSettings(); + } + + /** Returns the builder for the settings used for calls to deleteFolder. */ + public UnaryCallSettings.Builder deleteFolderSettings() { + return getStubSettingsBuilder().deleteFolderSettings(); + } + + /** Returns the builder for the settings used for calls to getFolder. */ + public UnaryCallSettings.Builder getFolderSettings() { + return getStubSettingsBuilder().getFolderSettings(); + } + + /** Returns the builder for the settings used for calls to listFolders. */ + public PagedCallSettings.Builder< + ListFoldersRequest, ListFoldersResponse, ListFoldersPagedResponse> + listFoldersSettings() { + return getStubSettingsBuilder().listFoldersSettings(); + } + + /** Returns the builder for the settings used for calls to renameFolder. */ + public UnaryCallSettings.Builder renameFolderSettings() { + return getStubSettingsBuilder().renameFolderSettings(); + } + + /** Returns the builder for the settings used for calls to renameFolder. */ + public OperationCallSettings.Builder + renameFolderOperationSettings() { + return getStubSettingsBuilder().renameFolderOperationSettings(); + } + + /** Returns the builder for the settings used for calls to deleteFolderRecursive. */ + public UnaryCallSettings.Builder + deleteFolderRecursiveSettings() { + return getStubSettingsBuilder().deleteFolderRecursiveSettings(); + } + + /** Returns the builder for the settings used for calls to deleteFolderRecursive. 
*/ + public OperationCallSettings.Builder< + DeleteFolderRecursiveRequest, Empty, DeleteFolderRecursiveMetadata> + deleteFolderRecursiveOperationSettings() { + return getStubSettingsBuilder().deleteFolderRecursiveOperationSettings(); + } + + /** Returns the builder for the settings used for calls to getStorageLayout. */ + public UnaryCallSettings.Builder + getStorageLayoutSettings() { + return getStubSettingsBuilder().getStorageLayoutSettings(); + } + + /** Returns the builder for the settings used for calls to createManagedFolder. */ + public UnaryCallSettings.Builder + createManagedFolderSettings() { + return getStubSettingsBuilder().createManagedFolderSettings(); + } + + /** Returns the builder for the settings used for calls to deleteManagedFolder. */ + public UnaryCallSettings.Builder + deleteManagedFolderSettings() { + return getStubSettingsBuilder().deleteManagedFolderSettings(); + } + + /** Returns the builder for the settings used for calls to getManagedFolder. */ + public UnaryCallSettings.Builder + getManagedFolderSettings() { + return getStubSettingsBuilder().getManagedFolderSettings(); + } + + /** Returns the builder for the settings used for calls to listManagedFolders. */ + public PagedCallSettings.Builder< + ListManagedFoldersRequest, ListManagedFoldersResponse, ListManagedFoldersPagedResponse> + listManagedFoldersSettings() { + return getStubSettingsBuilder().listManagedFoldersSettings(); + } + + /** Returns the builder for the settings used for calls to createAnywhereCache. */ + public UnaryCallSettings.Builder + createAnywhereCacheSettings() { + return getStubSettingsBuilder().createAnywhereCacheSettings(); + } + + /** Returns the builder for the settings used for calls to createAnywhereCache. 
*/ + public OperationCallSettings.Builder< + CreateAnywhereCacheRequest, AnywhereCache, CreateAnywhereCacheMetadata> + createAnywhereCacheOperationSettings() { + return getStubSettingsBuilder().createAnywhereCacheOperationSettings(); + } + + /** Returns the builder for the settings used for calls to updateAnywhereCache. */ + public UnaryCallSettings.Builder + updateAnywhereCacheSettings() { + return getStubSettingsBuilder().updateAnywhereCacheSettings(); + } + + /** Returns the builder for the settings used for calls to updateAnywhereCache. */ + public OperationCallSettings.Builder< + UpdateAnywhereCacheRequest, AnywhereCache, UpdateAnywhereCacheMetadata> + updateAnywhereCacheOperationSettings() { + return getStubSettingsBuilder().updateAnywhereCacheOperationSettings(); + } + + /** Returns the builder for the settings used for calls to disableAnywhereCache. */ + public UnaryCallSettings.Builder + disableAnywhereCacheSettings() { + return getStubSettingsBuilder().disableAnywhereCacheSettings(); + } + + /** Returns the builder for the settings used for calls to pauseAnywhereCache. */ + public UnaryCallSettings.Builder + pauseAnywhereCacheSettings() { + return getStubSettingsBuilder().pauseAnywhereCacheSettings(); + } + + /** Returns the builder for the settings used for calls to resumeAnywhereCache. */ + public UnaryCallSettings.Builder + resumeAnywhereCacheSettings() { + return getStubSettingsBuilder().resumeAnywhereCacheSettings(); + } + + /** Returns the builder for the settings used for calls to getAnywhereCache. */ + public UnaryCallSettings.Builder + getAnywhereCacheSettings() { + return getStubSettingsBuilder().getAnywhereCacheSettings(); + } + + /** Returns the builder for the settings used for calls to listAnywhereCaches. 
*/ + public PagedCallSettings.Builder< + ListAnywhereCachesRequest, ListAnywhereCachesResponse, ListAnywhereCachesPagedResponse> + listAnywhereCachesSettings() { + return getStubSettingsBuilder().listAnywhereCachesSettings(); + } + + /** Returns the builder for the settings used for calls to getProjectIntelligenceConfig. */ + public UnaryCallSettings.Builder + getProjectIntelligenceConfigSettings() { + return getStubSettingsBuilder().getProjectIntelligenceConfigSettings(); + } + + /** Returns the builder for the settings used for calls to updateProjectIntelligenceConfig. */ + public UnaryCallSettings.Builder + updateProjectIntelligenceConfigSettings() { + return getStubSettingsBuilder().updateProjectIntelligenceConfigSettings(); + } + + /** Returns the builder for the settings used for calls to getFolderIntelligenceConfig. */ + public UnaryCallSettings.Builder + getFolderIntelligenceConfigSettings() { + return getStubSettingsBuilder().getFolderIntelligenceConfigSettings(); + } + + /** Returns the builder for the settings used for calls to updateFolderIntelligenceConfig. */ + public UnaryCallSettings.Builder + updateFolderIntelligenceConfigSettings() { + return getStubSettingsBuilder().updateFolderIntelligenceConfigSettings(); + } + + /** Returns the builder for the settings used for calls to getOrganizationIntelligenceConfig. */ + public UnaryCallSettings.Builder + getOrganizationIntelligenceConfigSettings() { + return getStubSettingsBuilder().getOrganizationIntelligenceConfigSettings(); + } + + /** + * Returns the builder for the settings used for calls to updateOrganizationIntelligenceConfig. + */ + public UnaryCallSettings.Builder< + UpdateOrganizationIntelligenceConfigRequest, IntelligenceConfig> + updateOrganizationIntelligenceConfigSettings() { + return getStubSettingsBuilder().updateOrganizationIntelligenceConfigSettings(); + } + + /** Returns the builder for the settings used for calls to getIamPolicy. 
*/ + public UnaryCallSettings.Builder getIamPolicySettings() { + return getStubSettingsBuilder().getIamPolicySettings(); + } + + /** Returns the builder for the settings used for calls to setIamPolicy. */ + public UnaryCallSettings.Builder setIamPolicySettings() { + return getStubSettingsBuilder().setIamPolicySettings(); + } + + /** Returns the builder for the settings used for calls to testIamPermissions. */ + public UnaryCallSettings.Builder + testIamPermissionsSettings() { + return getStubSettingsBuilder().testIamPermissionsSettings(); + } + + @Override + public StorageControlSettings build() throws IOException { + return new StorageControlSettings(this); + } + } +} diff --git a/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/gapic_metadata.json b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/gapic_metadata.json new file mode 100644 index 000000000000..0ca951cf144f --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/gapic_metadata.json @@ -0,0 +1,99 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "java", + "protoPackage": "google.storage.control.v2", + "libraryPackage": "com.google.storage.control.v2", + "services": { + "StorageControl": { + "clients": { + "grpc": { + "libraryClient": "StorageControlClient", + "rpcs": { + "CreateAnywhereCache": { + "methods": ["createAnywhereCacheAsync", "createAnywhereCacheAsync", "createAnywhereCacheAsync", "createAnywhereCacheOperationCallable", "createAnywhereCacheCallable"] + }, + "CreateFolder": { + "methods": ["createFolder", "createFolder", "createFolder", "createFolderCallable"] + }, + "CreateManagedFolder": { + "methods": ["createManagedFolder", "createManagedFolder", "createManagedFolder", "createManagedFolderCallable"] + }, + "DeleteFolder": { + "methods": ["deleteFolder", "deleteFolder", 
"deleteFolder", "deleteFolderCallable"] + }, + "DeleteFolderRecursive": { + "methods": ["deleteFolderRecursiveAsync", "deleteFolderRecursiveAsync", "deleteFolderRecursiveAsync", "deleteFolderRecursiveOperationCallable", "deleteFolderRecursiveCallable"] + }, + "DeleteManagedFolder": { + "methods": ["deleteManagedFolder", "deleteManagedFolder", "deleteManagedFolder", "deleteManagedFolderCallable"] + }, + "DisableAnywhereCache": { + "methods": ["disableAnywhereCache", "disableAnywhereCache", "disableAnywhereCache", "disableAnywhereCacheCallable"] + }, + "GetAnywhereCache": { + "methods": ["getAnywhereCache", "getAnywhereCache", "getAnywhereCache", "getAnywhereCacheCallable"] + }, + "GetFolder": { + "methods": ["getFolder", "getFolder", "getFolder", "getFolderCallable"] + }, + "GetFolderIntelligenceConfig": { + "methods": ["getFolderIntelligenceConfig", "getFolderIntelligenceConfig", "getFolderIntelligenceConfig", "getFolderIntelligenceConfigCallable"] + }, + "GetIamPolicy": { + "methods": ["getIamPolicy", "getIamPolicy", "getIamPolicy", "getIamPolicyCallable"] + }, + "GetManagedFolder": { + "methods": ["getManagedFolder", "getManagedFolder", "getManagedFolder", "getManagedFolderCallable"] + }, + "GetOrganizationIntelligenceConfig": { + "methods": ["getOrganizationIntelligenceConfig", "getOrganizationIntelligenceConfig", "getOrganizationIntelligenceConfig", "getOrganizationIntelligenceConfigCallable"] + }, + "GetProjectIntelligenceConfig": { + "methods": ["getProjectIntelligenceConfig", "getProjectIntelligenceConfig", "getProjectIntelligenceConfig", "getProjectIntelligenceConfigCallable"] + }, + "GetStorageLayout": { + "methods": ["getStorageLayout", "getStorageLayout", "getStorageLayout", "getStorageLayoutCallable"] + }, + "ListAnywhereCaches": { + "methods": ["listAnywhereCaches", "listAnywhereCaches", "listAnywhereCaches", "listAnywhereCachesPagedCallable", "listAnywhereCachesCallable"] + }, + "ListFolders": { + "methods": ["listFolders", "listFolders", 
"listFolders", "listFoldersPagedCallable", "listFoldersCallable"] + }, + "ListManagedFolders": { + "methods": ["listManagedFolders", "listManagedFolders", "listManagedFolders", "listManagedFoldersPagedCallable", "listManagedFoldersCallable"] + }, + "PauseAnywhereCache": { + "methods": ["pauseAnywhereCache", "pauseAnywhereCache", "pauseAnywhereCache", "pauseAnywhereCacheCallable"] + }, + "RenameFolder": { + "methods": ["renameFolderAsync", "renameFolderAsync", "renameFolderAsync", "renameFolderOperationCallable", "renameFolderCallable"] + }, + "ResumeAnywhereCache": { + "methods": ["resumeAnywhereCache", "resumeAnywhereCache", "resumeAnywhereCache", "resumeAnywhereCacheCallable"] + }, + "SetIamPolicy": { + "methods": ["setIamPolicy", "setIamPolicy", "setIamPolicy", "setIamPolicyCallable"] + }, + "TestIamPermissions": { + "methods": ["testIamPermissions", "testIamPermissions", "testIamPermissions", "testIamPermissionsCallable"] + }, + "UpdateAnywhereCache": { + "methods": ["updateAnywhereCacheAsync", "updateAnywhereCacheAsync", "updateAnywhereCacheOperationCallable", "updateAnywhereCacheCallable"] + }, + "UpdateFolderIntelligenceConfig": { + "methods": ["updateFolderIntelligenceConfig", "updateFolderIntelligenceConfig", "updateFolderIntelligenceConfigCallable"] + }, + "UpdateOrganizationIntelligenceConfig": { + "methods": ["updateOrganizationIntelligenceConfig", "updateOrganizationIntelligenceConfig", "updateOrganizationIntelligenceConfigCallable"] + }, + "UpdateProjectIntelligenceConfig": { + "methods": ["updateProjectIntelligenceConfig", "updateProjectIntelligenceConfig", "updateProjectIntelligenceConfigCallable"] + } + } + } + } + } + } +} \ No newline at end of file diff --git a/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/package-info.java b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/package-info.java new file mode 100644 index 000000000000..f7108400d198 --- /dev/null +++ 
b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/package-info.java @@ -0,0 +1,45 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to Storage Control API + * + *

The interfaces provided are listed below, along with usage samples. + * + *

======================= StorageControlClient ======================= + * + *

Service Description: StorageControl service includes selected control plane operations. + * + *

Sample for StorageControlClient: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (StorageControlClient storageControlClient = StorageControlClient.create()) {
+ *   BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]");
+ *   Folder folder = Folder.newBuilder().build();
+ *   String folderId = "folderId294109737";
+ *   Folder response = storageControlClient.createFolder(parent, folder, folderId);
+ * }
+ * }
+ */ +@Generated("by gapic-generator-java") +package com.google.storage.control.v2; + +import javax.annotation.Generated; diff --git a/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/GrpcStorageControlCallableFactory.java b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/GrpcStorageControlCallableFactory.java new file mode 100644 index 000000000000..61f4fd17b8c7 --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/GrpcStorageControlCallableFactory.java @@ -0,0 +1,113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.control.v2.stub; + +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the StorageControl service API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class GrpcStorageControlCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + 
GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/GrpcStorageControlStub.java b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/GrpcStorageControlStub.java new file mode 100644 index 000000000000..69488edf15c7 --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/GrpcStorageControlStub.java @@ -0,0 +1,1375 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.control.v2.stub; + +import static com.google.storage.control.v2.StorageControlClient.ListAnywhereCachesPagedResponse; +import static com.google.storage.control.v2.StorageControlClient.ListFoldersPagedResponse; +import static com.google.storage.control.v2.StorageControlClient.ListManagedFoldersPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.api.pathtemplate.PathTemplate; +import com.google.common.base.Strings; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.protobuf.Empty; +import com.google.storage.control.v2.AnywhereCache; +import com.google.storage.control.v2.CreateAnywhereCacheMetadata; +import com.google.storage.control.v2.CreateAnywhereCacheRequest; +import com.google.storage.control.v2.CreateFolderRequest; +import com.google.storage.control.v2.CreateManagedFolderRequest; +import com.google.storage.control.v2.DeleteFolderRecursiveMetadata; +import com.google.storage.control.v2.DeleteFolderRecursiveRequest; +import com.google.storage.control.v2.DeleteFolderRequest; +import com.google.storage.control.v2.DeleteManagedFolderRequest; +import com.google.storage.control.v2.DisableAnywhereCacheRequest; +import com.google.storage.control.v2.Folder; +import com.google.storage.control.v2.GetAnywhereCacheRequest; +import 
com.google.storage.control.v2.GetFolderIntelligenceConfigRequest; +import com.google.storage.control.v2.GetFolderRequest; +import com.google.storage.control.v2.GetManagedFolderRequest; +import com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest; +import com.google.storage.control.v2.GetProjectIntelligenceConfigRequest; +import com.google.storage.control.v2.GetStorageLayoutRequest; +import com.google.storage.control.v2.IntelligenceConfig; +import com.google.storage.control.v2.ListAnywhereCachesRequest; +import com.google.storage.control.v2.ListAnywhereCachesResponse; +import com.google.storage.control.v2.ListFoldersRequest; +import com.google.storage.control.v2.ListFoldersResponse; +import com.google.storage.control.v2.ListManagedFoldersRequest; +import com.google.storage.control.v2.ListManagedFoldersResponse; +import com.google.storage.control.v2.ManagedFolder; +import com.google.storage.control.v2.PauseAnywhereCacheRequest; +import com.google.storage.control.v2.RenameFolderMetadata; +import com.google.storage.control.v2.RenameFolderRequest; +import com.google.storage.control.v2.ResumeAnywhereCacheRequest; +import com.google.storage.control.v2.StorageLayout; +import com.google.storage.control.v2.UpdateAnywhereCacheMetadata; +import com.google.storage.control.v2.UpdateAnywhereCacheRequest; +import com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest; +import com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest; +import com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the StorageControl service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class GrpcStorageControlStub extends StorageControlStub { + private static final MethodDescriptor createFolderMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/CreateFolder") + .setRequestMarshaller(ProtoUtils.marshaller(CreateFolderRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Folder.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor deleteFolderMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/DeleteFolder") + .setRequestMarshaller(ProtoUtils.marshaller(DeleteFolderRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor getFolderMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/GetFolder") + .setRequestMarshaller(ProtoUtils.marshaller(GetFolderRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Folder.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listFoldersMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/ListFolders") + .setRequestMarshaller(ProtoUtils.marshaller(ListFoldersRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListFoldersResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + 
renameFolderMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/RenameFolder") + .setRequestMarshaller(ProtoUtils.marshaller(RenameFolderRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + deleteFolderRecursiveMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/DeleteFolderRecursive") + .setRequestMarshaller( + ProtoUtils.marshaller(DeleteFolderRecursiveRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getStorageLayoutMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/GetStorageLayout") + .setRequestMarshaller( + ProtoUtils.marshaller(GetStorageLayoutRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(StorageLayout.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + createManagedFolderMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/CreateManagedFolder") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateManagedFolderRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ManagedFolder.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + deleteManagedFolderMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + 
.setFullMethodName("google.storage.control.v2.StorageControl/DeleteManagedFolder") + .setRequestMarshaller( + ProtoUtils.marshaller(DeleteManagedFolderRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getManagedFolderMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/GetManagedFolder") + .setRequestMarshaller( + ProtoUtils.marshaller(GetManagedFolderRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ManagedFolder.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listManagedFoldersMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/ListManagedFolders") + .setRequestMarshaller( + ProtoUtils.marshaller(ListManagedFoldersRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListManagedFoldersResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + createAnywhereCacheMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/CreateAnywhereCache") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateAnywhereCacheRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + updateAnywhereCacheMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/UpdateAnywhereCache") + .setRequestMarshaller( + 
ProtoUtils.marshaller(UpdateAnywhereCacheRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + disableAnywhereCacheMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/DisableAnywhereCache") + .setRequestMarshaller( + ProtoUtils.marshaller(DisableAnywhereCacheRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(AnywhereCache.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + pauseAnywhereCacheMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/PauseAnywhereCache") + .setRequestMarshaller( + ProtoUtils.marshaller(PauseAnywhereCacheRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(AnywhereCache.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + resumeAnywhereCacheMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/ResumeAnywhereCache") + .setRequestMarshaller( + ProtoUtils.marshaller(ResumeAnywhereCacheRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(AnywhereCache.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getAnywhereCacheMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/GetAnywhereCache") + .setRequestMarshaller( + ProtoUtils.marshaller(GetAnywhereCacheRequest.getDefaultInstance())) + 
.setResponseMarshaller(ProtoUtils.marshaller(AnywhereCache.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listAnywhereCachesMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/ListAnywhereCaches") + .setRequestMarshaller( + ProtoUtils.marshaller(ListAnywhereCachesRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListAnywhereCachesResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getProjectIntelligenceConfigMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.storage.control.v2.StorageControl/GetProjectIntelligenceConfig") + .setRequestMarshaller( + ProtoUtils.marshaller(GetProjectIntelligenceConfigRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(IntelligenceConfig.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + updateProjectIntelligenceConfigMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.storage.control.v2.StorageControl/UpdateProjectIntelligenceConfig") + .setRequestMarshaller( + ProtoUtils.marshaller( + UpdateProjectIntelligenceConfigRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(IntelligenceConfig.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getFolderIntelligenceConfigMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.storage.control.v2.StorageControl/GetFolderIntelligenceConfig") + .setRequestMarshaller( + 
ProtoUtils.marshaller(GetFolderIntelligenceConfigRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(IntelligenceConfig.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + updateFolderIntelligenceConfigMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.storage.control.v2.StorageControl/UpdateFolderIntelligenceConfig") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateFolderIntelligenceConfigRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(IntelligenceConfig.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + GetOrganizationIntelligenceConfigRequest, IntelligenceConfig> + getOrganizationIntelligenceConfigMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.storage.control.v2.StorageControl/GetOrganizationIntelligenceConfig") + .setRequestMarshaller( + ProtoUtils.marshaller( + GetOrganizationIntelligenceConfigRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(IntelligenceConfig.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + UpdateOrganizationIntelligenceConfigRequest, IntelligenceConfig> + updateOrganizationIntelligenceConfigMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.storage.control.v2.StorageControl/UpdateOrganizationIntelligenceConfig") + .setRequestMarshaller( + ProtoUtils.marshaller( + UpdateOrganizationIntelligenceConfigRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(IntelligenceConfig.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor 
getIamPolicyMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/GetIamPolicy") + .setRequestMarshaller(ProtoUtils.marshaller(GetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Policy.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor setIamPolicyMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/SetIamPolicy") + .setRequestMarshaller(ProtoUtils.marshaller(SetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Policy.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + testIamPermissionsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.storage.control.v2.StorageControl/TestIamPermissions") + .setRequestMarshaller( + ProtoUtils.marshaller(TestIamPermissionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(TestIamPermissionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final UnaryCallable createFolderCallable; + private final UnaryCallable deleteFolderCallable; + private final UnaryCallable getFolderCallable; + private final UnaryCallable listFoldersCallable; + private final UnaryCallable + listFoldersPagedCallable; + private final UnaryCallable renameFolderCallable; + private final OperationCallable + renameFolderOperationCallable; + private final UnaryCallable + deleteFolderRecursiveCallable; + private final OperationCallable< + DeleteFolderRecursiveRequest, Empty, DeleteFolderRecursiveMetadata> + deleteFolderRecursiveOperationCallable; + private final UnaryCallable getStorageLayoutCallable; + private final 
UnaryCallable + createManagedFolderCallable; + private final UnaryCallable deleteManagedFolderCallable; + private final UnaryCallable getManagedFolderCallable; + private final UnaryCallable + listManagedFoldersCallable; + private final UnaryCallable + listManagedFoldersPagedCallable; + private final UnaryCallable createAnywhereCacheCallable; + private final OperationCallable< + CreateAnywhereCacheRequest, AnywhereCache, CreateAnywhereCacheMetadata> + createAnywhereCacheOperationCallable; + private final UnaryCallable updateAnywhereCacheCallable; + private final OperationCallable< + UpdateAnywhereCacheRequest, AnywhereCache, UpdateAnywhereCacheMetadata> + updateAnywhereCacheOperationCallable; + private final UnaryCallable + disableAnywhereCacheCallable; + private final UnaryCallable pauseAnywhereCacheCallable; + private final UnaryCallable + resumeAnywhereCacheCallable; + private final UnaryCallable getAnywhereCacheCallable; + private final UnaryCallable + listAnywhereCachesCallable; + private final UnaryCallable + listAnywhereCachesPagedCallable; + private final UnaryCallable + getProjectIntelligenceConfigCallable; + private final UnaryCallable + updateProjectIntelligenceConfigCallable; + private final UnaryCallable + getFolderIntelligenceConfigCallable; + private final UnaryCallable + updateFolderIntelligenceConfigCallable; + private final UnaryCallable + getOrganizationIntelligenceConfigCallable; + private final UnaryCallable + updateOrganizationIntelligenceConfigCallable; + private final UnaryCallable getIamPolicyCallable; + private final UnaryCallable setIamPolicyCallable; + private final UnaryCallable + testIamPermissionsCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + private static final PathTemplate CREATE_FOLDER_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate 
DELETE_FOLDER_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate GET_FOLDER_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate LIST_FOLDERS_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate RENAME_FOLDER_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate DELETE_FOLDER_RECURSIVE_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate GET_STORAGE_LAYOUT_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate CREATE_MANAGED_FOLDER_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate DELETE_MANAGED_FOLDER_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate GET_MANAGED_FOLDER_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate LIST_MANAGED_FOLDERS_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate CREATE_ANYWHERE_CACHE_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate UPDATE_ANYWHERE_CACHE_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate DISABLE_ANYWHERE_CACHE_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate PAUSE_ANYWHERE_CACHE_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate RESUME_ANYWHERE_CACHE_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate GET_ANYWHERE_CACHE_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + 
private static final PathTemplate LIST_ANYWHERE_CACHES_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate GET_IAM_POLICY_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate GET_IAM_POLICY_1_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate SET_IAM_POLICY_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate SET_IAM_POLICY_1_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/**"); + private static final PathTemplate TEST_IAM_PERMISSIONS_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + private static final PathTemplate TEST_IAM_PERMISSIONS_1_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/objects/**"); + private static final PathTemplate TEST_IAM_PERMISSIONS_2_PATH_TEMPLATE = + PathTemplate.create("{bucket=projects/*/buckets/*}/managedFolders/**"); + + public static final GrpcStorageControlStub create(StorageControlStubSettings settings) + throws IOException { + return new GrpcStorageControlStub(settings, ClientContext.create(settings)); + } + + public static final GrpcStorageControlStub create(ClientContext clientContext) + throws IOException { + return new GrpcStorageControlStub( + StorageControlStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcStorageControlStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcStorageControlStub( + StorageControlStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcStorageControlStub, using the given settings. This is protected + * so that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. 
+ */ + protected GrpcStorageControlStub(StorageControlStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcStorageControlCallableFactory()); + } + + /** + * Constructs an instance of GrpcStorageControlStub, using the given settings. This is protected + * so that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcStorageControlStub( + StorageControlStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings createFolderTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createFolderMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getParent(), "bucket", CREATE_FOLDER_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + CreateFolderRequest.Builder requestBuilder = request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings deleteFolderTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteFolderMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getName(), "bucket", DELETE_FOLDER_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + DeleteFolderRequest.Builder requestBuilder = request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings 
getFolderTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getFolderMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getName(), "bucket", GET_FOLDER_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + GetFolderRequest.Builder requestBuilder = request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings listFoldersTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listFoldersMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getParent(), "bucket", LIST_FOLDERS_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings renameFolderTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(renameFolderMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getName(), "bucket", RENAME_FOLDER_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + RenameFolderRequest.Builder requestBuilder = request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings + deleteFolderRecursiveTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteFolderRecursiveMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + request.getName(), "bucket", DELETE_FOLDER_RECURSIVE_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + 
DeleteFolderRecursiveRequest.Builder requestBuilder = request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings getStorageLayoutTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getStorageLayoutMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getName(), "bucket", GET_STORAGE_LAYOUT_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + GetStorageLayoutRequest.Builder requestBuilder = request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings + createManagedFolderTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createManagedFolderMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + request.getParent(), "bucket", CREATE_MANAGED_FOLDER_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + CreateManagedFolderRequest.Builder requestBuilder = request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings deleteManagedFolderTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteManagedFolderMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getName(), "bucket", DELETE_MANAGED_FOLDER_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + DeleteManagedFolderRequest.Builder requestBuilder = 
request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings getManagedFolderTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getManagedFolderMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getName(), "bucket", GET_MANAGED_FOLDER_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + GetManagedFolderRequest.Builder requestBuilder = request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings + listManagedFoldersTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listManagedFoldersMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + request.getParent(), "bucket", LIST_MANAGED_FOLDERS_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + ListManagedFoldersRequest.Builder requestBuilder = request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings createAnywhereCacheTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createAnywhereCacheMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getParent(), "bucket", CREATE_ANYWHERE_CACHE_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + CreateAnywhereCacheRequest.Builder requestBuilder = request.toBuilder(); + if 
(Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings updateAnywhereCacheTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateAnywhereCacheMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + if (request.getAnywhereCache() != null) { + builder.add( + request.getAnywhereCache().getName(), + "bucket", + UPDATE_ANYWHERE_CACHE_0_PATH_TEMPLATE); + } + return builder.build(); + }) + .setRequestMutator( + request -> { + UpdateAnywhereCacheRequest.Builder requestBuilder = request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings + disableAnywhereCacheTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(disableAnywhereCacheMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + request.getName(), "bucket", DISABLE_ANYWHERE_CACHE_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + DisableAnywhereCacheRequest.Builder requestBuilder = request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings pauseAnywhereCacheTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(pauseAnywhereCacheMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getName(), "bucket", PAUSE_ANYWHERE_CACHE_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + PauseAnywhereCacheRequest.Builder requestBuilder = 
request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings + resumeAnywhereCacheTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(resumeAnywhereCacheMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + request.getName(), "bucket", RESUME_ANYWHERE_CACHE_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + ResumeAnywhereCacheRequest.Builder requestBuilder = request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings getAnywhereCacheTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getAnywhereCacheMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getName(), "bucket", GET_ANYWHERE_CACHE_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + GetAnywhereCacheRequest.Builder requestBuilder = request.toBuilder(); + if (Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings + listAnywhereCachesTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listAnywhereCachesMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + request.getParent(), "bucket", LIST_ANYWHERE_CACHES_0_PATH_TEMPLATE); + return builder.build(); + }) + .setRequestMutator( + request -> { + ListAnywhereCachesRequest.Builder requestBuilder = request.toBuilder(); + if 
(Strings.isNullOrEmpty(request.getRequestId())) { + requestBuilder.setRequestId(UUID.randomUUID().toString()); + } + return requestBuilder.build(); + }) + .build(); + GrpcCallSettings + getProjectIntelligenceConfigTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getProjectIntelligenceConfigMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + updateProjectIntelligenceConfigTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(updateProjectIntelligenceConfigMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "intelligence_config.name", + String.valueOf(request.getIntelligenceConfig().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + getFolderIntelligenceConfigTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getFolderIntelligenceConfigMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + updateFolderIntelligenceConfigTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateFolderIntelligenceConfigMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "intelligence_config.name", + String.valueOf(request.getIntelligenceConfig().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + getOrganizationIntelligenceConfigTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(getOrganizationIntelligenceConfigMethodDescriptor) + .setParamsExtractor( + request -> { + 
RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + updateOrganizationIntelligenceConfigTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(updateOrganizationIntelligenceConfigMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "intelligence_config.name", + String.valueOf(request.getIntelligenceConfig().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings getIamPolicyTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getIamPolicyMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getResource(), "bucket", GET_IAM_POLICY_0_PATH_TEMPLATE); + builder.add(request.getResource(), "bucket", GET_IAM_POLICY_1_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings setIamPolicyTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(setIamPolicyMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add(request.getResource(), "bucket", SET_IAM_POLICY_0_PATH_TEMPLATE); + builder.add(request.getResource(), "bucket", SET_IAM_POLICY_1_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + GrpcCallSettings + testIamPermissionsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(testIamPermissionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + request.getResource(), "bucket", TEST_IAM_PERMISSIONS_0_PATH_TEMPLATE); + builder.add( + request.getResource(), "bucket", TEST_IAM_PERMISSIONS_1_PATH_TEMPLATE); + builder.add( + request.getResource(), "bucket", 
TEST_IAM_PERMISSIONS_2_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + + this.createFolderCallable = + callableFactory.createUnaryCallable( + createFolderTransportSettings, settings.createFolderSettings(), clientContext); + this.deleteFolderCallable = + callableFactory.createUnaryCallable( + deleteFolderTransportSettings, settings.deleteFolderSettings(), clientContext); + this.getFolderCallable = + callableFactory.createUnaryCallable( + getFolderTransportSettings, settings.getFolderSettings(), clientContext); + this.listFoldersCallable = + callableFactory.createUnaryCallable( + listFoldersTransportSettings, settings.listFoldersSettings(), clientContext); + this.listFoldersPagedCallable = + callableFactory.createPagedCallable( + listFoldersTransportSettings, settings.listFoldersSettings(), clientContext); + this.renameFolderCallable = + callableFactory.createUnaryCallable( + renameFolderTransportSettings, settings.renameFolderSettings(), clientContext); + this.renameFolderOperationCallable = + callableFactory.createOperationCallable( + renameFolderTransportSettings, + settings.renameFolderOperationSettings(), + clientContext, + operationsStub); + this.deleteFolderRecursiveCallable = + callableFactory.createUnaryCallable( + deleteFolderRecursiveTransportSettings, + settings.deleteFolderRecursiveSettings(), + clientContext); + this.deleteFolderRecursiveOperationCallable = + callableFactory.createOperationCallable( + deleteFolderRecursiveTransportSettings, + settings.deleteFolderRecursiveOperationSettings(), + clientContext, + operationsStub); + this.getStorageLayoutCallable = + callableFactory.createUnaryCallable( + getStorageLayoutTransportSettings, settings.getStorageLayoutSettings(), clientContext); + this.createManagedFolderCallable = + callableFactory.createUnaryCallable( + createManagedFolderTransportSettings, + settings.createManagedFolderSettings(), + clientContext); + this.deleteManagedFolderCallable = + callableFactory.createUnaryCallable( + 
deleteManagedFolderTransportSettings, + settings.deleteManagedFolderSettings(), + clientContext); + this.getManagedFolderCallable = + callableFactory.createUnaryCallable( + getManagedFolderTransportSettings, settings.getManagedFolderSettings(), clientContext); + this.listManagedFoldersCallable = + callableFactory.createUnaryCallable( + listManagedFoldersTransportSettings, + settings.listManagedFoldersSettings(), + clientContext); + this.listManagedFoldersPagedCallable = + callableFactory.createPagedCallable( + listManagedFoldersTransportSettings, + settings.listManagedFoldersSettings(), + clientContext); + this.createAnywhereCacheCallable = + callableFactory.createUnaryCallable( + createAnywhereCacheTransportSettings, + settings.createAnywhereCacheSettings(), + clientContext); + this.createAnywhereCacheOperationCallable = + callableFactory.createOperationCallable( + createAnywhereCacheTransportSettings, + settings.createAnywhereCacheOperationSettings(), + clientContext, + operationsStub); + this.updateAnywhereCacheCallable = + callableFactory.createUnaryCallable( + updateAnywhereCacheTransportSettings, + settings.updateAnywhereCacheSettings(), + clientContext); + this.updateAnywhereCacheOperationCallable = + callableFactory.createOperationCallable( + updateAnywhereCacheTransportSettings, + settings.updateAnywhereCacheOperationSettings(), + clientContext, + operationsStub); + this.disableAnywhereCacheCallable = + callableFactory.createUnaryCallable( + disableAnywhereCacheTransportSettings, + settings.disableAnywhereCacheSettings(), + clientContext); + this.pauseAnywhereCacheCallable = + callableFactory.createUnaryCallable( + pauseAnywhereCacheTransportSettings, + settings.pauseAnywhereCacheSettings(), + clientContext); + this.resumeAnywhereCacheCallable = + callableFactory.createUnaryCallable( + resumeAnywhereCacheTransportSettings, + settings.resumeAnywhereCacheSettings(), + clientContext); + this.getAnywhereCacheCallable = + callableFactory.createUnaryCallable( + 
getAnywhereCacheTransportSettings, settings.getAnywhereCacheSettings(), clientContext); + this.listAnywhereCachesCallable = + callableFactory.createUnaryCallable( + listAnywhereCachesTransportSettings, + settings.listAnywhereCachesSettings(), + clientContext); + this.listAnywhereCachesPagedCallable = + callableFactory.createPagedCallable( + listAnywhereCachesTransportSettings, + settings.listAnywhereCachesSettings(), + clientContext); + this.getProjectIntelligenceConfigCallable = + callableFactory.createUnaryCallable( + getProjectIntelligenceConfigTransportSettings, + settings.getProjectIntelligenceConfigSettings(), + clientContext); + this.updateProjectIntelligenceConfigCallable = + callableFactory.createUnaryCallable( + updateProjectIntelligenceConfigTransportSettings, + settings.updateProjectIntelligenceConfigSettings(), + clientContext); + this.getFolderIntelligenceConfigCallable = + callableFactory.createUnaryCallable( + getFolderIntelligenceConfigTransportSettings, + settings.getFolderIntelligenceConfigSettings(), + clientContext); + this.updateFolderIntelligenceConfigCallable = + callableFactory.createUnaryCallable( + updateFolderIntelligenceConfigTransportSettings, + settings.updateFolderIntelligenceConfigSettings(), + clientContext); + this.getOrganizationIntelligenceConfigCallable = + callableFactory.createUnaryCallable( + getOrganizationIntelligenceConfigTransportSettings, + settings.getOrganizationIntelligenceConfigSettings(), + clientContext); + this.updateOrganizationIntelligenceConfigCallable = + callableFactory.createUnaryCallable( + updateOrganizationIntelligenceConfigTransportSettings, + settings.updateOrganizationIntelligenceConfigSettings(), + clientContext); + this.getIamPolicyCallable = + callableFactory.createUnaryCallable( + getIamPolicyTransportSettings, settings.getIamPolicySettings(), clientContext); + this.setIamPolicyCallable = + callableFactory.createUnaryCallable( + setIamPolicyTransportSettings, settings.setIamPolicySettings(), 
clientContext); + this.testIamPermissionsCallable = + callableFactory.createUnaryCallable( + testIamPermissionsTransportSettings, + settings.testIamPermissionsSettings(), + clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable createFolderCallable() { + return createFolderCallable; + } + + @Override + public UnaryCallable deleteFolderCallable() { + return deleteFolderCallable; + } + + @Override + public UnaryCallable getFolderCallable() { + return getFolderCallable; + } + + @Override + public UnaryCallable listFoldersCallable() { + return listFoldersCallable; + } + + @Override + public UnaryCallable listFoldersPagedCallable() { + return listFoldersPagedCallable; + } + + @Override + public UnaryCallable renameFolderCallable() { + return renameFolderCallable; + } + + @Override + public OperationCallable + renameFolderOperationCallable() { + return renameFolderOperationCallable; + } + + @Override + public UnaryCallable deleteFolderRecursiveCallable() { + return deleteFolderRecursiveCallable; + } + + @Override + public OperationCallable + deleteFolderRecursiveOperationCallable() { + return deleteFolderRecursiveOperationCallable; + } + + @Override + public UnaryCallable getStorageLayoutCallable() { + return getStorageLayoutCallable; + } + + @Override + public UnaryCallable createManagedFolderCallable() { + return createManagedFolderCallable; + } + + @Override + public UnaryCallable deleteManagedFolderCallable() { + return deleteManagedFolderCallable; + } + + @Override + public UnaryCallable getManagedFolderCallable() { + return getManagedFolderCallable; + } + + @Override + public UnaryCallable + listManagedFoldersCallable() { + return listManagedFoldersCallable; + } + + @Override + public UnaryCallable + listManagedFoldersPagedCallable() { + return listManagedFoldersPagedCallable; 
+ } + + @Override + public UnaryCallable createAnywhereCacheCallable() { + return createAnywhereCacheCallable; + } + + @Override + public OperationCallable + createAnywhereCacheOperationCallable() { + return createAnywhereCacheOperationCallable; + } + + @Override + public UnaryCallable updateAnywhereCacheCallable() { + return updateAnywhereCacheCallable; + } + + @Override + public OperationCallable + updateAnywhereCacheOperationCallable() { + return updateAnywhereCacheOperationCallable; + } + + @Override + public UnaryCallable disableAnywhereCacheCallable() { + return disableAnywhereCacheCallable; + } + + @Override + public UnaryCallable pauseAnywhereCacheCallable() { + return pauseAnywhereCacheCallable; + } + + @Override + public UnaryCallable resumeAnywhereCacheCallable() { + return resumeAnywhereCacheCallable; + } + + @Override + public UnaryCallable getAnywhereCacheCallable() { + return getAnywhereCacheCallable; + } + + @Override + public UnaryCallable + listAnywhereCachesCallable() { + return listAnywhereCachesCallable; + } + + @Override + public UnaryCallable + listAnywhereCachesPagedCallable() { + return listAnywhereCachesPagedCallable; + } + + @Override + public UnaryCallable + getProjectIntelligenceConfigCallable() { + return getProjectIntelligenceConfigCallable; + } + + @Override + public UnaryCallable + updateProjectIntelligenceConfigCallable() { + return updateProjectIntelligenceConfigCallable; + } + + @Override + public UnaryCallable + getFolderIntelligenceConfigCallable() { + return getFolderIntelligenceConfigCallable; + } + + @Override + public UnaryCallable + updateFolderIntelligenceConfigCallable() { + return updateFolderIntelligenceConfigCallable; + } + + @Override + public UnaryCallable + getOrganizationIntelligenceConfigCallable() { + return getOrganizationIntelligenceConfigCallable; + } + + @Override + public UnaryCallable + updateOrganizationIntelligenceConfigCallable() { + return updateOrganizationIntelligenceConfigCallable; + } + + @Override 
+ public UnaryCallable getIamPolicyCallable() { + return getIamPolicyCallable; + } + + @Override + public UnaryCallable setIamPolicyCallable() { + return setIamPolicyCallable; + } + + @Override + public UnaryCallable + testIamPermissionsCallable() { + return testIamPermissionsCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/HttpJsonStorageControlCallableFactory.java b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/HttpJsonStorageControlCallableFactory.java new file mode 100644 index 000000000000..2da1ee4034fd --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/HttpJsonStorageControlCallableFactory.java @@ -0,0 +1,101 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.control.v2.stub; + +import com.google.api.gax.httpjson.HttpJsonCallSettings; +import com.google.api.gax.httpjson.HttpJsonCallableFactory; +import com.google.api.gax.httpjson.HttpJsonOperationSnapshotCallable; +import com.google.api.gax.httpjson.HttpJsonStubCallableFactory; +import com.google.api.gax.httpjson.longrunning.stub.OperationsStub; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * REST callable factory implementation for the StorageControl service API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class HttpJsonStorageControlCallableFactory + implements HttpJsonStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + HttpJsonCallSettings httpJsonCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createUnaryCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + HttpJsonCallSettings httpJsonCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createPagedCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + HttpJsonCallSettings httpJsonCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createBatchingCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + HttpJsonCallSettings httpJsonCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + UnaryCallable innerCallable = + HttpJsonCallableFactory.createBaseUnaryCallable( + httpJsonCallSettings, callSettings.getInitialCallSettings(), clientContext); + HttpJsonOperationSnapshotCallable initialCallable = + new HttpJsonOperationSnapshotCallable( + innerCallable, + httpJsonCallSettings.getMethodDescriptor().getOperationSnapshotFactory()); + return HttpJsonCallableFactory.createOperationCallable( + callSettings, clientContext, operationsStub.longRunningClient(), initialCallable); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + HttpJsonCallSettings httpJsonCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return 
HttpJsonCallableFactory.createServerStreamingCallable( + httpJsonCallSettings, callSettings, clientContext); + } +} diff --git a/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/HttpJsonStorageControlStub.java b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/HttpJsonStorageControlStub.java new file mode 100644 index 000000000000..2ba8b1669df6 --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/HttpJsonStorageControlStub.java @@ -0,0 +1,765 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.control.v2.stub; + +import com.google.api.core.InternalApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.httpjson.ApiMethodDescriptor; +import com.google.api.gax.httpjson.HttpJsonCallSettings; +import com.google.api.gax.httpjson.HttpJsonStubCallableFactory; +import com.google.api.gax.httpjson.ProtoMessageRequestFormatter; +import com.google.api.gax.httpjson.ProtoMessageResponseParser; +import com.google.api.gax.httpjson.ProtoRestSerializer; +import com.google.api.gax.httpjson.longrunning.stub.HttpJsonOperationsStub; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.protobuf.TypeRegistry; +import com.google.storage.control.v2.AnywhereCache; +import com.google.storage.control.v2.CreateAnywhereCacheMetadata; +import com.google.storage.control.v2.CreateAnywhereCacheRequest; +import com.google.storage.control.v2.CreateFolderRequest; +import com.google.storage.control.v2.CreateManagedFolderRequest; +import com.google.storage.control.v2.DeleteFolderRecursiveMetadata; +import com.google.storage.control.v2.DeleteFolderRecursiveRequest; +import com.google.storage.control.v2.DeleteFolderRequest; +import com.google.storage.control.v2.DeleteManagedFolderRequest; +import com.google.storage.control.v2.DisableAnywhereCacheRequest; +import com.google.storage.control.v2.Folder; +import com.google.storage.control.v2.GetAnywhereCacheRequest; +import com.google.storage.control.v2.GetFolderIntelligenceConfigRequest; +import 
com.google.storage.control.v2.GetFolderRequest; +import com.google.storage.control.v2.GetManagedFolderRequest; +import com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest; +import com.google.storage.control.v2.GetProjectIntelligenceConfigRequest; +import com.google.storage.control.v2.GetStorageLayoutRequest; +import com.google.storage.control.v2.IntelligenceConfig; +import com.google.storage.control.v2.ListAnywhereCachesRequest; +import com.google.storage.control.v2.ListAnywhereCachesResponse; +import com.google.storage.control.v2.ListFoldersRequest; +import com.google.storage.control.v2.ListFoldersResponse; +import com.google.storage.control.v2.ListManagedFoldersRequest; +import com.google.storage.control.v2.ListManagedFoldersResponse; +import com.google.storage.control.v2.ManagedFolder; +import com.google.storage.control.v2.PauseAnywhereCacheRequest; +import com.google.storage.control.v2.RenameFolderMetadata; +import com.google.storage.control.v2.RenameFolderRequest; +import com.google.storage.control.v2.ResumeAnywhereCacheRequest; +import com.google.storage.control.v2.StorageLayout; +import com.google.storage.control.v2.UpdateAnywhereCacheMetadata; +import com.google.storage.control.v2.UpdateAnywhereCacheRequest; +import com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest; +import com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest; +import com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * REST stub implementation for the StorageControl service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class HttpJsonStorageControlStub extends StorageControlStub { + private static final TypeRegistry typeRegistry = + TypeRegistry.newBuilder() + .add(Empty.getDescriptor()) + .add(RenameFolderMetadata.getDescriptor()) + .add(Folder.getDescriptor()) + .add(AnywhereCache.getDescriptor()) + .add(UpdateAnywhereCacheMetadata.getDescriptor()) + .add(DeleteFolderRecursiveMetadata.getDescriptor()) + .add(CreateAnywhereCacheMetadata.getDescriptor()) + .build(); + + private static final ApiMethodDescriptor + getProjectIntelligenceConfigMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.storage.control.v2.StorageControl/GetProjectIntelligenceConfig") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v2/{name=projects/*/locations/*/intelligenceConfig}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(IntelligenceConfig.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor< + UpdateProjectIntelligenceConfigRequest, IntelligenceConfig> + updateProjectIntelligenceConfigMethodDescriptor = + ApiMethodDescriptor + .newBuilder() + .setFullMethodName( + "google.storage.control.v2.StorageControl/UpdateProjectIntelligenceConfig") + 
.setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v2/{intelligenceConfig.name=projects/*/locations/*/intelligenceConfig}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam( + fields, + "intelligenceConfig.name", + request.getIntelligenceConfig().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "requestId", request.getRequestId()); + serializer.putQueryParam(fields, "updateMask", request.getUpdateMask()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody( + "intelligenceConfig", request.getIntelligenceConfig(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(IntelligenceConfig.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getFolderIntelligenceConfigMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.storage.control.v2.StorageControl/GetFolderIntelligenceConfig") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v2/{name=folders/*/locations/*/intelligenceConfig}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + 
serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(IntelligenceConfig.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor< + UpdateFolderIntelligenceConfigRequest, IntelligenceConfig> + updateFolderIntelligenceConfigMethodDescriptor = + ApiMethodDescriptor + .newBuilder() + .setFullMethodName( + "google.storage.control.v2.StorageControl/UpdateFolderIntelligenceConfig") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v2/{intelligenceConfig.name=folders/*/locations/*/intelligenceConfig}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam( + fields, + "intelligenceConfig.name", + request.getIntelligenceConfig().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "requestId", request.getRequestId()); + serializer.putQueryParam(fields, "updateMask", request.getUpdateMask()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody( + "intelligenceConfig", request.getIntelligenceConfig(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(IntelligenceConfig.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor< + GetOrganizationIntelligenceConfigRequest, IntelligenceConfig> + 
getOrganizationIntelligenceConfigMethodDescriptor = + ApiMethodDescriptor + .newBuilder() + .setFullMethodName( + "google.storage.control.v2.StorageControl/GetOrganizationIntelligenceConfig") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter + .newBuilder() + .setPath( + "/v2/{name=organizations/*/locations/*/intelligenceConfig}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer + serializer = ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer + serializer = ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(IntelligenceConfig.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor< + UpdateOrganizationIntelligenceConfigRequest, IntelligenceConfig> + updateOrganizationIntelligenceConfigMethodDescriptor = + ApiMethodDescriptor + .newBuilder() + .setFullMethodName( + "google.storage.control.v2.StorageControl/UpdateOrganizationIntelligenceConfig") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter + .newBuilder() + .setPath( + "/v2/{intelligenceConfig.name=organizations/*/locations/*/intelligenceConfig}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer + serializer = ProtoRestSerializer.create(); + serializer.putPathParam( + fields, + "intelligenceConfig.name", + request.getIntelligenceConfig().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer + 
serializer = ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "requestId", request.getRequestId()); + serializer.putQueryParam(fields, "updateMask", request.getUpdateMask()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody( + "intelligenceConfig", request.getIntelligenceConfig(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(IntelligenceConfig.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private final UnaryCallable + getProjectIntelligenceConfigCallable; + private final UnaryCallable + updateProjectIntelligenceConfigCallable; + private final UnaryCallable + getFolderIntelligenceConfigCallable; + private final UnaryCallable + updateFolderIntelligenceConfigCallable; + private final UnaryCallable + getOrganizationIntelligenceConfigCallable; + private final UnaryCallable + updateOrganizationIntelligenceConfigCallable; + + private final BackgroundResource backgroundResources; + private final HttpJsonOperationsStub httpJsonOperationsStub; + private final HttpJsonStubCallableFactory callableFactory; + + public static final HttpJsonStorageControlStub create(StorageControlStubSettings settings) + throws IOException { + return new HttpJsonStorageControlStub(settings, ClientContext.create(settings)); + } + + public static final HttpJsonStorageControlStub create(ClientContext clientContext) + throws IOException { + return new HttpJsonStorageControlStub( + StorageControlStubSettings.newHttpJsonBuilder().build(), clientContext); + } + + public static final HttpJsonStorageControlStub create( + ClientContext clientContext, HttpJsonStubCallableFactory callableFactory) throws IOException { + return new HttpJsonStorageControlStub( + StorageControlStubSettings.newHttpJsonBuilder().build(), clientContext, callableFactory); + } + + /** + * 
Constructs an instance of HttpJsonStorageControlStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected HttpJsonStorageControlStub( + StorageControlStubSettings settings, ClientContext clientContext) throws IOException { + this(settings, clientContext, new HttpJsonStorageControlCallableFactory()); + } + + /** + * Constructs an instance of HttpJsonStorageControlStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected HttpJsonStorageControlStub( + StorageControlStubSettings settings, + ClientContext clientContext, + HttpJsonStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.httpJsonOperationsStub = + HttpJsonOperationsStub.create(clientContext, callableFactory, typeRegistry); + + HttpJsonCallSettings + getProjectIntelligenceConfigTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(getProjectIntelligenceConfigMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + updateProjectIntelligenceConfigTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(updateProjectIntelligenceConfigMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "intelligence_config.name", + String.valueOf(request.getIntelligenceConfig().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + getFolderIntelligenceConfigTransportSettings = + HttpJsonCallSettings + .newBuilder() + 
.setMethodDescriptor(getFolderIntelligenceConfigMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + updateFolderIntelligenceConfigTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(updateFolderIntelligenceConfigMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "intelligence_config.name", + String.valueOf(request.getIntelligenceConfig().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + getOrganizationIntelligenceConfigTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(getOrganizationIntelligenceConfigMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + updateOrganizationIntelligenceConfigTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(updateOrganizationIntelligenceConfigMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "intelligence_config.name", + String.valueOf(request.getIntelligenceConfig().getName())); + return builder.build(); + }) + .build(); + + this.getProjectIntelligenceConfigCallable = + callableFactory.createUnaryCallable( + getProjectIntelligenceConfigTransportSettings, + settings.getProjectIntelligenceConfigSettings(), + clientContext); + this.updateProjectIntelligenceConfigCallable = + callableFactory.createUnaryCallable( + 
updateProjectIntelligenceConfigTransportSettings, + settings.updateProjectIntelligenceConfigSettings(), + clientContext); + this.getFolderIntelligenceConfigCallable = + callableFactory.createUnaryCallable( + getFolderIntelligenceConfigTransportSettings, + settings.getFolderIntelligenceConfigSettings(), + clientContext); + this.updateFolderIntelligenceConfigCallable = + callableFactory.createUnaryCallable( + updateFolderIntelligenceConfigTransportSettings, + settings.updateFolderIntelligenceConfigSettings(), + clientContext); + this.getOrganizationIntelligenceConfigCallable = + callableFactory.createUnaryCallable( + getOrganizationIntelligenceConfigTransportSettings, + settings.getOrganizationIntelligenceConfigSettings(), + clientContext); + this.updateOrganizationIntelligenceConfigCallable = + callableFactory.createUnaryCallable( + updateOrganizationIntelligenceConfigTransportSettings, + settings.updateOrganizationIntelligenceConfigSettings(), + clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + @InternalApi + public static List getMethodDescriptors() { + List methodDescriptors = new ArrayList<>(); + methodDescriptors.add(getProjectIntelligenceConfigMethodDescriptor); + methodDescriptors.add(updateProjectIntelligenceConfigMethodDescriptor); + methodDescriptors.add(getFolderIntelligenceConfigMethodDescriptor); + methodDescriptors.add(updateFolderIntelligenceConfigMethodDescriptor); + methodDescriptors.add(getOrganizationIntelligenceConfigMethodDescriptor); + methodDescriptors.add(updateOrganizationIntelligenceConfigMethodDescriptor); + return methodDescriptors; + } + + public HttpJsonOperationsStub getHttpJsonOperationsStub() { + return httpJsonOperationsStub; + } + + @Override + public UnaryCallable + getProjectIntelligenceConfigCallable() { + return getProjectIntelligenceConfigCallable; + } + + @Override + public UnaryCallable + updateProjectIntelligenceConfigCallable() { + return 
updateProjectIntelligenceConfigCallable; + } + + @Override + public UnaryCallable + getFolderIntelligenceConfigCallable() { + return getFolderIntelligenceConfigCallable; + } + + @Override + public UnaryCallable + updateFolderIntelligenceConfigCallable() { + return updateFolderIntelligenceConfigCallable; + } + + @Override + public UnaryCallable + getOrganizationIntelligenceConfigCallable() { + return getOrganizationIntelligenceConfigCallable; + } + + @Override + public UnaryCallable + updateOrganizationIntelligenceConfigCallable() { + return updateOrganizationIntelligenceConfigCallable; + } + + @Override + public UnaryCallable createFolderCallable() { + throw new UnsupportedOperationException( + "Not implemented: createFolderCallable(). REST transport is not implemented for this method" + + " yet."); + } + + @Override + public UnaryCallable deleteFolderCallable() { + throw new UnsupportedOperationException( + "Not implemented: deleteFolderCallable(). REST transport is not implemented for this method" + + " yet."); + } + + @Override + public UnaryCallable getFolderCallable() { + throw new UnsupportedOperationException( + "Not implemented: getFolderCallable(). REST transport is not implemented for this method" + + " yet."); + } + + @Override + public UnaryCallable listFoldersCallable() { + throw new UnsupportedOperationException( + "Not implemented: listFoldersCallable(). REST transport is not implemented for this method" + + " yet."); + } + + @Override + public UnaryCallable renameFolderCallable() { + throw new UnsupportedOperationException( + "Not implemented: renameFolderCallable(). REST transport is not implemented for this method" + + " yet."); + } + + @Override + public UnaryCallable deleteFolderRecursiveCallable() { + throw new UnsupportedOperationException( + "Not implemented: deleteFolderRecursiveCallable(). 
REST transport is not implemented for" + + " this method yet."); + } + + @Override + public UnaryCallable getStorageLayoutCallable() { + throw new UnsupportedOperationException( + "Not implemented: getStorageLayoutCallable(). REST transport is not implemented for this" + + " method yet."); + } + + @Override + public UnaryCallable createManagedFolderCallable() { + throw new UnsupportedOperationException( + "Not implemented: createManagedFolderCallable(). REST transport is not implemented for this" + + " method yet."); + } + + @Override + public UnaryCallable deleteManagedFolderCallable() { + throw new UnsupportedOperationException( + "Not implemented: deleteManagedFolderCallable(). REST transport is not implemented for this" + + " method yet."); + } + + @Override + public UnaryCallable getManagedFolderCallable() { + throw new UnsupportedOperationException( + "Not implemented: getManagedFolderCallable(). REST transport is not implemented for this" + + " method yet."); + } + + @Override + public UnaryCallable + listManagedFoldersCallable() { + throw new UnsupportedOperationException( + "Not implemented: listManagedFoldersCallable(). REST transport is not implemented for this" + + " method yet."); + } + + @Override + public UnaryCallable createAnywhereCacheCallable() { + throw new UnsupportedOperationException( + "Not implemented: createAnywhereCacheCallable(). REST transport is not implemented for this" + + " method yet."); + } + + @Override + public UnaryCallable updateAnywhereCacheCallable() { + throw new UnsupportedOperationException( + "Not implemented: updateAnywhereCacheCallable(). REST transport is not implemented for this" + + " method yet."); + } + + @Override + public UnaryCallable disableAnywhereCacheCallable() { + throw new UnsupportedOperationException( + "Not implemented: disableAnywhereCacheCallable(). 
REST transport is not implemented for" + + " this method yet."); + } + + @Override + public UnaryCallable pauseAnywhereCacheCallable() { + throw new UnsupportedOperationException( + "Not implemented: pauseAnywhereCacheCallable(). REST transport is not implemented for this" + + " method yet."); + } + + @Override + public UnaryCallable resumeAnywhereCacheCallable() { + throw new UnsupportedOperationException( + "Not implemented: resumeAnywhereCacheCallable(). REST transport is not implemented for this" + + " method yet."); + } + + @Override + public UnaryCallable getAnywhereCacheCallable() { + throw new UnsupportedOperationException( + "Not implemented: getAnywhereCacheCallable(). REST transport is not implemented for this" + + " method yet."); + } + + @Override + public UnaryCallable + listAnywhereCachesCallable() { + throw new UnsupportedOperationException( + "Not implemented: listAnywhereCachesCallable(). REST transport is not implemented for this" + + " method yet."); + } + + @Override + public UnaryCallable getIamPolicyCallable() { + throw new UnsupportedOperationException( + "Not implemented: getIamPolicyCallable(). REST transport is not implemented for this method" + + " yet."); + } + + @Override + public UnaryCallable setIamPolicyCallable() { + throw new UnsupportedOperationException( + "Not implemented: setIamPolicyCallable(). REST transport is not implemented for this method" + + " yet."); + } + + @Override + public UnaryCallable + testIamPermissionsCallable() { + throw new UnsupportedOperationException( + "Not implemented: testIamPermissionsCallable(). 
REST transport is not implemented for this" + + " method yet."); + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/StorageControlStub.java b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/StorageControlStub.java new file mode 100644 index 000000000000..7463b7921b5d --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/StorageControlStub.java @@ -0,0 +1,251 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.control.v2.stub; + +import static com.google.storage.control.v2.StorageControlClient.ListAnywhereCachesPagedResponse; +import static com.google.storage.control.v2.StorageControlClient.ListFoldersPagedResponse; +import static com.google.storage.control.v2.StorageControlClient.ListManagedFoldersPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import com.google.protobuf.Empty; +import com.google.storage.control.v2.AnywhereCache; +import com.google.storage.control.v2.CreateAnywhereCacheMetadata; +import com.google.storage.control.v2.CreateAnywhereCacheRequest; +import com.google.storage.control.v2.CreateFolderRequest; +import com.google.storage.control.v2.CreateManagedFolderRequest; +import com.google.storage.control.v2.DeleteFolderRecursiveMetadata; +import com.google.storage.control.v2.DeleteFolderRecursiveRequest; +import com.google.storage.control.v2.DeleteFolderRequest; +import com.google.storage.control.v2.DeleteManagedFolderRequest; +import com.google.storage.control.v2.DisableAnywhereCacheRequest; +import com.google.storage.control.v2.Folder; +import com.google.storage.control.v2.GetAnywhereCacheRequest; +import com.google.storage.control.v2.GetFolderIntelligenceConfigRequest; +import com.google.storage.control.v2.GetFolderRequest; +import com.google.storage.control.v2.GetManagedFolderRequest; +import com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest; +import com.google.storage.control.v2.GetProjectIntelligenceConfigRequest; +import 
com.google.storage.control.v2.GetStorageLayoutRequest; +import com.google.storage.control.v2.IntelligenceConfig; +import com.google.storage.control.v2.ListAnywhereCachesRequest; +import com.google.storage.control.v2.ListAnywhereCachesResponse; +import com.google.storage.control.v2.ListFoldersRequest; +import com.google.storage.control.v2.ListFoldersResponse; +import com.google.storage.control.v2.ListManagedFoldersRequest; +import com.google.storage.control.v2.ListManagedFoldersResponse; +import com.google.storage.control.v2.ManagedFolder; +import com.google.storage.control.v2.PauseAnywhereCacheRequest; +import com.google.storage.control.v2.RenameFolderMetadata; +import com.google.storage.control.v2.RenameFolderRequest; +import com.google.storage.control.v2.ResumeAnywhereCacheRequest; +import com.google.storage.control.v2.StorageLayout; +import com.google.storage.control.v2.UpdateAnywhereCacheMetadata; +import com.google.storage.control.v2.UpdateAnywhereCacheRequest; +import com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest; +import com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest; +import com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the StorageControl service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public abstract class StorageControlStub implements BackgroundResource { + + public OperationsStub getOperationsStub() { + return null; + } + + public com.google.api.gax.httpjson.longrunning.stub.OperationsStub getHttpJsonOperationsStub() { + return null; + } + + public UnaryCallable createFolderCallable() { + throw new UnsupportedOperationException("Not implemented: createFolderCallable()"); + } + + public UnaryCallable deleteFolderCallable() { + throw new UnsupportedOperationException("Not implemented: deleteFolderCallable()"); + } + + public UnaryCallable getFolderCallable() { + throw new UnsupportedOperationException("Not implemented: getFolderCallable()"); + } + + public UnaryCallable listFoldersPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listFoldersPagedCallable()"); + } + + public UnaryCallable listFoldersCallable() { + throw new UnsupportedOperationException("Not implemented: listFoldersCallable()"); + } + + public OperationCallable + renameFolderOperationCallable() { + throw new UnsupportedOperationException("Not implemented: renameFolderOperationCallable()"); + } + + public UnaryCallable renameFolderCallable() { + throw new UnsupportedOperationException("Not implemented: renameFolderCallable()"); + } + + public OperationCallable + deleteFolderRecursiveOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: deleteFolderRecursiveOperationCallable()"); + } + + public UnaryCallable deleteFolderRecursiveCallable() { + throw new UnsupportedOperationException("Not implemented: deleteFolderRecursiveCallable()"); + } + + public UnaryCallable getStorageLayoutCallable() { + throw new UnsupportedOperationException("Not implemented: getStorageLayoutCallable()"); + } + + public UnaryCallable createManagedFolderCallable() { + throw new UnsupportedOperationException("Not implemented: 
createManagedFolderCallable()"); + } + + public UnaryCallable deleteManagedFolderCallable() { + throw new UnsupportedOperationException("Not implemented: deleteManagedFolderCallable()"); + } + + public UnaryCallable getManagedFolderCallable() { + throw new UnsupportedOperationException("Not implemented: getManagedFolderCallable()"); + } + + public UnaryCallable + listManagedFoldersPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listManagedFoldersPagedCallable()"); + } + + public UnaryCallable + listManagedFoldersCallable() { + throw new UnsupportedOperationException("Not implemented: listManagedFoldersCallable()"); + } + + public OperationCallable + createAnywhereCacheOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: createAnywhereCacheOperationCallable()"); + } + + public UnaryCallable createAnywhereCacheCallable() { + throw new UnsupportedOperationException("Not implemented: createAnywhereCacheCallable()"); + } + + public OperationCallable + updateAnywhereCacheOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: updateAnywhereCacheOperationCallable()"); + } + + public UnaryCallable updateAnywhereCacheCallable() { + throw new UnsupportedOperationException("Not implemented: updateAnywhereCacheCallable()"); + } + + public UnaryCallable disableAnywhereCacheCallable() { + throw new UnsupportedOperationException("Not implemented: disableAnywhereCacheCallable()"); + } + + public UnaryCallable pauseAnywhereCacheCallable() { + throw new UnsupportedOperationException("Not implemented: pauseAnywhereCacheCallable()"); + } + + public UnaryCallable resumeAnywhereCacheCallable() { + throw new UnsupportedOperationException("Not implemented: resumeAnywhereCacheCallable()"); + } + + public UnaryCallable getAnywhereCacheCallable() { + throw new UnsupportedOperationException("Not implemented: getAnywhereCacheCallable()"); + } + + public UnaryCallable + listAnywhereCachesPagedCallable() { 
+ throw new UnsupportedOperationException("Not implemented: listAnywhereCachesPagedCallable()"); + } + + public UnaryCallable + listAnywhereCachesCallable() { + throw new UnsupportedOperationException("Not implemented: listAnywhereCachesCallable()"); + } + + public UnaryCallable + getProjectIntelligenceConfigCallable() { + throw new UnsupportedOperationException( + "Not implemented: getProjectIntelligenceConfigCallable()"); + } + + public UnaryCallable + updateProjectIntelligenceConfigCallable() { + throw new UnsupportedOperationException( + "Not implemented: updateProjectIntelligenceConfigCallable()"); + } + + public UnaryCallable + getFolderIntelligenceConfigCallable() { + throw new UnsupportedOperationException( + "Not implemented: getFolderIntelligenceConfigCallable()"); + } + + public UnaryCallable + updateFolderIntelligenceConfigCallable() { + throw new UnsupportedOperationException( + "Not implemented: updateFolderIntelligenceConfigCallable()"); + } + + public UnaryCallable + getOrganizationIntelligenceConfigCallable() { + throw new UnsupportedOperationException( + "Not implemented: getOrganizationIntelligenceConfigCallable()"); + } + + public UnaryCallable + updateOrganizationIntelligenceConfigCallable() { + throw new UnsupportedOperationException( + "Not implemented: updateOrganizationIntelligenceConfigCallable()"); + } + + public UnaryCallable getIamPolicyCallable() { + throw new UnsupportedOperationException("Not implemented: getIamPolicyCallable()"); + } + + public UnaryCallable setIamPolicyCallable() { + throw new UnsupportedOperationException("Not implemented: setIamPolicyCallable()"); + } + + public UnaryCallable + testIamPermissionsCallable() { + throw new UnsupportedOperationException("Not implemented: testIamPermissionsCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/StorageControlStubSettings.java 
b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/StorageControlStubSettings.java new file mode 100644 index 000000000000..d196138eff86 --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/StorageControlStubSettings.java @@ -0,0 +1,1486 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.control.v2.stub; + +import static com.google.storage.control.v2.StorageControlClient.ListAnywhereCachesPagedResponse; +import static com.google.storage.control.v2.StorageControlClient.ListFoldersPagedResponse; +import static com.google.storage.control.v2.StorageControlClient.ListManagedFoldersPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.grpc.ProtoOperationTransformers; +import com.google.api.gax.httpjson.GaxHttpJsonProperties; +import com.google.api.gax.httpjson.HttpJsonTransportChannel; +import 
com.google.api.gax.httpjson.InstantiatingHttpJsonChannelProvider; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.PagedListDescriptor; +import com.google.api.gax.rpc.PagedListResponseFactory; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.storage.control.v2.AnywhereCache; +import com.google.storage.control.v2.CreateAnywhereCacheMetadata; +import com.google.storage.control.v2.CreateAnywhereCacheRequest; +import com.google.storage.control.v2.CreateFolderRequest; +import com.google.storage.control.v2.CreateManagedFolderRequest; +import com.google.storage.control.v2.DeleteFolderRecursiveMetadata; +import com.google.storage.control.v2.DeleteFolderRecursiveRequest; +import com.google.storage.control.v2.DeleteFolderRequest; +import com.google.storage.control.v2.DeleteManagedFolderRequest; +import 
com.google.storage.control.v2.DisableAnywhereCacheRequest; +import com.google.storage.control.v2.Folder; +import com.google.storage.control.v2.GetAnywhereCacheRequest; +import com.google.storage.control.v2.GetFolderIntelligenceConfigRequest; +import com.google.storage.control.v2.GetFolderRequest; +import com.google.storage.control.v2.GetManagedFolderRequest; +import com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest; +import com.google.storage.control.v2.GetProjectIntelligenceConfigRequest; +import com.google.storage.control.v2.GetStorageLayoutRequest; +import com.google.storage.control.v2.IntelligenceConfig; +import com.google.storage.control.v2.ListAnywhereCachesRequest; +import com.google.storage.control.v2.ListAnywhereCachesResponse; +import com.google.storage.control.v2.ListFoldersRequest; +import com.google.storage.control.v2.ListFoldersResponse; +import com.google.storage.control.v2.ListManagedFoldersRequest; +import com.google.storage.control.v2.ListManagedFoldersResponse; +import com.google.storage.control.v2.ManagedFolder; +import com.google.storage.control.v2.PauseAnywhereCacheRequest; +import com.google.storage.control.v2.RenameFolderMetadata; +import com.google.storage.control.v2.RenameFolderRequest; +import com.google.storage.control.v2.ResumeAnywhereCacheRequest; +import com.google.storage.control.v2.StorageLayout; +import com.google.storage.control.v2.UpdateAnywhereCacheMetadata; +import com.google.storage.control.v2.UpdateAnywhereCacheRequest; +import com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest; +import com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest; +import com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link StorageControlStub}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (storage.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of createFolder: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * StorageControlStubSettings.Builder storageControlSettingsBuilder =
+ *     StorageControlStubSettings.newBuilder();
+ * storageControlSettingsBuilder
+ *     .createFolderSettings()
+ *     .setRetrySettings(
+ *         storageControlSettingsBuilder
+ *             .createFolderSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * StorageControlStubSettings storageControlSettings = storageControlSettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + * + *

To configure the RetrySettings of a Long Running Operation method, create an + * OperationTimedPollAlgorithm object and update the RPC's polling algorithm. For example, to + * configure the RetrySettings for renameFolder: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * StorageControlStubSettings.Builder storageControlSettingsBuilder =
+ *     StorageControlStubSettings.newBuilder();
+ * TimedRetryAlgorithm timedRetryAlgorithm =
+ *     OperationalTimedPollAlgorithm.create(
+ *         RetrySettings.newBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofMillis(500))
+ *             .setRetryDelayMultiplier(1.5)
+ *             .setMaxRetryDelayDuration(Duration.ofMillis(5000))
+ *             .setTotalTimeoutDuration(Duration.ofHours(24))
+ *             .build());
+ * storageControlSettingsBuilder
+ *     .createClusterOperationSettings()
+ *     .setPollingAlgorithm(timedRetryAlgorithm)
+ *     .build();
+ * }
+ */ +@Generated("by gapic-generator-java") +public class StorageControlStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/cloud-platform") + .add("https://www.googleapis.com/auth/cloud-platform.read-only") + .add("https://www.googleapis.com/auth/devstorage.full_control") + .add("https://www.googleapis.com/auth/devstorage.read_only") + .add("https://www.googleapis.com/auth/devstorage.read_write") + .build(); + + private final UnaryCallSettings createFolderSettings; + private final UnaryCallSettings deleteFolderSettings; + private final UnaryCallSettings getFolderSettings; + private final PagedCallSettings + listFoldersSettings; + private final UnaryCallSettings renameFolderSettings; + private final OperationCallSettings + renameFolderOperationSettings; + private final UnaryCallSettings + deleteFolderRecursiveSettings; + private final OperationCallSettings< + DeleteFolderRecursiveRequest, Empty, DeleteFolderRecursiveMetadata> + deleteFolderRecursiveOperationSettings; + private final UnaryCallSettings getStorageLayoutSettings; + private final UnaryCallSettings + createManagedFolderSettings; + private final UnaryCallSettings deleteManagedFolderSettings; + private final UnaryCallSettings getManagedFolderSettings; + private final PagedCallSettings< + ListManagedFoldersRequest, ListManagedFoldersResponse, ListManagedFoldersPagedResponse> + listManagedFoldersSettings; + private final UnaryCallSettings + createAnywhereCacheSettings; + private final OperationCallSettings< + CreateAnywhereCacheRequest, AnywhereCache, CreateAnywhereCacheMetadata> + createAnywhereCacheOperationSettings; + private final UnaryCallSettings + updateAnywhereCacheSettings; + private final OperationCallSettings< + UpdateAnywhereCacheRequest, AnywhereCache, UpdateAnywhereCacheMetadata> + updateAnywhereCacheOperationSettings; + private final 
UnaryCallSettings + disableAnywhereCacheSettings; + private final UnaryCallSettings + pauseAnywhereCacheSettings; + private final UnaryCallSettings + resumeAnywhereCacheSettings; + private final UnaryCallSettings getAnywhereCacheSettings; + private final PagedCallSettings< + ListAnywhereCachesRequest, ListAnywhereCachesResponse, ListAnywhereCachesPagedResponse> + listAnywhereCachesSettings; + private final UnaryCallSettings + getProjectIntelligenceConfigSettings; + private final UnaryCallSettings + updateProjectIntelligenceConfigSettings; + private final UnaryCallSettings + getFolderIntelligenceConfigSettings; + private final UnaryCallSettings + updateFolderIntelligenceConfigSettings; + private final UnaryCallSettings + getOrganizationIntelligenceConfigSettings; + private final UnaryCallSettings + updateOrganizationIntelligenceConfigSettings; + private final UnaryCallSettings getIamPolicySettings; + private final UnaryCallSettings setIamPolicySettings; + private final UnaryCallSettings + testIamPermissionsSettings; + + private static final PagedListDescriptor + LIST_FOLDERS_PAGE_STR_DESC = + new PagedListDescriptor() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListFoldersRequest injectToken(ListFoldersRequest payload, String token) { + return ListFoldersRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListFoldersRequest injectPageSize(ListFoldersRequest payload, int pageSize) { + return ListFoldersRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListFoldersRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListFoldersResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListFoldersResponse payload) { + return payload.getFoldersList(); + } + }; + + private static final PagedListDescriptor< + ListManagedFoldersRequest, 
ListManagedFoldersResponse, ManagedFolder> + LIST_MANAGED_FOLDERS_PAGE_STR_DESC = + new PagedListDescriptor< + ListManagedFoldersRequest, ListManagedFoldersResponse, ManagedFolder>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListManagedFoldersRequest injectToken( + ListManagedFoldersRequest payload, String token) { + return ListManagedFoldersRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListManagedFoldersRequest injectPageSize( + ListManagedFoldersRequest payload, int pageSize) { + return ListManagedFoldersRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListManagedFoldersRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListManagedFoldersResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListManagedFoldersResponse payload) { + return payload.getManagedFoldersList(); + } + }; + + private static final PagedListDescriptor< + ListAnywhereCachesRequest, ListAnywhereCachesResponse, AnywhereCache> + LIST_ANYWHERE_CACHES_PAGE_STR_DESC = + new PagedListDescriptor< + ListAnywhereCachesRequest, ListAnywhereCachesResponse, AnywhereCache>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListAnywhereCachesRequest injectToken( + ListAnywhereCachesRequest payload, String token) { + return ListAnywhereCachesRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListAnywhereCachesRequest injectPageSize( + ListAnywhereCachesRequest payload, int pageSize) { + return ListAnywhereCachesRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListAnywhereCachesRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListAnywhereCachesResponse payload) { + return 
payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListAnywhereCachesResponse payload) { + return payload.getAnywhereCachesList(); + } + }; + + private static final PagedListResponseFactory< + ListFoldersRequest, ListFoldersResponse, ListFoldersPagedResponse> + LIST_FOLDERS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListFoldersRequest, ListFoldersResponse, ListFoldersPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListFoldersRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext pageContext = + PageContext.create(callable, LIST_FOLDERS_PAGE_STR_DESC, request, context); + return ListFoldersPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListManagedFoldersRequest, ListManagedFoldersResponse, ListManagedFoldersPagedResponse> + LIST_MANAGED_FOLDERS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListManagedFoldersRequest, + ListManagedFoldersResponse, + ListManagedFoldersPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListManagedFoldersRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext + pageContext = + PageContext.create( + callable, LIST_MANAGED_FOLDERS_PAGE_STR_DESC, request, context); + return ListManagedFoldersPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListAnywhereCachesRequest, ListAnywhereCachesResponse, ListAnywhereCachesPagedResponse> + LIST_ANYWHERE_CACHES_PAGE_STR_FACT = + new PagedListResponseFactory< + ListAnywhereCachesRequest, + ListAnywhereCachesResponse, + ListAnywhereCachesPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListAnywhereCachesRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext + pageContext = + 
PageContext.create( + callable, LIST_ANYWHERE_CACHES_PAGE_STR_DESC, request, context); + return ListAnywhereCachesPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + /** Returns the object with the settings used for calls to createFolder. */ + public UnaryCallSettings createFolderSettings() { + return createFolderSettings; + } + + /** Returns the object with the settings used for calls to deleteFolder. */ + public UnaryCallSettings deleteFolderSettings() { + return deleteFolderSettings; + } + + /** Returns the object with the settings used for calls to getFolder. */ + public UnaryCallSettings getFolderSettings() { + return getFolderSettings; + } + + /** Returns the object with the settings used for calls to listFolders. */ + public PagedCallSettings + listFoldersSettings() { + return listFoldersSettings; + } + + /** Returns the object with the settings used for calls to renameFolder. */ + public UnaryCallSettings renameFolderSettings() { + return renameFolderSettings; + } + + /** Returns the object with the settings used for calls to renameFolder. */ + public OperationCallSettings + renameFolderOperationSettings() { + return renameFolderOperationSettings; + } + + /** Returns the object with the settings used for calls to deleteFolderRecursive. */ + public UnaryCallSettings + deleteFolderRecursiveSettings() { + return deleteFolderRecursiveSettings; + } + + /** Returns the object with the settings used for calls to deleteFolderRecursive. */ + public OperationCallSettings + deleteFolderRecursiveOperationSettings() { + return deleteFolderRecursiveOperationSettings; + } + + /** Returns the object with the settings used for calls to getStorageLayout. */ + public UnaryCallSettings getStorageLayoutSettings() { + return getStorageLayoutSettings; + } + + /** Returns the object with the settings used for calls to createManagedFolder. 
*/ + public UnaryCallSettings + createManagedFolderSettings() { + return createManagedFolderSettings; + } + + /** Returns the object with the settings used for calls to deleteManagedFolder. */ + public UnaryCallSettings deleteManagedFolderSettings() { + return deleteManagedFolderSettings; + } + + /** Returns the object with the settings used for calls to getManagedFolder. */ + public UnaryCallSettings getManagedFolderSettings() { + return getManagedFolderSettings; + } + + /** Returns the object with the settings used for calls to listManagedFolders. */ + public PagedCallSettings< + ListManagedFoldersRequest, ListManagedFoldersResponse, ListManagedFoldersPagedResponse> + listManagedFoldersSettings() { + return listManagedFoldersSettings; + } + + /** Returns the object with the settings used for calls to createAnywhereCache. */ + public UnaryCallSettings createAnywhereCacheSettings() { + return createAnywhereCacheSettings; + } + + /** Returns the object with the settings used for calls to createAnywhereCache. */ + public OperationCallSettings< + CreateAnywhereCacheRequest, AnywhereCache, CreateAnywhereCacheMetadata> + createAnywhereCacheOperationSettings() { + return createAnywhereCacheOperationSettings; + } + + /** Returns the object with the settings used for calls to updateAnywhereCache. */ + public UnaryCallSettings updateAnywhereCacheSettings() { + return updateAnywhereCacheSettings; + } + + /** Returns the object with the settings used for calls to updateAnywhereCache. */ + public OperationCallSettings< + UpdateAnywhereCacheRequest, AnywhereCache, UpdateAnywhereCacheMetadata> + updateAnywhereCacheOperationSettings() { + return updateAnywhereCacheOperationSettings; + } + + /** Returns the object with the settings used for calls to disableAnywhereCache. */ + public UnaryCallSettings + disableAnywhereCacheSettings() { + return disableAnywhereCacheSettings; + } + + /** Returns the object with the settings used for calls to pauseAnywhereCache. 
*/ + public UnaryCallSettings pauseAnywhereCacheSettings() { + return pauseAnywhereCacheSettings; + } + + /** Returns the object with the settings used for calls to resumeAnywhereCache. */ + public UnaryCallSettings + resumeAnywhereCacheSettings() { + return resumeAnywhereCacheSettings; + } + + /** Returns the object with the settings used for calls to getAnywhereCache. */ + public UnaryCallSettings getAnywhereCacheSettings() { + return getAnywhereCacheSettings; + } + + /** Returns the object with the settings used for calls to listAnywhereCaches. */ + public PagedCallSettings< + ListAnywhereCachesRequest, ListAnywhereCachesResponse, ListAnywhereCachesPagedResponse> + listAnywhereCachesSettings() { + return listAnywhereCachesSettings; + } + + /** Returns the object with the settings used for calls to getProjectIntelligenceConfig. */ + public UnaryCallSettings + getProjectIntelligenceConfigSettings() { + return getProjectIntelligenceConfigSettings; + } + + /** Returns the object with the settings used for calls to updateProjectIntelligenceConfig. */ + public UnaryCallSettings + updateProjectIntelligenceConfigSettings() { + return updateProjectIntelligenceConfigSettings; + } + + /** Returns the object with the settings used for calls to getFolderIntelligenceConfig. */ + public UnaryCallSettings + getFolderIntelligenceConfigSettings() { + return getFolderIntelligenceConfigSettings; + } + + /** Returns the object with the settings used for calls to updateFolderIntelligenceConfig. */ + public UnaryCallSettings + updateFolderIntelligenceConfigSettings() { + return updateFolderIntelligenceConfigSettings; + } + + /** Returns the object with the settings used for calls to getOrganizationIntelligenceConfig. */ + public UnaryCallSettings + getOrganizationIntelligenceConfigSettings() { + return getOrganizationIntelligenceConfigSettings; + } + + /** + * Returns the object with the settings used for calls to updateOrganizationIntelligenceConfig. 
+ */ + public UnaryCallSettings + updateOrganizationIntelligenceConfigSettings() { + return updateOrganizationIntelligenceConfigSettings; + } + + /** Returns the object with the settings used for calls to getIamPolicy. */ + public UnaryCallSettings getIamPolicySettings() { + return getIamPolicySettings; + } + + /** Returns the object with the settings used for calls to setIamPolicy. */ + public UnaryCallSettings setIamPolicySettings() { + return setIamPolicySettings; + } + + /** Returns the object with the settings used for calls to testIamPermissions. */ + public UnaryCallSettings + testIamPermissionsSettings() { + return testIamPermissionsSettings; + } + + public StorageControlStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcStorageControlStub.create(this); + } + if (getTransportChannelProvider() + .getTransportName() + .equals(HttpJsonTransportChannel.getHttpJsonTransportName())) { + return HttpJsonStorageControlStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "storage"; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "storage.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "storage.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. 
*/ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default gRPC ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + /** Returns a builder for the default REST ChannelProvider for this service. */ + @BetaApi + public static InstantiatingHttpJsonChannelProvider.Builder + defaultHttpJsonTransportProviderBuilder() { + return InstantiatingHttpJsonChannelProvider.newBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultGrpcApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(StorageControlStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + public static ApiClientHeaderProvider.Builder defaultHttpJsonApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(StorageControlStubSettings.class)) + .setTransportToken( + GaxHttpJsonProperties.getHttpJsonTokenName(), + GaxHttpJsonProperties.getHttpJsonVersion()); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return StorageControlStubSettings.defaultGrpcApiClientHeaderProviderBuilder(); + } + + /** Returns a new gRPC 
builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new REST builder for this class. */ + public static Builder newHttpJsonBuilder() { + return Builder.createHttpJsonDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected StorageControlStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createFolderSettings = settingsBuilder.createFolderSettings().build(); + deleteFolderSettings = settingsBuilder.deleteFolderSettings().build(); + getFolderSettings = settingsBuilder.getFolderSettings().build(); + listFoldersSettings = settingsBuilder.listFoldersSettings().build(); + renameFolderSettings = settingsBuilder.renameFolderSettings().build(); + renameFolderOperationSettings = settingsBuilder.renameFolderOperationSettings().build(); + deleteFolderRecursiveSettings = settingsBuilder.deleteFolderRecursiveSettings().build(); + deleteFolderRecursiveOperationSettings = + settingsBuilder.deleteFolderRecursiveOperationSettings().build(); + getStorageLayoutSettings = settingsBuilder.getStorageLayoutSettings().build(); + createManagedFolderSettings = settingsBuilder.createManagedFolderSettings().build(); + deleteManagedFolderSettings = settingsBuilder.deleteManagedFolderSettings().build(); + getManagedFolderSettings = settingsBuilder.getManagedFolderSettings().build(); + listManagedFoldersSettings = settingsBuilder.listManagedFoldersSettings().build(); + createAnywhereCacheSettings = settingsBuilder.createAnywhereCacheSettings().build(); + createAnywhereCacheOperationSettings = + settingsBuilder.createAnywhereCacheOperationSettings().build(); + updateAnywhereCacheSettings = 
settingsBuilder.updateAnywhereCacheSettings().build(); + updateAnywhereCacheOperationSettings = + settingsBuilder.updateAnywhereCacheOperationSettings().build(); + disableAnywhereCacheSettings = settingsBuilder.disableAnywhereCacheSettings().build(); + pauseAnywhereCacheSettings = settingsBuilder.pauseAnywhereCacheSettings().build(); + resumeAnywhereCacheSettings = settingsBuilder.resumeAnywhereCacheSettings().build(); + getAnywhereCacheSettings = settingsBuilder.getAnywhereCacheSettings().build(); + listAnywhereCachesSettings = settingsBuilder.listAnywhereCachesSettings().build(); + getProjectIntelligenceConfigSettings = + settingsBuilder.getProjectIntelligenceConfigSettings().build(); + updateProjectIntelligenceConfigSettings = + settingsBuilder.updateProjectIntelligenceConfigSettings().build(); + getFolderIntelligenceConfigSettings = + settingsBuilder.getFolderIntelligenceConfigSettings().build(); + updateFolderIntelligenceConfigSettings = + settingsBuilder.updateFolderIntelligenceConfigSettings().build(); + getOrganizationIntelligenceConfigSettings = + settingsBuilder.getOrganizationIntelligenceConfigSettings().build(); + updateOrganizationIntelligenceConfigSettings = + settingsBuilder.updateOrganizationIntelligenceConfigSettings().build(); + getIamPolicySettings = settingsBuilder.getIamPolicySettings().build(); + setIamPolicySettings = settingsBuilder.setIamPolicySettings().build(); + testIamPermissionsSettings = settingsBuilder.testIamPermissionsSettings().build(); + } + + /** Builder for StorageControlStubSettings. 
*/ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final UnaryCallSettings.Builder createFolderSettings; + private final UnaryCallSettings.Builder deleteFolderSettings; + private final UnaryCallSettings.Builder getFolderSettings; + private final PagedCallSettings.Builder< + ListFoldersRequest, ListFoldersResponse, ListFoldersPagedResponse> + listFoldersSettings; + private final UnaryCallSettings.Builder renameFolderSettings; + private final OperationCallSettings.Builder + renameFolderOperationSettings; + private final UnaryCallSettings.Builder + deleteFolderRecursiveSettings; + private final OperationCallSettings.Builder< + DeleteFolderRecursiveRequest, Empty, DeleteFolderRecursiveMetadata> + deleteFolderRecursiveOperationSettings; + private final UnaryCallSettings.Builder + getStorageLayoutSettings; + private final UnaryCallSettings.Builder + createManagedFolderSettings; + private final UnaryCallSettings.Builder + deleteManagedFolderSettings; + private final UnaryCallSettings.Builder + getManagedFolderSettings; + private final PagedCallSettings.Builder< + ListManagedFoldersRequest, ListManagedFoldersResponse, ListManagedFoldersPagedResponse> + listManagedFoldersSettings; + private final UnaryCallSettings.Builder + createAnywhereCacheSettings; + private final OperationCallSettings.Builder< + CreateAnywhereCacheRequest, AnywhereCache, CreateAnywhereCacheMetadata> + createAnywhereCacheOperationSettings; + private final UnaryCallSettings.Builder + updateAnywhereCacheSettings; + private final OperationCallSettings.Builder< + UpdateAnywhereCacheRequest, AnywhereCache, UpdateAnywhereCacheMetadata> + updateAnywhereCacheOperationSettings; + private final UnaryCallSettings.Builder + disableAnywhereCacheSettings; + private final UnaryCallSettings.Builder + pauseAnywhereCacheSettings; + private final UnaryCallSettings.Builder + resumeAnywhereCacheSettings; + private final 
UnaryCallSettings.Builder + getAnywhereCacheSettings; + private final PagedCallSettings.Builder< + ListAnywhereCachesRequest, ListAnywhereCachesResponse, ListAnywhereCachesPagedResponse> + listAnywhereCachesSettings; + private final UnaryCallSettings.Builder + getProjectIntelligenceConfigSettings; + private final UnaryCallSettings.Builder< + UpdateProjectIntelligenceConfigRequest, IntelligenceConfig> + updateProjectIntelligenceConfigSettings; + private final UnaryCallSettings.Builder + getFolderIntelligenceConfigSettings; + private final UnaryCallSettings.Builder< + UpdateFolderIntelligenceConfigRequest, IntelligenceConfig> + updateFolderIntelligenceConfigSettings; + private final UnaryCallSettings.Builder< + GetOrganizationIntelligenceConfigRequest, IntelligenceConfig> + getOrganizationIntelligenceConfigSettings; + private final UnaryCallSettings.Builder< + UpdateOrganizationIntelligenceConfigRequest, IntelligenceConfig> + updateOrganizationIntelligenceConfigSettings; + private final UnaryCallSettings.Builder getIamPolicySettings; + private final UnaryCallSettings.Builder setIamPolicySettings; + private final UnaryCallSettings.Builder + testIamPermissionsSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_0_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.RESOURCE_EXHAUSTED, + StatusCode.Code.UNAVAILABLE, + StatusCode.Code.DEADLINE_EXCEEDED, + StatusCode.Code.INTERNAL, + StatusCode.Code.UNKNOWN))); + definitions.put( + "no_retry_1_codes", ImmutableSet.copyOf(Lists.newArrayList())); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + 
.setRetryDelayMultiplier(2.0) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setTotalTimeoutDuration(Duration.ofMillis(60000L)) + .build(); + definitions.put("retry_policy_0_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setTotalTimeoutDuration(Duration.ofMillis(60000L)) + .build(); + definitions.put("no_retry_1_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createFolderSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + deleteFolderSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getFolderSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listFoldersSettings = PagedCallSettings.newBuilder(LIST_FOLDERS_PAGE_STR_FACT); + renameFolderSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + renameFolderOperationSettings = OperationCallSettings.newBuilder(); + deleteFolderRecursiveSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + deleteFolderRecursiveOperationSettings = OperationCallSettings.newBuilder(); + getStorageLayoutSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createManagedFolderSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + deleteManagedFolderSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getManagedFolderSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listManagedFoldersSettings = PagedCallSettings.newBuilder(LIST_MANAGED_FOLDERS_PAGE_STR_FACT); + createAnywhereCacheSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createAnywhereCacheOperationSettings = 
OperationCallSettings.newBuilder(); + updateAnywhereCacheSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateAnywhereCacheOperationSettings = OperationCallSettings.newBuilder(); + disableAnywhereCacheSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + pauseAnywhereCacheSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + resumeAnywhereCacheSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getAnywhereCacheSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listAnywhereCachesSettings = PagedCallSettings.newBuilder(LIST_ANYWHERE_CACHES_PAGE_STR_FACT); + getProjectIntelligenceConfigSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateProjectIntelligenceConfigSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getFolderIntelligenceConfigSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateFolderIntelligenceConfigSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getOrganizationIntelligenceConfigSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateOrganizationIntelligenceConfigSettings = + UnaryCallSettings.newUnaryCallSettingsBuilder(); + getIamPolicySettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + setIamPolicySettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + testIamPermissionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createFolderSettings, + deleteFolderSettings, + getFolderSettings, + listFoldersSettings, + renameFolderSettings, + deleteFolderRecursiveSettings, + getStorageLayoutSettings, + createManagedFolderSettings, + deleteManagedFolderSettings, + getManagedFolderSettings, + listManagedFoldersSettings, + createAnywhereCacheSettings, + updateAnywhereCacheSettings, + disableAnywhereCacheSettings, + pauseAnywhereCacheSettings, + resumeAnywhereCacheSettings, + getAnywhereCacheSettings, + listAnywhereCachesSettings, + 
getProjectIntelligenceConfigSettings, + updateProjectIntelligenceConfigSettings, + getFolderIntelligenceConfigSettings, + updateFolderIntelligenceConfigSettings, + getOrganizationIntelligenceConfigSettings, + updateOrganizationIntelligenceConfigSettings, + getIamPolicySettings, + setIamPolicySettings, + testIamPermissionsSettings); + initDefaults(this); + } + + protected Builder(StorageControlStubSettings settings) { + super(settings); + + createFolderSettings = settings.createFolderSettings.toBuilder(); + deleteFolderSettings = settings.deleteFolderSettings.toBuilder(); + getFolderSettings = settings.getFolderSettings.toBuilder(); + listFoldersSettings = settings.listFoldersSettings.toBuilder(); + renameFolderSettings = settings.renameFolderSettings.toBuilder(); + renameFolderOperationSettings = settings.renameFolderOperationSettings.toBuilder(); + deleteFolderRecursiveSettings = settings.deleteFolderRecursiveSettings.toBuilder(); + deleteFolderRecursiveOperationSettings = + settings.deleteFolderRecursiveOperationSettings.toBuilder(); + getStorageLayoutSettings = settings.getStorageLayoutSettings.toBuilder(); + createManagedFolderSettings = settings.createManagedFolderSettings.toBuilder(); + deleteManagedFolderSettings = settings.deleteManagedFolderSettings.toBuilder(); + getManagedFolderSettings = settings.getManagedFolderSettings.toBuilder(); + listManagedFoldersSettings = settings.listManagedFoldersSettings.toBuilder(); + createAnywhereCacheSettings = settings.createAnywhereCacheSettings.toBuilder(); + createAnywhereCacheOperationSettings = + settings.createAnywhereCacheOperationSettings.toBuilder(); + updateAnywhereCacheSettings = settings.updateAnywhereCacheSettings.toBuilder(); + updateAnywhereCacheOperationSettings = + settings.updateAnywhereCacheOperationSettings.toBuilder(); + disableAnywhereCacheSettings = settings.disableAnywhereCacheSettings.toBuilder(); + pauseAnywhereCacheSettings = settings.pauseAnywhereCacheSettings.toBuilder(); + 
resumeAnywhereCacheSettings = settings.resumeAnywhereCacheSettings.toBuilder(); + getAnywhereCacheSettings = settings.getAnywhereCacheSettings.toBuilder(); + listAnywhereCachesSettings = settings.listAnywhereCachesSettings.toBuilder(); + getProjectIntelligenceConfigSettings = + settings.getProjectIntelligenceConfigSettings.toBuilder(); + updateProjectIntelligenceConfigSettings = + settings.updateProjectIntelligenceConfigSettings.toBuilder(); + getFolderIntelligenceConfigSettings = + settings.getFolderIntelligenceConfigSettings.toBuilder(); + updateFolderIntelligenceConfigSettings = + settings.updateFolderIntelligenceConfigSettings.toBuilder(); + getOrganizationIntelligenceConfigSettings = + settings.getOrganizationIntelligenceConfigSettings.toBuilder(); + updateOrganizationIntelligenceConfigSettings = + settings.updateOrganizationIntelligenceConfigSettings.toBuilder(); + getIamPolicySettings = settings.getIamPolicySettings.toBuilder(); + setIamPolicySettings = settings.setIamPolicySettings.toBuilder(); + testIamPermissionsSettings = settings.testIamPermissionsSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createFolderSettings, + deleteFolderSettings, + getFolderSettings, + listFoldersSettings, + renameFolderSettings, + deleteFolderRecursiveSettings, + getStorageLayoutSettings, + createManagedFolderSettings, + deleteManagedFolderSettings, + getManagedFolderSettings, + listManagedFoldersSettings, + createAnywhereCacheSettings, + updateAnywhereCacheSettings, + disableAnywhereCacheSettings, + pauseAnywhereCacheSettings, + resumeAnywhereCacheSettings, + getAnywhereCacheSettings, + listAnywhereCachesSettings, + getProjectIntelligenceConfigSettings, + updateProjectIntelligenceConfigSettings, + getFolderIntelligenceConfigSettings, + updateFolderIntelligenceConfigSettings, + getOrganizationIntelligenceConfigSettings, + updateOrganizationIntelligenceConfigSettings, + getIamPolicySettings, + setIamPolicySettings, + testIamPermissionsSettings); 
+ } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder createHttpJsonDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultHttpJsonTransportProviderBuilder().build()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultHttpJsonApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .createFolderSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .deleteFolderSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .getFolderSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listFoldersSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .renameFolderSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + 
builder + .deleteFolderRecursiveSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getStorageLayoutSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .createManagedFolderSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .deleteManagedFolderSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .getManagedFolderSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listManagedFoldersSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .createAnywhereCacheSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateAnywhereCacheSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .disableAnywhereCacheSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .pauseAnywhereCacheSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .resumeAnywhereCacheSettings() + 
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getAnywhereCacheSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listAnywhereCachesSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getProjectIntelligenceConfigSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateProjectIntelligenceConfigSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getFolderIntelligenceConfigSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateFolderIntelligenceConfigSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getOrganizationIntelligenceConfigSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateOrganizationIntelligenceConfigSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getIamPolicySettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + 
.setIamPolicySettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .testIamPermissionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .renameFolderOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Folder.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(RenameFolderMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + builder + .deleteFolderRecursiveOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Empty.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create( + DeleteFolderRecursiveMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + 
.setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + builder + .createAnywhereCacheOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(AnywhereCache.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create( + CreateAnywhereCacheMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + builder + .updateAnywhereCacheOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(AnywhereCache.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create( + UpdateAnywhereCacheMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + 
.setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createFolder. */ + public UnaryCallSettings.Builder createFolderSettings() { + return createFolderSettings; + } + + /** Returns the builder for the settings used for calls to deleteFolder. */ + public UnaryCallSettings.Builder deleteFolderSettings() { + return deleteFolderSettings; + } + + /** Returns the builder for the settings used for calls to getFolder. */ + public UnaryCallSettings.Builder getFolderSettings() { + return getFolderSettings; + } + + /** Returns the builder for the settings used for calls to listFolders. */ + public PagedCallSettings.Builder< + ListFoldersRequest, ListFoldersResponse, ListFoldersPagedResponse> + listFoldersSettings() { + return listFoldersSettings; + } + + /** Returns the builder for the settings used for calls to renameFolder. */ + public UnaryCallSettings.Builder renameFolderSettings() { + return renameFolderSettings; + } + + /** Returns the builder for the settings used for calls to renameFolder. */ + public OperationCallSettings.Builder + renameFolderOperationSettings() { + return renameFolderOperationSettings; + } + + /** Returns the builder for the settings used for calls to deleteFolderRecursive. */ + public UnaryCallSettings.Builder + deleteFolderRecursiveSettings() { + return deleteFolderRecursiveSettings; + } + + /** Returns the builder for the settings used for calls to deleteFolderRecursive. 
*/ + public OperationCallSettings.Builder< + DeleteFolderRecursiveRequest, Empty, DeleteFolderRecursiveMetadata> + deleteFolderRecursiveOperationSettings() { + return deleteFolderRecursiveOperationSettings; + } + + /** Returns the builder for the settings used for calls to getStorageLayout. */ + public UnaryCallSettings.Builder + getStorageLayoutSettings() { + return getStorageLayoutSettings; + } + + /** Returns the builder for the settings used for calls to createManagedFolder. */ + public UnaryCallSettings.Builder + createManagedFolderSettings() { + return createManagedFolderSettings; + } + + /** Returns the builder for the settings used for calls to deleteManagedFolder. */ + public UnaryCallSettings.Builder + deleteManagedFolderSettings() { + return deleteManagedFolderSettings; + } + + /** Returns the builder for the settings used for calls to getManagedFolder. */ + public UnaryCallSettings.Builder + getManagedFolderSettings() { + return getManagedFolderSettings; + } + + /** Returns the builder for the settings used for calls to listManagedFolders. */ + public PagedCallSettings.Builder< + ListManagedFoldersRequest, ListManagedFoldersResponse, ListManagedFoldersPagedResponse> + listManagedFoldersSettings() { + return listManagedFoldersSettings; + } + + /** Returns the builder for the settings used for calls to createAnywhereCache. */ + public UnaryCallSettings.Builder + createAnywhereCacheSettings() { + return createAnywhereCacheSettings; + } + + /** Returns the builder for the settings used for calls to createAnywhereCache. */ + public OperationCallSettings.Builder< + CreateAnywhereCacheRequest, AnywhereCache, CreateAnywhereCacheMetadata> + createAnywhereCacheOperationSettings() { + return createAnywhereCacheOperationSettings; + } + + /** Returns the builder for the settings used for calls to updateAnywhereCache. 
*/ + public UnaryCallSettings.Builder + updateAnywhereCacheSettings() { + return updateAnywhereCacheSettings; + } + + /** Returns the builder for the settings used for calls to updateAnywhereCache. */ + public OperationCallSettings.Builder< + UpdateAnywhereCacheRequest, AnywhereCache, UpdateAnywhereCacheMetadata> + updateAnywhereCacheOperationSettings() { + return updateAnywhereCacheOperationSettings; + } + + /** Returns the builder for the settings used for calls to disableAnywhereCache. */ + public UnaryCallSettings.Builder + disableAnywhereCacheSettings() { + return disableAnywhereCacheSettings; + } + + /** Returns the builder for the settings used for calls to pauseAnywhereCache. */ + public UnaryCallSettings.Builder + pauseAnywhereCacheSettings() { + return pauseAnywhereCacheSettings; + } + + /** Returns the builder for the settings used for calls to resumeAnywhereCache. */ + public UnaryCallSettings.Builder + resumeAnywhereCacheSettings() { + return resumeAnywhereCacheSettings; + } + + /** Returns the builder for the settings used for calls to getAnywhereCache. */ + public UnaryCallSettings.Builder + getAnywhereCacheSettings() { + return getAnywhereCacheSettings; + } + + /** Returns the builder for the settings used for calls to listAnywhereCaches. */ + public PagedCallSettings.Builder< + ListAnywhereCachesRequest, ListAnywhereCachesResponse, ListAnywhereCachesPagedResponse> + listAnywhereCachesSettings() { + return listAnywhereCachesSettings; + } + + /** Returns the builder for the settings used for calls to getProjectIntelligenceConfig. */ + public UnaryCallSettings.Builder + getProjectIntelligenceConfigSettings() { + return getProjectIntelligenceConfigSettings; + } + + /** Returns the builder for the settings used for calls to updateProjectIntelligenceConfig. 
*/ + public UnaryCallSettings.Builder + updateProjectIntelligenceConfigSettings() { + return updateProjectIntelligenceConfigSettings; + } + + /** Returns the builder for the settings used for calls to getFolderIntelligenceConfig. */ + public UnaryCallSettings.Builder + getFolderIntelligenceConfigSettings() { + return getFolderIntelligenceConfigSettings; + } + + /** Returns the builder for the settings used for calls to updateFolderIntelligenceConfig. */ + public UnaryCallSettings.Builder + updateFolderIntelligenceConfigSettings() { + return updateFolderIntelligenceConfigSettings; + } + + /** Returns the builder for the settings used for calls to getOrganizationIntelligenceConfig. */ + public UnaryCallSettings.Builder + getOrganizationIntelligenceConfigSettings() { + return getOrganizationIntelligenceConfigSettings; + } + + /** + * Returns the builder for the settings used for calls to updateOrganizationIntelligenceConfig. + */ + public UnaryCallSettings.Builder< + UpdateOrganizationIntelligenceConfigRequest, IntelligenceConfig> + updateOrganizationIntelligenceConfigSettings() { + return updateOrganizationIntelligenceConfigSettings; + } + + /** Returns the builder for the settings used for calls to getIamPolicy. */ + public UnaryCallSettings.Builder getIamPolicySettings() { + return getIamPolicySettings; + } + + /** Returns the builder for the settings used for calls to setIamPolicy. */ + public UnaryCallSettings.Builder setIamPolicySettings() { + return setIamPolicySettings; + } + + /** Returns the builder for the settings used for calls to testIamPermissions. 
*/ + public UnaryCallSettings.Builder + testIamPermissionsSettings() { + return testIamPermissionsSettings; + } + + @Override + public StorageControlStubSettings build() throws IOException { + return new StorageControlStubSettings(this); + } + } +} diff --git a/java-storage/google-cloud-storage-control/src/main/resources/META-INF/native-image/com.google.storage.control.v2/reflect-config.json b/java-storage/google-cloud-storage-control/src/main/resources/META-INF/native-image/com.google.storage.control.v2/reflect-config.json new file mode 100644 index 000000000000..10825b551109 --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/main/resources/META-INF/native-image/com.google.storage.control.v2/reflect-config.json @@ -0,0 +1,2630 @@ +[ + { + "name": "com.google.api.ClientLibraryDestination", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibraryOrganization", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldBehavior", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldInfo$Format", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.LaunchStage", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning$Builder", + "queryAllDeclaredConstructors": 
true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$History", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Style", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RoutingParameter", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RoutingParameter$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RoutingRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RoutingRule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.TypeReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.TypeReference$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta$Action", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig$LogType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Binding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Binding$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta$Action", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.iam.v1.BindingDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetIamPolicyRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetIamPolicyRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetPolicyOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetPolicyOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Policy", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Policy$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.PolicyDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": 
true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.PolicyDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.SetIamPolicyRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.SetIamPolicyRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.CancelOperationRequest", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.CancelOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.DeleteOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.DeleteOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.GetOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.GetOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.Operation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.Operation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.OperationInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.OperationInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.WaitOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.WaitOperationRequest$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": 
true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$VerificationState", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnforceNamingStyle", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": 
true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$FieldPresence", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$JsonFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$MessageEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$RepeatedFieldEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Utf8Validation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": 
true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$DefaultSymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Label", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$CType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": 
true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$JSType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionRetention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionTargetType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$OptimizeMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Semantic", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$IdempotencyLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": 
true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.Duration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + 
}, + { + "name": "com.google.rpc.Status$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.AnywhereCache", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.AnywhereCache$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.CommonLongRunningOperationMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.CommonLongRunningOperationMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.CreateAnywhereCacheMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.CreateAnywhereCacheMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.control.v2.CreateAnywhereCacheRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.CreateAnywhereCacheRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.CreateFolderRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.CreateFolderRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.CreateManagedFolderRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.CreateManagedFolderRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.DeleteFolderRecursiveMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.control.v2.DeleteFolderRecursiveMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.DeleteFolderRecursiveRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.DeleteFolderRecursiveRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.DeleteFolderRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.DeleteFolderRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.DeleteManagedFolderRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.DeleteManagedFolderRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.control.v2.DisableAnywhereCacheRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.DisableAnywhereCacheRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.Folder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.Folder$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.GetAnywhereCacheRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.GetAnywhereCacheRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.GetFolderIntelligenceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.control.v2.GetFolderIntelligenceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.GetFolderRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.GetFolderRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.GetManagedFolderRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.GetManagedFolderRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.control.v2.GetProjectIntelligenceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.GetProjectIntelligenceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.GetStorageLayoutRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.GetStorageLayoutRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.IntelligenceConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.IntelligenceConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.IntelligenceConfig$EditionConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.control.v2.IntelligenceConfig$EffectiveIntelligenceConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.IntelligenceConfig$EffectiveIntelligenceConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.IntelligenceConfig$EffectiveIntelligenceConfig$EffectiveEdition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.IntelligenceConfig$Filter", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.IntelligenceConfig$Filter$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.IntelligenceConfig$Filter$CloudStorageBuckets", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.IntelligenceConfig$Filter$CloudStorageBuckets$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, 
+ "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.IntelligenceConfig$Filter$CloudStorageLocations", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.IntelligenceConfig$Filter$CloudStorageLocations$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.IntelligenceConfig$TrialConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.IntelligenceConfig$TrialConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ListAnywhereCachesRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ListAnywhereCachesRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ListAnywhereCachesResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ListAnywhereCachesResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ListFoldersRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ListFoldersRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ListFoldersResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ListFoldersResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ListManagedFoldersRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ListManagedFoldersRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ListManagedFoldersResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ListManagedFoldersResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ManagedFolder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ManagedFolder$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.PauseAnywhereCacheRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.PauseAnywhereCacheRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.PendingRenameInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.control.v2.PendingRenameInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.RenameFolderMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.RenameFolderMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.RenameFolderRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.RenameFolderRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ResumeAnywhereCacheRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.ResumeAnywhereCacheRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.StorageLayout", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.StorageLayout$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.StorageLayout$CustomPlacementConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.StorageLayout$CustomPlacementConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.StorageLayout$HierarchicalNamespace", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.StorageLayout$HierarchicalNamespace$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.UpdateAnywhereCacheMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.UpdateAnywhereCacheMetadata$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.UpdateAnywhereCacheRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.UpdateAnywhereCacheRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.type.Expr", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.type.Expr$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + } +] \ No newline at end of file diff --git a/java-storage/google-cloud-storage-control/src/main/resources/com/google/storage/control/v2/gapic_metadata.json b/java-storage/google-cloud-storage-control/src/main/resources/com/google/storage/control/v2/gapic_metadata.json new file mode 100644 index 000000000000..0ca951cf144f --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/main/resources/com/google/storage/control/v2/gapic_metadata.json @@ -0,0 +1,99 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "java", + "protoPackage": "google.storage.control.v2", + "libraryPackage": "com.google.storage.control.v2", + "services": { + "StorageControl": { + "clients": { + "grpc": { + "libraryClient": "StorageControlClient", + "rpcs": { + "CreateAnywhereCache": { + "methods": ["createAnywhereCacheAsync", "createAnywhereCacheAsync", 
"createAnywhereCacheAsync", "createAnywhereCacheOperationCallable", "createAnywhereCacheCallable"] + }, + "CreateFolder": { + "methods": ["createFolder", "createFolder", "createFolder", "createFolderCallable"] + }, + "CreateManagedFolder": { + "methods": ["createManagedFolder", "createManagedFolder", "createManagedFolder", "createManagedFolderCallable"] + }, + "DeleteFolder": { + "methods": ["deleteFolder", "deleteFolder", "deleteFolder", "deleteFolderCallable"] + }, + "DeleteFolderRecursive": { + "methods": ["deleteFolderRecursiveAsync", "deleteFolderRecursiveAsync", "deleteFolderRecursiveAsync", "deleteFolderRecursiveOperationCallable", "deleteFolderRecursiveCallable"] + }, + "DeleteManagedFolder": { + "methods": ["deleteManagedFolder", "deleteManagedFolder", "deleteManagedFolder", "deleteManagedFolderCallable"] + }, + "DisableAnywhereCache": { + "methods": ["disableAnywhereCache", "disableAnywhereCache", "disableAnywhereCache", "disableAnywhereCacheCallable"] + }, + "GetAnywhereCache": { + "methods": ["getAnywhereCache", "getAnywhereCache", "getAnywhereCache", "getAnywhereCacheCallable"] + }, + "GetFolder": { + "methods": ["getFolder", "getFolder", "getFolder", "getFolderCallable"] + }, + "GetFolderIntelligenceConfig": { + "methods": ["getFolderIntelligenceConfig", "getFolderIntelligenceConfig", "getFolderIntelligenceConfig", "getFolderIntelligenceConfigCallable"] + }, + "GetIamPolicy": { + "methods": ["getIamPolicy", "getIamPolicy", "getIamPolicy", "getIamPolicyCallable"] + }, + "GetManagedFolder": { + "methods": ["getManagedFolder", "getManagedFolder", "getManagedFolder", "getManagedFolderCallable"] + }, + "GetOrganizationIntelligenceConfig": { + "methods": ["getOrganizationIntelligenceConfig", "getOrganizationIntelligenceConfig", "getOrganizationIntelligenceConfig", "getOrganizationIntelligenceConfigCallable"] + }, + "GetProjectIntelligenceConfig": { + "methods": ["getProjectIntelligenceConfig", "getProjectIntelligenceConfig", "getProjectIntelligenceConfig", 
"getProjectIntelligenceConfigCallable"] + }, + "GetStorageLayout": { + "methods": ["getStorageLayout", "getStorageLayout", "getStorageLayout", "getStorageLayoutCallable"] + }, + "ListAnywhereCaches": { + "methods": ["listAnywhereCaches", "listAnywhereCaches", "listAnywhereCaches", "listAnywhereCachesPagedCallable", "listAnywhereCachesCallable"] + }, + "ListFolders": { + "methods": ["listFolders", "listFolders", "listFolders", "listFoldersPagedCallable", "listFoldersCallable"] + }, + "ListManagedFolders": { + "methods": ["listManagedFolders", "listManagedFolders", "listManagedFolders", "listManagedFoldersPagedCallable", "listManagedFoldersCallable"] + }, + "PauseAnywhereCache": { + "methods": ["pauseAnywhereCache", "pauseAnywhereCache", "pauseAnywhereCache", "pauseAnywhereCacheCallable"] + }, + "RenameFolder": { + "methods": ["renameFolderAsync", "renameFolderAsync", "renameFolderAsync", "renameFolderOperationCallable", "renameFolderCallable"] + }, + "ResumeAnywhereCache": { + "methods": ["resumeAnywhereCache", "resumeAnywhereCache", "resumeAnywhereCache", "resumeAnywhereCacheCallable"] + }, + "SetIamPolicy": { + "methods": ["setIamPolicy", "setIamPolicy", "setIamPolicy", "setIamPolicyCallable"] + }, + "TestIamPermissions": { + "methods": ["testIamPermissions", "testIamPermissions", "testIamPermissions", "testIamPermissionsCallable"] + }, + "UpdateAnywhereCache": { + "methods": ["updateAnywhereCacheAsync", "updateAnywhereCacheAsync", "updateAnywhereCacheOperationCallable", "updateAnywhereCacheCallable"] + }, + "UpdateFolderIntelligenceConfig": { + "methods": ["updateFolderIntelligenceConfig", "updateFolderIntelligenceConfig", "updateFolderIntelligenceConfigCallable"] + }, + "UpdateOrganizationIntelligenceConfig": { + "methods": ["updateOrganizationIntelligenceConfig", "updateOrganizationIntelligenceConfig", "updateOrganizationIntelligenceConfigCallable"] + }, + "UpdateProjectIntelligenceConfig": { + "methods": ["updateProjectIntelligenceConfig", 
"updateProjectIntelligenceConfig", "updateProjectIntelligenceConfigCallable"] + } + } + } + } + } + } +} \ No newline at end of file diff --git a/java-storage/google-cloud-storage-control/src/test/java/com/google/storage/control/v2/MockStorageControl.java b/java-storage/google-cloud-storage-control/src/test/java/com/google/storage/control/v2/MockStorageControl.java new file mode 100644 index 000000000000..cbba4d71cdaf --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/test/java/com/google/storage/control/v2/MockStorageControl.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.control.v2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockStorageControl implements MockGrpcService { + private final MockStorageControlImpl serviceImpl; + + public MockStorageControl() { + serviceImpl = new MockStorageControlImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/java-storage/google-cloud-storage-control/src/test/java/com/google/storage/control/v2/MockStorageControlImpl.java b/java-storage/google-cloud-storage-control/src/test/java/com/google/storage/control/v2/MockStorageControlImpl.java new file mode 100644 index 000000000000..83ad040c026a --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/test/java/com/google/storage/control/v2/MockStorageControlImpl.java @@ -0,0 +1,646 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.control.v2; + +import com.google.api.core.BetaApi; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Empty; +import com.google.storage.control.v2.StorageControlGrpc.StorageControlImplBase; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockStorageControlImpl extends StorageControlImplBase { + private List requests; + private Queue responses; + + public MockStorageControlImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void createFolder(CreateFolderRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Folder) { + requests.add(request); + responseObserver.onNext(((Folder) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized 
response type %s for method CreateFolder, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Folder.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteFolder(DeleteFolderRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteFolder, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getFolder(GetFolderRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Folder) { + requests.add(request); + responseObserver.onNext(((Folder) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetFolder, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Folder.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listFolders( + ListFoldersRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListFoldersResponse) { + requests.add(request); + responseObserver.onNext(((ListFoldersResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListFolders, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ListFoldersResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void renameFolder( + RenameFolderRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method RenameFolder, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteFolderRecursive( + DeleteFolderRecursiveRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteFolderRecursive, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getStorageLayout( + GetStorageLayoutRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof StorageLayout) { + requests.add(request); + responseObserver.onNext(((StorageLayout) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetStorageLayout, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + StorageLayout.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void createManagedFolder( + CreateManagedFolderRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ManagedFolder) { + requests.add(request); + responseObserver.onNext(((ManagedFolder) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateManagedFolder, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ManagedFolder.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteManagedFolder( + DeleteManagedFolderRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteManagedFolder, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getManagedFolder( + GetManagedFolderRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ManagedFolder) { + requests.add(request); + responseObserver.onNext(((ManagedFolder) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetManagedFolder, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ManagedFolder.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listManagedFolders( + ListManagedFoldersRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListManagedFoldersResponse) { + requests.add(request); + responseObserver.onNext(((ListManagedFoldersResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListManagedFolders, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ListManagedFoldersResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void createAnywhereCache( + CreateAnywhereCacheRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateAnywhereCache, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateAnywhereCache( + UpdateAnywhereCacheRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateAnywhereCache, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void disableAnywhereCache( + DisableAnywhereCacheRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof AnywhereCache) { + requests.add(request); + responseObserver.onNext(((AnywhereCache) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DisableAnywhereCache, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + AnywhereCache.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void pauseAnywhereCache( + PauseAnywhereCacheRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof AnywhereCache) { + requests.add(request); + responseObserver.onNext(((AnywhereCache) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method PauseAnywhereCache, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + AnywhereCache.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void resumeAnywhereCache( + ResumeAnywhereCacheRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof AnywhereCache) { + requests.add(request); + responseObserver.onNext(((AnywhereCache) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ResumeAnywhereCache, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + AnywhereCache.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getAnywhereCache( + GetAnywhereCacheRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof AnywhereCache) { + requests.add(request); + responseObserver.onNext(((AnywhereCache) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetAnywhereCache, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + AnywhereCache.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listAnywhereCaches( + ListAnywhereCachesRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListAnywhereCachesResponse) { + requests.add(request); + responseObserver.onNext(((ListAnywhereCachesResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListAnywhereCaches, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ListAnywhereCachesResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getProjectIntelligenceConfig( + GetProjectIntelligenceConfigRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof IntelligenceConfig) { + requests.add(request); + responseObserver.onNext(((IntelligenceConfig) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetProjectIntelligenceConfig, expected" + + " %s or %s", + response == null ? 
"null" : response.getClass().getName(), + IntelligenceConfig.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateProjectIntelligenceConfig( + UpdateProjectIntelligenceConfigRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof IntelligenceConfig) { + requests.add(request); + responseObserver.onNext(((IntelligenceConfig) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateProjectIntelligenceConfig," + + " expected %s or %s", + response == null ? "null" : response.getClass().getName(), + IntelligenceConfig.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getFolderIntelligenceConfig( + GetFolderIntelligenceConfigRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof IntelligenceConfig) { + requests.add(request); + responseObserver.onNext(((IntelligenceConfig) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetFolderIntelligenceConfig, expected" + + " %s or %s", + response == null ? 
"null" : response.getClass().getName(), + IntelligenceConfig.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateFolderIntelligenceConfig( + UpdateFolderIntelligenceConfigRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof IntelligenceConfig) { + requests.add(request); + responseObserver.onNext(((IntelligenceConfig) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateFolderIntelligenceConfig," + + " expected %s or %s", + response == null ? "null" : response.getClass().getName(), + IntelligenceConfig.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getOrganizationIntelligenceConfig( + GetOrganizationIntelligenceConfigRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof IntelligenceConfig) { + requests.add(request); + responseObserver.onNext(((IntelligenceConfig) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetOrganizationIntelligenceConfig," + + " expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + IntelligenceConfig.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateOrganizationIntelligenceConfig( + UpdateOrganizationIntelligenceConfigRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof IntelligenceConfig) { + requests.add(request); + responseObserver.onNext(((IntelligenceConfig) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateOrganizationIntelligenceConfig," + + " expected %s or %s", + response == null ? "null" : response.getClass().getName(), + IntelligenceConfig.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getIamPolicy(GetIamPolicyRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Policy) { + requests.add(request); + responseObserver.onNext(((Policy) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetIamPolicy, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Policy.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void setIamPolicy(SetIamPolicyRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Policy) { + requests.add(request); + responseObserver.onNext(((Policy) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method SetIamPolicy, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Policy.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void testIamPermissions( + TestIamPermissionsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof TestIamPermissionsResponse) { + requests.add(request); + responseObserver.onNext(((TestIamPermissionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method TestIamPermissions, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + TestIamPermissionsResponse.class.getName(), + Exception.class.getName()))); + } + } +} diff --git a/java-storage/google-cloud-storage-control/src/test/java/com/google/storage/control/v2/StorageControlClientHttpJsonTest.java b/java-storage/google-cloud-storage-control/src/test/java/com/google/storage/control/v2/StorageControlClientHttpJsonTest.java new file mode 100644 index 000000000000..953fa9b1eecc --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/test/java/com/google/storage/control/v2/StorageControlClientHttpJsonTest.java @@ -0,0 +1,714 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.control.v2; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.httpjson.GaxHttpJsonProperties; +import com.google.api.gax.httpjson.testing.MockHttpService; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.testing.FakeStatusCode; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Timestamp; +import com.google.storage.control.v2.stub.HttpJsonStorageControlStub; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class StorageControlClientHttpJsonTest { + private static MockHttpService mockService; + private static StorageControlClient client; + + @BeforeClass + public static void startStaticServer() throws IOException { + mockService = + new MockHttpService( + HttpJsonStorageControlStub.getMethodDescriptors(), + StorageControlSettings.getDefaultEndpoint()); + StorageControlSettings settings = + StorageControlSettings.newHttpJsonBuilder() + .setTransportChannelProvider( + StorageControlSettings.defaultHttpJsonTransportProviderBuilder() + .setHttpTransport(mockService) + .build()) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = StorageControlClient.create(settings); + } + + @AfterClass + public static void stopServer() { + client.close(); + } + + @Before + public void setUp() {} + + @After + public void tearDown() throws Exception { + mockService.reset(); + } + + @Test + public void createFolderUnsupportedMethodTest() throws Exception { + // The createFolder() method is not 
supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void deleteFolderUnsupportedMethodTest() throws Exception { + // The deleteFolder() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void getFolderUnsupportedMethodTest() throws Exception { + // The getFolder() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void listFoldersUnsupportedMethodTest() throws Exception { + // The listFolders() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void renameFolderUnsupportedMethodTest() throws Exception { + // The renameFolder() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void deleteFolderRecursiveUnsupportedMethodTest() throws Exception { + // The deleteFolderRecursive() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void getStorageLayoutUnsupportedMethodTest() throws Exception { + // The getStorageLayout() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void createManagedFolderUnsupportedMethodTest() throws Exception { + // The createManagedFolder() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void deleteManagedFolderUnsupportedMethodTest() throws Exception { + // The deleteManagedFolder() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void getManagedFolderUnsupportedMethodTest() throws Exception { + // The getManagedFolder() method is not supported in REST transport. 
+ // This empty test is generated for technical reasons. + } + + @Test + public void listManagedFoldersUnsupportedMethodTest() throws Exception { + // The listManagedFolders() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void createAnywhereCacheUnsupportedMethodTest() throws Exception { + // The createAnywhereCache() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void updateAnywhereCacheUnsupportedMethodTest() throws Exception { + // The updateAnywhereCache() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void disableAnywhereCacheUnsupportedMethodTest() throws Exception { + // The disableAnywhereCache() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void pauseAnywhereCacheUnsupportedMethodTest() throws Exception { + // The pauseAnywhereCache() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void resumeAnywhereCacheUnsupportedMethodTest() throws Exception { + // The resumeAnywhereCache() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void getAnywhereCacheUnsupportedMethodTest() throws Exception { + // The getAnywhereCache() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void listAnywhereCachesUnsupportedMethodTest() throws Exception { + // The listAnywhereCaches() method is not supported in REST transport. + // This empty test is generated for technical reasons. 
+ } + + @Test + public void getProjectIntelligenceConfigTest() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + IntelligenceConfigName name = + IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]"); + + IntelligenceConfig actualResponse = client.getProjectIntelligenceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getProjectIntelligenceConfigExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + IntelligenceConfigName name = + IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]"); + client.getProjectIntelligenceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getProjectIntelligenceConfigTest2() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + String name = "projects/project-8842/locations/location-8842/intelligenceConfig"; + + IntelligenceConfig actualResponse = client.getProjectIntelligenceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getProjectIntelligenceConfigExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = "projects/project-8842/locations/location-8842/intelligenceConfig"; + client.getProjectIntelligenceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateProjectIntelligenceConfigTest() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + IntelligenceConfig intelligenceConfig = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + IntelligenceConfig actualResponse = + client.updateProjectIntelligenceConfig(intelligenceConfig, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateProjectIntelligenceConfigExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + 
IntelligenceConfig intelligenceConfig = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]") + .toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateProjectIntelligenceConfig(intelligenceConfig, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getFolderIntelligenceConfigTest() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + IntelligenceConfigName name = + IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]"); + + IntelligenceConfig actualResponse = client.getFolderIntelligenceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void 
getFolderIntelligenceConfigExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + IntelligenceConfigName name = + IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]"); + client.getFolderIntelligenceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getFolderIntelligenceConfigTest2() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + String name = "folders/folder-8383/locations/location-8383/intelligenceConfig"; + + IntelligenceConfig actualResponse = client.getFolderIntelligenceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getFolderIntelligenceConfigExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + 
+ try { + String name = "folders/folder-8383/locations/location-8383/intelligenceConfig"; + client.getFolderIntelligenceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void updateFolderIntelligenceConfigTest() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + IntelligenceConfig intelligenceConfig = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + IntelligenceConfig actualResponse = + client.updateFolderIntelligenceConfig(intelligenceConfig, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void 
updateFolderIntelligenceConfigExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + IntelligenceConfig intelligenceConfig = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateFolderIntelligenceConfig(intelligenceConfig, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getOrganizationIntelligenceConfigTest() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName(IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + IntelligenceConfigName name = IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]"); + + IntelligenceConfig actualResponse = client.getOrganizationIntelligenceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + 
.get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getOrganizationIntelligenceConfigExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + IntelligenceConfigName name = IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]"); + client.getOrganizationIntelligenceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getOrganizationIntelligenceConfigTest2() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName(IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + String name = "organizations/organization-1958/locations/location-1958/intelligenceConfig"; + + IntelligenceConfig actualResponse = client.getOrganizationIntelligenceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + 
public void getOrganizationIntelligenceConfigExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = "organizations/organization-1958/locations/location-1958/intelligenceConfig"; + client.getOrganizationIntelligenceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void updateOrganizationIntelligenceConfigTest() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName(IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + IntelligenceConfig intelligenceConfig = + IntelligenceConfig.newBuilder() + .setName(IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + IntelligenceConfig actualResponse = + client.updateOrganizationIntelligenceConfig(intelligenceConfig, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + 
.get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateOrganizationIntelligenceConfigExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + IntelligenceConfig intelligenceConfig = + IntelligenceConfig.newBuilder() + .setName(IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateOrganizationIntelligenceConfig(intelligenceConfig, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getIamPolicyUnsupportedMethodTest() throws Exception { + // The getIamPolicy() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void setIamPolicyUnsupportedMethodTest() throws Exception { + // The setIamPolicy() method is not supported in REST transport. + // This empty test is generated for technical reasons. + } + + @Test + public void testIamPermissionsUnsupportedMethodTest() throws Exception { + // The testIamPermissions() method is not supported in REST transport. + // This empty test is generated for technical reasons. 
+ } +} diff --git a/java-storage/google-cloud-storage-control/src/test/java/com/google/storage/control/v2/StorageControlClientTest.java b/java-storage/google-cloud-storage-control/src/test/java/com/google/storage/control/v2/StorageControlClientTest.java new file mode 100644 index 000000000000..1a158bdcc8af --- /dev/null +++ b/java-storage/google-cloud-storage-control/src/test/java/com/google/storage/control/v2/StorageControlClientTest.java @@ -0,0 +1,2330 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.control.v2; + +import static com.google.storage.control.v2.StorageControlClient.ListAnywhereCachesPagedResponse; +import static com.google.storage.control.v2.StorageControlClient.ListFoldersPagedResponse; +import static com.google.storage.control.v2.StorageControlClient.ListManagedFoldersPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.resourcenames.ResourceName; +import com.google.common.collect.Lists; +import com.google.iam.v1.AuditConfig; +import com.google.iam.v1.Binding; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.Duration; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Timestamp; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class StorageControlClientTest { + 
private static MockServiceHelper mockServiceHelper; + private static MockStorageControl mockStorageControl; + private LocalChannelProvider channelProvider; + private StorageControlClient client; + + @BeforeClass + public static void startStaticServer() { + mockStorageControl = new MockStorageControl(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockStorageControl)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); + StorageControlSettings settings = + StorageControlSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = StorageControlClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void createFolderTest() throws Exception { + Folder expectedResponse = + Folder.newBuilder() + .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString()) + .setMetageneration(1048558813) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingRenameInfo(PendingRenameInfo.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + Folder folder = Folder.newBuilder().build(); + String folderId = "folderId294109737"; + + Folder actualResponse = client.createFolder(parent, folder, folderId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateFolderRequest actualRequest = ((CreateFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); 
+ Assert.assertEquals(folder, actualRequest.getFolder()); + Assert.assertEquals(folderId, actualRequest.getFolderId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createFolderExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + Folder folder = Folder.newBuilder().build(); + String folderId = "folderId294109737"; + client.createFolder(parent, folder, folderId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void createFolderTest2() throws Exception { + Folder expectedResponse = + Folder.newBuilder() + .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString()) + .setMetageneration(1048558813) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingRenameInfo(PendingRenameInfo.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String parent = "parent-995424086"; + Folder folder = Folder.newBuilder().build(); + String folderId = "folderId294109737"; + + Folder actualResponse = client.createFolder(parent, folder, folderId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateFolderRequest actualRequest = ((CreateFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(folder, actualRequest.getFolder()); + Assert.assertEquals(folderId, actualRequest.getFolderId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + 
ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createFolderExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String parent = "parent-995424086"; + Folder folder = Folder.newBuilder().build(); + String folderId = "folderId294109737"; + client.createFolder(parent, folder, folderId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteFolderTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockStorageControl.addResponse(expectedResponse); + + FolderName name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]"); + + client.deleteFolder(name); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteFolderRequest actualRequest = ((DeleteFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteFolderExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + FolderName name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]"); + client.deleteFolder(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteFolderTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockStorageControl.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteFolder(name); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteFolderRequest actualRequest = ((DeleteFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteFolderExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + client.deleteFolder(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getFolderTest() throws Exception { + Folder expectedResponse = + Folder.newBuilder() + .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString()) + .setMetageneration(1048558813) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingRenameInfo(PendingRenameInfo.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + FolderName name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]"); + + Folder actualResponse = client.getFolder(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetFolderRequest actualRequest = ((GetFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getFolderExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + FolderName name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]"); + client.getFolder(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getFolderTest2() throws Exception { + Folder expectedResponse = + Folder.newBuilder() + .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString()) + .setMetageneration(1048558813) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingRenameInfo(PendingRenameInfo.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String name = "name3373707"; + + Folder actualResponse = client.getFolder(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetFolderRequest actualRequest = ((GetFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getFolderExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + client.getFolder(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listFoldersTest() throws Exception { + Folder responsesElement = Folder.newBuilder().build(); + ListFoldersResponse expectedResponse = + ListFoldersResponse.newBuilder() + .setNextPageToken("") + .addAllFolders(Arrays.asList(responsesElement)) + .build(); + mockStorageControl.addResponse(expectedResponse); + + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + + ListFoldersPagedResponse pagedListResponse = client.listFolders(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getFoldersList().get(0), resources.get(0)); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListFoldersRequest actualRequest = ((ListFoldersRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listFoldersExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + client.listFolders(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listFoldersTest2() throws Exception { + Folder responsesElement = Folder.newBuilder().build(); + ListFoldersResponse expectedResponse = + ListFoldersResponse.newBuilder() + .setNextPageToken("") + .addAllFolders(Arrays.asList(responsesElement)) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListFoldersPagedResponse pagedListResponse = client.listFolders(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getFoldersList().get(0), resources.get(0)); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListFoldersRequest actualRequest = ((ListFoldersRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listFoldersExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String parent = "parent-995424086"; + client.listFolders(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void renameFolderTest() throws Exception { + Folder expectedResponse = + Folder.newBuilder() + .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString()) + .setMetageneration(1048558813) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingRenameInfo(PendingRenameInfo.newBuilder().build()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("renameFolderTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockStorageControl.addResponse(resultOperation); + + FolderName name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]"); + String destinationFolderId = "destinationFolderId-480084905"; + + Folder actualResponse = client.renameFolderAsync(name, destinationFolderId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + RenameFolderRequest actualRequest = ((RenameFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertEquals(destinationFolderId, actualRequest.getDestinationFolderId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void renameFolderExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + FolderName name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]"); + String destinationFolderId = "destinationFolderId-480084905"; + client.renameFolderAsync(name, destinationFolderId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + 
InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void renameFolderTest2() throws Exception { + Folder expectedResponse = + Folder.newBuilder() + .setName(FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]").toString()) + .setMetageneration(1048558813) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingRenameInfo(PendingRenameInfo.newBuilder().build()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("renameFolderTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockStorageControl.addResponse(resultOperation); + + String name = "name3373707"; + String destinationFolderId = "destinationFolderId-480084905"; + + Folder actualResponse = client.renameFolderAsync(name, destinationFolderId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + RenameFolderRequest actualRequest = ((RenameFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertEquals(destinationFolderId, actualRequest.getDestinationFolderId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void renameFolderExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + String destinationFolderId = "destinationFolderId-480084905"; + client.renameFolderAsync(name, destinationFolderId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + 
Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void deleteFolderRecursiveTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("deleteFolderRecursiveTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockStorageControl.addResponse(resultOperation); + + FolderName name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]"); + + client.deleteFolderRecursiveAsync(name).get(); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteFolderRecursiveRequest actualRequest = + ((DeleteFolderRecursiveRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteFolderRecursiveExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + FolderName name = FolderName.of("[PROJECT]", "[BUCKET]", "[FOLDER]"); + client.deleteFolderRecursiveAsync(name).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void deleteFolderRecursiveTest2() throws Exception { + Empty expectedResponse = 
Empty.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("deleteFolderRecursiveTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockStorageControl.addResponse(resultOperation); + + String name = "name3373707"; + + client.deleteFolderRecursiveAsync(name).get(); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteFolderRecursiveRequest actualRequest = + ((DeleteFolderRecursiveRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteFolderRecursiveExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + client.deleteFolderRecursiveAsync(name).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void getStorageLayoutTest() throws Exception { + StorageLayout expectedResponse = + StorageLayout.newBuilder() + .setName(StorageLayoutName.of("[PROJECT]", "[BUCKET]").toString()) + .setLocation("location1901043637") + .setLocationType("locationType-58277745") + .setCustomPlacementConfig(StorageLayout.CustomPlacementConfig.newBuilder().build()) + .setHierarchicalNamespace(StorageLayout.HierarchicalNamespace.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + StorageLayoutName name = 
StorageLayoutName.of("[PROJECT]", "[BUCKET]"); + + StorageLayout actualResponse = client.getStorageLayout(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetStorageLayoutRequest actualRequest = ((GetStorageLayoutRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getStorageLayoutExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + StorageLayoutName name = StorageLayoutName.of("[PROJECT]", "[BUCKET]"); + client.getStorageLayout(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getStorageLayoutTest2() throws Exception { + StorageLayout expectedResponse = + StorageLayout.newBuilder() + .setName(StorageLayoutName.of("[PROJECT]", "[BUCKET]").toString()) + .setLocation("location1901043637") + .setLocationType("locationType-58277745") + .setCustomPlacementConfig(StorageLayout.CustomPlacementConfig.newBuilder().build()) + .setHierarchicalNamespace(StorageLayout.HierarchicalNamespace.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String name = "name3373707"; + + StorageLayout actualResponse = client.getStorageLayout(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetStorageLayoutRequest actualRequest = ((GetStorageLayoutRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getStorageLayoutExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + client.getStorageLayout(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createManagedFolderTest() throws Exception { + ManagedFolder expectedResponse = + ManagedFolder.newBuilder() + .setName(ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]").toString()) + .setMetageneration(1048558813) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + ManagedFolder managedFolder = ManagedFolder.newBuilder().build(); + String managedFolderId = "managedFolderId-2027084056"; + + ManagedFolder actualResponse = + client.createManagedFolder(parent, managedFolder, managedFolderId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateManagedFolderRequest actualRequest = ((CreateManagedFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(managedFolder, actualRequest.getManagedFolder()); + Assert.assertEquals(managedFolderId, actualRequest.getManagedFolderId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createManagedFolderExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + ManagedFolder managedFolder = ManagedFolder.newBuilder().build(); + String managedFolderId = "managedFolderId-2027084056"; + client.createManagedFolder(parent, managedFolder, managedFolderId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createManagedFolderTest2() throws Exception { + ManagedFolder expectedResponse = + ManagedFolder.newBuilder() + .setName(ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]").toString()) + .setMetageneration(1048558813) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String parent = "parent-995424086"; + ManagedFolder managedFolder = ManagedFolder.newBuilder().build(); + String managedFolderId = "managedFolderId-2027084056"; + + ManagedFolder actualResponse = + client.createManagedFolder(parent, managedFolder, managedFolderId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateManagedFolderRequest actualRequest = ((CreateManagedFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(managedFolder, actualRequest.getManagedFolder()); + Assert.assertEquals(managedFolderId, actualRequest.getManagedFolderId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createManagedFolderExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String parent = "parent-995424086"; + ManagedFolder managedFolder = ManagedFolder.newBuilder().build(); + String managedFolderId = "managedFolderId-2027084056"; + client.createManagedFolder(parent, managedFolder, managedFolderId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteManagedFolderTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockStorageControl.addResponse(expectedResponse); + + ManagedFolderName name = ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]"); + + client.deleteManagedFolder(name); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteManagedFolderRequest actualRequest = ((DeleteManagedFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteManagedFolderExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + ManagedFolderName name = ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]"); + client.deleteManagedFolder(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteManagedFolderTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockStorageControl.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteManagedFolder(name); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteManagedFolderRequest actualRequest = ((DeleteManagedFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteManagedFolderExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + client.deleteManagedFolder(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getManagedFolderTest() throws Exception { + ManagedFolder expectedResponse = + ManagedFolder.newBuilder() + .setName(ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]").toString()) + .setMetageneration(1048558813) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + ManagedFolderName name = ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]"); + + ManagedFolder actualResponse = client.getManagedFolder(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetManagedFolderRequest actualRequest = ((GetManagedFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getManagedFolderExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + ManagedFolderName name = ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]"); + client.getManagedFolder(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getManagedFolderTest2() throws Exception { + ManagedFolder expectedResponse = + ManagedFolder.newBuilder() + .setName(ManagedFolderName.of("[PROJECT]", "[BUCKET]", "[MANAGED_FOLDER]").toString()) + .setMetageneration(1048558813) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String name = "name3373707"; + + ManagedFolder actualResponse = client.getManagedFolder(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetManagedFolderRequest actualRequest = ((GetManagedFolderRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getManagedFolderExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + client.getManagedFolder(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listManagedFoldersTest() throws Exception { + ManagedFolder responsesElement = ManagedFolder.newBuilder().build(); + ListManagedFoldersResponse expectedResponse = + ListManagedFoldersResponse.newBuilder() + .setNextPageToken("") + .addAllManagedFolders(Arrays.asList(responsesElement)) + .build(); + mockStorageControl.addResponse(expectedResponse); + + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + + ListManagedFoldersPagedResponse pagedListResponse = client.listManagedFolders(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getManagedFoldersList().get(0), resources.get(0)); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListManagedFoldersRequest actualRequest = ((ListManagedFoldersRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listManagedFoldersExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + client.listManagedFolders(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listManagedFoldersTest2() throws Exception { + ManagedFolder responsesElement = ManagedFolder.newBuilder().build(); + ListManagedFoldersResponse expectedResponse = + ListManagedFoldersResponse.newBuilder() + .setNextPageToken("") + .addAllManagedFolders(Arrays.asList(responsesElement)) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListManagedFoldersPagedResponse pagedListResponse = client.listManagedFolders(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getManagedFoldersList().get(0), resources.get(0)); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListManagedFoldersRequest actualRequest = ((ListManagedFoldersRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listManagedFoldersExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String parent = "parent-995424086"; + client.listManagedFolders(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createAnywhereCacheTest() throws Exception { + AnywhereCache expectedResponse = + AnywhereCache.newBuilder() + .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString()) + .setZone("zone3744684") + .setTtl(Duration.newBuilder().build()) + .setAdmissionPolicy("admissionPolicy-1063600485") + .setState("state109757585") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingUpdate(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createAnywhereCacheTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockStorageControl.addResponse(resultOperation); + + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + AnywhereCache anywhereCache = AnywhereCache.newBuilder().build(); + + AnywhereCache actualResponse = client.createAnywhereCacheAsync(parent, anywhereCache).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateAnywhereCacheRequest actualRequest = ((CreateAnywhereCacheRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(anywhereCache, actualRequest.getAnywhereCache()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createAnywhereCacheExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + AnywhereCache anywhereCache = AnywhereCache.newBuilder().build(); + client.createAnywhereCacheAsync(parent, anywhereCache).get(); + Assert.fail("No exception 
raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void createAnywhereCacheTest2() throws Exception { + AnywhereCache expectedResponse = + AnywhereCache.newBuilder() + .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString()) + .setZone("zone3744684") + .setTtl(Duration.newBuilder().build()) + .setAdmissionPolicy("admissionPolicy-1063600485") + .setState("state109757585") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingUpdate(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createAnywhereCacheTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockStorageControl.addResponse(resultOperation); + + String parent = "parent-995424086"; + AnywhereCache anywhereCache = AnywhereCache.newBuilder().build(); + + AnywhereCache actualResponse = client.createAnywhereCacheAsync(parent, anywhereCache).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateAnywhereCacheRequest actualRequest = ((CreateAnywhereCacheRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(anywhereCache, actualRequest.getAnywhereCache()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createAnywhereCacheExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + 
mockStorageControl.addException(exception); + + try { + String parent = "parent-995424086"; + AnywhereCache anywhereCache = AnywhereCache.newBuilder().build(); + client.createAnywhereCacheAsync(parent, anywhereCache).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void updateAnywhereCacheTest() throws Exception { + AnywhereCache expectedResponse = + AnywhereCache.newBuilder() + .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString()) + .setZone("zone3744684") + .setTtl(Duration.newBuilder().build()) + .setAdmissionPolicy("admissionPolicy-1063600485") + .setState("state109757585") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingUpdate(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("updateAnywhereCacheTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockStorageControl.addResponse(resultOperation); + + AnywhereCache anywhereCache = AnywhereCache.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + AnywhereCache actualResponse = client.updateAnywhereCacheAsync(anywhereCache, updateMask).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateAnywhereCacheRequest actualRequest = ((UpdateAnywhereCacheRequest) actualRequests.get(0)); + + Assert.assertEquals(anywhereCache, actualRequest.getAnywhereCache()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + 
ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateAnywhereCacheExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + AnywhereCache anywhereCache = AnywhereCache.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateAnywhereCacheAsync(anywhereCache, updateMask).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void disableAnywhereCacheTest() throws Exception { + AnywhereCache expectedResponse = + AnywhereCache.newBuilder() + .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString()) + .setZone("zone3744684") + .setTtl(Duration.newBuilder().build()) + .setAdmissionPolicy("admissionPolicy-1063600485") + .setState("state109757585") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingUpdate(true) + .build(); + mockStorageControl.addResponse(expectedResponse); + + AnywhereCacheName name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + + AnywhereCache actualResponse = client.disableAnywhereCache(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DisableAnywhereCacheRequest actualRequest = + ((DisableAnywhereCacheRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + 
channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void disableAnywhereCacheExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + AnywhereCacheName name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + client.disableAnywhereCache(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void disableAnywhereCacheTest2() throws Exception { + AnywhereCache expectedResponse = + AnywhereCache.newBuilder() + .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString()) + .setZone("zone3744684") + .setTtl(Duration.newBuilder().build()) + .setAdmissionPolicy("admissionPolicy-1063600485") + .setState("state109757585") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingUpdate(true) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String name = "name3373707"; + + AnywhereCache actualResponse = client.disableAnywhereCache(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DisableAnywhereCacheRequest actualRequest = + ((DisableAnywhereCacheRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void disableAnywhereCacheExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + 
mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + client.disableAnywhereCache(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void pauseAnywhereCacheTest() throws Exception { + AnywhereCache expectedResponse = + AnywhereCache.newBuilder() + .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString()) + .setZone("zone3744684") + .setTtl(Duration.newBuilder().build()) + .setAdmissionPolicy("admissionPolicy-1063600485") + .setState("state109757585") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingUpdate(true) + .build(); + mockStorageControl.addResponse(expectedResponse); + + AnywhereCacheName name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + + AnywhereCache actualResponse = client.pauseAnywhereCache(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + PauseAnywhereCacheRequest actualRequest = ((PauseAnywhereCacheRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void pauseAnywhereCacheExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + AnywhereCacheName name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + client.pauseAnywhereCache(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void pauseAnywhereCacheTest2() throws Exception { + AnywhereCache expectedResponse = + AnywhereCache.newBuilder() + .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString()) + .setZone("zone3744684") + .setTtl(Duration.newBuilder().build()) + .setAdmissionPolicy("admissionPolicy-1063600485") + .setState("state109757585") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingUpdate(true) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String name = "name3373707"; + + AnywhereCache actualResponse = client.pauseAnywhereCache(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + PauseAnywhereCacheRequest actualRequest = ((PauseAnywhereCacheRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void pauseAnywhereCacheExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + client.pauseAnywhereCache(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void resumeAnywhereCacheTest() throws Exception { + AnywhereCache expectedResponse = + AnywhereCache.newBuilder() + .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString()) + .setZone("zone3744684") + .setTtl(Duration.newBuilder().build()) + .setAdmissionPolicy("admissionPolicy-1063600485") + .setState("state109757585") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingUpdate(true) + .build(); + mockStorageControl.addResponse(expectedResponse); + + AnywhereCacheName name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + + AnywhereCache actualResponse = client.resumeAnywhereCache(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ResumeAnywhereCacheRequest actualRequest = ((ResumeAnywhereCacheRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void resumeAnywhereCacheExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + AnywhereCacheName name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + client.resumeAnywhereCache(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void resumeAnywhereCacheTest2() throws Exception { + AnywhereCache expectedResponse = + AnywhereCache.newBuilder() + .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString()) + .setZone("zone3744684") + .setTtl(Duration.newBuilder().build()) + .setAdmissionPolicy("admissionPolicy-1063600485") + .setState("state109757585") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingUpdate(true) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String name = "name3373707"; + + AnywhereCache actualResponse = client.resumeAnywhereCache(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ResumeAnywhereCacheRequest actualRequest = ((ResumeAnywhereCacheRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void resumeAnywhereCacheExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + client.resumeAnywhereCache(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getAnywhereCacheTest() throws Exception { + AnywhereCache expectedResponse = + AnywhereCache.newBuilder() + .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString()) + .setZone("zone3744684") + .setTtl(Duration.newBuilder().build()) + .setAdmissionPolicy("admissionPolicy-1063600485") + .setState("state109757585") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingUpdate(true) + .build(); + mockStorageControl.addResponse(expectedResponse); + + AnywhereCacheName name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + + AnywhereCache actualResponse = client.getAnywhereCache(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetAnywhereCacheRequest actualRequest = ((GetAnywhereCacheRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getAnywhereCacheExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + AnywhereCacheName name = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + client.getAnywhereCache(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getAnywhereCacheTest2() throws Exception { + AnywhereCache expectedResponse = + AnywhereCache.newBuilder() + .setName(AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]").toString()) + .setZone("zone3744684") + .setTtl(Duration.newBuilder().build()) + .setAdmissionPolicy("admissionPolicy-1063600485") + .setState("state109757585") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setPendingUpdate(true) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String name = "name3373707"; + + AnywhereCache actualResponse = client.getAnywhereCache(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetAnywhereCacheRequest actualRequest = ((GetAnywhereCacheRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getAnywhereCacheExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + client.getAnywhereCache(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listAnywhereCachesTest() throws Exception { + AnywhereCache responsesElement = AnywhereCache.newBuilder().build(); + ListAnywhereCachesResponse expectedResponse = + ListAnywhereCachesResponse.newBuilder() + .setNextPageToken("") + .addAllAnywhereCaches(Arrays.asList(responsesElement)) + .build(); + mockStorageControl.addResponse(expectedResponse); + + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + + ListAnywhereCachesPagedResponse pagedListResponse = client.listAnywhereCaches(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getAnywhereCachesList().get(0), resources.get(0)); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListAnywhereCachesRequest actualRequest = ((ListAnywhereCachesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listAnywhereCachesExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + BucketName parent = BucketName.of("[PROJECT]", "[BUCKET]"); + client.listAnywhereCaches(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listAnywhereCachesTest2() throws Exception { + AnywhereCache responsesElement = AnywhereCache.newBuilder().build(); + ListAnywhereCachesResponse expectedResponse = + ListAnywhereCachesResponse.newBuilder() + .setNextPageToken("") + .addAllAnywhereCaches(Arrays.asList(responsesElement)) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListAnywhereCachesPagedResponse pagedListResponse = client.listAnywhereCaches(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getAnywhereCachesList().get(0), resources.get(0)); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListAnywhereCachesRequest actualRequest = ((ListAnywhereCachesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listAnywhereCachesExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String parent = "parent-995424086"; + client.listAnywhereCaches(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getProjectIntelligenceConfigTest() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + IntelligenceConfigName name = + IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]"); + + IntelligenceConfig actualResponse = client.getProjectIntelligenceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetProjectIntelligenceConfigRequest actualRequest = + ((GetProjectIntelligenceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getProjectIntelligenceConfigExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + IntelligenceConfigName name = + IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]"); + client.getProjectIntelligenceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getProjectIntelligenceConfigTest2() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String name = "name3373707"; + + IntelligenceConfig actualResponse = client.getProjectIntelligenceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetProjectIntelligenceConfigRequest actualRequest = + ((GetProjectIntelligenceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getProjectIntelligenceConfigExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + client.getProjectIntelligenceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateProjectIntelligenceConfigTest() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofProjectLocationName("[PROJECT]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + IntelligenceConfig intelligenceConfig = IntelligenceConfig.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + IntelligenceConfig actualResponse = + client.updateProjectIntelligenceConfig(intelligenceConfig, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateProjectIntelligenceConfigRequest actualRequest = + ((UpdateProjectIntelligenceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(intelligenceConfig, actualRequest.getIntelligenceConfig()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateProjectIntelligenceConfigExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + IntelligenceConfig intelligenceConfig = IntelligenceConfig.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateProjectIntelligenceConfig(intelligenceConfig, updateMask); + Assert.fail("No exception raised"); + } catch 
(InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getFolderIntelligenceConfigTest() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + IntelligenceConfigName name = + IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]"); + + IntelligenceConfig actualResponse = client.getFolderIntelligenceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetFolderIntelligenceConfigRequest actualRequest = + ((GetFolderIntelligenceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getFolderIntelligenceConfigExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + IntelligenceConfigName name = + IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]"); + client.getFolderIntelligenceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getFolderIntelligenceConfigTest2() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String name = "name3373707"; + + IntelligenceConfig actualResponse = client.getFolderIntelligenceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetFolderIntelligenceConfigRequest actualRequest = + ((GetFolderIntelligenceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getFolderIntelligenceConfigExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + client.getFolderIntelligenceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateFolderIntelligenceConfigTest() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName( + IntelligenceConfigName.ofFolderLocationName("[FOLDER]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + IntelligenceConfig intelligenceConfig = IntelligenceConfig.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + IntelligenceConfig actualResponse = + client.updateFolderIntelligenceConfig(intelligenceConfig, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateFolderIntelligenceConfigRequest actualRequest = + ((UpdateFolderIntelligenceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(intelligenceConfig, actualRequest.getIntelligenceConfig()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateFolderIntelligenceConfigExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + IntelligenceConfig intelligenceConfig = IntelligenceConfig.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateFolderIntelligenceConfig(intelligenceConfig, updateMask); + Assert.fail("No exception raised"); + } catch 
(InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getOrganizationIntelligenceConfigTest() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName(IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + IntelligenceConfigName name = IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]"); + + IntelligenceConfig actualResponse = client.getOrganizationIntelligenceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetOrganizationIntelligenceConfigRequest actualRequest = + ((GetOrganizationIntelligenceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getOrganizationIntelligenceConfigExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + IntelligenceConfigName name = IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]"); + client.getOrganizationIntelligenceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getOrganizationIntelligenceConfigTest2() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName(IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String name = "name3373707"; + + IntelligenceConfig actualResponse = client.getOrganizationIntelligenceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetOrganizationIntelligenceConfigRequest actualRequest = + ((GetOrganizationIntelligenceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getOrganizationIntelligenceConfigExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String name = "name3373707"; + client.getOrganizationIntelligenceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateOrganizationIntelligenceConfigTest() throws Exception { + IntelligenceConfig expectedResponse = + IntelligenceConfig.newBuilder() + .setName(IntelligenceConfigName.ofOrgLocationName("[ORG]", "[LOCATION]").toString()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFilter(IntelligenceConfig.Filter.newBuilder().build()) + .setEffectiveIntelligenceConfig( + IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder().build()) + .setTrialConfig(IntelligenceConfig.TrialConfig.newBuilder().build()) + .build(); + mockStorageControl.addResponse(expectedResponse); + + IntelligenceConfig intelligenceConfig = IntelligenceConfig.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + IntelligenceConfig actualResponse = + client.updateOrganizationIntelligenceConfig(intelligenceConfig, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateOrganizationIntelligenceConfigRequest actualRequest = + ((UpdateOrganizationIntelligenceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(intelligenceConfig, actualRequest.getIntelligenceConfig()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateOrganizationIntelligenceConfigExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + IntelligenceConfig intelligenceConfig = IntelligenceConfig.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateOrganizationIntelligenceConfig(intelligenceConfig, updateMask); + Assert.fail("No exception raised"); + 
} catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getIamPolicyTest() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockStorageControl.addResponse(expectedResponse); + + ResourceName resource = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + + Policy actualResponse = client.getIamPolicy(resource); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetIamPolicyRequest actualRequest = ((GetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource.toString(), actualRequest.getResource()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getIamPolicyExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + ResourceName resource = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + client.getIamPolicy(resource); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getIamPolicyTest2() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String resource = "resource-341064690"; + + Policy actualResponse = client.getIamPolicy(resource); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetIamPolicyRequest actualRequest = ((GetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource, actualRequest.getResource()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getIamPolicyExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String resource = "resource-341064690"; + client.getIamPolicy(resource); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void setIamPolicyTest() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockStorageControl.addResponse(expectedResponse); + + ResourceName resource = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + Policy policy = Policy.newBuilder().build(); + + Policy actualResponse = client.setIamPolicy(resource, policy); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + SetIamPolicyRequest actualRequest = ((SetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource.toString(), actualRequest.getResource()); + Assert.assertEquals(policy, actualRequest.getPolicy()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void setIamPolicyExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + ResourceName resource = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + Policy policy = Policy.newBuilder().build(); + client.setIamPolicy(resource, policy); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void setIamPolicyTest2() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockStorageControl.addResponse(expectedResponse); + + String resource = "resource-341064690"; + Policy policy = Policy.newBuilder().build(); + + Policy actualResponse = client.setIamPolicy(resource, policy); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + SetIamPolicyRequest actualRequest = ((SetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource, actualRequest.getResource()); + Assert.assertEquals(policy, actualRequest.getPolicy()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void setIamPolicyExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String resource = "resource-341064690"; + Policy policy = Policy.newBuilder().build(); + client.setIamPolicy(resource, policy); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void testIamPermissionsTest() throws Exception { + TestIamPermissionsResponse expectedResponse = + TestIamPermissionsResponse.newBuilder().addAllPermissions(new ArrayList()).build(); + mockStorageControl.addResponse(expectedResponse); + + ResourceName resource = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + List permissions = new ArrayList<>(); + + TestIamPermissionsResponse actualResponse = client.testIamPermissions(resource, permissions); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + TestIamPermissionsRequest actualRequest = ((TestIamPermissionsRequest) actualRequests.get(0)); + + Assert.assertEquals(resource.toString(), actualRequest.getResource()); + Assert.assertEquals(permissions, actualRequest.getPermissionsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void testIamPermissionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + ResourceName resource = AnywhereCacheName.of("[PROJECT]", "[BUCKET]", "[ANYWHERE_CACHE]"); + List permissions = new ArrayList<>(); + client.testIamPermissions(resource, permissions); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void testIamPermissionsTest2() throws Exception { + TestIamPermissionsResponse expectedResponse = + TestIamPermissionsResponse.newBuilder().addAllPermissions(new ArrayList()).build(); + mockStorageControl.addResponse(expectedResponse); + + String resource = "resource-341064690"; + List permissions = new ArrayList<>(); + + TestIamPermissionsResponse actualResponse = client.testIamPermissions(resource, permissions); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockStorageControl.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + TestIamPermissionsRequest actualRequest = ((TestIamPermissionsRequest) actualRequests.get(0)); + + Assert.assertEquals(resource, actualRequest.getResource()); + Assert.assertEquals(permissions, actualRequest.getPermissionsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void testIamPermissionsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockStorageControl.addException(exception); + + try { + String resource = "resource-341064690"; + List permissions = new ArrayList<>(); + client.testIamPermissions(resource, permissions); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } +} diff --git a/java-storage/google-cloud-storage/assets/retry-conformance-tests-diagram.png b/java-storage/google-cloud-storage/assets/retry-conformance-tests-diagram.png new file mode 100644 index 000000000000..e0051e9c78dc Binary files /dev/null and b/java-storage/google-cloud-storage/assets/retry-conformance-tests-diagram.png differ diff --git a/java-storage/google-cloud-storage/assets/retry-conformance-tests-diagram.txt b/java-storage/google-cloud-storage/assets/retry-conformance-tests-diagram.txt new file mode 100644 index 000000000000..1146f2e2abfa --- /dev/null +++ b/java-storage/google-cloud-storage/assets/retry-conformance-tests-diagram.txt @@ -0,0 +1,59 @@ +# This is a text representation of retry-conformance-tests.diagram.png generated +# using https://www.websequencediagrams.com/ + +participant ITRetryConformanceTest +participant ITRetryConformanceTest.Static +participant RetryTestCaseResolver +participant GracefulConformanceEnforcement +participant RetryTestFixture +participant TestBench +participant Docker +participant RpcMethodMappings + +ITRetryConformanceTest->+ITRetryConformanceTest.Static: testCases + ITRetryConformanceTest.Static->RpcMethodMappings: + ITRetryConformanceTest.Static->+RetryTestCaseResolver: getRetryTestCases + RetryTestCaseResolver->RetryTestCaseResolver: loadRetryTestDefinitions + RetryTestCaseResolver->RetryTestCaseResolver: generateTestCases + RetryTestCaseResolver->RetryTestCaseResolver: shuffle + RetryTestCaseResolver->RetryTestCaseResolver: validateGeneratedTestCases + RetryTestCaseResolver->-ITRetryConformanceTest.Static: +ITRetryConformanceTest.Static->-ITRetryConformanceTest: + +ITRetryConformanceTest->+TestBench: apply + TestBench->TestBench: mktemp stdout + TestBench->TestBench: mktemp stderr + TestBench->+Docker: pull + Docker->-TestBench: + TestBench->+Docker: run + TestBench->+TestBench: await testbench up + TestBench->+Docker: GET /retry_tests + Docker->-TestBench: + deactivate TestBench + loop forEach test 
+ ITRetryConformanceTest->+GracefulConformanceEnforcement: apply + ITRetryConformanceTest->+RetryTestFixture: apply + RetryTestFixture->+TestBench: createRetryTest + TestBench->+Docker: POST /retry_test + Docker->-TestBench: + TestBench->-RetryTestFixture: + ITRetryConformanceTest->ITRetryConformanceTest: test + RetryTestFixture->+TestBench: getRetryTest + TestBench->+Docker: GET /retry_test/{id} + Docker->-TestBench: + TestBench->-RetryTestFixture: + RetryTestFixture->RetryTestFixture: assert completion + RetryTestFixture->+TestBench: deleteRetryTest + TestBench->+Docker: DELETE /retry_test/{id} + Docker->-TestBench: + TestBench->-RetryTestFixture: + RetryTestFixture->-ITRetryConformanceTest: + opt if running in CI + GracefulConformanceEnforcement->GracefulConformanceEnforcement: check allow list + end + GracefulConformanceEnforcement->-ITRetryConformanceTest: + end + Docker->-TestBench: docker stop + TestBench->TestBench: rmtemp stdout + TestBench->TestBench: rmtemp stderr +TestBench->-ITRetryConformanceTest: diff --git a/java-storage/google-cloud-storage/clirr-ignored-differences.xml b/java-storage/google-cloud-storage/clirr-ignored-differences.xml new file mode 100644 index 000000000000..99ba9b179995 --- /dev/null +++ b/java-storage/google-cloud-storage/clirr-ignored-differences.xml @@ -0,0 +1,369 @@ + + + + + + 7012 + com/google/cloud/storage/Storage + com.google.cloud.storage.BlobWriteSession blobWriteSession(com.google.cloud.storage.BlobInfo, com.google.cloud.storage.Storage$BlobWriteOption[]) + + + + + 7012 + com/google/cloud/storage/UnbufferedWritableByteChannelSession$UnbufferedWritableByteChannel + * writeAndClose(*) + + + + 7013 + com/google/cloud/storage/BucketInfo$Builder + com.google.cloud.storage.BucketInfo$Builder setHierarchicalNamespace(com.google.cloud.storage.BucketInfo$HierarchicalNamespace) + + + + 7013 + com/google/cloud/storage/BlobInfo$Builder + com.google.cloud.storage.BlobInfo$Builder 
setContexts(com.google.cloud.storage.BlobInfo$ObjectContexts) + + + + 7013 + com/google/cloud/storage/BlobInfo$Builder + com.google.cloud.storage.BlobInfo$Builder setRetention(com.google.cloud.storage.BlobInfo$Retention) + + + + 7013 + com/google/cloud/storage/BucketInfo$Builder + com.google.cloud.storage.BucketInfo$Builder setSoftDeletePolicy(com.google.cloud.storage.BucketInfo$SoftDeletePolicy) + + + + 7012 + com/google/cloud/storage/Storage + com.google.cloud.storage.Blob restore(com.google.cloud.storage.BlobId, com.google.cloud.storage.Storage$BlobRestoreOption[]) + + + + 7012 + com/google/cloud/storage/spi/v1/StorageRpc + com.google.api.services.storage.model.StorageObject restore(com.google.api.services.storage.model.StorageObject, java.util.Map) + + + + + 7009 + com/google/cloud/storage/ParallelCompositeUploadBlobWriteSessionConfig$PartCleanupStrategy + boolean isDeleteAllOnError() + + + 7009 + com/google/cloud/storage/ParallelCompositeUploadBlobWriteSessionConfig$PartCleanupStrategy + boolean isDeletePartsOnSuccess() + + + 7009 + com/google/cloud/storage/ParallelCompositeUploadBlobWriteSessionConfig$PartNamingStrategy + java.lang.String fmtFields(*) + + + 7002 + com/google/cloud/storage/transfermanager/TransferManagerConfig + boolean isAllowDivideAndConquer() + + + 7002 + com/google/cloud/storage/transfermanager/TransferManagerConfig$Builder + * setAllowDivideAndConquer(boolean) + + + + 7013 + com/google/cloud/storage/StorageOptions$Builder + com.google.cloud.storage.StorageOptions$Builder setBlobWriteSessionConfig(com.google.cloud.storage.BlobWriteSessionConfig) + + + + + 8001 + com/google/cloud/storage/WriteFlushStrategy$DefaultBidiFlusher + + + + + 7013 + com/google/cloud/storage/BlobWriteSessionConfig + int hashCode() + + + 7013 + com/google/cloud/storage/BlobWriteSessionConfig + boolean equals(java.lang.Object) + + + + 7013 + com/google/cloud/storage/StorageOptions$Builder + com.google.cloud.storage.StorageOptions$Builder 
setOpenTelemetry(io.opentelemetry.api.OpenTelemetry) + + + + 7013 + com/google/cloud/storage/StorageOptions + io.opentelemetry.api.OpenTelemetry getOpenTelemetry() + + + + + 7012 + com/google/cloud/storage/Storage + * moveBlob(*) + + + 7012 + com/google/cloud/storage/spi/v1/StorageRpc + * moveObject(*) + + + + + 7012 + com/google/cloud/storage/Storage + com.google.api.core.ApiFuture blobReadSession(com.google.cloud.storage.BlobId, com.google.cloud.storage.Storage$BlobSourceOption[]) + + + + 7012 + com/google/cloud/storage/Storage + com.google.cloud.storage.BlobAppendableUpload blobAppendableUpload(com.google.cloud.storage.BlobInfo, com.google.cloud.storage.BlobAppendableUploadConfig, com.google.cloud.storage.Storage$BlobWriteOption[]) + + + + 7013 + com/google/cloud/storage/BucketInfo$Builder + com.google.cloud.storage.BucketInfo$Builder setIpFilter(com.google.cloud.storage.BucketInfo$IpFilter) + + + + 7013 + com/google/cloud/storage/BucketInfo$Builder + com.google.cloud.storage.BucketInfo$Builder setCustomerManagedEncryptionEnforcementConfig(com.google.cloud.storage.BucketInfo$CustomerManagedEncryptionEnforcementConfig) + + + 7013 + com/google/cloud/storage/BucketInfo$Builder + com.google.cloud.storage.BucketInfo$Builder setCustomerSuppliedEncryptionEnforcementConfig(com.google.cloud.storage.BucketInfo$CustomerSuppliedEncryptionEnforcementConfig) + + + 7013 + com/google/cloud/storage/BucketInfo$Builder + com.google.cloud.storage.BucketInfo$Builder setGoogleManagedEncryptionEnforcementConfig(com.google.cloud.storage.BucketInfo$GoogleManagedEncryptionEnforcementConfig) + + + + 7013 + com/google/cloud/storage/BucketInfo$Builder + com.google.cloud.storage.BucketInfo$Builder setIsUnreachable(java.lang.Boolean) + + + + + 7004 + com/google/cloud/storage/FlushPolicy$MinFlushSizeFlushPolicy + FlushPolicy$MinFlushSizeFlushPolicy(int) + + + 7009 + com/google/cloud/storage/FlushPolicy$MinFlushSizeFlushPolicy + FlushPolicy$MinFlushSizeFlushPolicy(int) + + + 7009 + 
com/google/cloud/storage/FlushPolicy$MaxFlushSizeFlushPolicy + FlushPolicy$MaxFlushSizeFlushPolicy(int) + + + + 7012 + com/google/cloud/storage/BlobAppendableUpload$AppendableUploadWriteableByteChannel + int write(java.nio.ByteBuffer) + + + + + 7012 + com/google/cloud/storage/BlobAppendableUpload$AppendableUploadWriteableByteChannel + void flush() + + + + + 7002 + com/google/cloud/storage/multipartupload/model/CreateMultipartUploadRequest + com.google.cloud.storage.Storage$PredefinedAcl getCannedAcl() + + + 7002 + com/google/cloud/storage/multipartupload/model/CreateMultipartUploadRequest + java.lang.String getContentType() + + + 7002 + com/google/cloud/storage/multipartupload/model/CreateMultipartUploadRequest + java.time.OffsetDateTime getCustomTime() + + + 7002 + com/google/cloud/storage/multipartupload/model/CreateMultipartUploadRequest + java.lang.String getKmsKeyName() + + + 7002 + com/google/cloud/storage/multipartupload/model/CreateMultipartUploadRequest + java.util.Map getMetadata() + + + 7002 + com/google/cloud/storage/multipartupload/model/CreateMultipartUploadRequest + com.google.cloud.storage.multipartupload.model.ObjectLockMode getObjectLockMode() + + + 7002 + com/google/cloud/storage/multipartupload/model/CreateMultipartUploadRequest + java.time.OffsetDateTime getObjectLockRetainUntilDate() + + + 7002 + com/google/cloud/storage/multipartupload/model/CreateMultipartUploadRequest + com.google.cloud.storage.StorageClass getStorageClass() + + + + + 7013 + com/google/cloud/storage/MultipartUploadClient + * *(*) + + + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsRequest + java.lang.Integer getMaxParts() + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsRequest + java.lang.Integer getPartNumberMarker() + + + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse + java.lang.String getBucket() + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse + java.lang.String getKey() 
+ + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse + int getMaxParts() + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse + int getNextPartNumberMarker() + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse + int getPartNumberMarker() + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse + java.util.List getParts() + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse + com.google.cloud.storage.StorageClass getStorageClass() + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse + java.lang.String getUploadId() + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse + boolean isTruncated() + + + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse$Builder + com.google.cloud.storage.multipartupload.model.ListPartsResponse$Builder setBucket(java.lang.String) + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse$Builder + com.google.cloud.storage.multipartupload.model.ListPartsResponse$Builder setIsTruncated(boolean) + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse$Builder + com.google.cloud.storage.multipartupload.model.ListPartsResponse$Builder setKey(java.lang.String) + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse$Builder + com.google.cloud.storage.multipartupload.model.ListPartsResponse$Builder setMaxParts(int) + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse$Builder + com.google.cloud.storage.multipartupload.model.ListPartsResponse$Builder setNextPartNumberMarker(int) + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse$Builder + com.google.cloud.storage.multipartupload.model.ListPartsResponse$Builder setPartNumberMarker(int) + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse$Builder + 
com.google.cloud.storage.multipartupload.model.ListPartsResponse$Builder setParts(java.util.List) + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse$Builder + com.google.cloud.storage.multipartupload.model.ListPartsResponse$Builder setStorageClass(com.google.cloud.storage.StorageClass) + + + 7002 + com/google/cloud/storage/multipartupload/model/ListPartsResponse$Builder + com.google.cloud.storage.multipartupload.model.ListPartsResponse$Builder setUploadId(java.lang.String) + + + + + 7006 + com/google/cloud/storage/Hasher* + * nullSafeConcat(*) + ** + + + 7005 + com/google/cloud/storage/Hasher* + ** + ** + + + diff --git a/java-storage/google-cloud-storage/conformance-testing.md b/java-storage/google-cloud-storage/conformance-testing.md new file mode 100644 index 000000000000..6b75bb502add --- /dev/null +++ b/java-storage/google-cloud-storage/conformance-testing.md @@ -0,0 +1,45 @@ +# Conformance Testing + +This library leverages the conformance tests defined in [googleapis/conformance-tests](https://github.com/googleapis/conformance-tests) +to ensure adherence to expected behaviors. + +Access to the conformance tests is achieved via dependencies on +[`com.google.cloud:google-cloud-conformance-tests`](https://github.com/googleapis/java-conformance-tests) +which contains all generated protos and associated files necessary for loading +and accessing the tests. + +## Running the Conformance Tests + +Conformance tests are written and run as part of the JUnit tests suite. + +## Suites + +### Automatic Retries + +The JUnit tests class is [`ITRetryConformanceTest.java`](./src/test/java/com/google/cloud/storage/conformance/retry/ITRetryConformanceTest.java) +and is considered part of the integration test suite. + +This tests suite ensures that automatic retries for operations are properly defined +and handled to ensure data integrity. + +#### Prerequisites +1. Java 8+ +2. Maven +3. 
Docker (Docker for MacOS has been tested and verified to work as well) + +#### Test Suite Overview + +The test suite uses the [storage-testbench](https://github.com/googleapis/storage-testbench) +to configure and generate tests cases which use fault injection to ensure conformance. + +`ITRetryConformanceTest` encapsulates all the necessary lifecycle points needed +to run the test suite, including: +1. Running the testbench server via docker +2. Setup, validation, cleanup of individual test cases with the testbench +3. CI Graceful enforcement of test failures (enforce no regressions, but allow + for some cases to not pass without failing the whole run) + +A sequence diagram of how the tests are loaded run, and interact with testbench +can be seen below. Time moves from top to bottom, while component interactions +are shown via arrows laterally. +![](./assets/retry-conformance-tests-diagram.png) diff --git a/java-storage/google-cloud-storage/pom.xml b/java-storage/google-cloud-storage/pom.xml new file mode 100644 index 000000000000..ceeed919bfc3 --- /dev/null +++ b/java-storage/google-cloud-storage/pom.xml @@ -0,0 +1,528 @@ + + + 4.0.0 + google-cloud-storage + 2.64.1-SNAPSHOT + jar + Google Cloud Storage + https://github.com/googleapis/google-cloud-java + + Java idiomatic client for Google Cloud Storage. 
+ + + com.google.cloud + google-cloud-storage-parent + 2.64.1-SNAPSHOT + + + google-cloud-storage + 1.130.0 + + + + com.fasterxml.jackson.dataformat + jackson-dataformat-xml + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.core + jackson-annotations + + + com.google.guava + guava + + + com.google.http-client + google-http-client + + + com.google.http-client + google-http-client-jackson2 + + + com.google.http-client + google-http-client-gson + + + com.google.api-client + google-api-client + + + com.google.apis + google-api-services-storage + + + com.google.code.gson + gson + + + com.google.cloud + google-cloud-core + + + com.google.cloud + google-cloud-core-http + + + com.google.cloud + google-cloud-core-grpc + + + com.google.api + gax + + + com.google.api + gax-grpc + + + com.google.auth + google-auth-library-credentials + + + com.google.auth + google-auth-library-oauth2-http + + + com.google.api + api-common + + + io.opencensus + opencensus-api + + + io.opentelemetry + opentelemetry-context + + + com.google.api.grpc + proto-google-iam-v1 + + + com.google.protobuf + protobuf-java + + + com.google.protobuf + protobuf-java-util + + + io.grpc + grpc-core + + + io.grpc + grpc-protobuf + + + com.google.api.grpc + proto-google-common-protos + + + org.threeten + threetenbp + + + com.google.api.grpc + proto-google-cloud-storage-v2 + + + com.google.api.grpc + grpc-google-cloud-storage-v2 + + + com.google.api.grpc + gapic-google-cloud-storage-v2 + + + io.opentelemetry + opentelemetry-sdk + + + io.grpc + grpc-opentelemetry + + + + io.opentelemetry + opentelemetry-api + + + + + io.opentelemetry + opentelemetry-api + + + + io.opentelemetry + opentelemetry-sdk-metrics + + + io.opentelemetry + opentelemetry-sdk-common + + + + io.opentelemetry + opentelemetry-sdk-extension-autoconfigure-spi + + + + io.opentelemetry.semconv + opentelemetry-semconv + + + + com.google.cloud.opentelemetry + 
exporter-metrics + + + com.google.cloud.opentelemetry + exporter-trace + 0.36.0 + test + + + + io.opentelemetry.contrib + opentelemetry-gcp-resources + + + + org.checkerframework + checker-qual + + + + com.fasterxml.jackson.core + jackson-core + + + com.google.code.findbugs + jsr305 + + + + junit + junit + test + + + io.grpc + grpc-api + + + io.grpc + grpc-netty-shaded + runtime + + + io.grpc + grpc-stub + + + io.grpc + grpc-googleapis + runtime + + + io.opentelemetry + opentelemetry-sdk-trace + test + + + + io.grpc + grpc-xds + runtime + + + io.grpc + grpc-rls + runtime + + + com.google.api.grpc + proto-google-cloud-kms-v1 + 0.181.0 + test + + + + com.google.cloud + google-cloud-kms + 2.90.0 + test + + + com.google.api.grpc + proto-google-cloud-pubsub-v1 + test + ${pubsub-proto.version} + + + com.google.cloud + google-cloud-core + test-jar + test + + + com.google.truth + truth + test + + + com.google.cloud + google-cloud-pubsub + test + + + com.google.errorprone + error_prone_annotations + + + + + com.google.api.grpc + proto-google-cloud-storage-control-v2 + test + + + com.google.cloud + google-cloud-storage-control + test + + + org.mockito + mockito-core + test + + + com.google.cloud + google-cloud-conformance-tests + test + + + org.apache.httpcomponents + httpclient + test + + + org.apache.httpcomponents + httpmime + test + + + org.apache.httpcomponents + httpcore + test + + + com.google.errorprone + error_prone_annotations + test + + + org.junit.vintage + junit-vintage-engine + test + + + net.jqwik + jqwik + 1.9.3 + test + + + io.github.classgraph + classgraph + 4.8.184 + test + + + org.slf4j + jul-to-slf4j + 2.0.17 + test + + + ch.qos.logback + logback-classic + 1.3.16 + test + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + + io.grpc:grpc-netty-shaded + + io.grpc:grpc-googleapis + io.grpc:grpc-xds + io.grpc:grpc-rls + + net.jqwik:jqwik + net.jqwik:jqwik-api + net.jqwik:jqwik-engine + net.jqwik:jqwik-time + net.jqwik:jqwik-web + + 
com.google.http-client:google-http-client-gson + + org.junit.vintage:junit-vintage-engine + + + io.opentelemetry:opentelemetry-sdk-extension-autoconfigure-spi + io.opentelemetry.semconv:opentelemetry-semconv + + org.slf4j:slf4j-api + org.slf4j:jul-to-slf4j + ch.qos.logback:logback-classic + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + org.apache.maven.plugins + maven-surefire-plugin + ${surefire.version} + + + + **/*SmokeTest.java + **/IT*.java + + sponge_log + + + + org.apache.maven.surefire + surefire-junit-platform + ${surefire.version} + + + + + + + + + idea-jqwik + + + org.junit.vintage + junit-vintage-engine + test + + + org.junit.jupiter + junit-jupiter-engine + test + + + + + native + + + com.google.cloud.storage.it.StorageNativeCanary + + + + regen-grpc-graalvm-reflect-config + + + + + org.codehaus.mojo + exec-maven-plugin + 3.6.3 + + + + exec + + + + + test + true + java + + -classpath + + com.google.cloud.storage.GenerateGrpcProtobufReflectConfig + + + + + + + + diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Acl.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Acl.java new file mode 100644 index 000000000000..e81bf7894e7e --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Acl.java @@ -0,0 +1,430 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.core.ApiFunction; +import com.google.cloud.StringEnumType; +import com.google.cloud.StringEnumValue; +import com.google.common.base.MoreObjects; +import java.io.Serializable; +import java.util.Objects; + +/** + * Access Control List for buckets or blobs. + * + * @see + * About Access Control Lists + */ +public final class Acl implements Serializable { + + private static final long serialVersionUID = -1000021464049679956L; + + private final Entity entity; + private final Role role; + private final String id; + private final String etag; + + public static final class Role extends StringEnumValue { + private static final long serialVersionUID = 2067949416720207403L; + + private Role(String constant) { + super(constant); + } + + private static final ApiFunction CONSTRUCTOR = Role::new; + + private static final StringEnumType type = new StringEnumType<>(Role.class, CONSTRUCTOR); + + public static final Role OWNER = type.createAndRegister("OWNER"); + public static final Role READER = type.createAndRegister("READER"); + public static final Role WRITER = type.createAndRegister("WRITER"); + + /** + * Get the Role for the given String constant, and throw an exception if the constant is not + * recognized. + */ + public static Role valueOfStrict(String constant) { + return type.valueOfStrict(constant); + } + + /** Get the Role for the given String constant, and allow unrecognized values. */ + public static Role valueOf(String constant) { + return type.valueOf(constant); + } + + /** Return the known values for Role. */ + public static Role[] values() { + return type.values(); + } + } + + /** Builder for {@code Acl} objects. 
*/ + public static class Builder { + + private Entity entity; + private Role role; + private String id; + private String etag; + + private Builder(Entity entity, Role role) { + this.entity = entity; + this.role = role; + } + + private Builder(Acl acl) { + this.entity = acl.entity; + this.role = acl.role; + this.id = acl.id; + this.etag = acl.etag; + } + + /** Sets the entity for the ACL object. */ + public Builder setEntity(Entity entity) { + this.entity = entity; + return this; + } + + /** Sets the role to associate to the {@code entity} object. */ + public Builder setRole(Role role) { + this.role = role; + return this; + } + + Builder setId(String id) { + this.id = id; + return this; + } + + Builder setEtag(String etag) { + this.etag = etag; + return this; + } + + /** Creates an {@code Acl} object from this builder. */ + public Acl build() { + return new Acl(this); + } + } + + /** Base class for Access Control List entities. */ + public abstract static class Entity implements Serializable { + + private static final long serialVersionUID = 2321254094152522444L; + + private final Type type; + private final String value; + + public enum Type { + DOMAIN, + GROUP, + USER, + PROJECT, + UNKNOWN + } + + Entity(Type type, String value) { + this.type = type; + this.value = value; + } + + /** Returns the type of entity. */ + public Type getType() { + return type; + } + + /** Returns the entity's value. */ + protected String getValue() { + return value; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + Entity entity = (Entity) obj; + return Objects.equals(type, entity.type) && Objects.equals(value, entity.value); + } + + @Override + public int hashCode() { + return Objects.hash(type, value); + } + + @Override + public String toString() { + return Conversions.json().entity().encode(this); + } + } + + /** Class for ACL Domain entities. 
*/ + public static final class Domain extends Entity { + + private static final long serialVersionUID = 6852649665598880139L; + + /** + * Creates a domain entity. + * + * @param domain the domain associated to this entity + */ + public Domain(String domain) { + super(Type.DOMAIN, domain); + } + + /** Returns the domain associated to this entity. */ + public String getDomain() { + return getValue(); + } + } + + /** Class for ACL Group entities. */ + public static final class Group extends Entity { + + private static final long serialVersionUID = 5642929747944714384L; + + /** + * Creates a group entity. + * + * @param email the group email + */ + public Group(String email) { + super(Type.GROUP, email); + } + + /** Returns the group email. */ + public String getEmail() { + return getValue(); + } + } + + /** Class for ACL User entities. */ + public static final class User extends Entity { + + private static final long serialVersionUID = -4113630416489429660L; + static final String ALL_USERS = "allUsers"; + static final String ALL_AUTHENTICATED_USERS = "allAuthenticatedUsers"; + + /** + * Creates a user entity. + * + * @param email the user email + */ + public User(String email) { + super(Type.USER, email); + } + + /** Returns the user email. */ + public String getEmail() { + return getValue(); + } + + public static User ofAllUsers() { + return new User(ALL_USERS); + } + + public static User ofAllAuthenticatedUsers() { + return new User(ALL_AUTHENTICATED_USERS); + } + } + + /** Class for ACL Project entities. 
*/ + public static final class Project extends Entity { + + private static final long serialVersionUID = -743189540406339074L; + + private final ProjectRole projectRole; + private final String projectId; + + public static final class ProjectRole extends StringEnumValue { + private static final long serialVersionUID = 1284991422168016498L; + + private ProjectRole(String constant) { + super(constant); + } + + private static final ApiFunction CONSTRUCTOR = ProjectRole::new; + + private static final StringEnumType type = + new StringEnumType<>(ProjectRole.class, CONSTRUCTOR); + + public static final ProjectRole OWNERS = type.createAndRegister("OWNERS"); + public static final ProjectRole EDITORS = type.createAndRegister("EDITORS"); + public static final ProjectRole VIEWERS = type.createAndRegister("VIEWERS"); + + /** + * Get the ProjectRole for the given String constant, and throw an exception if the constant + * is not recognized. + */ + public static ProjectRole valueOfStrict(String constant) { + return type.valueOfStrict(constant); + } + + /** Get the ProjectRole for the given String constant, and allow unrecognized values. */ + public static ProjectRole valueOf(String constant) { + return type.valueOf(constant); + } + + /** Return the known values for ProjectRole. */ + public static ProjectRole[] values() { + return type.values(); + } + } + + /** + * Creates a project entity. + * + * @param projectRole a role in the project, used to select project's teams + * @param projectId id of the project + */ + public Project(ProjectRole projectRole, String projectId) { + super(Type.PROJECT, projectRole.name().toLowerCase() + "-" + projectId); + this.projectRole = projectRole; + this.projectId = projectId; + } + + /** Returns the role in the project for this entity. */ + public ProjectRole getProjectRole() { + return projectRole; + } + + /** Returns the project id for this entity. 
*/ + public String getProjectId() { + return projectId; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Project)) { + return false; + } + if (!super.equals(o)) { + return false; + } + Project project = (Project) o; + return Objects.equals(projectRole, project.projectRole) + && Objects.equals(projectId, project.projectId); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), projectRole, projectId); + } + } + + public static final class RawEntity extends Entity { + + private static final long serialVersionUID = -3049252571732490102L; + + RawEntity(String entity) { + super(Type.UNKNOWN, entity); + } + } + + private Acl(Builder builder) { + this.entity = checkNotNull(builder.entity); + this.role = checkNotNull(builder.role); + this.id = builder.id; + this.etag = builder.etag; + } + + /** Returns the entity for this ACL object. */ + public Entity getEntity() { + return entity; + } + + /** Returns the role associated to the entity in this ACL object. */ + public Role getRole() { + return role; + } + + /** Returns the ID of the ACL entry. */ + public String getId() { + return id; + } + + /** + * Returns HTTP 1.1 Entity tag for the ACL entry. + * + * @see Entity Tags + */ + public String getEtag() { + return etag; + } + + /** Returns a builder for this {@code Acl} object. */ + public Builder toBuilder() { + return new Builder(this); + } + + /** + * Returns an {@code Acl} object. + * + * @param entity the entity for this ACL object + * @param role the role to associate to the {@code entity} object + */ + public static Acl of(Entity entity, Role role) { + return newBuilder(entity, role).build(); + } + + /** + * Returns a builder for {@code Acl} objects. 
+ * + * @param entity the entity for this ACL object + * @param role the role to associate to the {@code entity} object + */ + public static Builder newBuilder(Entity entity, Role role) { + return new Builder(entity, role); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("entity", entity) + .add("role", role) + .add("etag", etag) + .add("id", id) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(entity, role); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + final Acl other = (Acl) obj; + return Objects.equals(this.entity, other.entity) + && Objects.equals(this.role, other.role) + && Objects.equals(this.etag, other.etag) + && Objects.equals(this.id, other.id); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/AndThenRangeSpecFunction.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/AndThenRangeSpecFunction.java new file mode 100644 index 000000000000..e3f2ae925242 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/AndThenRangeSpecFunction.java @@ -0,0 +1,59 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.common.base.MoreObjects; +import java.util.Objects; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class AndThenRangeSpecFunction extends RangeSpecFunction { + + private final RangeSpecFunction first; + private final RangeSpecFunction second; + + AndThenRangeSpecFunction(RangeSpecFunction first, RangeSpecFunction second) { + this.first = first; + this.second = second; + } + + @Override + RangeSpec apply(long offset, @Nullable RangeSpec prev) { + return second.apply(offset, first.apply(offset, prev)); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof AndThenRangeSpecFunction)) { + return false; + } + AndThenRangeSpecFunction that = (AndThenRangeSpecFunction) o; + return Objects.equals(first, that.first) && Objects.equals(second, that.second); + } + + @Override + public int hashCode() { + return Objects.hash(first, second); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("first", first).add("second", second).toString(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ApiFutureUtils.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ApiFutureUtils.java new file mode 100644 index 000000000000..3e35c4cdf7b4 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ApiFutureUtils.java @@ -0,0 +1,136 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.UncheckedExecutionException; +import java.util.List; +import java.util.stream.Collectors; + +/** + * A set of utility methods for working with {@link com.google.api.core.ApiFuture ApiFutures} that + * aren't already provided by {@link com.google.api.core.ApiFutures} + */ +final class ApiFutureUtils { + + private ApiFutureUtils() {} + + /** + * Similar to {@link com.google.api.gax.rpc.ApiExceptions#callAndTranslateApiException(ApiFuture)} + * except that it doesn't add a suppressed exception. + * + *

This should only be used in a context in which awaiting the future is on a thread that + * already contains the stack frames which originated the work. + */ + static T await(ApiFuture future) { + try { + return Futures.getUnchecked(future); + } catch (UncheckedExecutionException exception) { + if (exception.getCause() instanceof RuntimeException) { + throw (RuntimeException) exception.getCause(); + } + throw exception; + } + } + + static ApiFuture just(T value) { + return ApiFutures.immediateFuture(value); + } + + /** + * @see SmugglingException + */ + static ApiFuture> quietAllAsList(List> futures) { + List> pending = + futures.stream().map(ApiFutureUtils::smuggleThrowable).collect(Collectors.toList()); + ApiFuture> futureValues = ApiFutures.allAsList(pending); + return unwrapSmuggling(futureValues); + } + + @FunctionalInterface + interface OnSuccessApiFutureCallback extends ApiFutureCallback { + @Override + default void onFailure(Throwable t) { + // noop + } + } + + @FunctionalInterface + interface OnFailureApiFutureCallback extends ApiFutureCallback { + @Override + default void onSuccess(T result) { + // noop + } + } + + private static ApiFuture smuggleThrowable(ApiFuture future) { + return ApiFutures.catchingAsync( + future, + Throwable.class, + throwable -> ApiFutures.immediateFailedFuture(new SmugglingException(throwable)), + MoreExecutors.directExecutor()); + } + + private static ApiFuture unwrapSmuggling(ApiFuture future) { + return ApiFutures.catchingAsync( + future, + SmugglingException.class, + smuggled -> ApiFutures.immediateFailedFuture(smuggled.smuggledCause), + MoreExecutors.directExecutor()); + } + + /** + * Guava's AggregateFuture attempts to help let you know when multiple futures fail while + * resolving via {@link com.google.common.util.concurrent.Futures#allAsList(Iterable)}. + * + *

This is detrimental to our use case, because we don't want to be spamming our customers with + * error message they can do nothing about. In an effort to prevent this spam we define a custom + * exception wrapper class that is able to smuggle the context we care about past the detection + * mechanism (instances are added to a ConcurrentHashSet). + * + *

To accomplish this smuggling, we abuse the following: + * + *

    + *
  1. hashCode is hardcoded to a constant value + *
  2. equals(Object) returns true if the classes are equal + *
  3. we define our own field to carry the cause (guava looks at the cause too) + *
+ * + * For our purposes we don't need to distinguish between different instances of our exception, as + * we track error at a different level. + */ + private static final class SmugglingException extends RuntimeException { + private final Throwable smuggledCause; + + private SmugglingException(Throwable smuggledCause) { + super(""); + this.smuggledCause = smuggledCause; + } + + public int hashCode() { + return 1; + } + + public boolean equals(Object other) { + return other.getClass().equals(this.getClass()); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ApiaryUnbufferedReadableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ApiaryUnbufferedReadableByteChannel.java new file mode 100644 index 000000000000..7907aafe9fca --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ApiaryUnbufferedReadableByteChannel.java @@ -0,0 +1,422 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.Utils.ifNonNull; +import static java.util.Objects.requireNonNull; + +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpResponse; +import com.google.api.client.http.HttpResponseException; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.retrying.BasicResultRetryAlgorithm; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.services.storage.Storage; +import com.google.api.services.storage.Storage.Objects; +import com.google.api.services.storage.Storage.Objects.Get; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableMap; +import com.google.common.hash.HashFunction; +import com.google.common.hash.Hashing; +import com.google.common.io.BaseEncoding; +import com.google.gson.Gson; +import com.google.gson.stream.JsonReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.io.StringReader; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.ScatteringByteChannel; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; +import javax.annotation.concurrent.Immutable; +import org.checkerframework.checker.nullness.qual.NonNull; +import 
org.checkerframework.checker.nullness.qual.Nullable; + +class ApiaryUnbufferedReadableByteChannel implements UnbufferedReadableByteChannel { + + private final ApiaryReadRequest apiaryReadRequest; + private final Storage storage; + private final SettableApiFuture result; + private final ResultRetryAlgorithm resultRetryAlgorithm; + private final Retrier retrier; + + private long position; + private ScatteringByteChannel sbc; + private boolean open; + private boolean returnEOF; + + // returned X-Goog-Generation header value + private Long xGoogGeneration; + + ApiaryUnbufferedReadableByteChannel( + ApiaryReadRequest apiaryReadRequest, + Storage storage, + SettableApiFuture result, + Retrier retrier, + ResultRetryAlgorithm resultRetryAlgorithm) { + this.apiaryReadRequest = apiaryReadRequest; + this.storage = storage; + this.result = result; + this.retrier = retrier; + this.resultRetryAlgorithm = + new BasicResultRetryAlgorithm() { + @Override + public boolean shouldRetry(Throwable previousThrowable, Object previousResponse) { + boolean shouldRetry = resultRetryAlgorithm.shouldRetry(previousThrowable, null); + if (previousThrowable != null && !shouldRetry) { + result.setException(previousThrowable); + } + return shouldRetry; + } + }; + this.open = true; + this.returnEOF = false; + this.position = apiaryReadRequest.getByteRangeSpec().beginOffset(); + } + + @Override + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + if (returnEOF) { + close(); + return -1; + } else if (!open) { + throw new ClosedChannelException(); + } + long totalRead = 0; + do { + if (sbc == null) { + sbc = retrier.run(resultRetryAlgorithm, this::open, Decoder.identity()); + } + + long totalRemaining = Buffers.totalRemaining(dsts, offset, length); + try { + // According to the contract of Retrying#run it's possible for sbc to be null even after + // invocation. However, the function we provide is guaranteed to return non-null or throw + // an exception. 
So we suppress the warning from intellij here. + //noinspection ConstantConditions + long read = sbc.read(dsts, offset, length); + if (read == -1) { + returnEOF = true; + } else { + totalRead += read; + } + return totalRead; + } catch (Exception t) { + if (resultRetryAlgorithm.shouldRetry(t, null)) { + // if our retry algorithm COULD allow a retry, continue the loop and allow trying to + // open the stream again. + sbc = null; + } else if (t instanceof IOException) { + IOException ioE = (IOException) t; + if (resultRetryAlgorithm.shouldRetry(StorageException.translate(ioE), null)) { + sbc = null; + } else { + throw ioE; + } + } else { + throw new IOException(StorageException.coalesce(t)); + } + } finally { + long totalRemainingAfter = Buffers.totalRemaining(dsts, offset, length); + long delta = totalRemaining - totalRemainingAfter; + if (delta > 0) { + position += delta; + totalRead += delta; + } + } + } while (true); + } + + @Override + public boolean isOpen() { + return open; + } + + @Override + public void close() throws IOException { + open = false; + if (sbc != null) { + sbc.close(); + } + } + + private void setXGoogGeneration(long xGoogGeneration) { + this.xGoogGeneration = xGoogGeneration; + } + + private ScatteringByteChannel open() { + try { + ApiaryReadRequest request = apiaryReadRequest.withNewBeginOffset(position); + Get get = createGetRequest(request, storage.objects(), xGoogGeneration); + + HttpResponse media = get.executeMedia(); + InputStream content = media.getContent(); + if (xGoogGeneration == null) { + HttpHeaders responseHeaders = media.getHeaders(); + + String xGoogGenHeader = getHeaderValue(responseHeaders, "x-goog-generation"); + if (xGoogGenHeader != null) { + StorageObject clone = apiaryReadRequest.getObject().clone(); + ifNonNull(xGoogGenHeader, Long::valueOf, clone::setGeneration); + // store xGoogGeneration ourselves incase we need to retry + ifNonNull(xGoogGenHeader, Long::valueOf, this::setXGoogGeneration); + ifNonNull( + 
getHeaderValue(responseHeaders, "x-goog-metageneration"), + Long::valueOf, + clone::setMetageneration); + ifNonNull( + getHeaderValue(responseHeaders, "x-goog-storage-class"), clone::setStorageClass); + ifNonNull( + getHeaderValue(responseHeaders, "x-goog-stored-content-length"), + BigInteger::new, + clone::setSize); + ifNonNull( + getHeaderValue(responseHeaders, "content-disposition"), clone::setContentDisposition); + ifNonNull(getHeaderValue(responseHeaders, "content-type"), clone::setContentType); + ifNonNull(getHeaderValue(responseHeaders, "etag"), clone::setEtag); + + String encoding = getHeaderValue(responseHeaders, "x-goog-stored-content-encoding"); + if (encoding != null && !encoding.equalsIgnoreCase("identity")) { + clone.setContentEncoding(encoding); + } + if (!result.isDone()) { + result.set(clone); + } + } + } + + ReadableByteChannel rbc = Channels.newChannel(content); + return StorageByteChannels.readable().asScatteringByteChannel(rbc); + } catch (HttpResponseException e) { + if (xGoogGeneration != null) { + int statusCode = e.getStatusCode(); + if (statusCode == 404) { + throw new StorageException(404, "Failure while trying to resume download", e); + } + } else if (e.getStatusCode() == 416) { + returnEOF = true; + } + throw StorageException.translate(e); + } catch (IOException e) { + throw StorageException.translate(e); + } catch (Throwable t) { + throw StorageException.coalesce(t); + } + } + + @VisibleForTesting + static Get createGetRequest( + ApiaryReadRequest apiaryReadRequest, Objects objects, Long xGoogGeneration) + throws IOException { + StorageObject from = apiaryReadRequest.getObject(); + Map options = apiaryReadRequest.getOptions(); + Get get = objects.get(from.getBucket(), from.getName()); + if (from.getGeneration() != null) { + get.setGeneration(from.getGeneration()); + } else if (xGoogGeneration != null) { + get.setGeneration(xGoogGeneration); + } + ifNonNull( + options.get(StorageRpc.Option.IF_GENERATION_MATCH), + 
ApiaryUnbufferedReadableByteChannel::cast, + get::setIfGenerationMatch); + ifNonNull( + options.get(StorageRpc.Option.IF_GENERATION_NOT_MATCH), + ApiaryUnbufferedReadableByteChannel::cast, + get::setIfGenerationNotMatch); + ifNonNull( + options.get(StorageRpc.Option.IF_METAGENERATION_MATCH), + ApiaryUnbufferedReadableByteChannel::cast, + get::setIfMetagenerationMatch); + ifNonNull( + options.get(StorageRpc.Option.IF_METAGENERATION_NOT_MATCH), + ApiaryUnbufferedReadableByteChannel::cast, + get::setIfMetagenerationNotMatch); + ifNonNull( + options.get(StorageRpc.Option.USER_PROJECT), + ApiaryUnbufferedReadableByteChannel::cast, + get::setUserProject); + HttpHeaders headers = get.getRequestHeaders(); + ifNonNull( + options.get(StorageRpc.Option.CUSTOMER_SUPPLIED_KEY), + ApiaryUnbufferedReadableByteChannel::cast, + (String key) -> { + BaseEncoding base64 = BaseEncoding.base64(); + HashFunction hashFunction = Hashing.sha256(); + headers.set("x-goog-encryption-algorithm", "AES256"); + headers.set("x-goog-encryption-key", key); + headers.set( + "x-goog-encryption-key-sha256", + base64.encode(hashFunction.hashBytes(base64.decode(key)).asBytes())); + }); + ifNonNull( + options.get(StorageRpc.Option.EXTRA_HEADERS), + ApiaryUnbufferedReadableByteChannel::cast, + (ImmutableMap extraHeaders) -> { + for (Entry e : extraHeaders.entrySet()) { + headers.set(e.getKey(), e.getValue()); + } + }); + + // gzip handling is performed upstream of here. 
Ensure we always get the raw input stream from + // the request + get.setReturnRawInputStream(true); + String range = apiaryReadRequest.getByteRangeSpec().getHttpRangeHeader(); + if (range != null) { + get.getRequestHeaders().setRange(range); + } + get.getMediaHttpDownloader().setDirectDownloadEnabled(true); + + return get; + } + + @SuppressWarnings("unchecked") + private static T cast(Object o) { + return (T) o; + } + + @Nullable + @SuppressWarnings("unchecked") + private static String getHeaderValue(@NonNull HttpHeaders headers, @NonNull String headerName) { + Object o = headers.get(headerName); + if (o == null) { + return null; + } else if (o instanceof List) { + List list = (List) o; + if (list.isEmpty()) { + return null; + } else { + return Utils.headerNameToLowerCase(list.get(0).trim()); + } + } else if (o instanceof String) { + return (String) o; + } else { + throw new IllegalStateException( + String.format( + Locale.US, + "Unexpected header type '%s' for header %s", + o.getClass().getName(), + headerName)); + } + } + + @Immutable + static final class ApiaryReadRequest implements Serializable { + private static final long serialVersionUID = -4059435314115374448L; + private static final Gson gson = new Gson(); + @NonNull private transient StorageObject object; + @NonNull private final Map options; + @NonNull private final ByteRangeSpec byteRangeSpec; + + private volatile String objectJson; + + ApiaryReadRequest( + @NonNull StorageObject object, + @NonNull Map options, + @NonNull ByteRangeSpec byteRangeSpec) { + this.object = requireNonNull(object, "object must be non null"); + this.options = requireNonNull(options, "options must be non null"); + this.byteRangeSpec = requireNonNull(byteRangeSpec, "byteRangeSpec must be non null"); + } + + @NonNull StorageObject getObject() { + return object; + } + + @NonNull Map getOptions() { + return options; + } + + @NonNull ByteRangeSpec getByteRangeSpec() { + return byteRangeSpec; + } + + ApiaryReadRequest 
withNewBeginOffset(long beginOffset) { + if (beginOffset > 0 && beginOffset != byteRangeSpec.beginOffset()) { + return new ApiaryReadRequest( + object, options, byteRangeSpec.withNewBeginOffset(beginOffset)); + } else { + return this; + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ApiaryReadRequest)) { + return false; + } + ApiaryReadRequest that = (ApiaryReadRequest) o; + return java.util.Objects.equals(object, that.object) + && java.util.Objects.equals(options, that.options) + && java.util.Objects.equals(byteRangeSpec, that.byteRangeSpec); + } + + @Override + public int hashCode() { + return java.util.Objects.hash(object, options, byteRangeSpec); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("byteRangeSpec", byteRangeSpec) + .add("options", options) + .add("object", getObjectJson()) + .toString(); + } + + private String getObjectJson() { + if (objectJson == null) { + synchronized (this) { + if (objectJson == null) { + objectJson = gson.toJson(object); + } + } + } + return objectJson; + } + + private void writeObject(ObjectOutputStream out) throws IOException { + String ignore = getObjectJson(); + out.defaultWriteObject(); + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + JsonReader jsonReader = gson.newJsonReader(new StringReader(this.objectJson)); + this.object = gson.fromJson(jsonReader, StorageObject.class); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ApiaryUnbufferedWritableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ApiaryUnbufferedWritableByteChannel.java new file mode 100644 index 000000000000..4b922a42829a --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ApiaryUnbufferedWritableByteChannel.java @@ -0,0 +1,128 @@ +/* + * 
package com.google.cloud.storage;

import com.google.api.core.SettableApiFuture;
import com.google.api.services.storage.model.StorageObject;
import com.google.cloud.storage.Retrying.RetrierWithAlg;
import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.util.function.LongConsumer;
import javax.annotation.ParametersAreNonnullByDefault;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * {@link UnbufferedWritableByteChannel} that forwards bytes to a GCS JSON resumable upload
 * session.
 *
 * <p>Intermediate PUTs are only issued in increments of at least 256 KiB (a resumable-session
 * requirement); any smaller payload is held back unless it is part of the finalizing request. The
 * resulting {@link StorageObject} is published through {@code result}, and every
 * server-acknowledged offset is reported to {@code committedBytesCallback}.
 *
 * <p>Not thread safe.
 */
@ParametersAreNonnullByDefault
final class ApiaryUnbufferedWritableByteChannel implements UnbufferedWritableByteChannel {

  private final JsonResumableSession session;
  private final SettableApiFuture<StorageObject> result;
  private final LongConsumer committedBytesCallback;

  // true until close() completes (or a finalizing write has been followed by close())
  private boolean open;
  // number of bytes GCS has acknowledged so far; doubles as the offset of the next write
  private long cumulativeByteCount;
  // set once the finalizing request has been sent to the session
  private boolean finished;

  ApiaryUnbufferedWritableByteChannel(
      HttpClientContext httpClientContext,
      RetrierWithAlg retrier,
      JsonResumableWrite resumableWrite,
      SettableApiFuture<StorageObject> result,
      LongConsumer committedBytesCallback) {
    this.session = ResumableSession.json(httpClientContext, retrier, resumableWrite);
    this.result = result;
    this.committedBytesCallback = committedBytesCallback;
    this.open = true;
    this.cumulativeByteCount = resumableWrite.getBeginOffset();
    this.finished = false;
  }

  @Override
  public long write(ByteBuffer[] srcs, int offset, int length) throws IOException {
    return internalWrite(srcs, offset, length, false);
  }

  @Override
  public long writeAndClose(ByteBuffer[] srcs, int offset, int length) throws IOException {
    long written = internalWrite(srcs, offset, length, true);
    close();
    return written;
  }

  @Override
  public boolean isOpen() {
    return open;
  }

  @Override
  public void close() throws IOException {
    open = false;
    if (finished) {
      return;
    }
    // no finalizing write has happened yet; finalize the session with an empty payload
    try {
      ResumableOperationResult<@Nullable StorageObject> finalization =
          session.put(RewindableContent.empty(), HttpContentRange.of(cumulativeByteCount));
      committedBytesCallback.accept(finalization.getPersistedSize());
      result.set(finalization.getObject());
    } catch (Exception e) {
      result.setException(e);
      throw StorageException.coalesce(e);
    }
  }

  /**
   * Issue a single PUT to the session for the readable bytes of {@code srcs[offset..offset+length)}.
   *
   * @param finalize whether this request should finalize the resumable session
   * @return the number of bytes the server acknowledged for this request
   * @throws ClosedChannelException if the channel has already been closed
   */
  private long internalWrite(ByteBuffer[] srcs, int offset, int length, boolean finalize)
      throws ClosedChannelException {
    if (!open) {
      throw new ClosedChannelException();
    }
    RewindableContent payload = RewindableContent.of(Utils.subArray(srcs, offset, length));
    long payloadLength = payload.getLength();
    // GCS only accepts intermediate requests of at least 256KiB; a smaller request MUST be the
    // finalization request, so report no progress and wait for more bytes.
    if (!finalize && payloadLength < ByteSizeConstants._256KiB) {
      return 0;
    }
    long endOffset = cumulativeByteCount + payloadLength;
    ByteRangeSpec rangeSpec = ByteRangeSpec.explicit(cumulativeByteCount, endOffset);
    final HttpContentRange contentRange;
    if (finalize) {
      contentRange = HttpContentRange.of(rangeSpec, endOffset);
      finished = true;
    } else {
      contentRange = HttpContentRange.of(rangeSpec);
    }
    try {
      ResumableOperationResult<@Nullable StorageObject> putResult =
          session.put(payload, contentRange);
      long persistedSize = putResult.getPersistedSize();
      committedBytesCallback.accept(persistedSize);
      long written = persistedSize - cumulativeByteCount;
      cumulativeByteCount = persistedSize;
      if (finished) {
        result.set(putResult.getObject());
      }
      return written;
    } catch (Exception e) {
      result.setException(e);
      throw StorageException.coalesce(e);
    }
  }
}
package com.google.cloud.storage;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;

import com.google.api.core.ApiFunction;
import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutureCallback;
import com.google.api.core.ApiFutures;
import com.google.api.core.SettableApiFuture;
import com.google.api.gax.rpc.ApiExceptions;
import com.google.cloud.storage.ApiFutureUtils.OnFailureApiFutureCallback;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.PriorityQueue;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
import org.checkerframework.checker.nullness.qual.NonNull;

/**
 * A queue of async values which, upon reaching {@code maxElementsPerCompact} entries, collapses
 * all queued values into a single value via {@code compactFunction}.
 *
 * <p>Values are compacted in append order, regardless of the order in which their futures
 * complete. All mutation happens under an internal lock.
 */
final class AsyncAppendingQueue<@NonNull T> implements AutoCloseable {

  private enum State {
    OPEN,
    CLOSING,
    CLOSED;

    boolean isOpen() {
      return this == OPEN;
    }
  }

  private final Executor exec;
  private final int maxElementsPerCompact;
  private final ApiFunction<ImmutableList<T>, T> compactFunction;
  // monotonically increasing sequence that preserves append order during compaction
  private final AtomicInteger orderSequence;
  private final SettableApiFuture<T> finalResult;
  private final PriorityQueue<Element<T>> queue;
  // first failure observed on any appended future while still OPEN; poisons later appends
  private final AtomicReference<Throwable> shortCircuitFailure;
  private final ApiFutureCallback<T> shortCircuitRegistrationCallback;

  private final ReentrantLock lock;
  private volatile State state;

  private AsyncAppendingQueue(
      Executor exec,
      int maxElementsPerCompact,
      ApiFunction<ImmutableList<T>, T> compactFunction) {
    this.exec = exec;
    this.maxElementsPerCompact = maxElementsPerCompact;
    this.compactFunction = compactFunction;
    this.orderSequence = new AtomicInteger(0);
    this.finalResult = SettableApiFuture.create();
    this.queue = new PriorityQueue<>(maxElementsPerCompact, Element.COMP);
    this.state = State.OPEN;
    this.shortCircuitFailure = new AtomicReference<>(null);
    this.shortCircuitRegistrationCallback =
        (OnFailureApiFutureCallback<T>)
            throwable -> {
              if (state.isOpen()) {
                shortCircuitFailure.compareAndSet(null, throwable);
              }
            };
    this.lock = new ReentrantLock();
  }

  /**
   * Enqueue {@code value}, compacting all queued entries if the queue is now full.
   *
   * @throws ShortCircuitException if a previously appended future has already failed
   * @throws IllegalStateException if this queue has been closed
   */
  AsyncAppendingQueue<T> append(ApiFuture<T> value) throws ShortCircuitException {
    lock.lock();
    try {
      checkState(state.isOpen(), "already closed");
      Throwable previousFailure = shortCircuitFailure.get();
      if (previousFailure != null) {
        ShortCircuitException shortCircuit = new ShortCircuitException(previousFailure);
        finalResult.cancel(true);
        throw shortCircuit;
      }
      checkNotNull(value, "value must not be null");

      queue.offer(newElement(value));
      boolean reachedCapacity = queue.size() == maxElementsPerCompact;
      if (reachedCapacity) {
        queue.offer(compact(exec));
      }
      return this;
    } finally {
      lock.unlock();
    }
  }

  /** Future resolving to the fully compacted value once {@link #close()} has been invoked. */
  ApiFuture<T> getResult() {
    return finalResult;
  }

  /** Block until the final result is available, translating any ApiException. */
  T await() {
    return ApiExceptions.callAndTranslateApiException(finalResult);
  }

  @Override
  public void close() {
    lock.lock();
    try {
      if (!state.isOpen()) {
        return;
      }
      state = State.CLOSING;

      if (queue.isEmpty()) {
        // closing a never-used queue is a caller error; surface it both sync and async
        NoSuchElementException neverAppendedTo = new NoSuchElementException("Never appended to");
        finalResult.setException(neverAppendedTo);
        throw neverAppendedTo;
      }
      // collapse whatever remains into a single future and wire it to finalResult
      Element<T> terminal = compact(exec);
      ApiFutures.addCallback(
          terminal.getValue(),
          new ApiFutureCallback<T>() {
            @Override
            public void onFailure(Throwable err) {
              finalResult.setException(err);
            }

            @Override
            public void onSuccess(T t) {
              finalResult.set(t);
            }
          },
          exec);
      state = State.CLOSED;
    } finally {
      lock.unlock();
    }
  }

  @NonNull
  private Element<T> newElement(ApiFuture<T> value) {
    ApiFutures.addCallback(value, shortCircuitRegistrationCallback, MoreExecutors.directExecutor());
    return new Element<>(orderSequence.getAndIncrement(), value);
  }

  /** Drain every queued element and return a single element holding the compacted future. */
  @NonNull
  private Element<T> compact(Executor executor) {
    Element<T> head = queue.peek();
    checkState(head != null, "attempt to compact empty queue");
    // the compacted element takes the order of the earliest element it absorbs
    int order = head.getOrder();

    List<Element<T>> drained = new ArrayList<>();
    for (Element<T> e = queue.poll(); e != null; e = queue.poll()) {
      drained.add(e);
    }

    List<ApiFuture<T>> pending =
        drained.stream().map(Element::getValue).collect(Collectors.toList());
    ApiFuture<List<T>> all = ApiFutureUtils.quietAllAsList(pending);
    ApiFuture<T> compacted =
        ApiFutures.transform(all, ts -> compactFunction.apply(ImmutableList.copyOf(ts)), executor);
    return new Element<>(order, compacted);
  }

  public static <T> AsyncAppendingQueue<T> of(
      Executor exec, int maxElementsPerCompact, ApiFunction<ImmutableList<T>, T> compactFunction) {
    checkNotNull(exec, "exec must be non-null");
    checkArgument(maxElementsPerCompact > 1, "maxElementsPerCompact must be > 1");
    checkNotNull(compactFunction, "compactFunction must be non-null");
    return new AsyncAppendingQueue<>(exec, maxElementsPerCompact, compactFunction);
  }

  static final class ShortCircuitException extends RuntimeException {
    private ShortCircuitException(Throwable instigator) {
      super("Short Circuiting due to previously failed future", instigator);
    }
  }

  /**
   * Pairs a future with its append-order index so the {@link PriorityQueue} yields elements in the
   * order they were appended rather than the order their futures resolve.
   */
  private static final class Element<T> {
    private static final Comparator<Element<?>> COMP = Comparator.comparing(Element::getOrder);
    private final int order;
    private final ApiFuture<T> value;

    Element(int order, ApiFuture<T> value) {
      this.order = order;
      this.value = value;
    }

    int getOrder() {
      return order;
    }

    ApiFuture<T> getValue() {
      return value;
    }

    @Override
    public String toString() {
      return MoreObjects.toStringHelper(this).add("order", order).add("value", value).toString();
    }
  }
}
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.BetaApi; + +/** + * Root exception for async tasks which fail due to a session being closed. + * + * @see BlobReadSession + */ +@BetaApi +public final class AsyncSessionClosedException extends RuntimeException { + AsyncSessionClosedException(String msg) { + super(msg); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/AsyncStorageTaskException.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/AsyncStorageTaskException.java new file mode 100644 index 000000000000..7d7b585db74a --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/AsyncStorageTaskException.java @@ -0,0 +1,31 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +/** + * This exception is used to preserve the caller's stacktrace when invoking an async task in a sync + * context. It will be added as a suppressed exception when propagating the async exception. This + * allows callers to catch ApiException thrown in an async operation, while still maintaining the + * call site. 
/**
 * Exception used to capture the synchronous caller's stack trace when invoking an async task.
 *
 * <p>Created at the sync call site and attached as a suppressed exception when the async failure
 * is propagated, so callers catching an ApiException thrown by an async operation still see where
 * the call originated.
 */
public final class AsyncStorageTaskException extends RuntimeException {
  // mirrors com.google.api.gax.rpc.AsyncTaskException, which has no public constructor;
  // if that class is ever made public, make this class extend it
  AsyncStorageTaskException() {
    super("Asynchronous task failed");
  }
}

package com.google.cloud.storage;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static java.time.Duration.ZERO;
import static java.util.Objects.requireNonNull;

import com.google.api.gax.retrying.RetrySettings;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import java.time.Duration;
import java.util.Objects;
import java.util.concurrent.ThreadLocalRandom;

/**
 * Tracks elapsed duration against a timeout and computes successive backoff durations.
 *
 * <p>Error classification is deliberately out of scope; only durations and remaining backoff
 * budget are tracked here.
 *
 * <p>No clock is consulted — callers supply elapsed durations — so this class and everything built
 * on it can be exercised 100% reproducibly in tests.
 *
 * <p>The jitter algorithm is pluggable via {@link Jitterer} rather than hard coded against a
 * random number generator such as {@link ThreadLocalRandom}.
 *
 * <p>Not thread safe.
 */
final class Backoff {

  private final Duration initialBackoff;
  private final Duration maxBackoff;
  private final Duration timeout;
  private final double retryDelayMultiplier;
  private final Jitterer jitterer;

  // total backoff + elapsed time consumed so far, measured against timeout
  private Duration cumulativeBackoff;
  // last backoff handed out; the next one grows from this by retryDelayMultiplier
  private Duration previousBackoff;

  private Backoff(
      Duration initialBackoff,
      double backoffDelayMultiplier,
      Duration maxBackoff,
      Duration timeout,
      Jitterer jitterer) {
    this.initialBackoff = initialBackoff;
    this.maxBackoff = maxBackoff;
    this.timeout = timeout;
    this.jitterer = jitterer;
    this.retryDelayMultiplier = backoffDelayMultiplier;
    this.cumulativeBackoff = ZERO;
    this.previousBackoff = ZERO;
  }

  /**
   * Compute the next backoff given the {@code elapsed} duration since the previous invocation.
   *
   * <p>Returns a {@link BackoffDuration} while budget remains; returns {@link
   * BackoffResults#EXHAUSTED} once the accumulated backoff plus {@code elapsed} reaches or exceeds
   * {@link #timeout}.
   */
  BackoffResult nextBackoff(Duration elapsed) {
    checkArgument(
        Durations.gtEq(elapsed, ZERO), "elapsed must be >= PT0S (%s >= %s)", elapsed, ZERO);
    cumulativeBackoff = cumulativeBackoff.plus(elapsed);
    if (Durations.gtEq(cumulativeBackoff, timeout)) {
      return BackoffResults.EXHAUSTED;
    }
    // grow geometrically from the previous backoff; bootstrap from initialBackoff
    Duration candidate =
        Duration.ofNanos(Math.round(previousBackoff.toNanos() * retryDelayMultiplier));
    if (Durations.eq(candidate, ZERO)) {
      candidate = initialBackoff;
    }
    Duration jittered = jitterer.jitter(candidate);
    Duration remainingUntilTimeout = timeout.minus(cumulativeBackoff);
    // never exceed maxBackoff, and never back off past the overall timeout
    Duration capped = Durations.min(jittered, maxBackoff, remainingUntilTimeout);
    previousBackoff = capped;

    return BackoffDuration.of(capped);
  }

  /**
   * Reset all state.
   *
   * <p>After calling this method, backoff durations restart from their initial values.
   */
  void reset() {
    cumulativeBackoff = ZERO;
    previousBackoff = ZERO;
  }

  Duration getCumulativeBackoff() {
    return cumulativeBackoff;
  }

  Duration getTimeout() {
    return timeout;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof Backoff)) {
      return false;
    }
    Backoff backoff = (Backoff) o;
    return Double.compare(retryDelayMultiplier, backoff.retryDelayMultiplier) == 0
        && Objects.equals(initialBackoff, backoff.initialBackoff)
        && Objects.equals(maxBackoff, backoff.maxBackoff)
        && Objects.equals(timeout, backoff.timeout)
        && Objects.equals(jitterer, backoff.jitterer)
        && Objects.equals(cumulativeBackoff, backoff.cumulativeBackoff);
  }

  @Override
  public int hashCode() {
    return Objects.hash(
        initialBackoff, maxBackoff, timeout, retryDelayMultiplier, jitterer, cumulativeBackoff);
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this)
        .add("previousBackoff", previousBackoff)
        .add("cumulativeBackoff", cumulativeBackoff)
        .add("initialBackoff", initialBackoff)
        .add("maxBackoff", maxBackoff)
        .add("timeout", timeout)
        .add("retryDelayMultiplier", retryDelayMultiplier)
        .add("jitterer", jitterer)
        .toString();
  }

  /** Seed a builder from {@link RetrySettings}; a jitterer must still be supplied. */
  static Backoff.Builder from(RetrySettings retrySettings) {
    return newBuilder()
        .setInitialBackoff(retrySettings.getInitialRetryDelayDuration())
        .setRetryDelayMultiplier(retrySettings.getRetryDelayMultiplier())
        .setMaxBackoff(retrySettings.getMaxRetryDelayDuration())
        .setTimeout(retrySettings.getTotalTimeoutDuration());
  }

  static Builder newBuilder() {
    return new Builder();
  }

  static final class Builder {
    private Duration initialBackoff;
    private Duration maxBackoff;
    private Duration timeout;
    private double retryDelayMultiplier;
    private Jitterer jitterer;

    private Builder() {}

    Builder setInitialBackoff(Duration initialBackoff) {
      this.initialBackoff = initialBackoff;
      return this;
    }

    Builder setMaxBackoff(Duration maxBackoff) {
      this.maxBackoff = maxBackoff;
      return this;
    }

    Builder setTimeout(Duration timeout) {
      this.timeout = timeout;
      return this;
    }

    Builder setRetryDelayMultiplier(double retryDelayMultiplier) {
      this.retryDelayMultiplier = retryDelayMultiplier;
      return this;
    }

    Builder setJitterer(Jitterer jitterer) {
      this.jitterer = jitterer;
      return this;
    }

    Backoff build() {
      checkState(retryDelayMultiplier >= 1.0, "retryDelayMultiplier must be >= 1.0");
      Duration effectiveTimeout = requireNonNull(timeout, "timeout must be non null");
      // a non-positive timeout means "no timeout"
      if (Durations.ltEq(effectiveTimeout, ZERO)) {
        effectiveTimeout = Durations.EFFECTIVE_INFINITY;
      }
      return new Backoff(
          requireNonNull(initialBackoff, "initialBackoff must be non null"),
          retryDelayMultiplier,
          requireNonNull(maxBackoff, "maxBackoff must be non null"),
          effectiveTimeout,
          requireNonNull(jitterer, "jitterer must be non null"));
    }
  }

  interface BackoffResult {
    String errorString();
  }

  enum BackoffResults implements BackoffResult {
    EXHAUSTED;

    @Override
    public String errorString() {
      return name();
    }
  }

  static final class BackoffDuration implements BackoffResult {
    private final Duration duration;

    private BackoffDuration(Duration duration) {
      this.duration = duration;
    }

    Duration getDuration() {
      return duration;
    }

    @Override
    public String errorString() {
      return duration.toString();
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (!(o instanceof BackoffDuration)) {
        return false;
      }
      BackoffDuration that = (BackoffDuration) o;
      return Objects.equals(duration, that.duration);
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(duration);
    }

    @Override
    public String toString() {
      return MoreObjects.toStringHelper(this).add("duration", duration).toString();
    }

    static BackoffDuration of(Duration duration) {
      return new BackoffDuration(duration);
    }
  }

  /** Simple API to allow for the definition of a Jittering algorithm. */
  @FunctionalInterface
  interface Jitterer {
    Duration jitter(Duration baseline);

    static Jitterer threadLocalRandom() {
      return ThreadLocalRandomJitterer.INSTANCE;
    }

    @VisibleForTesting
    static Jitterer noJitter() {
      return NoJitter.INSTANCE;
    }
  }

  private static final class ThreadLocalRandomJitterer implements Jitterer {
    private static final ThreadLocalRandomJitterer INSTANCE = new ThreadLocalRandomJitterer();

    // jitters uniformly within [baseline, 2 * baseline)
    @Override
    public Duration jitter(Duration baseline) {
      if (!Durations.gt(baseline, ZERO)) {
        return baseline;
      }
      long nanos = baseline.toNanos();
      return baseline.plusNanos(ThreadLocalRandom.current().nextLong(nanos));
    }
  }

  private static final class NoJitter implements Jitterer {
    private static final NoJitter INSTANCE = new NoJitter();

    @Override
    public Duration jitter(Duration baseline) {
      return baseline;
    }
  }
}
package com.google.cloud.storage;

import static java.util.Objects.requireNonNull;

import com.google.api.services.storage.model.Bucket.Lifecycle.Rule;
import com.google.cloud.storage.BucketInfo.AgeDeleteRule;
import com.google.cloud.storage.BucketInfo.CreatedBeforeDeleteRule;
import com.google.cloud.storage.BucketInfo.DeleteRule;
import com.google.cloud.storage.BucketInfo.IsLiveDeleteRule;
import com.google.cloud.storage.BucketInfo.LifecycleRule;
import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleAction;
import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleCondition;
import com.google.cloud.storage.BucketInfo.NumNewerVersionsDeleteRule;
import com.google.cloud.storage.BucketInfo.RawDeleteRule;
import com.google.cloud.storage.Conversions.Codec;
import java.time.Duration;
import java.time.Instant;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.logging.Level;
import java.util.logging.LogRecord;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Utilities that exist solely to support backward compatibility.
 *
 * <p>The expectation is that every reference to this class comes from something {@code @Deprecated}.
 */
final class BackwardCompatibilityUtils {

  // explicit type arguments are load-bearing here; they do not carry through .nullable() the way
  // IDE inspection suggests they would
  @SuppressWarnings("RedundantTypeArguments")
  static final Codec<@Nullable Long, @Nullable OffsetDateTime> millisOffsetDateTimeCodec =
      Codec.of(
              (Long l) ->
                  Instant.ofEpochMilli(requireNonNull(l, "l must be non null"))
                      .atOffset(ZoneOffset.systemDefault().getRules().getOffset(Instant.now())),
              (OffsetDateTime odt) ->
                  requireNonNull(odt, "odt must be non null").toInstant().toEpochMilli())
          .nullable();

  static final Codec<Long, OffsetDateTime> millisUtcCodec =
      Codec.of(
          (Long l) ->
              Instant.ofEpochMilli(requireNonNull(l, "l must be non null"))
                  .atOffset(ZoneOffset.UTC),
          (OffsetDateTime odt) ->
              requireNonNull(odt, "odt must be non null").toInstant().toEpochMilli());

  static final Codec<@Nullable Duration, @Nullable Long> nullableDurationSecondsCodec =
      Utils.durationSecondsCodec.nullable();

  @SuppressWarnings("deprecation")
  static final Codec<DeleteRule, LifecycleRule> deleteRuleCodec =
      Codec.of(
          BackwardCompatibilityUtils::deleteRuleEncode,
          BackwardCompatibilityUtils::deleteRuleDecode);

  private BackwardCompatibilityUtils() {}

  /** Translate a deprecated {@link DeleteRule} into the modern {@link LifecycleRule} shape. */
  @SuppressWarnings("deprecation")
  private static LifecycleRule deleteRuleEncode(DeleteRule from) {
    if (from instanceof RawDeleteRule) {
      RawDeleteRule raw = (RawDeleteRule) from;
      Rule rule = raw.getRule();
      String msg =
          "The lifecycle condition "
              + resolveRuleActionType(from)
              + " is not currently supported. Please update to the latest version of"
              + " google-cloud-java. Also, use LifecycleRule rather than the deprecated"
              + " DeleteRule.";
      // construct the log record by hand so the logger/class/method names match the values the
      // old implementation reported implicitly
      LogRecord record = new LogRecord(Level.WARNING, msg);
      record.setLoggerName(BucketInfo.RawDeleteRule.class.getName());
      record.setSourceClassName(BucketInfo.RawDeleteRule.class.getName());
      record.setSourceMethodName("populateCondition");
      BucketInfo.log.log(record);

      LifecycleCondition condition =
          Conversions.json().lifecycleCondition().decode(rule.getCondition());
      return new LifecycleRule(LifecycleAction.newDeleteAction(), condition);
    }
    LifecycleCondition.Builder conditionBuilder = LifecycleCondition.newBuilder();
    if (from instanceof CreatedBeforeDeleteRule) {
      conditionBuilder.setCreatedBeforeOffsetDateTime(((CreatedBeforeDeleteRule) from).getTime());
    } else if (from instanceof AgeDeleteRule) {
      conditionBuilder.setAge(((AgeDeleteRule) from).getDaysToLive());
    } else if (from instanceof NumNewerVersionsDeleteRule) {
      conditionBuilder.setNumberOfNewerVersions(
          ((NumNewerVersionsDeleteRule) from).getNumNewerVersions());
    } else if (from instanceof IsLiveDeleteRule) {
      conditionBuilder.setIsLive(((IsLiveDeleteRule) from).isLive());
    } // RawDeleteRule was handled above; no other subtypes exist
    return new LifecycleRule(LifecycleAction.newDeleteAction(), conditionBuilder.build());
  }

  /**
   * Translate a {@link LifecycleRule} back into the closest deprecated {@link DeleteRule}
   * subtype, falling back to a {@link RawDeleteRule} wrapper when no shorthand applies.
   */
  @SuppressWarnings("deprecation")
  private static DeleteRule deleteRuleDecode(LifecycleRule from) {
    // NOTE(review): endsWith rather than equals preserves the original comparison — confirm this
    // is intentional before tightening it
    if (from.getAction() != null
        && BucketInfo.DeleteRule.SUPPORTED_ACTION.endsWith(resolveRuleActionType(from))) {
      LifecycleCondition condition = from.getCondition();
      Integer age = condition.getAge();
      if (age != null) {
        return new BucketInfo.AgeDeleteRule(age);
      }
      OffsetDateTime createdBefore = condition.getCreatedBeforeOffsetDateTime();
      if (createdBefore != null) {
        return new BucketInfo.CreatedBeforeDeleteRule(createdBefore);
      }
      Integer numNewerVersions = condition.getNumberOfNewerVersions();
      if (numNewerVersions != null) {
        return new BucketInfo.NumNewerVersionsDeleteRule(numNewerVersions);
      }
      Boolean isLive = condition.getIsLive();
      if (isLive != null) {
        return new BucketInfo.IsLiveDeleteRule(isLive);
      }
    }
    return new RawDeleteRule(Conversions.json().lifecycleRule().encode(from));
  }

  @SuppressWarnings("deprecation")
  private static String resolveRuleActionType(DeleteRule deleteRule) {
    return deleteRule != null && deleteRule.getType() != null ? deleteRule.getType().name() : null;
  }

  private static String resolveRuleActionType(LifecycleRule rule) {
    return rule != null && rule.getAction() != null ? rule.getAction().getActionType() : null;
  }
}
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import com.google.api.core.ApiFuture;
import com.google.api.core.InternalApi;
import com.google.api.core.InternalExtensionOnly;
import com.google.api.core.SettableApiFuture;
import com.google.cloud.BaseServiceException;
import com.google.cloud.storage.ResponseContentLifecycleHandle.ChildRef;
import com.google.cloud.storage.RetryContext.OnFailure;
import com.google.cloud.storage.RetryContext.OnSuccess;
import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel;
import com.google.cloud.storage.ZeroCopySupport.DisposableByteString;
import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import com.google.storage.v2.ReadRange;
import java.io.Closeable;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.ScatteringByteChannel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Common state and bookkeeping (range, retry context, read offset, close callback) for a single
 * read multiplexed over an object read session stream.
 *
 * <p>NOTE(review): generic type parameters in this file were reconstructed from call sites after
 * being stripped by an earlier transformation — confirm against upstream before relying on exact
 * signatures.
 *
 * @param <Projection> the user-facing view of this read (e.g. a channel or a future)
 */
@InternalApi
@InternalExtensionOnly
abstract class BaseObjectReadSessionStreamRead<Projection>
    implements ObjectReadSessionStreamRead<Projection> {

  protected final RangeSpec rangeSpec;
  protected final RetryContext retryContext;
  // absolute offset of the next byte expected from the stream for this read
  protected final AtomicLong readOffset;
  protected boolean closed;
  // once tombstoned, this read will not send or accept anything further
  protected boolean tombstoned;
  protected IOAutoCloseable onCloseCallback;

  BaseObjectReadSessionStreamRead(
      RangeSpec rangeSpec, RetryContext retryContext, IOAutoCloseable onCloseCallback) {
    this(rangeSpec, new AtomicLong(rangeSpec.begin()), retryContext, onCloseCallback, false);
  }

  BaseObjectReadSessionStreamRead(
      RangeSpec rangeSpec,
      AtomicLong readOffset,
      RetryContext retryContext,
      IOAutoCloseable onCloseCallback,
      boolean closed) {
    this.rangeSpec = rangeSpec;
    this.retryContext = retryContext;
    this.readOffset = readOffset;
    this.closed = closed;
    this.tombstoned = false;
    this.onCloseCallback = onCloseCallback;
  }

  /** The server-assigned id identifying this read on the shared stream. */
  abstract long readId();

  @Override
  public long readOffset() {
    return readOffset.get();
  }

  @Override
  public final void preFail() {
    tombstoned = true;
  }

  /**
   * Builds the {@link ReadRange} describing the bytes still outstanding for this read, accounting
   * for bytes already received when a range length was specified.
   */
  @Override
  public final ReadRange makeReadRange() {
    long currentOffset = readOffset.get();
    ReadRange.Builder b = ReadRange.newBuilder().setReadId(readId()).setReadOffset(currentOffset);
    rangeSpec
        .maxLength()
        .ifPresent(
            length -> {
              long readSoFar = currentOffset - rangeSpec.begin();
              b.setReadLength(length - readSoFar);
            });
    return b.build();
  }

  @Override
  public <T extends Throwable> void recordError(T t, OnSuccess onSuccess, OnFailure<T> onFailure) {
    retryContext.recordError(t, onSuccess, onFailure);
  }

  @Override
  public boolean readyToSend() {
    return !tombstoned && !retryContext.inBackoff();
  }

  @Override
  public boolean canShareStreamWith(ObjectReadSessionStreamRead<?> other) {
    return this.getClass() == other.getClass();
  }

  @Override
  public final void close() throws IOException {
    try {
      internalClose();
    } finally {
      onCloseCallback.close();
    }
  }

  @Override
  public void setOnCloseCallback(IOAutoCloseable onCloseCallback) {
    this.onCloseCallback = this.onCloseCallback.andThen(onCloseCallback);
  }

  /** Base class of a read that will accumulate before completing by resolving a future */
  abstract static class AccumulatingRead<Result>
      extends BaseObjectReadSessionStreamRead<ApiFuture<Result>> implements ApiFuture<Result> {
    // chunks received so far; synchronized because producer and consumer may differ
    protected final List<ChildRef> childRefs;
    protected final SettableApiFuture<Result> complete;
    protected final long readId;
    protected final Hasher hasher;

    private AccumulatingRead(
        long readId,
        RangeSpec rangeSpec,
        Hasher hasher,
        RetryContext retryContext,
        IOAutoCloseable onCloseCallback) {
      super(rangeSpec, retryContext, onCloseCallback);
      this.readId = readId;
      this.hasher = hasher;
      this.complete = SettableApiFuture.create();
      this.childRefs = Collections.synchronizedList(new ArrayList<>());
    }

    private AccumulatingRead(
        long readId,
        RangeSpec rangeSpec,
        Hasher hasher,
        List<ChildRef> childRefs,
        AtomicLong readOffset,
        RetryContext retryContext,
        boolean closed,
        SettableApiFuture<Result> complete,
        IOAutoCloseable onCloseCallback) {
      super(rangeSpec, readOffset, retryContext, onCloseCallback, closed);
      this.readId = readId;
      this.childRefs = childRefs;
      this.complete = complete;
      this.hasher = hasher;
    }

    @Override
    long readId() {
      return readId;
    }

    @Override
    public boolean acceptingBytes() {
      return !complete.isDone() && !tombstoned;
    }

    @Override
    public void accept(ChildRef childRef) throws IOException {
      retryContext.reset();
      int size = childRef.byteString().size();
      childRefs.add(childRef);
      readOffset.addAndGet(size);
    }

    @Override
    public ApiFuture<?> fail(Throwable t) {
      try {
        tombstoned = true;
        close();
      } catch (IOException e) {
        t.addSuppressed(e);
      } finally {
        complete.setException(t);
      }
      return complete;
    }

    @Override
    public Hasher hasher() {
      return hasher;
    }

    @Override
    public void internalClose() throws IOException {
      if (!closed) {
        retryContext.reset();
        closed = true;
        GrpcUtils.closeAll(childRefs);
      }
    }

    @Override
    public void addListener(Runnable listener, Executor executor) {
      complete.addListener(listener, executor);
    }

    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
      if (!complete.isCancelled()) {
        fail(new CancellationException());
      }
      return complete.cancel(mayInterruptIfRunning);
    }

    @Override
    public Result get() throws InterruptedException, ExecutionException {
      return complete.get();
    }

    @Override
    public Result get(long timeout, TimeUnit unit)
        throws InterruptedException, ExecutionException, TimeoutException {
      return complete.get(timeout, unit);
    }

    @Override
    public boolean isCancelled() {
      return complete.isCancelled();
    }

    @Override
    public boolean isDone() {
      return complete.isDone();
    }

    @Override
    public boolean canShareStreamWith(ObjectReadSessionStreamRead<?> other) {
      return other instanceof AccumulatingRead;
    }
  }

  /**
   * Base class of a read that will be processed in a streaming manner (e.g. {@link
   * ReadableByteChannel})
   */
  static class StreamingRead extends BaseObjectReadSessionStreamRead<UnbufferedReadableByteChannel>
      implements UnbufferedReadableByteChannel {

    private final Hasher hasher;
    private final SettableApiFuture<Void> failFuture;
    // bounded hand-off between the stream producer and the channel consumer
    private final ArrayBlockingQueue<Closeable> queue;

    private final AtomicLong readId;
    private boolean complete;
    // partially-consumed chunk carried over between read(...) invocations
    @Nullable private ChildRefHelper leftovers;

    StreamingRead(
        long readId,
        RangeSpec rangeSpec,
        Hasher hasher,
        RetryContext retryContext,
        IOAutoCloseable onCloseCallback) {
      super(rangeSpec, retryContext, onCloseCallback);
      this.readId = new AtomicLong(readId);
      this.hasher = hasher;
      this.closed = false;
      this.failFuture = SettableApiFuture.create();
      this.queue = new ArrayBlockingQueue<>(2);
      this.complete = false;
      this.leftovers = null;
    }

    @Override
    long readId() {
      return readId.get();
    }

    @Override
    public Hasher hasher() {
      return hasher;
    }

    @Override
    public boolean acceptingBytes() {
      return !closed && !tombstoned;
    }

    @Override
    public void accept(ChildRef childRef) throws IOException {
      retryContext.reset();
      int size = childRef.byteString().size();
      offer(childRef);
      readOffset.addAndGet(size);
    }

    @Override
    public void eof() throws IOException {
      retryContext.reset();
      offer(EofMarker.INSTANCE);
    }

    @Override
    public ApiFuture<?> fail(Throwable t) {
      try {
        offer(new SmuggledFailure(t));
        failFuture.set(null);
      } catch (InterruptedIOException e) {
        Thread.currentThread().interrupt();
        failFuture.setException(e);
      }
      return failFuture;
    }

    @Override
    public StreamingRead withNewReadId(long newReadId) {
      readId.set(newReadId);
      return this;
    }

    @Override
    public boolean canShareStreamWith(ObjectReadSessionStreamRead<?> other) {
      return false;
    }

    @Override
    public void internalClose() throws IOException {
      if (!closed) {
        closed = true;
        internalCleanup();
      }
    }

    @Override
    public boolean isOpen() {
      return !closed;
    }

    @Override
    public UnbufferedReadableByteChannel project() {
      return this;
    }

    @Override
    public int read(ByteBuffer dst) throws IOException {
      return Math.toIntExact(read(new ByteBuffer[] {dst}, 0, 1));
    }

    @Override
    public long read(ByteBuffer[] dsts) throws IOException {
      return read(dsts, 0, dsts.length);
    }

    @Override
    public long read(ByteBuffer[] dsts, int offset, int length) throws IOException {
      if (closed) {
        throw new ClosedChannelException();
      }
      if (complete) {
        internalCleanup();
        return -1;
      }

      long read = 0;
      // drain any partially-consumed chunk from a previous call first
      if (leftovers != null) {
        read += leftovers.copy(dsts, offset, length);
        if (!leftovers.hasRemaining()) {
          leftovers.ref.close();
          leftovers = null;
        }
      }

      try {
        Closeable poll;
        while (leftovers == null && (poll = queue.poll(10, TimeUnit.MICROSECONDS)) != null) {
          if (poll instanceof ChildRef) {
            ChildRefHelper ref = new ChildRefHelper((ChildRef) poll);
            read += ref.copy(dsts, offset, length);
            if (ref.hasRemaining()) {
              leftovers = ref;
              break;
            } else {
              ref.ref.close();
            }
          } else if (poll == EofMarker.INSTANCE) {
            complete = true;
            if (read == 0) {
              internalCleanup();
              return -1;
            }
            break;
          } else if (poll instanceof SmuggledFailure) {
            SmuggledFailure throwable = (SmuggledFailure) poll;
            close();
            BaseServiceException coalesce = StorageException.coalesce(throwable.getSmuggled());
            throw new IOException(coalesce);
          } else {
            //noinspection DataFlowIssue
            Preconditions.checkState(
                false, "unhandled queue element type %s", poll.getClass().getName());
          }
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        InterruptedIOException ioe = new InterruptedIOException();
        ioe.initCause(e);
        throw ioe;
      }

      return read;
    }

    private void offer(Closeable offer) throws InterruptedIOException {
      try {
        queue.put(offer);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        InterruptedIOException ioe = new InterruptedIOException();
        ioe.initCause(e);
        throw ioe;
      }
    }

    private void internalCleanup() throws IOException {
      retryContext.reset();
      if (leftovers != null) {
        leftovers.ref.close();
      }
      GrpcUtils.closeAll(queue);
    }

    /**
     * The queue items are added to is a queue of {@link Closeable}. This class smuggles a Throwable
     * in a no-op Closable, such that the throwable can be in the queue.
     *
     * <p>Refer to {@link #fail(Throwable)} to see where this class is instantiated.
     */
    static final class SmuggledFailure implements Closeable {
      private final Throwable smuggled;

      private SmuggledFailure(Throwable smuggled) {
        this.smuggled = smuggled;
      }

      Throwable getSmuggled() {
        return smuggled;
      }

      @Override
      public void close() throws IOException {}
    }

    /** Wraps a {@link ChildRef} exposing its bytes as read-only buffers for partial consumption. */
    static final class ChildRefHelper {
      private final ChildRef ref;

      private final List<ByteBuffer> buffers;

      private ChildRefHelper(ChildRef ref) {
        this.ref = ref;
        this.buffers = ref.byteString().asReadOnlyByteBufferList();
      }

      long copy(ByteBuffer[] dsts, int offset, int length) {
        long copied = 0;
        for (ByteBuffer b : buffers) {
          long copiedBytes = Buffers.copy(b, dsts, offset, length);
          copied += copiedBytes;
          if (b.hasRemaining()) break; // dsts are full
        }
        return copied;
      }

      boolean hasRemaining() {
        for (ByteBuffer b : buffers) {
          if (b.hasRemaining()) return true;
        }
        return false;
      }
    }

    /** Sentinel placed on the queue to signal end-of-stream. */
    private static final class EofMarker implements Closeable {
      private static final EofMarker INSTANCE = new EofMarker();

      private EofMarker() {}

      @Override
      public void close() {}
    }
  }

  /** Accumulating read completing with the object content as a {@code byte[]}. */
  static final class ByteArrayAccumulatingRead extends AccumulatingRead<byte[]> {

    ByteArrayAccumulatingRead(
        long readId,
        RangeSpec rangeSpec,
        Hasher hasher,
        RetryContext retryContext,
        IOAutoCloseable onCloseCallback) {
      super(readId, rangeSpec, hasher, retryContext, onCloseCallback);
    }

    private ByteArrayAccumulatingRead(
        long readId,
        RangeSpec rangeSpec,
        Hasher hasher,
        List<ChildRef> childRefs,
        RetryContext retryContext,
        AtomicLong readOffset,
        boolean closed,
        SettableApiFuture<byte[]> complete,
        IOAutoCloseable onCloseCallback) {
      super(
          readId,
          rangeSpec,
          hasher,
          childRefs,
          readOffset,
          retryContext,
          closed,
          complete,
          onCloseCallback);
    }

    @Override
    public ApiFuture<byte[]> project() {
      return this;
    }

    @Override
    public void eof() throws IOException {
      retryContext.reset();
      try {
        ByteString base = ByteString.empty();
        for (ChildRef ref : childRefs) {
          base = base.concat(ref.byteString());
        }
        complete.set(base.toByteArray());
      } finally {
        close();
      }
    }

    @Override
    public ByteArrayAccumulatingRead withNewReadId(long newReadId) {
      // tombstone this instance; the returned copy carries the accumulated state forward
      this.tombstoned = true;
      return new ByteArrayAccumulatingRead(
          newReadId,
          rangeSpec,
          hasher,
          childRefs,
          retryContext,
          readOffset,
          closed,
          complete,
          onCloseCallback);
    }
  }

  /** Accumulating read completing with a zero-copy {@link DisposableByteString}. */
  static final class ZeroCopyByteStringAccumulatingRead
      extends AccumulatingRead<DisposableByteString> implements DisposableByteString {

    private volatile ByteString byteString;

    ZeroCopyByteStringAccumulatingRead(
        long readId,
        RangeSpec rangeSpec,
        Hasher hasher,
        RetryContext retryContext,
        IOAutoCloseable onCloseCallback) {
      super(readId, rangeSpec, hasher, retryContext, onCloseCallback);
    }

    public ZeroCopyByteStringAccumulatingRead(
        long readId,
        RangeSpec rangeSpec,
        Hasher hasher,
        List<ChildRef> childRefs,
        AtomicLong readOffset,
        RetryContext retryContext,
        boolean closed,
        SettableApiFuture<DisposableByteString> complete,
        ByteString byteString,
        IOAutoCloseable onCloseCallback) {
      super(
          readId,
          rangeSpec,
          hasher,
          childRefs,
          readOffset,
          retryContext,
          closed,
          complete,
          onCloseCallback);
      this.byteString = byteString;
    }

    @Override
    public ApiFuture<DisposableByteString> project() {
      return this;
    }

    @Override
    public ByteString byteString() {
      return byteString;
    }

    @Override
    public void eof() throws IOException {
      retryContext.reset();
      ByteString base = ByteString.empty();
      for (ChildRef ref : childRefs) {
        base = base.concat(ref.byteString());
      }
      byteString = base;
      // note: unlike ByteArrayAccumulatingRead, childRefs are NOT closed here — the caller owns
      // disposal via the DisposableByteString contract
      complete.set(this);
    }

    @Override
    public ZeroCopyByteStringAccumulatingRead withNewReadId(long newReadId) {
      this.tombstoned = true;
      return new ZeroCopyByteStringAccumulatingRead(
          newReadId,
          rangeSpec,
          hasher,
          childRefs,
          readOffset,
          retryContext,
          closed,
          complete,
          byteString,
          onCloseCallback);
    }
  }
}
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.cloud.storage.ByteSizeConstants._2MiB;
import static java.util.Objects.requireNonNull;

import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutureCallback;
import com.google.api.core.ApiFutures;
import com.google.api.core.SettableApiFuture;
import com.google.cloud.storage.Conversions.Decoder;
import com.google.common.util.concurrent.MoreExecutors;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ReadableByteChannel;
import java.util.concurrent.locks.ReentrantLock;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Common {@link StorageReadChannel} behavior: range tracking, lazy channel creation, buffer/chunk
 * management, and resolution of the object metadata future.
 *
 * <p>NOTE(review): the type parameter and decoder/result generics were reconstructed from usage
 * after being stripped by an earlier transformation — confirm against upstream.
 *
 * @param <T> the transport-specific object representation resolved by the underlying session
 */
abstract class BaseStorageReadChannel<T> implements StorageReadChannel {

  // decodes the transport object into the user-facing BlobInfo
  private final Decoder<T, BlobInfo> objectDecoder;
  private final SettableApiFuture<T> result;

  private boolean open;
  private ByteRangeSpec byteRangeSpec;
  private int chunkSize = _2MiB;
  private BufferHandle bufferHandle;
  private LazyReadChannel<?, T> lazyReadChannel;
  // guards all mutable state; subclasses may lock around their own critical sections
  protected final ReentrantLock lock;

  protected BaseStorageReadChannel(Decoder<T, BlobInfo> objectDecoder) {
    this.objectDecoder = objectDecoder;
    this.result = SettableApiFuture.create();
    this.open = true;
    this.byteRangeSpec = ByteRangeSpec.nullRange();
    this.lock = new ReentrantLock();
  }

  @Override
  public final void setChunkSize(int chunkSize) {
    lock.lock();
    try {
      // changing chunk size invalidates the current channel AND its buffer
      StorageException.wrapIOException(() -> maybeResetChannel(true));
      this.chunkSize = chunkSize;
    } finally {
      lock.unlock();
    }
  }

  @Override
  public final boolean isOpen() {
    lock.lock();
    try {
      return open;
    } finally {
      lock.unlock();
    }
  }

  @Override
  public final void close() {
    lock.lock();
    try {
      open = false;
      if (internalGetLazyChannel().isOpen()) {
        ReadableByteChannel channel = internalGetLazyChannel().getChannel();
        StorageException.wrapIOException(channel::close);
      }
    } finally {
      lock.unlock();
    }
  }

  @Override
  public final StorageReadChannel setByteRangeSpec(ByteRangeSpec byteRangeSpec) {
    requireNonNull(byteRangeSpec, "byteRangeSpec must be non null");
    lock.lock();
    try {
      if (!this.byteRangeSpec.equals(byteRangeSpec)) {
        // new range: drop the channel but keep the buffer allocation
        StorageException.wrapIOException(() -> maybeResetChannel(false));
        this.byteRangeSpec = byteRangeSpec;
      }
      return this;
    } finally {
      lock.unlock();
    }
  }

  @Override
  public final ByteRangeSpec getByteRangeSpec() {
    return byteRangeSpec;
  }

  @Override
  public final int read(ByteBuffer dst) throws IOException {
    lock.lock();
    try {
      // BlobReadChannel only considered itself closed if close had been called on it.
      if (!open) {
        throw new ClosedChannelException();
      }
      long diff = byteRangeSpec.length();
      // the check on beginOffset >= 0 used to be a precondition on seek(long)
      // move it here to preserve existing behavior while allowing new negative offsets
      if (diff <= 0 && byteRangeSpec.beginOffset() >= 0) {
        return -1;
      }
      try {
        // trap if the fact that tmp is already closed, and instead return -1
        ReadableByteChannel tmp = internalGetLazyChannel().getChannel();
        if (!tmp.isOpen()) {
          return -1;
        }
        int read = tmp.read(dst);
        if (read != -1) {
          byteRangeSpec = byteRangeSpec.withShiftBeginOffset(read);
        }
        return read;
      } catch (StorageException e) {
        if (e.getCode() == 416) {
          // HttpStorageRpc turns 416 into a null etag with an empty byte array, leading
          // BlobReadChannel to believe it read 0 bytes, returning -1 and leaving the channel open.
          // Emulate that same behavior here to preserve behavior compatibility, though this should
          // be removed in the next major version.
          return -1;
        } else {
          throw new IOException(e);
        }
      } catch (IOException e) {
        throw e;
      } catch (Exception e) {
        throw new IOException(StorageException.coalesce(e));
      }
    } finally {
      lock.unlock();
    }
  }

  @Override
  public final ApiFuture<BlobInfo> getObject() {
    return ApiFutures.transform(result, objectDecoder::decode, MoreExecutors.directExecutor());
  }

  /** Lazily allocates (and caches) the read buffer at the current chunk size. */
  protected final BufferHandle getBufferHandle() {
    if (bufferHandle == null) {
      bufferHandle = BufferHandle.allocate(chunkSize);
    }
    return bufferHandle;
  }

  protected final int getChunkSize() {
    return chunkSize;
  }

  /** Returns the resolved object if the session has completed, otherwise {@code null}. */
  @Nullable
  protected final T getResolvedObject() {
    if (result.isDone()) {
      return StorageException.wrapFutureGet(result);
    } else {
      return null;
    }
  }

  /** Factory for the transport-specific lazy channel; invoked at most once per (re)configuration. */
  protected abstract LazyReadChannel<?, T> newLazyReadChannel();

  private void maybeResetChannel(boolean freeBuffer) throws IOException {
    if (lazyReadChannel != null) {
      if (lazyReadChannel.isOpen()) {
        lazyReadChannel.getChannel().close();
      }
      if (bufferHandle != null && !freeBuffer) {
        bufferHandle.get().clear();
      } else if (freeBuffer) {
        bufferHandle = null;
      }
      lazyReadChannel = null;
    }
  }

  private LazyReadChannel<?, T> internalGetLazyChannel() {
    if (lazyReadChannel == null) {
      LazyReadChannel<?, T> tmp = newLazyReadChannel();
      ApiFuture<T> future = tmp.getSession().getResult();
      ApiFutures.addCallback(
          future,
          new ApiFutureCallback<T>() {
            @Override
            public void onFailure(Throwable t) {
              if (!result.isDone()) {
                result.setException(t);
              }
            }

            @Override
            public void onSuccess(T t) {
              if (!result.isDone()) {
                result.set(t);
              }
            }
          },
          MoreExecutors.directExecutor());
      lazyReadChannel = tmp;
    }
    return lazyReadChannel;
  }
}
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.cloud.storage.ByteSizeConstants._16MiB;
import static com.google.cloud.storage.ByteSizeConstants._256KiB;

import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutureCallback;
import com.google.api.core.ApiFutures;
import com.google.api.core.SettableApiFuture;
import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel;
import com.google.cloud.storage.Conversions.Decoder;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.MoreExecutors;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.util.concurrent.locks.ReentrantLock;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Common {@link StorageWriteChannel} behavior: lazy channel creation, chunk/buffer management,
 * committed-position tracking, and resolution of the object metadata future.
 *
 * <p>NOTE(review): the type parameter and decoder/result generics were reconstructed from usage
 * after being stripped by an earlier transformation — confirm against upstream.
 *
 * @param <T> the transport-specific object representation resolved by the underlying session
 */
abstract class BaseStorageWriteChannel<T> implements StorageWriteChannel {

  private final Decoder<T, BlobInfo> objectDecoder;
  private final SettableApiFuture<T> result;
  // guards all mutable state; subclasses may lock around their own critical sections
  protected final ReentrantLock lock;

  private long position;
  private boolean open;
  private int chunkSize;
  private LazyWriteChannel<T> lazyWriteChannel;
  private BufferHandle bufferHandle;

  /**
   * This is tracked for compatibility with BlobWriteChannel, such that simply creating a writer
   * will create an object.
   *
   * <p>In the future we should move away from this behavior, and only create an object if write is
   * called.
   */
  protected boolean writeCalledAtLeastOnce;

  protected BaseStorageWriteChannel(Decoder<T, BlobInfo> objectDecoder) {
    this.objectDecoder = objectDecoder;
    this.result = SettableApiFuture.create();
    this.lock = new ReentrantLock();
    this.open = true;
    this.chunkSize = _16MiB;
    this.writeCalledAtLeastOnce = false;
  }

  @Override
  public final void setChunkSize(int chunkSize) {
    lock.lock();
    try {
      // Guava Preconditions only substitutes %s placeholders; %d would be emitted literally
      Preconditions.checkArgument(chunkSize > 0, "chunkSize must be > 0, received %s", chunkSize);
      Preconditions.checkState(
          bufferHandle == null || bufferHandle.position() == 0,
          "unable to change chunk size with data buffered");
      this.chunkSize = chunkSize;
    } finally {
      lock.unlock();
    }
  }

  @Override
  public final boolean isOpen() {
    lock.lock();
    try {
      return open;
    } finally {
      lock.unlock();
    }
  }

  @Override
  public final void close() throws IOException {
    lock.lock();
    try {
      // preserve BlobWriteChannel behavior: a never-written writer still creates an empty object
      if (open && !writeCalledAtLeastOnce) {
        this.write(ByteBuffer.allocate(0));
      }
      if (internalGetLazyChannel().isOpen()) {
        StorageException.wrapIOException(internalGetLazyChannel().getChannel()::close);
      }
    } finally {
      open = false;
      lock.unlock();
    }
  }

  @Override
  public final int write(ByteBuffer src) throws IOException {
    lock.lock();
    try {
      if (!open) {
        throw new ClosedChannelException();
      }
      writeCalledAtLeastOnce = true;
      try {
        BufferedWritableByteChannel tmp = internalGetLazyChannel().getChannel();
        if (!tmp.isOpen()) {
          return 0;
        }
        return tmp.write(src);
      } catch (StorageException e) {
        throw new IOException(e);
      } catch (IOException e) {
        throw e;
      } catch (Exception e) {
        throw new IOException(StorageException.coalesce(e));
      }
    } finally {
      lock.unlock();
    }
  }

  @Override
  public final ApiFuture<BlobInfo> getObject() {
    return ApiFutures.transform(result, objectDecoder::decode, MoreExecutors.directExecutor());
  }

  /** Lazily allocates (and caches) the write buffer, aligned up to a 256 KiB boundary. */
  protected final BufferHandle getBufferHandle() {
    if (bufferHandle == null) {
      bufferHandle = BufferHandle.allocate(Buffers.alignSize(getChunkSize(), _256KiB));
    }
    return bufferHandle;
  }

  protected final int getChunkSize() {
    return chunkSize;
  }

  /** Returns the resolved object if the session has completed, otherwise {@code null}. */
  @Nullable
  protected final T getResolvedObject() {
    if (result.isDone()) {
      return StorageException.wrapFutureGet(result);
    } else {
      return null;
    }
  }

  protected final long getCommittedPosition() {
    return position;
  }

  protected final void setCommittedPosition(long l) {
    position = l;
  }

  protected final void setOpen(boolean isOpen) {
    this.open = isOpen;
  }

  /** Factory for the transport-specific lazy channel; invoked at most once. */
  protected abstract LazyWriteChannel<T> newLazyWriteChannel();

  private LazyWriteChannel<T> internalGetLazyChannel() {
    if (lazyWriteChannel == null) {
      LazyWriteChannel<T> tmp = newLazyWriteChannel();
      ApiFuture<T> future = tmp.getSession().getResult();
      ApiFutures.addCallback(
          future,
          new ApiFutureCallback<T>() {
            @Override
            public void onFailure(Throwable t) {
              if (!result.isDone()) {
                result.setException(t);
              }
            }

            @Override
            public void onSuccess(T t) {
              if (!result.isDone()) {
                result.set(t);
              }
            }
          },
          MoreExecutors.directExecutor());
      lazyWriteChannel = tmp;
    }
    return lazyWriteChannel;
  }
}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.BaseServiceException; +import com.google.cloud.storage.ChunkSegmenter.ChunkSegment; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +final class BidiAppendableUnbufferedWritableByteChannel implements UnbufferedWritableByteChannel { + + private final BidiUploadStreamingStream stream; + private final ChunkSegmenter chunkSegmenter; + private final long flushInterval; + + private boolean open; + private long writeOffset; + private volatile boolean nextWriteShouldFinalize; + private boolean writeCalledAtLeastOnce; + private long lastFlushOffset; + + /** If write throws an error, don't attempt to finalize things when {@link #close()} is called. 
*/ + private boolean writeThrewError; + + BidiAppendableUnbufferedWritableByteChannel( + BidiUploadStreamingStream stream, + ChunkSegmenter chunkSegmenter, + long flushInterval, + long writeOffset) { + this.stream = stream; + this.chunkSegmenter = chunkSegmenter; + this.flushInterval = flushInterval; + this.open = true; + this.writeOffset = writeOffset; + this.nextWriteShouldFinalize = false; + this.writeThrewError = false; + this.lastFlushOffset = writeOffset; + } + + @Override + public long write(ByteBuffer[] srcs, int srcsOffset, int srcsLength) throws IOException { + return internalWrite(srcs, srcsOffset, srcsLength); + } + + @Override + public long writeAndClose(ByteBuffer[] srcs, int offset, int length) throws IOException { + long totalRemaining = Buffers.totalRemaining(srcs, offset, length); + // internalWrite is non-blocking, but close is blocking. + // loop here to ensure all the bytes we need flush are enqueued before we transition to trying + // to close. + long written = 0; + do { + written += internalWrite(srcs, offset, length); + } while (written < totalRemaining); + close(); + return written; + } + + @Override + public boolean isOpen() { + return open; + } + + @Override + public void close() throws IOException { + if (!open) { + return; + } + try { + if (writeThrewError) { + return; + } + + if (!writeCalledAtLeastOnce) { + stream.flush(); + } + if (nextWriteShouldFinalize) { + //noinspection StatementWithEmptyBody + while (!stream.finishWrite(writeOffset)) {} + } else { + //noinspection StatementWithEmptyBody + while (!stream.closeStream(writeOffset)) {} + } + + awaitResultFuture(); + } finally { + stream.sendClose(); + open = false; + } + } + + public void nextWriteShouldFinalize() { + this.nextWriteShouldFinalize = true; + } + + void flush() throws InterruptedException { + stream.flush(); + stream.awaitAckOf(writeOffset); + } + + private long internalWrite(ByteBuffer[] srcs, int srcsOffset, int srcsLength) throws IOException { + if (!open) { + 
throw new ClosedChannelException(); + } + // error early. if the result future is already failed, await it to throw the error + if (stream.getResultFuture().isDone()) { + awaitResultFuture(); + return 0; + } + writeCalledAtLeastOnce = true; + + long availableCapacity = stream.availableCapacity(); + if (availableCapacity <= 0) { + return 0; + } + RewindableContent rewindableContent = RewindableContent.of(srcs, srcsOffset, srcsLength); + long totalBufferRemaining = rewindableContent.getLength(); + + ChunkSegment[] data = + chunkSegmenter.segmentBuffers(srcs, srcsOffset, srcsLength, true, availableCapacity); + if (data.length == 0) { + return 0; + } + // we consumed some bytes from srcs, flag our content as dirty since we aren't writing + // those bytes to implicitly flag as dirty. + rewindableContent.flagDirty(); + + long remainingAfterPacking = Buffers.totalRemaining(srcs, srcsOffset, srcsLength); + long bytesConsumed = 0; + for (int i = 0, len = data.length, lastIdx = len - 1; i < len; i++) { + ChunkSegment datum = data[i]; + int size = datum.getB().size(); + boolean shouldFlush = writeOffset + size >= lastFlushOffset + flushInterval; + boolean appended; + if (i < lastIdx && !shouldFlush) { + appended = stream.append(datum); + } else if (i == lastIdx && remainingAfterPacking == 0 && nextWriteShouldFinalize) { + appended = stream.appendAndFinalize(datum); + } else { + appended = stream.appendAndFlush(datum); + } + if (appended) { + bytesConsumed += size; + writeOffset += size; + if (shouldFlush) { + lastFlushOffset = writeOffset; + } + } else { + // if we weren't able to trigger a flush by reaching the end of the array and calling + // appendAndFlush, explicitly call flush here so that some progress can be made. + // we prefer appendAndFlush so a separate message is not needed, but an extra message + // in order to make progress and free buffer space is better than ending up in a live-lock. 
+ stream.flush(); + break; + } + } + + if (bytesConsumed != totalBufferRemaining) { + rewindableContent.rewindTo(bytesConsumed); + } + + return bytesConsumed; + } + + private void awaitResultFuture() throws IOException { + try { + stream.awaitAckOf(writeOffset); + stream.getResultFuture().get(10_717, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + InterruptedIOException ioe = new InterruptedIOException(); + ioe.initCause(e); + writeThrewError = true; + throw ioe; + } catch (ExecutionException e) { + BaseServiceException coalesce = StorageException.coalesce(e.getCause()); + String message = coalesce.getMessage(); + String ioExceptionMessage = message; + // if the failure is an upload scenario we detect client side, it's message will be + // verbose. To avoid duplication, select the first line only for the io exception + int firstNewLineIndex = message != null ? message.indexOf('\n') : -1; + if (firstNewLineIndex > -1) { + ioExceptionMessage = message.substring(0, firstNewLineIndex); + } + IOException ioException = new IOException(ioExceptionMessage, coalesce); + // ioException.addSuppressed(new AsyncStorageTaskException()); + writeThrewError = true; + throw ioException; + } catch (TimeoutException e) { + writeThrewError = true; + throw new IOException(e); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiBlobWriteSessionConfig.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiBlobWriteSessionConfig.java new file mode 100644 index 000000000000..5cd7a8a6502b --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiBlobWriteSessionConfig.java @@ -0,0 +1,158 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import com.google.api.core.ApiFuture;
import com.google.api.core.BetaApi;
import com.google.api.core.InternalApi;
import com.google.api.gax.grpc.GrpcCallContext;
import com.google.cloud.storage.DefaultBlobWriteSessionConfig.DecoratedWritableByteChannelSession;
import com.google.cloud.storage.DefaultBlobWriteSessionConfig.LazySession;
import com.google.cloud.storage.TransportCompatibility.Transport;
import com.google.common.base.Preconditions;
import com.google.storage.v2.BidiWriteObjectRequest;
import com.google.storage.v2.BidiWriteObjectResponse;
import java.io.IOException;
import java.time.Clock;
import java.util.Objects;
import javax.annotation.concurrent.Immutable;

/**
 * Perform a resumable upload, uploading at most {@code bufferSize} bytes each flush.
 *
 * <p>Configuration of buffer size can be performed via {@link
 * BidiBlobWriteSessionConfig#withBufferSize(int)}.
 *
 * @since 2.34.0 This new api is in preview and is subject to breaking changes.
 */
@Immutable
@BetaApi
@TransportCompatibility({Transport.GRPC})
public final class BidiBlobWriteSessionConfig extends BlobWriteSessionConfig
    implements BlobWriteSessionConfig.GrpcCompatible {
  private static final long serialVersionUID = -903533790705476197L;

  // number of bytes buffered client side before each flush to GCS
  private final int bufferSize;

  @InternalApi
  BidiBlobWriteSessionConfig(int bufferSize) {
    this.bufferSize = bufferSize;
  }

  /**
   * The number of bytes to hold in the buffer before each flush
   *
   * <p>Default: {@code 16777216 (16 MiB)}
   *
   * @see #withBufferSize(int)
   * @since 2.34.0 This new api is in preview and is subject to breaking changes.
   */
  public int getBufferSize() {
    return bufferSize;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof BidiBlobWriteSessionConfig)) {
      return false;
    }
    BidiBlobWriteSessionConfig that = (BidiBlobWriteSessionConfig) o;
    return bufferSize == that.bufferSize;
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(bufferSize);
  }

  @Override
  WriterFactory createFactory(Clock clock) throws IOException {
    // bufferSize is the only configuration the factory needs; clock is unused here
    return new Factory(bufferSize);
  }

  /** Produces bidi write sessions for {@link GrpcStorageImpl}; rejects any other transport. */
  @InternalApi
  static final class Factory implements WriterFactory {
    // Decodes the resource of the terminal BidiWriteObjectResponse into a BlobInfo.
    // NOTE(review): type parameters of this Decoder were lost in this rendering; presumably
    // <BidiWriteObjectResponse, BlobInfo> — confirm against the original source.
    static final Conversions.Decoder
        WRITE_OBJECT_RESPONSE_BLOB_INFO_DECODER =
            Conversions.grpc().blobInfo().compose(BidiWriteObjectResponse::getResource);

    private final int bufferSize;

    private Factory(int bufferSize) {
      this.bufferSize = bufferSize;
    }

    /**
     * Builds a lazily-started, buffered, resumable bidi write session for {@code info}.
     *
     * <p>The session is created lazily: the StartResumableWrite RPC is only issued once the
     * channel is first used (via {@link LazySession}/{@link LazyWriteChannel}).
     *
     * @throws IllegalStateException if {@code s} is not a {@link GrpcStorageImpl}
     */
    @InternalApi
    @Override
    public WritableByteChannelSession writeSession(
        StorageInternal s, BlobInfo info, UnifiedOpts.Opts opts) {
      if (s instanceof GrpcStorageImpl) {
        return new DecoratedWritableByteChannelSession<>(
            new LazySession<>(
                new LazyWriteChannel<>(
                    () -> {
                      GrpcStorageImpl grpc = (GrpcStorageImpl) s;
                      // apply any per-request metadata (e.g. routing params) from opts
                      GrpcCallContext grpcCallContext =
                          opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault());
                      BidiWriteObjectRequest req =
                          grpc.getBidiWriteObjectRequest(info, opts, false);

                      ApiFuture startResumableWrite =
                          grpc.startResumableWrite(grpcCallContext, req, opts);
                      return ResumableMedia.gapic()
                          .write()
                          .bidiByteChannel(grpc.storageClient.bidiWriteObjectCallable())
                          // integrity checking is handled elsewhere; no per-chunk hashing here
                          .setHasher(Hasher.noop())
                          .setByteStringStrategy(ByteStringStrategy.copy())
                          .resumable()
                          .withRetryConfig(
                              grpc.retrier.withAlg(grpc.retryAlgorithmManager.idempotent()))
                          .buffered(BufferHandle.allocate(bufferSize))
                          .setStartAsync(startResumableWrite)
                          .build();
                    })),
            WRITE_OBJECT_RESPONSE_BLOB_INFO_DECODER);
      } else {
        throw new IllegalStateException(
            "Unknown Storage implementation: " + s.getClass().getName());
      }
    }
  }

  /**
   * Create a new instance with the {@code bufferSize} set to the specified value.
   *
   * <p>Default: {@code 16777216 (16 MiB)}
   *
   * @param bufferSize The number of bytes to hold in the buffer before each flush. Must be >=
   *     {@code 262144 (256 KiB)}
   * @return The new instance
   * @see #getBufferSize()
   * @since 2.34.0 This new api is in preview and is subject to breaking changes.
   */
  @BetaApi
  public BidiBlobWriteSessionConfig withBufferSize(int bufferSize) {
    Preconditions.checkArgument(
        bufferSize >= ByteSizeConstants._256KiB,
        "bufferSize must be >= %d",
        ByteSizeConstants._256KiB);
    return new BidiBlobWriteSessionConfig(bufferSize);
  }
}
diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiResumableWrite.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiResumableWrite.java
new file mode 100644
index 000000000000..0f5a378f8026
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiResumableWrite.java
@@ -0,0 +1,96 @@
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto; + +import com.google.cloud.storage.BidiWriteCtx.BidiWriteObjectRequestBuilderFactory; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.StartResumableWriteRequest; +import com.google.storage.v2.StartResumableWriteResponse; +import java.util.Objects; +import java.util.function.Function; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class BidiResumableWrite implements BidiWriteObjectRequestBuilderFactory { + + private final StartResumableWriteRequest req; + private final StartResumableWriteResponse res; + + private final BidiWriteObjectRequest writeRequest; + + public BidiResumableWrite( + StartResumableWriteRequest req, + StartResumableWriteResponse res, + Function f) { + this.req = req; + this.res = res; + this.writeRequest = f.apply(res.getUploadId()); + } + + public StartResumableWriteRequest getReq() { + return req; + } + + public StartResumableWriteResponse getRes() { + return res; + } + + @Override + public BidiWriteObjectRequest.Builder newBuilder() { + return writeRequest.toBuilder().clearWriteObjectSpec(); + } + + @Override + public @Nullable String bucketName() { + if (req.hasWriteObjectSpec() && req.getWriteObjectSpec().hasResource()) { + return req.getWriteObjectSpec().getResource().getBucket(); + } + return null; + } + + @Override + public String toString() { + return "BidiResumableWrite{" + "req=" + fmtProto(req) + ", res=" + fmtProto(res) + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ResumableWrite)) { + return false; + } + ResumableWrite resumableWrite = (ResumableWrite) o; + return Objects.equals(req, resumableWrite.getReq()) + && Objects.equals(res, resumableWrite.getRes()); + } + + @Override + public int hashCode() { + return Objects.hash(req, res); + } + + /** + * Helper function which is more specific 
than {@link Function#identity()}. Constraining the input + * and output to be exactly {@link BidiResumableWrite}. + */ + static BidiResumableWrite identity(BidiResumableWrite w) { + return w; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadState.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadState.java new file mode 100644 index 000000000000..7894e35f9934 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadState.java @@ -0,0 +1,1150 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto; +import static com.google.cloud.storage.Utils.ifNonNull; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkState; +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.protobuf.ByteString; +import com.google.protobuf.Descriptors.OneofDescriptor; +import com.google.storage.v2.AppendObjectSpec; +import com.google.storage.v2.BidiWriteHandle; +import com.google.storage.v2.BidiWriteObjectRedirectedError; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.WriteObjectSpec; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.Marker; +import org.slf4j.MarkerFactory; + +@SuppressWarnings("LoggingSimilarMessage") +abstract class BidiUploadState { + private 
static final Logger LOGGER = LoggerFactory.getLogger(BidiUploadState.class); + private static final Marker TRACE_ENTER = MarkerFactory.getMarker("enter"); + private static final Marker TRACE_EXIT = MarkerFactory.getMarker("exit"); + + static final OneofDescriptor FIRST_MESSAGE_DESCRIPTOR = + BidiWriteObjectRequest.getDescriptor().getOneofs().stream() + .filter(d -> "first_message".equalsIgnoreCase(d.getName())) + .findFirst() + .orElseThrow( + () -> new IllegalStateException("BidiWriteObject.first_message oneof not found")); + + // seal this class to extension + private BidiUploadState() {} + + @VisibleForTesting + BidiUploadState(String testName) { + // some runtime enforcement that this constructor is only called from a test + // if we had java9+ we could seal this all the way without this hack + StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace(); + boolean isJunitTest = + Arrays.stream(stackTrace).anyMatch(ste -> ste.getClassName().startsWith("org.junit")); + + checkState(isJunitTest, "not a junit test", testName); + } + + protected final StorageException err( + UploadFailureScenario scenario, BidiWriteObjectResponse response) { + BidiWriteObjectRequest t = peekLast(); + GrpcCallContext ctx = enqueueFirstMessageAndGetGrpcCallContext(); + return scenario.toStorageException(Utils.nullSafeList(t), response, ctx, null); + } + + @Nullable Crc32cLengthKnown getCumulativeCrc32c() { + return unimplemented(); + } + + long getTotalSentBytes() { + return unimplemented(); + } + + long getConfirmedBytes() { + return unimplemented(); + } + + long availableCapacity() { + return unimplemented(); + } + + boolean offer(ChunkSegmenter.@NonNull ChunkSegment data) { + return unimplemented(); + } + + boolean finalFlush(long totalLength) { + return unimplemented(); + } + + boolean offer(@NonNull BidiWriteObjectRequest e) { + return unimplemented(); + } + + void updateStateFromResponse(BidiWriteObjectResponse response) { + unimplemented(); + } + + @NonNull 
GrpcCallContext enqueueFirstMessageAndGetGrpcCallContext() { + return unimplemented(); + } + + void sendVia(Consumer consumer) { + unimplemented(); + } + + void updateFromRedirect(@NonNull BidiWriteObjectRedirectedError redirect) { + unimplemented(); + } + + void terminalError() { + unimplemented(); + } + + void pendingRetry() { + unimplemented(); + } + + void retrying() { + unimplemented(); + } + + @Nullable BidiWriteObjectRequest peekLast() { + return unimplemented(); + } + + boolean isFinalizing() { + return unimplemented(); + } + + ApiFuture beginReconciliation() { + return unimplemented(); + } + + static AppendableUploadState appendableNew( + BidiWriteObjectRequest initial, + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + checkArgument( + initial.hasWriteObjectSpec(), "provided initial request did not contain a WriteObjectSpec"); + WriteObjectSpec spec = initial.getWriteObjectSpec(); + return new NewAppendableUploadState( + initial, spec, baseCallContext, maxBytes, resultFuture, initialCrc32c); + } + + static AppendableUploadState appendableTakeover( + BidiWriteObjectRequest initial, + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + checkArgument( + initial.hasAppendObjectSpec(), + "provided initial request did not contain a AppendableObjectSpec"); + AppendObjectSpec spec = initial.getAppendObjectSpec(); + return new TakeoverAppendableUploadState( + initial, spec, baseCallContext, maxBytes, resultFuture, initialCrc32c); + } + + private static ImmutableMap> makeHeadersMap( + Stream xGoogRequestParamsEntries) { + return ImmutableMap.of( + "x-goog-request-params", + ImmutableList.of( + xGoogRequestParamsEntries.filter(Objects::nonNull).collect(Collectors.joining("&")))); + } + + /** + * Create a single BidiWriteObjectRequest consisting of the same semantic meaning as if doing + * first then second. 
+ * + * @throws IllegalArgumentException if both first and second have checksummedData + */ + static BidiWriteObjectRequest concatenate( + BidiWriteObjectRequest first, BidiWriteObjectRequest second) { + checkArgument( + !(first.hasChecksummedData() && second.hasChecksummedData()), + "attempting to merge two requests that both specify checksummed_data"); + BidiWriteObjectRequest.Builder b = first.toBuilder().mergeFrom(second); + long lwo = first.getWriteOffset(); + long rwo = second.getWriteOffset(); + if (first.hasChecksummedData()) { + int size = first.getChecksummedData().getContent().size(); + checkArgument( + lwo + size == rwo, + "(leftWriteOffset + size == rightWriteOffset) (%s + %s == %s)", + lwo, + size, + rwo); + b.setWriteOffset(lwo); + } else { + b.setWriteOffset(rwo); + } + + // finish_write implies flush & state_lookup. dedupe to avoid an extra incremental message + if (second.getFinishWrite() && (first.getFlush() || first.getStateLookup())) { + b.clearFlush().clearStateLookup(); + } + return b.build(); + } + + @Nullable StorageException onResponse(BidiWriteObjectResponse response) { + return unimplemented(); + } + + State getState() { + return unimplemented(); + } + + @VisibleForTesting + @Nullable BidiWriteObjectRequest peekFirst() { + return unimplemented(); + } + + SettableApiFuture getResultFuture() { + return unimplemented(); + } + + void awaitState(State... state) throws InterruptedException { + unimplemented(); + } + + public void awaitTakeoverStateReconciliation(Runnable restart) { + unimplemented(); + } + + public void awaitAck(long writeOffset) throws InterruptedException { + unimplemented(); + } + + enum State { + INITIALIZING, + TAKEOVER, + RUNNING, + PENDING_RETRY, + RETRYING, + TERMINAL_SUCCESS, + TERMINAL_ERROR; + + private static final State[] allNonTerminal = + new State[] {INITIALIZING, TAKEOVER, RUNNING, PENDING_RETRY, RETRYING}; + + boolean in(State... 
states) { + for (State state : states) { + if (state == this) { + return true; + } + } + return false; + } + } + + private static T unimplemented() { + throw new IllegalStateException("not implemented"); + } + + abstract static class BaseUploadState extends BidiUploadState { + + protected final BidiWriteObjectRequest initial; + protected final Supplier baseCallContext; + protected final ReentrantLock lock; + protected final Condition stateUpdated; + protected final Condition confirmedBytesUpdated; + + /** The maximum number of bytes allowed to be enqueued in {@link #queue} across all messages. */ + protected final long maxBytes; + + protected final ArrayList queue; + protected final SettableApiFuture resultFuture; + + /** The total number of bytes currently enqueued in {@link #queue} */ + private long enqueuedBytes; + + /** A value in the range of {@code -1 <= lastSentRequestIndex && lastSentRequestIndex < queue.size()} */ + @VisibleForTesting int lastSentRequestIndex; + + /** The minimum offset of bytes for those pending messages. */ + protected long minByteOffset; + + /** + * The number of bytes that have been "sent". This might also be named something like + * cumulativeWriteOffset. + */ + protected long totalSentBytes; + + protected @Nullable Crc32cLengthKnown cumulativeCrc32c; + + /** + * Initially {@code -1} to signify the upload does not exist at all in the server, when the + * server responds successfully this will be updated to a value >= 0.
+ */ + protected long confirmedBytes; + + protected long generation; + protected @Nullable BidiWriteHandle writeHandle; + protected @Nullable String routingToken; + protected @NonNull State state; + protected @MonotonicNonNull BidiWriteObjectResponse lastResponseWithResource; + protected @Nullable State stateToReturnToAfterRetry; + protected long finalFlushOffset; + protected boolean finalFlushSent; + protected long finishWriteOffset; + protected boolean finishWriteSent; + protected @MonotonicNonNull OpenArguments lastOpenArguments; + protected @Nullable SettableApiFuture pendingReconciliation; + + private BaseUploadState( + BidiWriteObjectRequest initial, + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c, + State startingState) { + this.initial = initial; + this.baseCallContext = baseCallContext; + this.resultFuture = resultFuture; + this.cumulativeCrc32c = initialCrc32c; + this.maxBytes = maxBytes; + this.queue = new ArrayList<>(); + this.enqueuedBytes = 0; + this.lock = new ReentrantLock(); + this.stateUpdated = lock.newCondition(); + this.confirmedBytesUpdated = lock.newCondition(); + this.lastSentRequestIndex = -1; + this.minByteOffset = 0; + this.totalSentBytes = 0; + this.confirmedBytes = -1; + this.state = startingState; + this.finalFlushOffset = -1; + this.finishWriteOffset = -1; + } + + @Override + final State getState() { + lock.lock(); + try { + return state; + } finally { + lock.unlock(); + } + } + + @Override + final @Nullable Crc32cLengthKnown getCumulativeCrc32c() { + lock.lock(); + try { + return cumulativeCrc32c; + } finally { + lock.unlock(); + } + } + + @Override + final long getTotalSentBytes() { + lock.lock(); + try { + return totalSentBytes; + } finally { + lock.unlock(); + } + } + + @Override + final long getConfirmedBytes() { + lock.lock(); + try { + return confirmedBytes; + } finally { + lock.unlock(); + } + } + + @Override + final long availableCapacity() { + 
lock.lock(); + try { + return maxBytes - enqueuedBytes; + } finally { + lock.unlock(); + } + } + + @Override + final boolean offer(ChunkSegmenter.@NonNull ChunkSegment datum) { + lock.lock(); + try { + requireNonNull(datum, "data must be non null"); + validateCurrentStateIsOneOf(State.allNonTerminal); + ByteString b = datum.getB(); + int size = b.size(); + checkNotFinalizing(size); + long availableCapacity = availableCapacity(); + if (size <= availableCapacity) { + Crc32cLengthKnown crc32c = datum.getCrc32c(); + ChecksummedData.Builder checksummedData = ChecksummedData.newBuilder().setContent(b); + if (crc32c != null) { + checksummedData.setCrc32C(crc32c.getValue()); + } + ChecksummedData built = checksummedData.build(); + boolean offered = + internalOffer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(totalSentBytes) + .setChecksummedData(built) + .build()); + if (offered) { + cumulativeCrc32c = crc32cConcat(crc32c); + } + return offered; + } + return false; + } finally { + lock.unlock(); + } + } + + @Override + public boolean finalFlush(long totalLength) { + lock.lock(); + try { + validateCurrentStateIsOneOf(State.allNonTerminal); + checkNotFinalizing(0); + checkArgument( + totalLength == totalSentBytes, + "(totalLength == totalSentBytes) (%s == %s)", + totalLength, + totalSentBytes); + + BidiWriteObjectRequest flush = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(totalLength) + .setFlush(true) + .setStateLookup(true) + .build(); + + BidiWriteObjectRequest currentLast = peekLast(); + boolean equals = flush.equals(currentLast); + if (equals && finalFlushOffset == totalLength) { + return true; + } else if (equals && lastSentRequestIndex == queue.size() - 1) { + finalFlushOffset = totalLength; + finalFlushSent = true; + return true; + } + + boolean offered = internalOffer(flush); + if (offered) { + finalFlushOffset = totalLength; + } + return offered; + } finally { + lock.unlock(); + } + } + + @Override + final boolean offer(@NonNull 
BidiWriteObjectRequest e) { + lock.lock(); + try { + requireNonNull(e, "e must be non null"); + validateCurrentStateIsOneOf(State.allNonTerminal); + if (e.hasChecksummedData()) { + checkNotFinalizing(e.getChecksummedData().getContent().size()); + } + int size = e.getChecksummedData().getContent().size(); + long availableCapacity = availableCapacity(); + if (size > availableCapacity) { + return false; + } + + checkArgument( + e.hasOneof(FIRST_MESSAGE_DESCRIPTOR) || e.getWriteOffset() == totalSentBytes, + "(write_offset == totalSentBytes) (%s == %s)", + e.getWriteOffset(), + totalSentBytes); + return internalOffer(e); + } finally { + lock.unlock(); + } + } + + protected void setConfirmedBytes(long newConfirmedBytes) { + this.confirmedBytes = newConfirmedBytes; + this.confirmedBytesUpdated.signalAll(); + } + + @Override + final void updateStateFromResponse(BidiWriteObjectResponse response) { + lock.lock(); + try { + long persistedSize = -1; + if (response.hasPersistedSize()) { + persistedSize = response.getPersistedSize(); + } else if (response.hasResource()) { + persistedSize = response.getResource().getSize(); + lastResponseWithResource = response; + generation = lastResponseWithResource.getResource().getGeneration(); + } + checkState(persistedSize > -1, "persistedSize > -1 (%s > -1)", persistedSize); + checkArgument( + persistedSize >= confirmedBytes, + "(persistedSize >= confirmedBytes) (%s >= %s)", + response, + confirmedBytes); + validateCurrentStateIsOneOf( + State.INITIALIZING, State.TAKEOVER, State.RUNNING, State.RETRYING); + routingToken = null; + // todo: test more permutations where this might be true + // 1. retry, object not yet created + if (state == State.INITIALIZING) { + setConfirmedBytes(persistedSize); + totalSentBytes = Math.max(totalSentBytes, persistedSize); + } + if (state == State.INITIALIZING || state == State.RETRYING) { + transitionTo( + stateToReturnToAfterRetry != null ? 
stateToReturnToAfterRetry : State.RUNNING); + } + + boolean signalTerminalSuccess = false; + BidiWriteObjectRequest peek; + while ((peek = peekFirst()) != null) { + if (peek.hasChecksummedData()) { + int size = peek.getChecksummedData().getContent().size(); + long endOffset = peek.getWriteOffset() + size; + if (endOffset <= persistedSize) { + poll(); + setConfirmedBytes(endOffset); + enqueuedBytes -= size; + minByteOffset = peek.getWriteOffset(); + } else { + break; + } + } else if (peek.hasOneof(FIRST_MESSAGE_DESCRIPTOR)) { + poll(); + } else if (peek.getFlush()) { + if (finalFlushSent + && persistedSize == totalSentBytes + && persistedSize == finalFlushOffset) { + setConfirmedBytes(persistedSize); + signalTerminalSuccess = true; + poll(); + } else if (persistedSize >= peek.getWriteOffset()) { + setConfirmedBytes(persistedSize); + poll(); + } else { + break; + } + } else if (peek.getFinishWrite()) { + checkState( + enqueuedBytes == 0, + "attempting to evict finish_write: true while bytes are still enqueued"); + if (response.hasResource() + && persistedSize == totalSentBytes + && persistedSize == finishWriteOffset) { + setConfirmedBytes(persistedSize); + if (response.getResource().hasFinalizeTime()) { + signalTerminalSuccess = true; + poll(); + } else { + break; + } + } else { + break; + } + } else { + //noinspection DataFlowIssue + checkState(false, "peek = {%s}, response = {%s}", fmtProto(peek), fmtProto(response)); + } + } + + if (pendingReconciliation != null) { + pendingReconciliation.set(null); + pendingReconciliation = null; + } + + if (signalTerminalSuccess && lastResponseWithResource != null) { + BidiWriteObjectResponse.Builder b = lastResponseWithResource.toBuilder(); + b.getResourceBuilder().setSize(confirmedBytes); + b.getResourceBuilder().getChecksumsBuilder().clearMd5Hash().clearCrc32C(); + if (cumulativeCrc32c != null) { + b.getResourceBuilder().getChecksumsBuilder().setCrc32C(cumulativeCrc32c.getValue()); + } + BidiWriteObjectResponse updated = 
b.build(); + resultFuture.set(updated); + terminalSuccess(); + } else if (signalTerminalSuccess) { + checkState(false, "signalTerminalSuccess without prior resource response"); + } + } finally { + lock.unlock(); + } + } + + @Override + final void updateFromRedirect(@NonNull BidiWriteObjectRedirectedError redirect) { + lock.lock(); + try { + validateCurrentStateIsOneOf( + State.INITIALIZING, State.RUNNING, State.PENDING_RETRY, State.RETRYING); + if (redirect.hasWriteHandle()) { + this.writeHandle = redirect.getWriteHandle(); + } + if (redirect.hasRoutingToken()) { + routingToken = redirect.getRoutingToken(); + } + if (redirect.hasGeneration()) { + if (generation > 0) { + checkState( + generation == redirect.getGeneration(), + "Generation changed: (generation == redirect.getGeneration()) (%s == %s)", + generation, + redirect.getGeneration()); + } + generation = redirect.getGeneration(); + } + } finally { + lock.unlock(); + } + } + + @Override + final void terminalError() { + lock.lock(); + try { + validateCurrentStateIsOneOf(State.allNonTerminal); + transitionTo(State.TERMINAL_ERROR); + if (pendingReconciliation != null) { + pendingReconciliation.cancel(true); + } + stateUpdated.signalAll(); + } finally { + lock.unlock(); + } + } + + private void terminalSuccess() { + lock.lock(); + try { + validateCurrentStateIsOneOf(State.allNonTerminal); + transitionTo(State.TERMINAL_SUCCESS); + stateUpdated.signalAll(); + } finally { + lock.unlock(); + } + } + + @Override + final void pendingRetry() { + lock.lock(); + try { + validateCurrentStateIsOneOf(State.allNonTerminal); + stateToReturnToAfterRetry = state; + transitionTo(State.PENDING_RETRY); + } finally { + lock.unlock(); + } + } + + @Override + final void retrying() { + lock.lock(); + try { + validateCurrentStateIsOneOf(State.PENDING_RETRY, State.INITIALIZING, State.TAKEOVER); + transitionTo(State.RETRYING); + lastSentRequestIndex = -1; + finishWriteSent = false; + } finally { + lock.unlock(); + } + } + + @Override + 
final boolean isFinalizing() { + lock.lock(); + try { + return finishWriteOffset >= 0 && finishWriteSent; + } finally { + lock.unlock(); + } + } + + @Override + ApiFuture beginReconciliation() { + lock.lock(); + try { + if (pendingReconciliation == null) { + pendingReconciliation = SettableApiFuture.create(); + } + return pendingReconciliation; + } finally { + lock.unlock(); + } + } + + @Override + final void sendVia(Consumer consumer) { + lock.lock(); + try { + validateCurrentStateIsOneOf( + State.INITIALIZING, State.RUNNING, State.RETRYING, State.TAKEOVER); + BidiWriteObjectRequest prev = null; + int i = lastSentRequestIndex + 1; + for (; i < queue.size(); i++) { + BidiWriteObjectRequest m = queue.get(i); + lastSentRequestIndex = i; + if (state == State.RETRYING) { + prev = m; + break; // if retrying only send the first message + } + + if (prev != null) { + // never compact bytes, purely for simplicity’s sake. ByteString won't copy when + // concatenating two values together, but there is a limit on how many bytes can be in + // an + // individual message, and it's much easier to not have to worry about all of that here. + // We're mainly wanting to ensure things like flush/finish are packed into the last data + // message, and the first data message is included with the initial request if no state + // reconciliation needs to take place. 
+ if (prev.hasChecksummedData() && m.hasChecksummedData()) { + consumer.accept(prev); + prev = m; + } else { + prev = concatenate(prev, m); + } + } else { + prev = m; + } + } + if (prev != null) { + if (prev.getFinishWrite()) { + finishWriteSent = true; + } else if (prev.getFlush() && prev.getStateLookup() && finalFlushOffset > -1) { + finalFlushSent = true; + } + consumer.accept(prev); + } + } finally { + lock.unlock(); + } + } + + private void prepend(BidiWriteObjectRequest e) { + queue.add(0, e); + enqueuedBytes = enqueuedBytes + e.getChecksummedData().getContent().size(); + } + + private void append(BidiWriteObjectRequest e) { + queue.add(e); + enqueuedBytes = enqueuedBytes + e.getChecksummedData().getContent().size(); + } + + @Override + final @Nullable BidiWriteObjectRequest peekLast() { + lock.lock(); + try { + int index = queue.size() - 1; + if (index < 0) { + return null; + } + return queue.get(index); + } finally { + lock.unlock(); + } + } + + @VisibleForTesting + @Override + final @Nullable BidiWriteObjectRequest peekFirst() { + lock.lock(); + try { + if (queue.isEmpty()) { + return null; + } + return queue.get(0); + } finally { + lock.unlock(); + } + } + + private void poll() { + BidiWriteObjectRequest remove = queue.remove(0); + if (remove != null) { + lastSentRequestIndex = Math.max(lastSentRequestIndex - 1, -1); + } + } + + protected final void transitionTo(State state) { + this.state = state; + stateUpdated.signalAll(); + } + + protected final void validateCurrentStateIsOneOf(State... allowed) { + checkState( + state.in(allowed), + "state mismatch. expected one of %s but is %s", + Arrays.toString(allowed), + state); + } + + private void checkNotFinalizing(int size) { + checkState( + finishWriteOffset == -1, + "Attempting to append bytes even though finalization has previously been signaled." 
+ + " (finishWriteOffset: %s, totalSentBytes: %s, confirmedBytes: %s, size: %s)", + finishWriteOffset, + totalSentBytes, + confirmedBytes, + size); + } + + protected final boolean internalOffer(BidiWriteObjectRequest e) { + Consumer add = this::append; + if (e.hasOneof(FIRST_MESSAGE_DESCRIPTOR)) { + if (!queue.isEmpty() && queue.get(0).hasOneof(FIRST_MESSAGE_DESCRIPTOR)) { + poll(); // dequeue the existing first message + } + add = this::prepend; + } + + boolean appended = false; + if (e.hasChecksummedData() && finishWriteOffset == -1) { + ChecksummedData checksummedData = e.getChecksummedData(); + int size = checksummedData.getContent().size(); + if (size <= availableCapacity()) { + totalSentBytes += size; + add.accept(e); + appended = true; + } + } else { + add.accept(e); + appended = true; + } + + if (e.getFinishWrite()) { + finishWriteOffset = totalSentBytes; + } + + return appended; + } + + @Nullable + private Crc32cLengthKnown crc32cConcat(@Nullable Crc32cLengthKnown rhs) { + if (cumulativeCrc32c == null) { + return null; + } + requireNonNull(rhs, "rhs must be non null"); + return cumulativeCrc32c.concat(rhs); + } + + @Override + public SettableApiFuture getResultFuture() { + return resultFuture; + } + + @Override + void awaitState(State... 
anyOf) throws InterruptedException { + lock.lock(); + try { + ImmutableSet states = ImmutableSet.copyOf(anyOf); + while (!states.contains(this.state) && !stateUpdated.await(5, TimeUnit.MILLISECONDS)) { + if (resultFuture.isDone()) { + return; + } + } + } finally { + lock.unlock(); + } + } + + @Override + public void awaitTakeoverStateReconciliation(Runnable restart) { + try { + pendingRetry(); + restart.run(); + awaitState(State.RUNNING); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw StorageException.coalesce(e); + } + } + + @Override + public void awaitAck(long writeOffset) throws InterruptedException { + lock.lock(); + try { + while (confirmedBytes < writeOffset + && !confirmedBytesUpdated.await(5, TimeUnit.MILLISECONDS)) { + if (resultFuture.isDone()) { + return; + } + } + } finally { + lock.unlock(); + } + } + } + + abstract static class AppendableUploadState extends BaseUploadState { + + private AppendableUploadState( + BidiWriteObjectRequest initial, + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c, + State startingState) { + super(initial, baseCallContext, maxBytes, resultFuture, initialCrc32c, startingState); + } + + protected abstract String getBucket(); + + protected abstract BidiWriteObjectRequest.Builder getBuilder(); + + @Override + public @NonNull GrpcCallContext enqueueFirstMessageAndGetGrpcCallContext() { + super.lock.lock(); + try { + if (!state.in(State.INITIALIZING, State.RETRYING, State.TAKEOVER)) { + return lastOpenArguments.getCtx(); + } + ImmutableMap> xGoogRequestParams = + makeHeadersMap( + Stream.of( + "bucket=" + this.getBucket(), + "appendable=true", + routingToken != null ? 
"routing_token=" + routingToken : null)); + GrpcCallContext context = baseCallContext.get().withExtraHeaders(xGoogRequestParams); + + BidiWriteObjectRequest.Builder b = this.getBuilder(); + if (state == State.RETRYING) { + b.setStateLookup(true); + } + BidiWriteObjectRequest req = b.build(); + OpenArguments openArguments = new OpenArguments(req, context); + internalOffer(req); + lastOpenArguments = openArguments; + return openArguments.getCtx(); + } finally { + super.lock.unlock(); + } + } + + @Override + @Nullable StorageException onResponse(BidiWriteObjectResponse response) { + lock.lock(); + try { + validateCurrentStateIsOneOf(State.allNonTerminal); + + if (response.hasWriteHandle()) { + this.writeHandle = response.getWriteHandle(); + } + + boolean incremental = !response.hasResource(); + long persistedSize = -1; + if (response.hasPersistedSize()) { + persistedSize = response.getPersistedSize(); + } else if (response.hasResource()) { + persistedSize = response.getResource().getSize(); + } + checkState(persistedSize > -1, "persistedSize > -1 (%s > -1)", persistedSize); + if (state == State.TAKEOVER || stateToReturnToAfterRetry == State.TAKEOVER) { + totalSentBytes = persistedSize; + setConfirmedBytes(persistedSize); + if (response.hasResource() + && response.getResource().hasChecksums() + && response.getResource().getChecksums().hasCrc32C()) { + cumulativeCrc32c = + Crc32cValue.of(response.getResource().getChecksums().getCrc32C(), persistedSize); + } + updateStateFromResponse(response); + transitionTo(State.RUNNING); + return null; + } + + long totalSentBytes = getTotalSentBytes(); + long minWriteOffset = minByteOffset; + boolean finalizing = isFinalizing(); + + if (!finalizing && incremental) { + if (persistedSize == totalSentBytes) { + updateStateFromResponse(response); + } else if (persistedSize < totalSentBytes) { + updateStateFromResponse(response); + } else { + return err(UploadFailureScenario.SCENARIO_7, response); + } + } else if (finalizing && 
!incremental) { + if (persistedSize == totalSentBytes) { + updateStateFromResponse(response); + } else if (persistedSize < totalSentBytes) { + if (persistedSize > minWriteOffset) { + updateStateFromResponse(response); + } else if (lastResponseWithResource != null) { + return err(UploadFailureScenario.SCENARIO_4_1, response); + } + } else { + return err(UploadFailureScenario.SCENARIO_4_2, response); + } + } else if (!finalizing /* && !incremental*/) { + // generally the first response from the server + if (persistedSize <= totalSentBytes) { + updateStateFromResponse(response); + } else { + return err(UploadFailureScenario.SCENARIO_7, response); + } + } else /* (finalizing && incremental) */ { + // might happen if a `flush: true, state_lookup: true, finish_write: true` + if (persistedSize == totalSentBytes) { + updateStateFromResponse(response); + } else if (persistedSize < totalSentBytes) { + if (persistedSize > minWriteOffset) { + updateStateFromResponse(response); + } else if (lastResponseWithResource != null) { + return err(UploadFailureScenario.SCENARIO_3, response); + } + } else { + return err(UploadFailureScenario.SCENARIO_2, response); + } + } + + return null; + } finally { + lock.unlock(); + } + } + } + + static final class NewAppendableUploadState extends AppendableUploadState { + private final WriteObjectSpec spec; + + private NewAppendableUploadState( + BidiWriteObjectRequest initial, + WriteObjectSpec spec, + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + super(initial, baseCallContext, maxBytes, resultFuture, initialCrc32c, State.INITIALIZING); + this.spec = spec; + } + + @Override + protected String getBucket() { + return spec.getResource().getBucket(); + } + + @Override + protected BidiWriteObjectRequest.Builder getBuilder() { + BidiWriteObjectRequest.Builder b = BidiWriteObjectRequest.newBuilder(); + if (confirmedBytes >= 0) { + checkState(generation > 0, "generation > 
0"); + + AppendObjectSpec.Builder aosb = + AppendObjectSpec.newBuilder() + .setBucket(spec.getResource().getBucket()) + .setObject(spec.getResource().getName()) + .setGeneration(generation); + if (spec.hasIfMetagenerationMatch()) { + aosb.setIfMetagenerationMatch(spec.getIfMetagenerationMatch()); + } + if (spec.hasIfMetagenerationNotMatch()) { + aosb.setIfMetagenerationNotMatch(spec.getIfMetagenerationNotMatch()); + } + ifNonNull(routingToken, aosb::setRoutingToken); + ifNonNull(writeHandle, aosb::setWriteHandle); + b.setAppendObjectSpec(aosb); + } else { + b.setWriteObjectSpec(spec); + } + return b; + } + } + + static final class TakeoverAppendableUploadState extends AppendableUploadState { + private final AppendObjectSpec spec; + + private TakeoverAppendableUploadState( + BidiWriteObjectRequest initial, + AppendObjectSpec spec, + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + super(initial, baseCallContext, maxBytes, resultFuture, initialCrc32c, State.TAKEOVER); + this.spec = spec; + } + + @Override + protected String getBucket() { + return spec.getBucket(); + } + + @Override + protected BidiWriteObjectRequest.Builder getBuilder() { + AppendObjectSpec.Builder aosb = spec.toBuilder(); + ifNonNull(routingToken, aosb::setRoutingToken); + ifNonNull(writeHandle, aosb::setWriteHandle); + return BidiWriteObjectRequest.newBuilder().setAppendObjectSpec(aosb); + } + } + + static final class OpenArguments { + + private final BidiWriteObjectRequest req; + private final GrpcCallContext ctx; + + private OpenArguments(BidiWriteObjectRequest req, GrpcCallContext ctx) { + this.req = req; + this.ctx = ctx; + } + + public BidiWriteObjectRequest getReq() { + return req; + } + + public GrpcCallContext getCtx() { + return ctx; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadStreamingStream.java
b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadStreamingStream.java new file mode 100644 index 000000000000..f6b9ae399e34 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadStreamingStream.java @@ -0,0 +1,610 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkState; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientStream; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.StreamController; +import com.google.cloud.BaseServiceException; +import com.google.cloud.storage.BidiUploadState.State; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.RetryContext.OnFailure; +import com.google.cloud.storage.RetryContext.OnSuccess; +import com.google.common.annotations.VisibleForTesting; +import com.google.storage.v2.BidiWriteObjectRedirectedError; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.ObjectChecksums; +import 
java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; +import org.checkerframework.checker.nullness.qual.EnsuresNonNull; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.checkerframework.checker.nullness.qual.RequiresNonNull; + +/** + * A class that helps tie together a {@link BidiUploadState}, {@link RetryContext} and underlying + * gRPC bidi stream. + * + *

This class helps transparently handle retries in the event an error is observed, and will + * handle redirect(s) if they occur, all without the need for the caller of this class to know about + * those things and the state need to worry about how retries will happen. + */ +final class BidiUploadStreamingStream { + + private final BidiUploadState state; + private final BidiStreamingCallable write; + // private final UnaryCallable get; + private final ScheduledExecutorService executor; + private final RetryContext retryContext; + private final OnSuccess onSuccess; + private final OnFailure onFailure; + private final ReentrantLock lock; + private final int maxRedirectsAllowed; + private final AtomicInteger redirectCounter; + + private volatile @Nullable StreamTuple stream; + private volatile @Nullable ApiFuture pendingReconciliation; + + BidiUploadStreamingStream( + BidiUploadState state, + ScheduledExecutorService executor, + BidiStreamingCallable write, + int maxRedirectsAllowed, + RetryContext retryContext) { + this.state = state; + this.executor = executor; + this.write = write; + this.lock = new ReentrantLock(); + this.retryContext = new StreamRetryContextDecorator(retryContext, lock, this::reset); + this.onSuccess = this::restart; + this.onFailure = + t -> { + SettableApiFuture resultFuture = state.getResultFuture(); + if (!resultFuture.isDone()) { + this.state.terminalError(); + BaseServiceException coalesced = StorageException.coalesce(t); + resultFuture.setException(coalesced); + } + }; + this.maxRedirectsAllowed = maxRedirectsAllowed; + this.redirectCounter = new AtomicInteger(); + } + + public ApiFuture getResultFuture() { + return state.getResultFuture(); + } + + public boolean append(ChunkSegmenter.@NonNull ChunkSegment data) { + lock.lock(); + try { + boolean offered = state.offer(data); + if (offered) { + internalSend(); + } + return offered; + } finally { + lock.unlock(); + } + } + + public boolean appendAndFlush(ChunkSegmenter.@NonNull ChunkSegment 
data) { + lock.lock(); + try { + boolean offered = state.offer(data); + if (offered) { + flush(); + } + return offered; + } finally { + lock.unlock(); + } + } + + public boolean appendAndFinalize(ChunkSegmenter.@NonNull ChunkSegment data) { + lock.lock(); + try { + boolean offered = state.offer(data); + if (offered) { + finishWrite(state.getTotalSentBytes()); + } + return offered; + } finally { + lock.unlock(); + } + } + + public void flush() { + lock.lock(); + try { + BidiWriteObjectRequest flush = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.getTotalSentBytes()) + .setFlush(true) + .setStateLookup(true) + .build(); + // if our flush is already enqueued, simply tick to make sure things are sent + if (flush.equals(state.peekLast())) { + internalSend(); + return; + } + boolean offered = state.offer(flush); + if (offered) { + internalSend(); + } + } finally { + lock.unlock(); + } + } + + public boolean finishWrite(long length) { + lock.lock(); + try { + // if we're already finalizing, ack rather than enqueueing again + if (state.isFinalizing() && state.getTotalSentBytes() == length) { + return true; + } + + BidiWriteObjectRequest.Builder b = + BidiWriteObjectRequest.newBuilder().setWriteOffset(length).setFinishWrite(true); + Crc32cLengthKnown cumulativeCrc32c = state.getCumulativeCrc32c(); + if (cumulativeCrc32c != null) { + b.setObjectChecksums( + ObjectChecksums.newBuilder().setCrc32C(cumulativeCrc32c.getValue()).build()); + } + BidiWriteObjectRequest msg = b.build(); + boolean offer = state.offer(msg); + if (offer) { + internalSend(); + } + return offer; + } finally { + lock.unlock(); + } + } + + public boolean closeStream(long length) { + lock.lock(); + try { + + boolean offer = state.finalFlush(length); + if (offer) { + internalSend(); + } + return offer; + } finally { + lock.unlock(); + } + } + + public void sendClose() { + lock.lock(); + try { + StreamTuple tmp = getStream(); + if (tmp != null) { + tmp.closeSend(); + } + } finally { + 
lock.unlock(); + } + } + + public void awaitTakeoverStateReconciliation() { + state.awaitTakeoverStateReconciliation(this::restart); + } + + void awaitAckOf(long writeOffset) throws InterruptedException { + state.awaitAck(writeOffset); + } + + /** + * It is possible for this value to change after reading, however it is guaranteed that the amount + * of available capacity will only ever increase. + * + *

The only way this value is impacted by a background thread is if buffer space is released. + * Buffer consumption can only happen from the same thread that would invoke this method. + */ + long availableCapacity() { + return state.availableCapacity(); + } + + /** expected to be called from a background thread provided by {@link #executor}. */ + @VisibleForTesting + void restart() { + lock.lock(); + try { + checkState(stream == null, "attempting to restart stream when stream is already active"); + state.retrying(); + ApiFuture reconciliation = state.beginReconciliation(); + // read the current volatile value + ApiFuture tmpPendingReconciliation = pendingReconciliation; + StreamTuple tmp = initStreamTuple(); + state.sendVia(tmp); + // Intentionally using reference equality. + // Only register the callback if we haven't previously registered it. + // We want to avoid any error/cancellation on a long-running reconciliation being registered + // in retry context multiple times. + // Unfortunately, ApiFuture doesn't provide "isCallbackRegistered" so we need to track this + // ourselves. + if (reconciliation != tmpPendingReconciliation) { + ApiFutures.addCallback( + reconciliation, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + lock.lock(); + try { + BidiUploadStreamingStream.this.pendingReconciliation = null; + } finally { + lock.unlock(); + } + retryContext.recordError(t, onSuccess, onFailure); + } + + @Override + public void onSuccess(Void result) { + lock.lock(); + try { + BidiUploadStreamingStream.this.pendingReconciliation = null; + } finally { + lock.unlock(); + } + // when the reconciliation completes, trigger sending the rest of the messages + // that might be in the queue. + // re-get the stream so that if a retry is in progress we don't attempt to send + // to a stream that was broken after reconciliation. 
+ StreamTuple tmp = getStream(); + if (tmp != null) { + state.sendVia(tmp); + } + } + }, + executor); + pendingReconciliation = reconciliation; + } + stream = tmp; + } catch (Throwable t) { + retryContext.recordError(t, onSuccess, onFailure); + } finally { + lock.unlock(); + } + } + + @VisibleForTesting + void reset() { + lock.lock(); + try { + StreamTuple tmp = stream; + if (tmp != null) { + tmp.in.flagTombstoned(); + tmp.closeSend(); + stream = null; + state.pendingRetry(); + } + } catch (Throwable t) { + // if any exception is thrown, catch it and funnel it into retryContext so that it is surfaced + // to the application. + retryContext.recordError(t, onSuccess, onFailure); + // Then throw it to prevent the current thread from running any following steps. Not ideal, + // but this can execute on a background thread that the application will never see. + // throw t; + } finally { + lock.unlock(); + } + } + + private @Nullable StreamTuple getStream() { + if (stream == null && state.getState() == State.INITIALIZING) { + stream = initStreamTuple(); + } + return stream; + } + + private StreamTuple initStreamTuple() { + GrpcCallContext grpcCallContext = state.enqueueFirstMessageAndGetGrpcCallContext(); + StreamingResponseObserver streamResponseObserver = + new StreamingResponseObserver(state, retryContext, onSuccess, onFailure); + RedirectHandlingResponseObserver responseObserver = + new RedirectHandlingResponseObserver( + state, + streamResponseObserver, + redirectCounter, + maxRedirectsAllowed, + this::reset, + () -> executor.execute(this::restart)); + ClientStream clientStream = + write.splitCall(responseObserver, grpcCallContext); + GracefulOutboundStream out = new GracefulOutboundStream(clientStream); + + return new StreamTuple(out, responseObserver); + } + + private void internalSend() { + StreamTuple tmp = getStream(); + if (tmp != null) { + state.sendVia(tmp); + } + } + + private static final class StreamTuple implements Consumer { + private final ClientStream 
out; + private final RedirectHandlingResponseObserver in; + + StreamTuple(ClientStream out, RedirectHandlingResponseObserver in) { + this.out = out; + this.in = in; + } + + @Override + public void accept(BidiWriteObjectRequest bidiWriteObjectRequest) { + out.send(bidiWriteObjectRequest); + } + + public void closeSend() { + in.flagTombstoned(); + out.closeSend(); + } + } + + static final class StreamingResponseObserver + implements ResponseObserver { + + private final BidiUploadState state; + private final RetryContext retryContext; + private final OnSuccess onSuccess; + private final OnFailure onFailure; + + @MonotonicNonNull private StreamController controller; + + StreamingResponseObserver( + BidiUploadState state, + RetryContext retryContext, + OnSuccess onSuccess, + OnFailure onFailure) { + this.state = state; + this.retryContext = retryContext; + this.onSuccess = onSuccess; + this.onFailure = onFailure; + } + + @EnsuresNonNull("controller") + @Override + public void onStart(StreamController controller) { + this.controller = controller; + controller.disableAutoInboundFlowControl(); + controller.request(1); + } + + @RequiresNonNull("controller") + @Override + public void onResponse(BidiWriteObjectResponse response) { + try { + controller.request(1); + @Nullable StorageException se = state.onResponse(response); + if (se != null) { + retryContext.recordError(se, onSuccess, onFailure); + } else { + retryContext.reset(); + } + } catch (Throwable t) { + // catch an error that might happen while processing and forward it to our retry context + retryContext.recordError(t, onSuccess, onFailure); + } + } + + @Override + public void onError(Throwable t) { + retryContext.recordError(t, onSuccess, onFailure); + } + + @Override + public void onComplete() { + // ignore + } + } + + static final class RedirectHandlingResponseObserver + implements ResponseObserver { + private final BidiUploadState state; + private final ResponseObserver delegate; + private final AtomicInteger 
redirectCounter; + private final int maxRedirectsAllowed; + private final Runnable beforeRedirect; + private final Runnable onRedirect; + + private volatile boolean tombstoned; + + RedirectHandlingResponseObserver( + BidiUploadState state, + ResponseObserver delegate, + AtomicInteger redirectCounter, + int maxRedirectsAllowed, + Runnable beforeRedirect, + Runnable onRedirect) { + this.state = state; + this.delegate = delegate; + this.redirectCounter = redirectCounter; + this.maxRedirectsAllowed = maxRedirectsAllowed; + this.beforeRedirect = beforeRedirect; + this.onRedirect = onRedirect; + this.tombstoned = false; + } + + /** + * mark this observer instance as tombstoned, this will cause it to ignore any invocations of + * its methods. + * + *

When we are going to retry a client detected error instead of a server detected one, we + * want to effectively ignore any following message that might already be inflight from the + * server. + */ + void flagTombstoned() { + tombstoned = true; + } + + @Override + public void onStart(StreamController controller) { + if (tombstoned) { + return; + } + delegate.onStart(controller); + } + + @Override + public void onResponse(BidiWriteObjectResponse response) { + if (tombstoned) { + return; + } + redirectCounter.set(0); + delegate.onResponse(response); + } + + @Override + public void onError(Throwable t) { + if (tombstoned) { + return; + } + BidiWriteObjectRedirectedError error = GrpcUtils.getBidiWriteObjectRedirectedError(t); + if (error == null) { + delegate.onError(t); + return; + } + int redirectCount = redirectCounter.incrementAndGet(); + if (redirectCount > maxRedirectsAllowed) { + // attach the fact we're ignoring the redirect to the original exception as a suppressed + // Exception. The lower level handler can then perform its usual handling, but if things + // bubble all the way up to the invoker we'll be able to see it in a bug report. 
+ t.addSuppressed(new MaxRedirectsExceededException(maxRedirectsAllowed, redirectCount)); + delegate.onError(t); + return; + } + beforeRedirect.run(); + state.updateFromRedirect(error); + onRedirect.run(); + } + + @Override + public void onComplete() { + if (tombstoned) { + return; + } + delegate.onComplete(); + } + } + + /** + * Prevent "already half-closed" if we previously called onComplete but then detect an error and + * call onError + */ + private static final class GracefulOutboundStream + implements ClientStream { + + private final ClientStream delegate; + private volatile boolean closing; + + private GracefulOutboundStream(ClientStream delegate) { + this.delegate = delegate; + this.closing = false; + } + + @Override + public boolean isSendReady() { + return delegate.isSendReady(); + } + + @Override + public void send(BidiWriteObjectRequest request) { + delegate.send(request); + } + + @Override + public void closeSendWithError(Throwable t) { + if (closing) { + return; + } + closing = true; + delegate.closeSendWithError(t); + } + + @Override + public void closeSend() { + if (closing) { + return; + } + closing = true; + delegate.closeSend(); + } + } + + /** + * Decorate a RetryContext to allow observing the invocation of {@link #recordError(Throwable, + * OnSuccess, OnFailure)}. This allows us to clear out the pending stream before a retry. 
+ */ + @VisibleForTesting + static final class StreamRetryContextDecorator implements RetryContext { + private final RetryContext retryContext; + private final ReentrantLock lock; + private final Runnable onRecordError; + + @VisibleForTesting + StreamRetryContextDecorator( + RetryContext retryContext, ReentrantLock lock, Runnable onRecordError) { + this.retryContext = retryContext; + this.lock = lock; + this.onRecordError = onRecordError; + } + + @Override + public boolean inBackoff() { + return retryContext.inBackoff(); + } + + @Override + public void reset() { + retryContext.reset(); + } + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + lock.lock(); + try { + try { + onRecordError.run(); + } catch (Throwable tt) { + t.addSuppressed(tt); + onFailure.onFailure(t); + return; + } + retryContext.recordError(t, onSuccess, onFailure); + } finally { + lock.unlock(); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiWriteCtx.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiWriteCtx.java new file mode 100644 index 000000000000..7a11e0c5a7f9 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiWriteCtx.java @@ -0,0 +1,85 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.BidiWriteCtx.BidiWriteObjectRequestBuilderFactory; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.storage.v2.BidiWriteObjectRequest; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class BidiWriteCtx { + + private final RequestFactoryT requestFactory; + + private final AtomicLong totalSentBytes; + private final AtomicLong confirmedBytes; + private final AtomicReference cumulativeCrc32c; + + BidiWriteCtx(RequestFactoryT requestFactory) { + this.requestFactory = requestFactory; + this.totalSentBytes = new AtomicLong(0); + this.confirmedBytes = new AtomicLong(0); + this.cumulativeCrc32c = new AtomicReference<>(); + } + + public RequestFactoryT getRequestFactory() { + return requestFactory; + } + + public BidiWriteObjectRequest.Builder newRequestBuilder() { + return requestFactory.newBuilder(); + } + + public AtomicLong getTotalSentBytes() { + return totalSentBytes; + } + + public AtomicLong getConfirmedBytes() { + return confirmedBytes; + } + + public AtomicReference getCumulativeCrc32c() { + return cumulativeCrc32c; + } + + // TODO: flush this out more + boolean isDirty() { + return confirmedBytes.get() == totalSentBytes.get(); + } + + @Override + public String toString() { + return "ServerState{" + + "requestFactory=" + + requestFactory + + ", totalSentBytes=" + + totalSentBytes + + ", confirmedBytes=" + + confirmedBytes + + ", totalSentCrc32c=" + + cumulativeCrc32c + + '}'; + } + + interface BidiWriteObjectRequestBuilderFactory { + BidiWriteObjectRequest.Builder newBuilder(); + + @Nullable String bucketName(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Blob.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Blob.java new file mode 100644 index 
000000000000..03d9d3f1cbc0 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Blob.java @@ -0,0 +1,1259 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.auth.ServiceAccountSigner; +import com.google.auth.ServiceAccountSigner.SigningException; +import com.google.cloud.ReadChannel; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.Storage.BlobGetOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.Storage.CopyRequest; +import com.google.cloud.storage.Storage.SignUrlOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.UnifiedOpts.ObjectOptExtractor; +import com.google.cloud.storage.UnifiedOpts.ObjectSourceOpt; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.OutputStream; +import java.net.URL; +import java.nio.file.Path; +import java.security.Key; +import java.time.OffsetDateTime; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import 
org.checkerframework.checker.nullness.qual.NonNull; + +/** + * An object in Google Cloud Storage. A {@code Blob} object includes the {@code BlobId} instance, + * the set of properties inherited from the {@link BlobInfo} class and the {@code Storage} instance. + * The class provides methods to perform operations on the object. Reading a property value does not + * issue any RPC calls. The object content is not stored within the {@code Blob} instance. + * Operations that access the content issue one or multiple RPC calls, depending on the content + * size. + * + *

Objects of this class are immutable. Operations that modify the blob like {@link #update} and + * {@link #copyTo} return a new object. Any changes to the object in Google Cloud Storage made after + * creation of the {@code Blob} are not visible in the {@code Blob}. To get a {@code Blob} object + * with the most recent information use {@link #reload}. + * + *

Example of getting the content of the object in Google Cloud Storage: + * + *

{@code
+ * BlobId blobId = BlobId.of(bucketName, blobName);
+ * Blob blob = storage.get(blobId);
+ * long size = blob.getSize(); // no RPC call is required
+ * byte[] content = blob.getContent(); // one or multiple RPC calls will be issued
+ * }
+ */ +@TransportCompatibility({Transport.HTTP, Transport.GRPC}) +public class Blob extends BlobInfo { + + private static final long serialVersionUID = 5007541696912440917L; + + private final StorageOptions options; + private transient Storage storage; + + /** Class for specifying blob source options when {@code Blob} methods are used. */ + public static class BlobSourceOption extends Option { + + private static final long serialVersionUID = 8205000496563385634L; + + private BlobSourceOption(ObjectSourceOpt opt) { + super(opt); + } + + /** + * Returns an option for blob's generation match. If this option is used the request will fail + * if generation does not match. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption generationMatch() { + return new BlobSourceOption(UnifiedOpts.generationMatchExtractor()); + } + + /** + * Returns an option for blob's generation mismatch. If this option is used the request will + * fail if generation matches. + * + * @deprecated This option is invalid, and can never result in a valid response from the server. + */ + @Deprecated + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption generationNotMatch() { + return new BlobSourceOption(UnifiedOpts.generationNotMatchExtractor()); + } + + /** + * Returns an option for blob's metageneration match. If this option is used the request will + * fail if metageneration does not match. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption metagenerationMatch() { + return new BlobSourceOption(UnifiedOpts.metagenerationMatchExtractor()); + } + + /** + * Returns an option for blob's metageneration mismatch. If this option is used the request will + * fail if metageneration matches. 
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption metagenerationNotMatch() { + return new BlobSourceOption(UnifiedOpts.metagenerationNotMatchExtractor()); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side encryption of the + * blob. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption decryptionKey(@NonNull Key key) { + return new BlobSourceOption(UnifiedOpts.decryptionKey(key)); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side encryption of the + * blob. + * + * @param key the AES256 encoded in base64 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption decryptionKey(@NonNull String key) { + return new BlobSourceOption(UnifiedOpts.decryptionKey(key)); + } + + /** + * Returns an option for blob's billing user project. This option is used only if the blob's + * bucket has requester_pays flag enabled. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption userProject(@NonNull String userProject) { + return new BlobSourceOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Returns an option for whether the request should return the raw input stream, instead of + * automatically decompressing the content. By default, this is false for Blob.downloadTo(), but + * true for ReadChannel.read(). + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption shouldReturnRawInputStream(boolean shouldReturnRawInputStream) { + return new BlobSourceOption(UnifiedOpts.returnRawInputStream(shouldReturnRawInputStream)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static BlobSourceOption[] dedupe(BlobSourceOption... 
os) { + return Option.dedupe(BlobSourceOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobSourceOption[] dedupe( + Collection collection, BlobSourceOption... os) { + return Option.dedupe(BlobSourceOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobSourceOption[] dedupe(BlobSourceOption[] array, BlobSourceOption... os) { + return Option.dedupe(BlobSourceOption[]::new, array, os); + } + + static Storage.BlobSourceOption[] toSourceOptions( + BlobInfo blobInfo, BlobSourceOption... options) { + Storage.BlobSourceOption[] convertedOptions = new Storage.BlobSourceOption[options.length]; + for (int i = 0; i < options.length; i++) { + ObjectSourceOpt opt = options[i].getOpt(); + if (opt instanceof ObjectOptExtractor) { + ObjectOptExtractor ex = (ObjectOptExtractor) opt; + ObjectSourceOpt objectSourceOpt = ex.extractFromBlobInfo(blobInfo); + convertedOptions[i] = new Storage.BlobSourceOption(objectSourceOpt); + } else { + convertedOptions[i] = new Storage.BlobSourceOption(opt); + } + } + return convertedOptions; + } + + static Storage.BlobGetOption[] toGetOptions(BlobInfo blobInfo, BlobSourceOption... options) { + Storage.BlobGetOption[] convertedOptions = new Storage.BlobGetOption[options.length]; + for (int i = 0; i < options.length; i++) { + ObjectSourceOpt opt = options[i].getOpt(); + if (opt instanceof ObjectOptExtractor) { + ObjectOptExtractor ex = (ObjectOptExtractor) opt; + ObjectSourceOpt objectSourceOpt = ex.extractFromBlobInfo(blobInfo); + convertedOptions[i] = new BlobGetOption(objectSourceOpt); + } else { + convertedOptions[i] = new BlobGetOption(options[i].getOpt()); + } + } + return convertedOptions; + } + } + + /** + * Downloads this blob to the given file path using specified blob read options. + * + * @param path destination + * @param options blob read options + * @throws StorageException upon failure + * @see Storage#downloadTo(BlobId, Path, Storage.BlobSourceOption...) 
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public void downloadTo(Path path, BlobSourceOption... options) { + // Don't use static imports of BlobSourceOption, it causes import resolution issues + // with the new UnifiedOpts shim interfaces + storage.downloadTo(this.getBlobId(), path, BlobSourceOption.toSourceOptions(this, options)); + } + + /** + * Downloads this blob to the given output stream using specified blob read options. + * + * @param outputStream + * @param options + * @throws StorageException upon failure + * @see Storage#downloadTo(BlobId, OutputStream, Storage.BlobSourceOption...) + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public void downloadTo(OutputStream outputStream, BlobSourceOption... options) { + // Don't use static imports of BlobSourceOption, it causes import resolution issues + // with the new UnifiedOpts shim interfaces + storage.downloadTo( + this.getBlobId(), outputStream, BlobSourceOption.toSourceOptions(this, options)); + } + + /** + * Downloads this blob to the given file path. + * + *

This method is replaced with {@link #downloadTo(Path, BlobSourceOption...)}, but is kept + * here for binary compatibility with the older versions of the client library. + * + * @param path destination + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public void downloadTo(Path path) { + downloadTo(path, new BlobSourceOption[0]); + } + + /** Builder for {@code Blob}. */ + public static class Builder extends BlobInfo.Builder { + + private final Storage storage; + private final BlobInfo.BuilderImpl infoBuilder; + + Builder(Blob blob) { + this.storage = blob.getStorage(); + this.infoBuilder = new BlobInfo.BuilderImpl(blob); + } + + @Override + public Builder setBlobId(BlobId blobId) { + infoBuilder.setBlobId(blobId); + return this; + } + + @Override + Builder setGeneratedId(String generatedId) { + infoBuilder.setGeneratedId(generatedId); + return this; + } + + @Override + public Builder setContentType(String contentType) { + infoBuilder.setContentType(contentType); + return this; + } + + @Override + public Builder setContentDisposition(String contentDisposition) { + infoBuilder.setContentDisposition(contentDisposition); + return this; + } + + @Override + public Builder setContentLanguage(String contentLanguage) { + infoBuilder.setContentLanguage(contentLanguage); + return this; + } + + @Override + public Builder setContentEncoding(String contentEncoding) { + infoBuilder.setContentEncoding(contentEncoding); + return this; + } + + @Override + Builder setComponentCount(Integer componentCount) { + infoBuilder.setComponentCount(componentCount); + return this; + } + + @Override + public Builder setCacheControl(String cacheControl) { + infoBuilder.setCacheControl(cacheControl); + return this; + } + + @Override + public Builder setAcl(List acl) { + infoBuilder.setAcl(acl); + return this; + } + + @Override + Builder setOwner(Entity owner) { + infoBuilder.setOwner(owner); + return this; + } + + @Override + Builder 
setSize(Long size) { + infoBuilder.setSize(size); + return this; + } + + @Override + Builder setEtag(String etag) { + infoBuilder.setEtag(etag); + return this; + } + + @Override + Builder setSelfLink(String selfLink) { + infoBuilder.setSelfLink(selfLink); + return this; + } + + @Override + public Builder setMd5(String md5) { + infoBuilder.setMd5(md5); + return this; + } + + @Override + public Builder setMd5FromHexString(String md5HexString) { + infoBuilder.setMd5FromHexString(md5HexString); + return this; + } + + @Override + public Builder setCrc32c(String crc32c) { + infoBuilder.setCrc32c(crc32c); + return this; + } + + @Override + public Builder setCrc32cFromHexString(String crc32cHexString) { + infoBuilder.setCrc32cFromHexString(crc32cHexString); + return this; + } + + @Override + Builder setMediaLink(String mediaLink) { + infoBuilder.setMediaLink(mediaLink); + return this; + } + + @Override + public Builder setMetadata(Map metadata) { + infoBuilder.setMetadata(metadata); + return this; + } + + @Override + public Builder setStorageClass(StorageClass storageClass) { + infoBuilder.setStorageClass(storageClass); + return this; + } + + /** + * @deprecated Use {@link #setTimeStorageClassUpdatedOffsetDateTime(OffsetDateTime)} + */ + @Override + @Deprecated + public Builder setTimeStorageClassUpdated(Long timeStorageClassUpdated) { + infoBuilder.setTimeStorageClassUpdated(timeStorageClassUpdated); + return this; + } + + @Override + public BlobInfo.Builder setTimeStorageClassUpdatedOffsetDateTime( + OffsetDateTime timeStorageClassUpdated) { + infoBuilder.setTimeStorageClassUpdatedOffsetDateTime(timeStorageClassUpdated); + return this; + } + + @Override + Builder setMetageneration(Long metageneration) { + infoBuilder.setMetageneration(metageneration); + return this; + } + + /** + * @deprecated Use {@link #setDeleteTimeOffsetDateTime(OffsetDateTime)} + */ + @Override + @Deprecated + Builder setDeleteTime(Long deleteTime) { + infoBuilder.setDeleteTime(deleteTime); + return 
this; + } + + @Override + BlobInfo.Builder setDeleteTimeOffsetDateTime(OffsetDateTime deleteTime) { + infoBuilder.setDeleteTimeOffsetDateTime(deleteTime); + return this; + } + + /** + * @deprecated Use {@link #setUpdateTimeOffsetDateTime(OffsetDateTime)} + */ + @Override + @Deprecated + Builder setUpdateTime(Long updateTime) { + infoBuilder.setUpdateTime(updateTime); + return this; + } + + @Override + BlobInfo.Builder setUpdateTimeOffsetDateTime(OffsetDateTime updateTime) { + infoBuilder.setUpdateTimeOffsetDateTime(updateTime); + return this; + } + + @Override + @Deprecated + Builder setCreateTime(Long createTime) { + infoBuilder.setCreateTime(createTime); + return this; + } + + @Override + BlobInfo.Builder setCreateTimeOffsetDateTime(OffsetDateTime createTime) { + infoBuilder.setCreateTimeOffsetDateTime(createTime); + return this; + } + + /** + * @deprecated Use {@link #setCustomTimeOffsetDateTime(OffsetDateTime)} + */ + @Override + @Deprecated + public Builder setCustomTime(Long customTime) { + infoBuilder.setCustomTime(customTime); + return this; + } + + @Override + public BlobInfo.Builder setCustomTimeOffsetDateTime(OffsetDateTime customTime) { + infoBuilder.setCustomTimeOffsetDateTime(customTime); + return this; + } + + @Override + Builder setIsDirectory(boolean isDirectory) { + infoBuilder.setIsDirectory(isDirectory); + return this; + } + + @Override + Builder setCustomerEncryption(CustomerEncryption customerEncryption) { + infoBuilder.setCustomerEncryption(customerEncryption); + return this; + } + + @Override + Builder setKmsKeyName(String kmsKeyName) { + infoBuilder.setKmsKeyName(kmsKeyName); + return this; + } + + @Override + public Builder setEventBasedHold(Boolean eventBasedHold) { + infoBuilder.setEventBasedHold(eventBasedHold); + return this; + } + + @Override + public Builder setTemporaryHold(Boolean temporaryHold) { + infoBuilder.setTemporaryHold(temporaryHold); + return this; + } + + /** + * @deprecated Use {@link 
#setRetentionExpirationTimeOffsetDateTime(OffsetDateTime)} + */ + @Override + @Deprecated + Builder setRetentionExpirationTime(Long retentionExpirationTime) { + infoBuilder.setRetentionExpirationTime(retentionExpirationTime); + return this; + } + + @Override + Builder setRetentionExpirationTimeOffsetDateTime(OffsetDateTime retentionExpirationTime) { + infoBuilder.setRetentionExpirationTimeOffsetDateTime(retentionExpirationTime); + return this; + } + + @Override + Builder setSoftDeleteTime(OffsetDateTime softDeleteTime) { + infoBuilder.setSoftDeleteTime(softDeleteTime); + return this; + } + + @Override + Builder setHardDeleteTime(OffsetDateTime hardDeleteTime) { + infoBuilder.setHardDeleteTime(hardDeleteTime); + return this; + } + + @Override + public Builder setRetention(Retention retention) { + infoBuilder.setRetention(retention); + return this; + } + + @Override + public Builder setContexts(ObjectContexts contexts) { + infoBuilder.setContexts(contexts); + return this; + } + + @Override + public Blob build() { + return new Blob(storage, infoBuilder); + } + + @Override + BlobId getBlobId() { + return infoBuilder.getBlobId(); + } + + @Override + Builder clearBlobId() { + infoBuilder.clearBlobId(); + return this; + } + + @Override + Builder clearGeneratedId() { + infoBuilder.clearGeneratedId(); + return this; + } + + @Override + Builder clearContentType() { + infoBuilder.clearContentType(); + return this; + } + + @Override + Builder clearContentEncoding() { + infoBuilder.clearContentEncoding(); + return this; + } + + @Override + Builder clearContentDisposition() { + infoBuilder.clearContentDisposition(); + return this; + } + + @Override + Builder clearContentLanguage() { + infoBuilder.clearContentLanguage(); + return this; + } + + @Override + Builder clearComponentCount() { + infoBuilder.clearComponentCount(); + return this; + } + + @Override + Builder clearCacheControl() { + infoBuilder.clearCacheControl(); + return this; + } + + @Override + Builder clearAcl() { + 
infoBuilder.clearAcl(); + return this; + } + + @Override + Builder clearOwner() { + infoBuilder.clearOwner(); + return this; + } + + @Override + Builder clearSize() { + infoBuilder.clearSize(); + return this; + } + + @Override + Builder clearEtag() { + infoBuilder.clearEtag(); + return this; + } + + @Override + Builder clearSelfLink() { + infoBuilder.clearSelfLink(); + return this; + } + + @Override + Builder clearMd5() { + infoBuilder.clearMd5(); + return this; + } + + @Override + Builder clearCrc32c() { + infoBuilder.clearCrc32c(); + return this; + } + + @Override + Builder clearCustomTime() { + infoBuilder.clearCustomTime(); + return this; + } + + @Override + Builder clearMediaLink() { + infoBuilder.clearMediaLink(); + return this; + } + + @Override + Builder clearMetadata() { + infoBuilder.clearMetadata(); + return this; + } + + @Override + Builder clearMetageneration() { + infoBuilder.clearMetageneration(); + return this; + } + + @Override + Builder clearDeleteTime() { + infoBuilder.clearDeleteTime(); + return this; + } + + @Override + Builder clearUpdateTime() { + infoBuilder.clearUpdateTime(); + return this; + } + + @Override + Builder clearCreateTime() { + infoBuilder.clearCreateTime(); + return this; + } + + @Override + Builder clearIsDirectory() { + infoBuilder.clearIsDirectory(); + return this; + } + + @Override + Builder clearCustomerEncryption() { + infoBuilder.clearCustomerEncryption(); + return this; + } + + @Override + Builder clearStorageClass() { + infoBuilder.clearStorageClass(); + return this; + } + + @Override + Builder clearTimeStorageClassUpdated() { + infoBuilder.clearTimeStorageClassUpdated(); + return this; + } + + @Override + Builder clearKmsKeyName() { + infoBuilder.clearKmsKeyName(); + return this; + } + + @Override + Builder clearEventBasedHold() { + infoBuilder.clearEventBasedHold(); + return this; + } + + @Override + Builder clearTemporaryHold() { + infoBuilder.clearTemporaryHold(); + return this; + } + + @Override + Builder 
clearRetentionExpirationTime() { + infoBuilder.clearRetentionExpirationTime(); + return this; + } + + @Override + Builder clearContexts() { + infoBuilder.clearContexts(); + return this; + } + } + + Blob(Storage storage, BlobInfo.BuilderImpl infoBuilder) { + super(infoBuilder); + this.storage = checkNotNull(storage); + this.options = storage.getOptions(); + } + + /** + * Checks if this blob exists. + * + *

Example of checking if the blob exists. + * + *

{@code
+   * boolean exists = blob.exists();
+   * if (exists) {
+   *   // the blob exists
+   * } else {
+   *   // the blob was not found
+   * }
+   * }
+ * + * @param options blob read options + * @return true if this blob exists, false otherwise + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public boolean exists(BlobSourceOption... options) { + int length = options.length; + // Don't use static imports of BlobSourceOption, it causes import resolution issues + // with the new UnifiedOpts shim interfaces + Storage.BlobGetOption[] getOptions = + Arrays.copyOf(BlobSourceOption.toGetOptions(this, options), length + 1); + getOptions[length] = Storage.BlobGetOption.fields(); + return storage.get(getBlobId(), getOptions) != null; + } + + /** + * Returns this blob's content. + * + *

Example of reading all bytes of the blob, if its generation matches the {@link + * Blob#getGeneration()} value, otherwise a {@link StorageException} is thrown. + * + *

{@code
+   * byte[] content = blob.getContent(BlobSourceOption.generationMatch());
+   * }
+ * + * @param options blob read options + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public byte[] getContent(BlobSourceOption... options) { + // Don't use static imports of BlobSourceOption, it causes import resolution issues + // with the new UnifiedOpts shim interfaces + return storage.readAllBytes(getBlobId(), BlobSourceOption.toSourceOptions(this, options)); + } + + /** + * Fetches the latest blob properties. Returns {@code null} if the blob no longer exists. + * + *

{@code options} parameter can contain the preconditions. For example, the user might want to + * get the blob properties only if the content has not been updated externally. {@code + * StorageException} with the code {@code 412} is thrown if preconditions fail. + * + *

Example of retrieving the blob's latest information only if the content is not updated + * externally: + * + *

{@code
+   * Blob blob = storage.get(BlobId.of(bucketName, blobName));
+   *
+   * doSomething();
+   *
+   * try {
+   *   blob = blob.reload(Blob.BlobSourceOption.generationMatch());
+   * } catch (StorageException e) {
+   *   if (e.getCode() == 412) {
+   *     // the content was updated externally
+   *   } else {
+   *     throw e;
+   *   }
+   * }
+   * }
+ * + * @param options preconditions to use on reload, see https://cloud.google.com/storage/docs/json_api/v1/objects/get + * for more information. + * @return a {@code Blob} object with latest information or {@code null} if no longer exists. + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Blob reload(BlobSourceOption... options) { + // BlobId with generation unset is needed to retrieve the latest version of the Blob + BlobId idWithoutGeneration = BlobId.of(getBucket(), getName()); + // Don't use static imports of BlobSourceOption, it causes import resolution issues + // with the new UnifiedOpts shim interfaces + return storage.get(idWithoutGeneration, BlobSourceOption.toGetOptions(this, options)); + } + + /** + * Updates the blob properties. The {@code options} parameter contains the preconditions for + * applying the update. To update the properties call {@link #toBuilder()}, set the properties you + * want to change, build the new {@code Blob} instance, and then call {@link + * #update(BlobTargetOption...)}. + * + *

The property update details are described in {@link Storage#update(BlobInfo)}. {@link + * Storage#update(BlobInfo, BlobTargetOption...)} describes how to specify preconditions. + * + *

Example of updating the content type: + * + *

{@code
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * Blob blob = storage.get(blobId);
+   * blob.toBuilder().setContentType("text/plain").build().update();
+   * }
+ * + * @param options preconditions to apply the update + * @return the updated {@code Blob} + * @throws StorageException upon failure + * @see https://cloud.google.com/storage/docs/json_api/v1/objects/update + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Blob update(BlobTargetOption... options) { + return storage.update(this, options); + } + + /** + * Deletes this blob. + * + *

Example of deleting the blob, if its generation matches the {@link Blob#getGeneration()} + * value, otherwise a {@link StorageException} is thrown. + * + *

{@code
+   * boolean deleted = blob.delete(BlobSourceOption.generationMatch());
+   * if (deleted) {
+   *   // the blob was deleted
+   * } else {
+   *   // the blob was not found
+   * }
+   * }
+ * + * @param options blob delete options + * @return {@code true} if blob was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public boolean delete(BlobSourceOption... options) { + // Don't use static imports of BlobSourceOption, it causes import resolution issues + // with the new UnifiedOpts shim interfaces + return storage.delete(getBlobId(), BlobSourceOption.toSourceOptions(this, options)); + } + + /** + * Sends a copy request for the current blob to the target blob. Possibly also some of the + * metadata are copied (e.g. content-type). + * + *

Example of copying the blob to a different bucket with a different name. + * + *

{@code
+   * String bucketName = "my_unique_bucket";
+   * String blobName = "copy_blob_name";
+   * CopyWriter copyWriter = blob.copyTo(BlobId.of(bucketName, blobName));
+   * Blob copiedBlob = copyWriter.getResult();
+   * }
+ * + * @param targetBlob target blob's id + * @param options source blob options + * @return a {@link CopyWriter} object that can be used to get information on the newly created + * blob or to complete the copy if more than one RPC request is needed + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public CopyWriter copyTo(BlobId targetBlob, BlobSourceOption... options) { + CopyRequest copyRequest = + CopyRequest.newBuilder() + .setSource(getBucket(), getName()) + // Don't use static imports of BlobSourceOption, it causes import resolution issues + // with the new UnifiedOpts shim interfaces + .setSourceOptions(BlobSourceOption.toSourceOptions(this, options)) + .setTarget(targetBlob) + .build(); + return storage.copy(copyRequest); + } + + /** + * Sends a copy request for the current blob to the target bucket, preserving its name. Possibly + * copying also some of the metadata (e.g. content-type). + * + *

Example of copying the blob to a different bucket, keeping the original name. + * + *

{@code
+   * String bucketName = "my_unique_bucket";
+   * CopyWriter copyWriter = blob.copyTo(bucketName);
+   * Blob copiedBlob = copyWriter.getResult();
+   * }
+ * + * @param targetBucket target bucket's name + * @param options source blob options + * @return a {@link CopyWriter} object that can be used to get information on the newly created + * blob or to complete the copy if more than one RPC request is needed + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public CopyWriter copyTo(String targetBucket, BlobSourceOption... options) { + return copyTo(targetBucket, getName(), options); + } + + /** + * Sends a copy request for the current blob to the target blob. Possibly also some of the + * metadata are copied (e.g. content-type). + * + *

Example of copying the blob to a different bucket with a different name. + * + *

{@code
+   * String bucketName = "my_unique_bucket";
+   * String blobName = "copy_blob_name";
+   * CopyWriter copyWriter = blob.copyTo(bucketName, blobName);
+   * Blob copiedBlob = copyWriter.getResult();
+   * }
+ * + *

Example of moving a blob to a different bucket with a different name. + * + *

{@code
+   * String destBucket = "my_unique_bucket";
+   * String destBlob = "move_blob_name";
+   * CopyWriter copyWriter = blob.copyTo(destBucket, destBlob);
+   * Blob copiedBlob = copyWriter.getResult();
+   * boolean deleted = blob.delete();
+   * }
+ * + * @param targetBucket target bucket's name + * @param targetBlob target blob's name + * @param options source blob options + * @return a {@link CopyWriter} object that can be used to get information on the newly created + * blob or to complete the copy if more than one RPC request is needed + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public CopyWriter copyTo(String targetBucket, String targetBlob, BlobSourceOption... options) { + return copyTo(BlobId.of(targetBucket, targetBlob), options); + } + + /** + * Returns a {@code ReadChannel} object for reading this blob's content. + * + *

Example of reading the blob's content through a reader. + * + *

{@code
+   * try (ReadChannel reader = blob.reader()) {
+   *   ByteBuffer bytes = ByteBuffer.allocate(64 * 1024);
+   *   while (reader.read(bytes) > 0) {
+   *     bytes.flip();
+   *     // do something with bytes
+   *     bytes.clear();
+   *   }
+   * }
+   * }
+ * + *

Example of reading just a portion of the blob's content. + * + *

{@code
+   * int start = 1;
+   * int end = 8;
+   * try (ReadChannel reader = blob.reader()) {
+   *   reader.seek(start);
+   *   ByteBuffer bytes = ByteBuffer.allocate(end - start);
+   *   reader.read(bytes);
+   *   return bytes.array();
+   * }
+   * }
+ * + * @param options blob read options + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public ReadChannel reader(BlobSourceOption... options) { + // Don't use static imports of BlobSourceOption, it causes import resolution issues + // with the new UnifiedOpts shim interfaces + return storage.reader(getBlobId(), BlobSourceOption.toSourceOptions(this, options)); + } + + /** + * Returns a {@code WriteChannel} object for writing to this blob. By default any md5 and crc32c + * values in the current blob are ignored unless requested via the {@code + * BlobWriteOption.md5Match} and {@code BlobWriteOption.crc32cMatch} options. + * + *

Example of writing the blob's content through a writer. + * + *

{@code
+   * byte[] content = "Hello, World!".getBytes(UTF_8);
+   * try (WriteChannel writer = blob.writer()) {
+   *     writer.write(ByteBuffer.wrap(content, 0, content.length));
+   * } catch (IOException ex) {
+   *   // handle exception
+   * }
+   * blob = blob.reload();
+   * }
+ * + * @param options target blob options + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public WriteChannel writer(BlobWriteOption... options) { + return storage.writer(this, options); + } + + /** + * Generates a signed URL for this blob. If you want to allow access for a fixed amount of time to + * this blob, you can use this method to generate a URL that is only valid within a certain time + * period. This is particularly useful if you don't want publicly accessible blobs, but also don't + * want to require users to explicitly log in. Signing a URL requires a service account signer. If + * an instance of {@link com.google.auth.ServiceAccountSigner} was passed to {@link + * StorageOptions}' builder via {@code setCredentials(Credentials)} or the default credentials are + * being used and the environment variable {@code GOOGLE_APPLICATION_CREDENTIALS} is set or your + * application is running in App Engine, then {@code signUrl} will use that credentials to sign + * the URL. If the credentials passed to {@link StorageOptions} do not implement {@link + * ServiceAccountSigner} (this is the case, for instance, for Compute Engine credentials and + * Google Cloud SDK credentials) then {@code signUrl} will throw an {@link IllegalStateException} + * unless an implementation of {@link ServiceAccountSigner} is passed using the {@link + * SignUrlOption#signWith(ServiceAccountSigner)} option. + * + *

A service account signer is looked for in the following order: + * + *

    + *
  1. The signer passed with the option {@link SignUrlOption#signWith(ServiceAccountSigner)} + *
  2. The credentials passed to {@link StorageOptions} + *
  3. The default credentials, if no credentials were passed to {@link StorageOptions} + *
+ * + *

Example of creating a signed URL for the blob that is valid for 2 weeks, using the default + * credentials for signing the URL: + * + *

{@code
+   * URL signedUrl = blob.signUrl(14, TimeUnit.DAYS);
+   * }
+ * + *

Example of creating a signed URL for the blob passing the {@link + * SignUrlOption#signWith(ServiceAccountSigner)} option, that will be used to sign the URL: + * + *

{@code
+   * String keyPath = "/path/to/key.json";
+   * URL signedUrl = blob.signUrl(14, TimeUnit.DAYS, SignUrlOption.signWith(
+   *     ServiceAccountCredentials.fromStream(new FileInputStream(keyPath))));
+   * }
+ * + *

Example of creating a signed URL for a blob generation: + * + *

{@code
+   * URL signedUrl = blob.signUrl(1, TimeUnit.HOURS,
+   *     SignUrlOption.withQueryParams(ImmutableMap.of("generation", "1576656755290328")));
+   * }
+ * + * @param duration time until the signed URL expires, expressed in {@code unit}. The finer + * granularity supported is 1 second, finer granularities will be truncated + * @param unit time unit of the {@code duration} parameter + * @param options optional URL signing options + * @return a signed URL for this blob and the specified options + * @throws IllegalStateException if {@link SignUrlOption#signWith(ServiceAccountSigner)} was not + * used and no implementation of {@link ServiceAccountSigner} was provided to {@link + * StorageOptions} + * @throws IllegalArgumentException if {@code SignUrlOption.withMd5()} option is used and {@code + * blobInfo.md5()} is {@code null} + * @throws IllegalArgumentException if {@code SignUrlOption.withContentType()} option is used and + * {@code blobInfo.contentType()} is {@code null} + * @throws SigningException if the attempt to sign the URL failed + * @see Signed-URLs + */ + @TransportCompatibility(Transport.HTTP) + public URL signUrl(long duration, TimeUnit unit, SignUrlOption... options) { + return storage.signUrl(this, duration, unit, options); + } + + /** + * Returns the ACL entry for the specified entity on this blob or {@code null} if not found. + * + *

Example of getting the ACL entry for an entity. + * + *

{@code
+   * Acl acl = blob.getAcl(User.ofAllAuthenticatedUsers());
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Acl getAcl(Entity entity) { + return storage.getAcl(getBlobId(), entity); + } + + /** + * Deletes the ACL entry for the specified entity on this blob. + * + *

Example of deleting the ACL entry for an entity. + * + *

{@code
+   * boolean deleted = blob.deleteAcl(User.ofAllAuthenticatedUsers());
+   * if (deleted) {
+   *   // the acl entry was deleted
+   * } else {
+   *   // the acl entry was not found
+   * }
+   * }
+ * + * @return {@code true} if the ACL was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public boolean deleteAcl(Entity entity) { + return storage.deleteAcl(getBlobId(), entity); + } + + /** + * Creates a new ACL entry on this blob. + * + *

Example of creating a new ACL entry. + * + *

{@code
+   * Acl acl = blob.createAcl(Acl.of(User.ofAllAuthenticatedUsers(), Acl.Role.READER));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Acl createAcl(Acl acl) { + return storage.createAcl(getBlobId(), acl); + } + + /** + * Updates an ACL entry on this blob. + * + *

Example of updating a new ACL entry. + * + *

{@code
+   * Acl acl = blob.updateAcl(Acl.of(User.ofAllAuthenticatedUsers(), Acl.Role.OWNER));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Acl updateAcl(Acl acl) { + return storage.updateAcl(getBlobId(), acl); + } + + /** + * Lists the ACL entries for this blob. + * + *

Example of listing the ACL entries. + * + *

{@code
+   * List acls = blob.listAcls();
+   * for (Acl acl : acls) {
+   *   // do something with ACL entry
+   * }
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public List listAcls() { + return storage.listAcls(getBlobId()); + } + + /** Returns the blob's {@code Storage} object used to issue requests. */ + public Storage getStorage() { + return storage; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + /** + * Returns true if obj instance {@code Blob.toPb()} metadata representation and {@code + * Blob.options} instance of StorageOptions are both equal. + */ + @Override + public final boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || !obj.getClass().equals(Blob.class)) { + return false; + } + Blob other = (Blob) obj; + return super.equals(other) && Objects.equals(options, other.options); + } + + @Override + public final int hashCode() { + return Objects.hash(super.hashCode(), options); + } + + /** + * Drop the held {@link Storage} instance. + * + * @since 2.14.0 + */ + public BlobInfo asBlobInfo() { + return this.toBuilder().infoBuilder.build(); + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + this.storage = options.getService(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUpload.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUpload.java new file mode 100644 index 000000000000..056f665ab632 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUpload.java @@ -0,0 +1,201 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.api.core.InternalExtensionOnly; +import com.google.cloud.storage.BlobAppendableUploadConfig.CloseAction; +import com.google.cloud.storage.Storage.BlobWriteOption; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.WritableByteChannel; +import java.util.concurrent.TimeUnit; + +/** + * Interface representing those methods which can be used to write to and interact with an + * appendable upload. + * + * @see Storage#blobAppendableUpload(BlobInfo, BlobAppendableUploadConfig, BlobWriteOption...) + */ +@BetaApi +@InternalExtensionOnly +public interface BlobAppendableUpload extends BlobWriteSession { + + /** + * Open the {@link AppendableUploadWriteableByteChannel AppendableUploadWriteableByteChannel} for + * this session. + * + *

A session may only be {@code open}ed once. If multiple calls to open are made, an illegal + * state exception will be thrown + * + *

The returned {@code AppendableUploadWriteableByteChannel} can throw IOExceptions from any of + * its usual methods. Any {@link IOException} thrown can have a cause of a {@link + * StorageException}. However, not all {@code IOExceptions} will have {@code StorageException}s. + * + * @throws IOException When creating the {@link AppendableUploadWriteableByteChannel} if an + * unrecoverable underlying IOException occurs it can be rethrown + * @throws IllegalStateException if open is called more than once + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @Override + AppendableUploadWriteableByteChannel open() throws IOException; + + /** + * Return an {@link ApiFuture}{@code } which will represent the state of the object in + * Google Cloud Storage. + * + *

This future will not resolve until: + * + *

    + *
  1. The object is successfully finalized in Google Cloud Storage by calling {@link + * AppendableUploadWriteableByteChannel#finalizeAndClose() + * AppendableUploadWriteableByteChannel#finalizeAndClose()} + *
  2. This session is detached from the upload without finalizing by calling {@link + * AppendableUploadWriteableByteChannel#closeWithoutFinalizing() + * AppendableUploadWriteableByteChannel#closeWithoutFinalizing()} + *
  3. The session is closed by calling {@link AppendableUploadWriteableByteChannel#close() + * AppendableUploadWriteableByteChannel#close()} + *
  4. A terminal failure occurs, the terminal failure will become the exception result + *
+ * + *

NOTICE: Some fields may not be populated unless finalization has completed. + * + *

If a terminal failure is encountered, calling either {@link ApiFuture#get()} or {@link + * ApiFuture#get(long, TimeUnit)} will result in an {@link + * java.util.concurrent.ExecutionException} with the cause. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @Override + ApiFuture getResult(); + + /** + * The {@link WritableByteChannel} returned from {@link BlobAppendableUpload#open()}. + * + *

This interface allows writing bytes to an Appendable Upload, and provides methods to close + * this channel -- optionally finalizing the upload. + * + *

The {@link #write(ByteBuffer)} method of this channel is non-blocking. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @InternalExtensionOnly + interface AppendableUploadWriteableByteChannel extends WritableByteChannel { + + /** + * This method is non-blocking + * + *

Consume as many bytes as can fit in the underlying outbound queue. The size of the + * outbound queue is determined from {@link BlobAppendableUploadConfig#getFlushPolicy()}{@code + * .}{@link FlushPolicy#getMaxPendingBytes() getMaxPendingBytes()}. If the outbound queue is + * full, and can not fit more bytes, this method will return 0. + * + *

If your application needs to empty its ByteBuffer before progressing, use our helper + * method {@link StorageChannelUtils#blockingEmptyTo(ByteBuffer, WritableByteChannel)} like so: + * + *

{@code
+     * try (AppendableUploadWriteableByteChannel channel = session.open()) {
+     *   int written = StorageChannelUtils.blockingEmptyTo(byteBuffer, channel);
+     * }
+     * }
+ * + * @param src The buffer from which bytes are to be retrieved + * @return The number of bytes written, possibly zero + * @throws ClosedChannelException If this channel is closed + * @throws IOException If some other I/O error occurs + */ + @Override + int write(ByteBuffer src) throws IOException; + + /** + * This method is blocking + * + *

Block the invoking thread, waiting until the number of bytes written so far has been + * acknowledged by Google Cloud Storage. + * + * @throws IOException if an error happens while waiting for the flush to complete + * @throws java.io.InterruptedIOException if the current thread is interrupted while waiting + * @since 2.56.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + void flush() throws IOException; + + /** + * This method is blocking + * + *

Finalize the upload and close this instance to further {@link #write(ByteBuffer)}ing. This + * will close any underlying stream and release any releasable resources once out of scope. + * + *

Once this method is called, and returns no more writes to the object will be allowed by + * GCS. + * + *

This method and {@link #close()} are mutually exclusive. If one of the other methods are + * called before this method, this method will be a no-op. + * + * @see Storage#blobAppendableUpload(BlobInfo, BlobAppendableUploadConfig, BlobWriteOption...) + * @see BlobAppendableUploadConfig.CloseAction#FINALIZE_WHEN_CLOSING + * @see BlobAppendableUploadConfig#getCloseAction() + * @see BlobAppendableUploadConfig#withCloseAction(CloseAction) + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + void finalizeAndClose() throws IOException; + + /** + * This method is blocking + * + *

Close this instance to further {@link #write(ByteBuffer)}ing without finalizing the + * upload. This will close any underlying stream and release any releasable resources once out + * of scope. + * + *

This method, {@link AppendableUploadWriteableByteChannel#finalizeAndClose()} and {@link + * AppendableUploadWriteableByteChannel#close()} are mutually exclusive. If one of the other + * methods are called before this method, this method will be a no-op. + * + * @see Storage#blobAppendableUpload(BlobInfo, BlobAppendableUploadConfig, BlobWriteOption...) + * @see BlobAppendableUploadConfig.CloseAction#CLOSE_WITHOUT_FINALIZING + * @see BlobAppendableUploadConfig#getCloseAction() + * @see BlobAppendableUploadConfig#withCloseAction(CloseAction) + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + void closeWithoutFinalizing() throws IOException; + + /** + * This method is blocking + * + *

Close this instance to further {@link #write(ByteBuffer)}ing. + * + *

Whether the upload is finalized during this depends on the {@link + * BlobAppendableUploadConfig#getCloseAction()} provided to create the {@link + * BlobAppendableUpload}. If {@link BlobAppendableUploadConfig#getCloseAction()}{@code == + * }{@link CloseAction#FINALIZE_WHEN_CLOSING}, {@link #finalizeAndClose()} will be called. If + * {@link BlobAppendableUploadConfig#getCloseAction()}{@code == }{@link + * CloseAction#CLOSE_WITHOUT_FINALIZING}, {@link #closeWithoutFinalizing()} will be called. + * + * @see Storage#blobAppendableUpload(BlobInfo, BlobAppendableUploadConfig, BlobWriteOption...) + * @see BlobAppendableUploadConfig#getCloseAction() + * @see BlobAppendableUploadConfig#withCloseAction(CloseAction) + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + void close() throws IOException; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadConfig.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadConfig.java new file mode 100644 index 000000000000..afb3ae109706 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadConfig.java @@ -0,0 +1,318 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */

package com.google.cloud.storage;

import static java.util.Objects.requireNonNull;

import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.api.core.BetaApi;
import com.google.api.core.SettableApiFuture;
import com.google.cloud.storage.BidiUploadState.AppendableUploadState;
import com.google.cloud.storage.BidiUploadState.TakeoverAppendableUploadState;
import com.google.cloud.storage.BlobAppendableUpload.AppendableUploadWriteableByteChannel;
import com.google.cloud.storage.BlobAppendableUploadImpl.AppendableObjectBufferedWritableByteChannel;
import com.google.cloud.storage.FlushPolicy.MinFlushSizeFlushPolicy;
import com.google.cloud.storage.Storage.BlobWriteOption;
import com.google.cloud.storage.TransportCompatibility.Transport;
import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt;
import com.google.cloud.storage.UnifiedOpts.Opts;
import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import com.google.storage.v2.BidiWriteObjectResponse;
import com.google.storage.v2.ServiceConstants.Values;
import java.util.Objects;
import java.util.function.BiFunction;
import javax.annotation.concurrent.Immutable;

/**
 * Configuration parameters for an appendable uploads channel.
 *
 * <p>Instances of this class are immutable and thread safe.
 *
 * @see Storage#blobAppendableUpload(BlobInfo, BlobAppendableUploadConfig, BlobWriteOption...)
 * @since 2.51.0 This new api is in preview and is subject to breaking changes.
 */
@Immutable
@BetaApi
@TransportCompatibility({Transport.GRPC})
public final class BlobAppendableUploadConfig {

  // Shared default instance: min-flush-size policy, close() does not finalize, up to 3 redirects.
  private static final BlobAppendableUploadConfig INSTANCE =
      new BlobAppendableUploadConfig(
          FlushPolicy.minFlushSize(), CloseAction.CLOSE_WITHOUT_FINALIZING, 3);

  // When and how many bytes are flushed to GCS.
  private final FlushPolicy flushPolicy;
  // What AppendableUploadWriteableByteChannel#close() does: finalize or leave open for takeover.
  private final CloseAction closeAction;
  // Max number of server-issued redirects tolerated before the streaming upload fails.
  private final int maxRedirectsAllowed;

  private BlobAppendableUploadConfig(
      FlushPolicy flushPolicy, CloseAction closeAction, int maxRedirectsAllowed) {
    this.flushPolicy = flushPolicy;
    this.closeAction = closeAction;
    this.maxRedirectsAllowed = maxRedirectsAllowed;
  }

  /**
   * The {@link FlushPolicy} which will be used to determine when and how many bytes to flush to
   * GCS.
   *
   * <p>Default: {@link FlushPolicy#minFlushSize()}
   *
   * @see #withFlushPolicy(FlushPolicy)
   * @since 2.51.0 This new api is in preview and is subject to breaking changes.
   */
  @BetaApi
  public FlushPolicy getFlushPolicy() {
    return flushPolicy;
  }

  /**
   * Return an instance with the {@code FlushPolicy} set to be the specified value.
   *
   * <p>Default: {@link FlushPolicy#minFlushSize()}
   *
   * @see #getFlushPolicy()
   * @since 2.51.0 This new api is in preview and is subject to breaking changes.
   */
  @BetaApi
  public BlobAppendableUploadConfig withFlushPolicy(FlushPolicy flushPolicy) {
    requireNonNull(flushPolicy, "flushPolicy must be non null");
    // Copy-on-write: return this when the value is unchanged, a new instance otherwise.
    if (this.flushPolicy.equals(flushPolicy)) {
      return this;
    }
    return new BlobAppendableUploadConfig(flushPolicy, closeAction, maxRedirectsAllowed);
  }

  /**
   * The {@link CloseAction} which will dictate the behavior of {@link
   * AppendableUploadWriteableByteChannel#close()}.
   *
   * <p>Default: {@link CloseAction#CLOSE_WITHOUT_FINALIZING}
   *
   * @see #withCloseAction(CloseAction)
   * @since 2.51.0 This new api is in preview and is subject to breaking changes.
   */
  @BetaApi
  public CloseAction getCloseAction() {
    return closeAction;
  }

  /**
   * Return an instance with the {@code CloseAction} set to be the specified value.
   *
   * <p>Default: {@link CloseAction#CLOSE_WITHOUT_FINALIZING}
   *
   * @see #getCloseAction()
   * @since 2.51.0 This new api is in preview and is subject to breaking changes.
   */
  @BetaApi
  public BlobAppendableUploadConfig withCloseAction(CloseAction closeAction) {
    requireNonNull(closeAction, "closeAction must be non null");
    if (this.closeAction == closeAction) {
      return this;
    }
    return new BlobAppendableUploadConfig(flushPolicy, closeAction, maxRedirectsAllowed);
  }

  /**
   * The {@code maxRedirectsAllowed} set to be the specified value.
   *
   * <p>Default: 3
   *
   * @see #withMaxRedirectsAllowed(int)
   * @since 2.56.0 This new api is in preview and is subject to breaking changes.
   */
  @BetaApi
  int getMaxRedirectsAllowed() {
    return maxRedirectsAllowed;
  }

  /**
   * Return an instance with the {@code maxRedirectsAllowed} set to be the specified value.
   *
   * <p>Default: 3
   *
   * @see #getMaxRedirectsAllowed()
   * @since 2.56.0 This new api is in preview and is subject to breaking changes.
   */
  @BetaApi
  BlobAppendableUploadConfig withMaxRedirectsAllowed(int maxRedirectsAllowed) {
    Preconditions.checkArgument(
        maxRedirectsAllowed >= 0, "maxRedirectsAllowed >= 0 (%s >= 0)", maxRedirectsAllowed);
    if (this.maxRedirectsAllowed == maxRedirectsAllowed) {
      return this;
    }
    return new BlobAppendableUploadConfig(flushPolicy, closeAction, maxRedirectsAllowed);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof BlobAppendableUploadConfig)) {
      return false;
    }
    BlobAppendableUploadConfig that = (BlobAppendableUploadConfig) o;
    return maxRedirectsAllowed == that.maxRedirectsAllowed
        && Objects.equals(flushPolicy, that.flushPolicy)
        && closeAction == that.closeAction;
  }

  @Override
  public int hashCode() {
    return Objects.hash(flushPolicy, closeAction, maxRedirectsAllowed);
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this)
        .add("flushPolicy", flushPolicy)
        .add("closeAction", closeAction)
        .add("maxRedirectsAllowed", maxRedirectsAllowed)
        .toString();
  }

  /**
   * Default instance factory method.
   *
   * <p>The {@link FlushPolicy} of this instance is equivalent to the following:
   *
   * <pre>{@code
   * BlobAppendableUploadConfig.of()
   *   .withFlushPolicy(FlushPolicy.minFlushSize(256 * 1024))
   *   .withCloseAction(CloseAction.CLOSE_WITHOUT_FINALIZING)
   * }</pre>
   *
   * @since 2.51.0 This new api is in preview and is subject to breaking changes.
   * @see FlushPolicy#minFlushSize(int)
   */
  @BetaApi
  public static BlobAppendableUploadConfig of() {
    return INSTANCE;
  }

  /**
   * Enum providing the possible actions which can be taken during the {@link
   * AppendableUploadWriteableByteChannel#close()} call.
   *
   * @see AppendableUploadWriteableByteChannel#close()
   * @see BlobAppendableUploadConfig#withCloseAction(CloseAction)
   * @see BlobAppendableUploadConfig#getCloseAction()
   * @since 2.51.0 This new api is in preview and is subject to breaking changes.
   */
  @BetaApi
  public enum CloseAction {
    /**
     * Designate that when {@link AppendableUploadWriteableByteChannel#close()} is called, the
     * appendable upload should be finalized.
     *
     * @since 2.51.0 This new api is in preview and is subject to breaking changes.
     * @see AppendableUploadWriteableByteChannel#finalizeAndClose()
     */
    @BetaApi
    FINALIZE_WHEN_CLOSING,
    /**
     * Designate that when {@link AppendableUploadWriteableByteChannel#close()} is called, the
     * appendable upload should NOT be finalized, allowing for takeover by another session or
     * client.
     *
     * @since 2.51.0 This new api is in preview and is subject to breaking changes.
     * @see AppendableUploadWriteableByteChannel#closeWithoutFinalizing()
     */
    @BetaApi
    CLOSE_WITHOUT_FINALIZING
  }

  /**
   * Internal factory: binds this config to a gRPC storage instance and target object, wiring the
   * bidi streaming stream, chunk segmenter, and buffered channel that back the returned upload.
   * NOTE(review): generic type parameters below were reconstructed from usage; confirm against the
   * declarations of WritableByteChannelSession and Opts.
   */
  BlobAppendableUpload create(GrpcStorageImpl storage, BlobInfo info, Opts<ObjectTargetOpt> opts) {
    long maxPendingBytes = this.getFlushPolicy().getMaxPendingBytes();
    AppendableUploadState state = storage.getAppendableState(info, opts, maxPendingBytes);
    WritableByteChannelSession<AppendableObjectBufferedWritableByteChannel, BidiWriteObjectResponse>
        build =
            new AppendableSession(
                ApiFutures.immediateFuture(state),
                (start, resultFuture) -> {
                  BidiUploadStreamingStream stream =
                      new BidiUploadStreamingStream(
                          start,
                          storage.storageDataClient.executor,
                          storage.storageClient.bidiWriteObjectCallable(),
                          maxRedirectsAllowed,
                          storage.storageDataClient.retryContextProvider.create());
                  // Segment outgoing bytes into chunks no larger than the proto limit or the
                  // policy's max pending bytes, whichever is smaller.
                  ChunkSegmenter chunkSegmenter =
                      new ChunkSegmenter(
                          Hasher.enabled(),
                          ByteStringStrategy.copy(),
                          Math.min(
                              Values.MAX_WRITE_CHUNK_BYTES_VALUE, Math.toIntExact(maxPendingBytes)),
                          /* blockSize= */ 1);
                  BidiAppendableUnbufferedWritableByteChannel c;
                  if (state instanceof TakeoverAppendableUploadState) {
                    // start the takeover reconciliation; the channel must begin writing at the
                    // server-confirmed offset rather than 0.
                    stream.awaitTakeoverStateReconciliation();
                    c =
                        new BidiAppendableUnbufferedWritableByteChannel(
                            stream,
                            chunkSegmenter,
                            flushInterval(flushPolicy),
                            state.getConfirmedBytes());
                  } else {
                    c =
                        new BidiAppendableUnbufferedWritableByteChannel(
                            stream, chunkSegmenter, flushInterval(flushPolicy), 0);
                  }
                  return new AppendableObjectBufferedWritableByteChannel(
                      flushPolicy.createBufferedChannel(c, /* blocking= */ false),
                      c,
                      this.closeAction == CloseAction.FINALIZE_WHEN_CLOSING);
                },
                state.getResultFuture());

    return new BlobAppendableUploadImpl(
        new DefaultBlobWriteSessionConfig.DecoratedWritableByteChannelSession<>(
            build, BidiBlobWriteSessionConfig.Factory.WRITE_OBJECT_RESPONSE_BLOB_INFO_DECODER));
  }

  // Flush interval for the unbuffered channel: the configured min flush size when the policy is
  // MinFlushSizeFlushPolicy, otherwise the policy's max pending bytes.
  private static long flushInterval(FlushPolicy fp) {
    if (fp instanceof MinFlushSizeFlushPolicy) {
      MinFlushSizeFlushPolicy min = (MinFlushSizeFlushPolicy) fp;
      return min.getMinFlushSize();
    } else {
      return fp.getMaxPendingBytes();
    }
  }

  // Concrete ChannelSession binding the appendable upload state to the buffered channel type.
  private static final class AppendableSession
      extends ChannelSession<
          AppendableUploadState,
          BidiWriteObjectResponse,
          AppendableObjectBufferedWritableByteChannel>
      implements WritableByteChannelSession<
          AppendableObjectBufferedWritableByteChannel, BidiWriteObjectResponse> {
    private AppendableSession(
        ApiFuture<AppendableUploadState> startFuture,
        BiFunction<
                AppendableUploadState,
                SettableApiFuture<BidiWriteObjectResponse>,
                AppendableObjectBufferedWritableByteChannel>
            f,
        SettableApiFuture<BidiWriteObjectResponse> resultFuture) {
      super(startFuture, f, resultFuture);
    }
  }
}
diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadImpl.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadImpl.java
new file mode 100644
index 000000000000..5445473e6ab6
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadImpl.java
@@ -0,0 +1,156 @@
/*
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import com.google.api.core.ApiFuture;
import com.google.api.core.BetaApi;
import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.nio.ByteBuffer;
import java.util.concurrent.locks.ReentrantLock;

/**
 * Default implementation of {@link BlobAppendableUpload}.
 *
 * <p>Delegates the channel lifecycle to an underlying {@link WritableByteChannelSession} and
 * guards against {@link #open()} being invoked more than once.
 */
@BetaApi
final class BlobAppendableUploadImpl implements BlobAppendableUpload {
  // Session producing both the writable channel and the eventual BlobInfo result.
  // NOTE(review): type parameters reconstructed from usage; confirm against
  // WritableByteChannelSession's declaration.
  private final WritableByteChannelSession<AppendableObjectBufferedWritableByteChannel, BlobInfo>
      delegate;
  // Guarded by synchronized(this); true once open() has been called.
  private boolean open;

  BlobAppendableUploadImpl(
      WritableByteChannelSession<AppendableObjectBufferedWritableByteChannel, BlobInfo> delegate) {
    this.delegate = delegate;
    this.open = false;
  }

  /** Opens the upload channel. May only be called once per instance. */
  @Override
  public AppendableUploadWriteableByteChannel open() throws IOException {
    synchronized (this) {
      Preconditions.checkState(!open, "already open");
      open = true;
      return delegate.open();
    }
  }

  /** Future resolving to the object's metadata once the upload session produces a result. */
  @Override
  public ApiFuture<BlobInfo> getResult() {
    return delegate.getResult();
  }

  /**
   * This class extends BufferedWritableByteChannel to handle a special case for Appendable writes,
   * namely closing the stream without finalizing the write. It adds the {@code finalizeWrite}
   * method, which must be manually called to finalize the write. This couldn't be accomplished with
   * the base BufferedWritableByteChannel class because it only has a close() method, which it
   * assumes should finalize the write before the close. It also re-implements
   * SynchronizedBufferedWritableByteChannel to avoid needing to make a decorator class for it and
   * wrap it over this one.
   */
  static final class AppendableObjectBufferedWritableByteChannel
      implements BufferedWritableByteChannel,
          BlobAppendableUpload.AppendableUploadWriteableByteChannel {
    private final BufferedWritableByteChannel buffered;
    private final BidiAppendableUnbufferedWritableByteChannel unbuffered;
    // Whether close() should finalize the object (CloseAction.FINALIZE_WHEN_CLOSING).
    private final boolean finalizeOnClose;
    // Serializes all channel operations; write() uses tryLock so it never blocks.
    private final ReentrantLock lock;

    AppendableObjectBufferedWritableByteChannel(
        BufferedWritableByteChannel buffered,
        BidiAppendableUnbufferedWritableByteChannel unbuffered,
        boolean finalizeOnClose) {
      this.buffered = buffered;
      this.unbuffered = unbuffered;
      this.finalizeOnClose = finalizeOnClose;
      this.lock = new ReentrantLock();
    }

    @Override
    public void flush() throws IOException {
      lock.lock();
      try {
        // Drain the buffer first, then force the unbuffered channel to push to GCS.
        buffered.flush();
        try {
          unbuffered.flush();
        } catch (InterruptedException e) {
          // Restore the interrupt flag and surface as an IOException, per channel conventions.
          Thread.currentThread().interrupt();
          InterruptedIOException interruptedIOException = new InterruptedIOException();
          interruptedIOException.initCause(e);
          throw interruptedIOException;
        }
      } finally {
        lock.unlock();
      }
    }

    @Override
    public int write(ByteBuffer src) throws IOException {
      // Non-blocking: if another operation holds the lock, report zero bytes consumed rather
      // than stalling the caller.
      boolean locked = lock.tryLock();
      if (!locked) {
        return 0;
      }
      try {
        return buffered.write(src);
      } finally {
        lock.unlock();
      }
    }

    @Override
    public boolean isOpen() {
      lock.lock();
      try {
        return buffered.isOpen();
      } finally {
        lock.unlock();
      }
    }

    @Override
    public void finalizeAndClose() throws IOException {
      lock.lock();
      try {
        // No-op when already closed; mark the next write as finalizing before draining the
        // buffer via close().
        if (buffered.isOpen()) {
          unbuffered.nextWriteShouldFinalize();
          buffered.close();
        }
      } finally {
        lock.unlock();
      }
    }

    @Override
    public void closeWithoutFinalizing() throws IOException {
      lock.lock();
      try {
        // No-op when already closed; closing drains the buffer but leaves the object
        // unfinalized so another session may take over.
        if (buffered.isOpen()) {
          buffered.close();
        }
      } finally {
        lock.unlock();
      }
    }

    @Override
    public void close() throws IOException {
      // Dispatch based on the CloseAction configured at creation time.
      if (finalizeOnClose) {
        finalizeAndClose();
      } else {
        closeWithoutFinalizing();
      }
    }
  }
}
diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobId.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobId.java
new file mode 100644
index 000000000000..c65fc7cbd960
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobId.java
@@ -0,0 +1,156 @@
/*
 * Copyright 2015 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.common.base.Preconditions.checkNotNull;

import com.google.api.core.InternalApi;
import com.google.common.base.MoreObjects;
import java.io.Serializable;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Google Storage Object identifier. A {@code BlobId} object includes the name of the containing
 * bucket, the blob's name and possibly the blob's generation. If {@link #getGeneration()} is {@code
 * null} the identifier refers to the latest blob's generation.
+ */ +public final class BlobId implements Serializable { + + private static final long serialVersionUID = 8201580858265557469L; + private static final Pattern gsUtilUriPattern = Pattern.compile("^gs://(.+?)/(.+?)(?:#(\\d+))?$"); + private final String bucket; + private final String name; + private final Long generation; + + private BlobId(String bucket, String name, Long generation) { + this.bucket = bucket; + this.name = name; + this.generation = generation; + } + + /** Returns the name of the bucket containing the blob. */ + public String getBucket() { + return bucket; + } + + /** Returns the name of the blob. */ + public String getName() { + return name; + } + + /** Returns blob's data generation. Used for versioning. */ + public Long getGeneration() { + return generation; + } + + /** + * Returns this blob's Storage url which can be used with gsutil. If {@link #generation} is + * non-null it will not be included in the uri. + */ + public String toGsUtilUri() { + return "gs://" + bucket + "/" + name; + } + + /** + * Returns this blob's Storage url which can be used with gsutil. If {@link #generation} is + * non-null it will be included in the uri + * + * @since 2.22.1 + */ + public String toGsUtilUriWithGeneration() { + return "gs://" + bucket + "/" + name + (generation == null ? 
"" : ("#" + generation)); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("bucket", getBucket()) + .add("name", getName()) + .add("generation", getGeneration()) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(bucket, name, generation); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || !obj.getClass().equals(BlobId.class)) { + return false; + } + BlobId other = (BlobId) obj; + return Objects.equals(bucket, other.bucket) + && Objects.equals(name, other.name) + && Objects.equals(generation, other.generation); + } + + @InternalApi + BlobId withGeneration(long generation) { + return new BlobId(bucket, name, generation); + } + + /** + * Creates a blob identifier. Generation is set to {@code null}. + * + * @param bucket the name of the bucket that contains the blob + * @param name the name of the blob + */ + public static BlobId of(String bucket, String name) { + return new BlobId(checkNotNull(bucket), checkNotNull(name), null); + } + + /** + * Creates a {@code BlobId} object. + * + * @param bucket name of the containing bucket + * @param name blob's name + * @param generation blob's data generation, used for versioning. If {@code null} the identifier + * refers to the latest blob's generation + */ + public static BlobId of(String bucket, String name, Long generation) { + return new BlobId(checkNotNull(bucket), checkNotNull(name), generation); + } + + /** + * Creates a {@code BlobId} object. + * + * @param gsUtilUri the Storage url to create the blob from + */ + public static BlobId fromGsUtilUri(String gsUtilUri) { + Matcher m = gsUtilUriPattern.matcher(gsUtilUri); + if (!m.matches()) { + throw new IllegalArgumentException( + gsUtilUri + + " is not a valid gsutil URI (i.e. 
\"gs://bucket/blob\" or" + + " \"gs://bucket/blob#generation\")"); + } + + String bucket = m.group(1); + String name = m.group(2); + String generationGroup = m.group(3); + Long generation = generationGroup == null ? null : Long.parseLong(generationGroup); + + return BlobId.of(bucket, name, generation); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobInfo.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobInfo.java new file mode 100644 index 000000000000..a083338e745c --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobInfo.java @@ -0,0 +1,2079 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.BackwardCompatibilityUtils.millisOffsetDateTimeCodec; +import static com.google.cloud.storage.Utils.diffMaps; +import static com.google.common.base.MoreObjects.firstNonNull; +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.client.util.Data; +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.cloud.StringEnumType; +import com.google.cloud.StringEnumValue; +import com.google.cloud.storage.Storage.BlobField; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.UnifiedOpts.NamedField; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.io.BaseEncoding; +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.time.OffsetDateTime; +import java.util.AbstractMap; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * Information about an object in Google Cloud Storage. A {@code BlobInfo} object includes the + * {@code BlobId} instance and the set of properties, such as the blob's access control + * configuration, user provided metadata, the CRC32C checksum, etc. Instances of this class are used + * to create a new object in Google Cloud Storage or update the properties of an existing object. To + * deal with existing Storage objects the API includes the {@link Blob} class which extends {@code + * BlobInfo} and declares methods to perform operations on the object. 
Neither {@code BlobInfo} nor + * {@code Blob} instances keep the object content, just the object properties. + * + *

Example of usage {@code BlobInfo} to create an object in Google Cloud Storage: + * + *

{@code
+ * BlobId blobId = BlobId.of(bucketName, blobName);
+ * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build();
+ * Blob blob = storage.create(blobInfo, "Hello, world".getBytes(StandardCharsets.UTF_8));
+ * }
+ * + * @see Concepts and + * Terminology + */ +@TransportCompatibility({Transport.HTTP, Transport.GRPC}) +public class BlobInfo implements Serializable { + + private static final long serialVersionUID = -2490471217826624578L; + private final BlobId blobId; + private final String generatedId; + private final String selfLink; + private final String cacheControl; + private final List acl; + private final Acl.Entity owner; + private final Long size; + private final String etag; + private final String md5; + private final String crc32c; + private final OffsetDateTime customTime; + private final String mediaLink; + + /** + * The getter for this property never returns null, however null awareness is critical for + * encoding + * + * @see JsonConversions#blobInfo() encoder + */ + final Map metadata; + + private final Long metageneration; + private final OffsetDateTime deleteTime; + private final OffsetDateTime updateTime; + private final OffsetDateTime createTime; + private final String contentType; + private final String contentEncoding; + private final String contentDisposition; + private final String contentLanguage; + private final StorageClass storageClass; + private final OffsetDateTime timeStorageClassUpdated; + private final Integer componentCount; + private final boolean isDirectory; + private final CustomerEncryption customerEncryption; + private final String kmsKeyName; + private final Boolean eventBasedHold; + private final Boolean temporaryHold; + private final OffsetDateTime retentionExpirationTime; + private final Retention retention; + private final OffsetDateTime softDeleteTime; + private final OffsetDateTime hardDeleteTime; + private final ObjectContexts contexts; + private final transient ImmutableSet modifiedFields; + + /** This class is meant for internal use only. Users are discouraged from using this class. 
*/ + public static final class ImmutableEmptyMap extends AbstractMap { + + @Override + public Set> entrySet() { + return ImmutableSet.of(); + } + } + + /** + * Objects of this class hold information on the customer-supplied encryption key, if the blob is + * encrypted using such a key. + */ + public static class CustomerEncryption implements Serializable { + + private static final long serialVersionUID = -7427738060808591323L; + + private final String encryptionAlgorithm; + private final String keySha256; + + CustomerEncryption(String encryptionAlgorithm, String keySha256) { + this.encryptionAlgorithm = encryptionAlgorithm; + this.keySha256 = keySha256; + } + + /** Returns the algorithm used to encrypt the blob. */ + public String getEncryptionAlgorithm() { + return encryptionAlgorithm; + } + + /** Returns the SHA256 hash of the encryption key. */ + public String getKeySha256() { + return keySha256; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("encryptionAlgorithm", getEncryptionAlgorithm()) + .add("keySha256", getKeySha256()) + .toString(); + } + + @Override + public final int hashCode() { + return Objects.hash(encryptionAlgorithm, keySha256); + } + + @Override + public final boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof CustomerEncryption)) { + return false; + } + CustomerEncryption that = (CustomerEncryption) o; + return Objects.equals(encryptionAlgorithm, that.encryptionAlgorithm) + && Objects.equals(keySha256, that.keySha256); + } + } + + /** + * Defines a blob's Retention policy. Can only be used on objects in a retention-enabled bucket. + */ + public static final class Retention implements Serializable { + + private static final long serialVersionUID = 5046718464542688444L; + + private Mode mode; + + private OffsetDateTime retainUntilTime; + + /** Returns the retention policy's Mode. Can be Locked or Unlocked. 
*/ + public Mode getMode() { + return mode; + } + + /** Returns what time this object will be retained until, if the mode is Locked. */ + public OffsetDateTime getRetainUntilTime() { + return retainUntilTime; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Retention)) { + return false; + } + Retention that = (Retention) o; + return Objects.equals(mode, that.mode) + && Objects.equals(retainUntilTime, that.retainUntilTime); + } + + @Override + public int hashCode() { + return Objects.hash(mode, retainUntilTime); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("mode", mode) + .add("retainUntilTime", retainUntilTime) + .toString(); + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder().setMode(this.mode).setRetainUntilTime(this.retainUntilTime); + } + + private Retention() {} + + public Retention(Builder builder) { + this.mode = builder.mode; + this.retainUntilTime = builder.retainUntilTime; + } + + public static final class Builder { + private Mode mode; + private OffsetDateTime retainUntilTime; + + /** Sets the retention policy's Mode. Can be Locked or Unlocked. */ + public Builder setMode(Mode mode) { + this.mode = mode; + return this; + } + + /** Sets what time this object will be retained until, if the mode is Locked. 
*/ + public Builder setRetainUntilTime(OffsetDateTime retainUntilTime) { + this.retainUntilTime = retainUntilTime; + return this; + } + + public Retention build() { + return new Retention(this); + } + } + + public static final class Mode extends StringEnumValue { + private static final long serialVersionUID = 1973143582659557184L; + + private Mode(String constant) { + super(constant); + } + + private static final ApiFunction CONSTRUCTOR = Mode::new; + + private static final StringEnumType type = + new StringEnumType<>(Mode.class, CONSTRUCTOR); + + public static final Mode UNLOCKED = type.createAndRegister("Unlocked"); + + public static final Mode LOCKED = type.createAndRegister("Locked"); + + public static Mode valueOfStrict(String constant) { + return type.valueOfStrict(constant); + } + + public static Mode valueOf(String constant) { + return type.valueOf(constant); + } + + public static Mode[] values() { + return type.values(); + } + } + } + + public static final class ObjectContexts implements Serializable { + + private static final long serialVersionUID = -5993852233545224424L; + + private final Map custom; + + private ObjectContexts(Builder builder) { + this.custom = builder.custom; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder().setCustom(this.custom); + } + + /** Returns the map of user-defined object contexts. 
*/ + public Map getCustom() { + return custom; + } + + @Override + public int hashCode() { + return Objects.hash(custom); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + final ObjectContexts other = (ObjectContexts) obj; + return Objects.equals(this.custom, other.custom); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("custom", custom).toString(); + } + + public static final class Builder { + + private Map custom; + + private Builder() {} + + public Builder setCustom(Map custom) { + this.custom = + custom == null ? ImmutableMap.of() : Collections.unmodifiableMap(new HashMap<>(custom)); + return this; + } + + public ObjectContexts build() { + return new ObjectContexts(this); + } + } + } + + /** Represents the payload of a user-defined object context. */ + public static final class ObjectCustomContextPayload implements Serializable { + + private static final long serialVersionUID = 557621132294323214L; + + private final String value; + private final OffsetDateTime createTime; + private final OffsetDateTime updateTime; + + private ObjectCustomContextPayload(Builder builder) { + this.value = builder.value; + this.createTime = builder.createTime; + this.updateTime = builder.updateTime; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder() + .setValue(this.value) + .setCreateTime(this.createTime) + .setUpdateTime(this.updateTime); + } + + public String getValue() { + return value; + } + + public OffsetDateTime getCreateTime() { + return createTime; + } + + public OffsetDateTime getUpdateTime() { + return updateTime; + } + + @Override + public int hashCode() { + return Objects.hash(value, createTime, updateTime); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || 
getClass() != obj.getClass()) { + return false; + } + ObjectCustomContextPayload other = (ObjectCustomContextPayload) obj; + return Objects.equals(value, other.value) + && Objects.equals(createTime, other.createTime) + && Objects.equals(updateTime, other.updateTime); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("value", value) + .add("createTime", createTime) + .add("updateTime", updateTime) + .toString(); + } + + public static final class Builder { + + private String value; + private OffsetDateTime createTime; + private OffsetDateTime updateTime; + + private Builder() {} + + public Builder(String value) { + setValue(value); + } + + public Builder setValue(String value) { + this.value = value; + return this; + } + + public Builder setCreateTime(OffsetDateTime createTime) { + this.createTime = createTime; + return this; + } + + public Builder setUpdateTime(OffsetDateTime updateTime) { + this.updateTime = updateTime; + return this; + } + + public ObjectCustomContextPayload build() { + return new ObjectCustomContextPayload(this); + } + } + } + + /** Builder for {@code BlobInfo}. */ + public abstract static class Builder { + + /** Sets the blob identity. */ + public abstract Builder setBlobId(BlobId blobId); + + abstract Builder setGeneratedId(String generatedId); + + /** + * Sets the blob's data content type. + * + * @see Content-Type + */ + public abstract Builder setContentType(String contentType); + + /** + * Sets the blob's data content disposition. + * + * @see Content-Disposition + */ + public abstract Builder setContentDisposition(String contentDisposition); + + /** + * Sets the blob's data content language. + * + * @see Content-Language + */ + public abstract Builder setContentLanguage(String contentLanguage); + + /** + * Sets the blob's data content encoding. 
+ * + * @see Content-Encoding + */ + public abstract Builder setContentEncoding(String contentEncoding); + + abstract Builder setComponentCount(Integer componentCount); + + /** + * Sets the blob's data cache control. + * + * @see Cache-Control + */ + public abstract Builder setCacheControl(String cacheControl); + + /** + * Sets the blob's access control configuration. + * + * @see + * About Access Control Lists + */ + public abstract Builder setAcl(List acl); + + abstract Builder setOwner(Acl.Entity owner); + + abstract Builder setSize(Long size); + + abstract Builder setEtag(String etag); + + abstract Builder setSelfLink(String selfLink); + + /** + * Sets the MD5 hash of blob's data. MD5 value must be encoded in base64. + * + * @see Hashes and ETags: + * Best Practices + */ + public abstract Builder setMd5(String md5); + + /** + * Sets the MD5 hash of blob's data from hex string. + * + * @see Hashes and ETags: + * Best Practices + * @throws IllegalArgumentException when given an invalid hexadecimal value. + */ + public abstract Builder setMd5FromHexString(String md5HexString); + + /** + * Sets the CRC32C checksum of blob's data as described in RFC 4960, Appendix B; encoded in + * base64 in big-endian order. + * + * @see Hashes and ETags: + * Best Practices + */ + public abstract Builder setCrc32c(String crc32c); + + /** + * Sets the custom time for an object. Once set it can't be unset and only changed to a custom + * datetime in the future. To unset the custom time, you must either perform a rewrite operation + * or upload the data again. + * + *

Example of setting the custom time. + * + *

{@code
+     * String bucketName = "my-unique-bucket";
+     * String blobName = "my-blob-name";
+     * long customTime = 1598423868301L;
+     * BlobInfo blob = BlobInfo.newBuilder(bucketName, blobName).setCustomTime(customTime).build();
+     * }
+ * + * @deprecated Use {@link #setCustomTimeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + public Builder setCustomTime(Long customTime) { + throw new UnsupportedOperationException( + "Override setCustomTime with your own implementation," + + " or use com.google.cloud.storage.Blob."); + } + + /** + * Sets the custom time for an object. Once set it can't be unset and only changed to a custom + * datetime in the future. To unset the custom time, you must either perform a rewrite operation + * or upload the data again. + * + *

Example of setting the custom time. + * + *

{@code
+     * String bucketName = "my-unique-bucket";
+     * String blobName = "my-blob-name";
+     * OffsetDateTime customTime = Instant.ofEpochMilli(1598423868301L).atOffset(ZoneOffset.UTC); // UTC
+     * BlobInfo blob = BlobInfo.newBuilder(bucketName, blobName).setCustomTime(customTime).build();
+     * }
+ */ + public Builder setCustomTimeOffsetDateTime(OffsetDateTime customTime) { + // provide an implementation for source and binary compatibility which we override ourselves + return setCustomTime(millisOffsetDateTimeCodec.decode(customTime)); + } + + /** + * Sets the CRC32C checksum of blob's data as described in RFC 4960, Appendix B; from hex + * string. + * + * @see Hashes and ETags: + * Best Practices + * @throws IllegalArgumentException when given an invalid hexadecimal value. + */ + public abstract Builder setCrc32cFromHexString(String crc32cHexString); + + abstract Builder setMediaLink(String mediaLink); + + /** Sets the blob's storage class. */ + public abstract Builder setStorageClass(StorageClass storageClass); + + /** + * Sets the modification time of an object's storage class. Once set it can't be unset directly, + * the only way is to rewrite the object with the desired storage class. + * + * @deprecated Use {@link #setTimeStorageClassUpdatedOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + public Builder setTimeStorageClassUpdated(Long timeStorageClassUpdated) { + throw new UnsupportedOperationException( + "Override setTimeStorageClassUpdated with your own implementation," + + " or use com.google.cloud.storage.Blob."); + } + + public Builder setTimeStorageClassUpdatedOffsetDateTime( + OffsetDateTime timeStorageClassUpdated) { + // provide an implementation for source and binary compatibility which we override ourselves + return setTimeStorageClassUpdated(millisOffsetDateTimeCodec.decode(timeStorageClassUpdated)); + } + + /** Sets the blob's user provided metadata. 
*/ + public abstract Builder setMetadata(@Nullable Map<@NonNull String, @Nullable String> metadata); + + abstract Builder setMetageneration(Long metageneration); + + /** + * @deprecated Use {@link #setDeleteTimeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + abstract Builder setDeleteTime(Long deleteTime); + + Builder setDeleteTimeOffsetDateTime(OffsetDateTime deleteTime) { + // provide an implementation for source and binary compatibility which we override ourselves + return setDeleteTime(millisOffsetDateTimeCodec.decode(deleteTime)); + } + + /** + * @deprecated Use {@link #setUpdateTimeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + abstract Builder setUpdateTime(Long updateTime); + + Builder setUpdateTimeOffsetDateTime(OffsetDateTime updateTime) { + // provide an implementation for source and binary compatibility which we override ourselves + return setUpdateTime(millisOffsetDateTimeCodec.decode(updateTime)); + } + + /** + * @deprecated Use {@link #setCreateTimeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + abstract Builder setCreateTime(Long createTime); + + Builder setCreateTimeOffsetDateTime(OffsetDateTime createTime) { + // provide an implementation for source and binary compatibility which we override ourselves + return setCreateTime(millisOffsetDateTimeCodec.decode(createTime)); + } + + abstract Builder setIsDirectory(boolean isDirectory); + + abstract Builder setCustomerEncryption(CustomerEncryption customerEncryption); + + /** + * Sets a customer-managed key for server-side encryption of the blob. Note that when a KMS key + * is used to encrypt Cloud Storage object, object resource metadata will store the version of + * the KMS cryptographic. If a {@code Blob} with KMS Key metadata is used to upload a new + * version of the object then the existing kmsKeyName version value can't be used in the upload + * request and the client instead ignores it. + * + *

Example of setting the KMS key name + * + *

{@code
+     * String bucketName = "my-unique-bucket";
+     * String blobName = "my-blob-name";
+     * String kmsKeyName = "projects/project-id/locations/us/keyRings/lab1/cryptoKeys/test-key"
+     * BlobInfo blobInfo = BlobInfo.newBuilder(bucketName, blobName).build();
+     * Blob blob = storage.create(blobInfo, Storage.BlobTargetOption.kmsKeyName(kmsKeyName));
+     * }
+ * + */ + abstract Builder setKmsKeyName(String kmsKeyName); + + /** Sets the blob's event-based hold. */ + @BetaApi + public abstract Builder setEventBasedHold(Boolean eventBasedHold); + + /** Sets the blob's temporary hold. */ + @BetaApi + public abstract Builder setTemporaryHold(Boolean temporaryHold); + + /** + * @deprecated {@link #setRetentionExpirationTimeOffsetDateTime(OffsetDateTime)} + */ + @BetaApi + @Deprecated + abstract Builder setRetentionExpirationTime(Long retentionExpirationTime); + + @BetaApi + Builder setRetentionExpirationTimeOffsetDateTime(OffsetDateTime retentionExpirationTime) { + // provide an implementation for source and binary compatibility which we override ourselves + return setRetentionExpirationTime(millisOffsetDateTimeCodec.decode(retentionExpirationTime)); + } + + abstract Builder setSoftDeleteTime(OffsetDateTime offsetDateTime); + + abstract Builder setHardDeleteTime(OffsetDateTime hardDeleteTime); + + public abstract Builder setRetention(Retention retention); + + public abstract Builder setContexts(ObjectContexts contexts); + + /** Creates a {@code BlobInfo} object. 
*/ + public abstract BlobInfo build(); + + abstract BlobId getBlobId(); + + abstract Builder clearBlobId(); + + abstract Builder clearGeneratedId(); + + abstract Builder clearContentType(); + + abstract Builder clearContentEncoding(); + + abstract Builder clearContentDisposition(); + + abstract Builder clearContentLanguage(); + + abstract Builder clearComponentCount(); + + abstract Builder clearCacheControl(); + + abstract Builder clearAcl(); + + abstract Builder clearOwner(); + + abstract Builder clearSize(); + + abstract Builder clearEtag(); + + abstract Builder clearSelfLink(); + + abstract Builder clearMd5(); + + abstract Builder clearCrc32c(); + + abstract Builder clearCustomTime(); + + abstract Builder clearMediaLink(); + + abstract Builder clearMetadata(); + + abstract Builder clearMetageneration(); + + abstract Builder clearDeleteTime(); + + abstract Builder clearUpdateTime(); + + abstract Builder clearCreateTime(); + + abstract Builder clearIsDirectory(); + + abstract Builder clearCustomerEncryption(); + + abstract Builder clearStorageClass(); + + abstract Builder clearTimeStorageClassUpdated(); + + abstract Builder clearKmsKeyName(); + + abstract Builder clearEventBasedHold(); + + abstract Builder clearTemporaryHold(); + + abstract Builder clearRetentionExpirationTime(); + + abstract Builder clearContexts(); + } + + static final class BuilderImpl extends Builder { + private static final String hexDecimalValues = "0123456789abcdef"; + public static final NamedField NAMED_FIELD_LITERAL_VALUE = NamedField.literal("value"); + private BlobId blobId; + private String generatedId; + private String contentType; + private String contentEncoding; + private String contentDisposition; + private String contentLanguage; + private Integer componentCount; + private String cacheControl; + private List acl; + private Acl.Entity owner; + private Long size; + private String etag; + private String selfLink; + private String md5; + private String crc32c; + private 
OffsetDateTime customTime; + private String mediaLink; + private Map metadata; + private Long metageneration; + private OffsetDateTime deleteTime; + private OffsetDateTime updateTime; + private OffsetDateTime createTime; + private Boolean isDirectory; + private CustomerEncryption customerEncryption; + private StorageClass storageClass; + private OffsetDateTime timeStorageClassUpdated; + private String kmsKeyName; + private Boolean eventBasedHold; + private Boolean temporaryHold; + private OffsetDateTime retentionExpirationTime; + private Retention retention; + private OffsetDateTime softDeleteTime; + private OffsetDateTime hardDeleteTime; + private ObjectContexts contexts; + private final ImmutableSet.Builder modifiedFields = ImmutableSet.builder(); + + BuilderImpl(BlobId blobId) { + this.blobId = blobId; + } + + BuilderImpl(BlobInfo blobInfo) { + blobId = blobInfo.blobId; + generatedId = blobInfo.generatedId; + cacheControl = blobInfo.cacheControl; + contentEncoding = blobInfo.contentEncoding; + contentType = blobInfo.contentType; + contentDisposition = blobInfo.contentDisposition; + contentLanguage = blobInfo.contentLanguage; + componentCount = blobInfo.componentCount; + customerEncryption = blobInfo.customerEncryption; + acl = blobInfo.acl; + owner = blobInfo.owner; + size = blobInfo.size; + etag = blobInfo.etag; + selfLink = blobInfo.selfLink; + md5 = blobInfo.md5; + crc32c = blobInfo.crc32c; + customTime = blobInfo.customTime; + mediaLink = blobInfo.mediaLink; + metadata = blobInfo.metadata; + metageneration = blobInfo.metageneration; + deleteTime = blobInfo.deleteTime; + updateTime = blobInfo.updateTime; + createTime = blobInfo.createTime; + isDirectory = blobInfo.isDirectory; + storageClass = blobInfo.storageClass; + timeStorageClassUpdated = blobInfo.timeStorageClassUpdated; + kmsKeyName = blobInfo.kmsKeyName; + eventBasedHold = blobInfo.eventBasedHold; + temporaryHold = blobInfo.temporaryHold; + retentionExpirationTime = blobInfo.retentionExpirationTime; + 
retention = blobInfo.retention; + softDeleteTime = blobInfo.softDeleteTime; + hardDeleteTime = blobInfo.hardDeleteTime; + contexts = blobInfo.contexts; + } + + @Override + public Builder setBlobId(BlobId blobId) { + checkNotNull(blobId); + if (!Objects.equals(this.blobId, blobId)) { + if (!Objects.equals(this.blobId.getBucket(), blobId.getBucket())) { + modifiedFields.add(BlobField.BUCKET); + } + if (!Objects.equals(this.blobId.getName(), blobId.getName())) { + modifiedFields.add(BlobField.NAME); + } + if (!Objects.equals(this.blobId.getGeneration(), blobId.getGeneration())) { + modifiedFields.add(BlobField.GENERATION); + } + } + this.blobId = blobId; + return this; + } + + @Override + Builder setGeneratedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + @Override + public Builder setContentType(String contentType) { + String tmp = firstNonNull(contentType, Data.nullOf(String.class)); + if (!Objects.equals(this.contentType, tmp)) { + modifiedFields.add(BlobField.CONTENT_TYPE); + } + this.contentType = tmp; + return this; + } + + @Override + public Builder setContentDisposition(String contentDisposition) { + String tmp = firstNonNull(contentDisposition, Data.nullOf(String.class)); + if (!Objects.equals(this.contentDisposition, tmp)) { + modifiedFields.add(BlobField.CONTENT_DISPOSITION); + } + this.contentDisposition = tmp; + return this; + } + + @Override + public Builder setContentLanguage(String contentLanguage) { + String tmp = firstNonNull(contentLanguage, Data.nullOf(String.class)); + if (!Objects.equals(this.contentLanguage, tmp)) { + modifiedFields.add(BlobField.CONTENT_LANGUAGE); + } + this.contentLanguage = tmp; + return this; + } + + @Override + public Builder setContentEncoding(String contentEncoding) { + String tmp = firstNonNull(contentEncoding, Data.nullOf(String.class)); + if (!Objects.equals(this.contentEncoding, tmp)) { + modifiedFields.add(BlobField.CONTENT_ENCODING); + } + this.contentEncoding = tmp; + return this; 
+ } + + @Override + Builder setComponentCount(Integer componentCount) { + this.componentCount = componentCount; + return this; + } + + @Override + public Builder setCacheControl(String cacheControl) { + String tmp = firstNonNull(cacheControl, Data.nullOf(String.class)); + if (!Objects.equals(this.cacheControl, tmp)) { + modifiedFields.add(BlobField.CACHE_CONTROL); + } + this.cacheControl = tmp; + return this; + } + + @Override + public Builder setAcl(List acl) { + if (!Objects.equals(this.acl, acl)) { + modifiedFields.add(BlobField.ACL); + } + if (acl != null) { + if (acl instanceof ImmutableList) { + this.acl = acl; + } else { + this.acl = ImmutableList.copyOf(acl); + } + } else { + this.acl = null; + } + return this; + } + + @Override + Builder setOwner(Acl.Entity owner) { + if (!Objects.equals(this.owner, owner)) { + modifiedFields.add(BlobField.OWNER); + } + this.owner = owner; + return this; + } + + @Override + Builder setSize(Long size) { + this.size = size; + return this; + } + + @Override + Builder setEtag(String etag) { + if (!Objects.equals(this.etag, etag)) { + modifiedFields.add(BlobField.ETAG); + } + this.etag = etag; + return this; + } + + @Override + Builder setSelfLink(String selfLink) { + this.selfLink = selfLink; + return this; + } + + @Override + public Builder setMd5(String md5) { + String tmp = firstNonNull(md5, Data.nullOf(String.class)); + if (!Objects.equals(this.md5, tmp)) { + modifiedFields.add(BlobField.MD5HASH); + } + this.md5 = tmp; + return this; + } + + @Override + public Builder setMd5FromHexString(String md5HexString) { + if (md5HexString == null) { + return this; + } + if (md5HexString.length() % 2 != 0) { + throw new IllegalArgumentException( + "each byte must be represented by 2 valid hexadecimal characters"); + } + String md5HexStringLower = md5HexString.toLowerCase(); + ByteBuffer md5ByteBuffer = ByteBuffer.allocate(md5HexStringLower.length() / 2); + for (int charIndex = 0; charIndex < md5HexStringLower.length(); charIndex += 
2) { + int higherOrderBits = this.hexDecimalValues.indexOf(md5HexStringLower.charAt(charIndex)); + int lowerOrderBits = this.hexDecimalValues.indexOf(md5HexStringLower.charAt(charIndex + 1)); + if (higherOrderBits == -1 || lowerOrderBits == -1) { + throw new IllegalArgumentException( + "each byte must be represented by 2 valid hexadecimal characters"); + } + md5ByteBuffer.put((byte) (higherOrderBits << 4 | lowerOrderBits)); + } + return setMd5(BaseEncoding.base64().encode(md5ByteBuffer.array())); + } + + @Override + public Builder setCrc32c(String crc32c) { + String tmp = firstNonNull(crc32c, Data.nullOf(String.class)); + if (!Objects.equals(this.crc32c, tmp)) { + modifiedFields.add(BlobField.CRC32C); + } + this.crc32c = tmp; + return this; + } + + /** + * @deprecated {@link #setCustomTimeOffsetDateTime(OffsetDateTime)} + */ + @Override + @Deprecated + public Builder setCustomTime(Long customTime) { + return setCustomTimeOffsetDateTime(millisOffsetDateTimeCodec.encode(customTime)); + } + + @Override + public Builder setCustomTimeOffsetDateTime(OffsetDateTime customTime) { + if (!Objects.equals(this.customTime, customTime)) { + modifiedFields.add(BlobField.CUSTOM_TIME); + } + this.customTime = customTime; + return this; + } + + @Override + public Builder setCrc32cFromHexString(String crc32cHexString) { + if (crc32cHexString == null) { + return this; + } + if (crc32cHexString.length() % 2 != 0) { + throw new IllegalArgumentException( + "each byte must be represented by 2 valid hexadecimal characters"); + } + String crc32cHexStringLower = crc32cHexString.toLowerCase(); + ByteBuffer crc32cByteBuffer = ByteBuffer.allocate(crc32cHexStringLower.length() / 2); + for (int charIndex = 0; charIndex < crc32cHexStringLower.length(); charIndex += 2) { + int higherOrderBits = this.hexDecimalValues.indexOf(crc32cHexStringLower.charAt(charIndex)); + int lowerOrderBits = + this.hexDecimalValues.indexOf(crc32cHexStringLower.charAt(charIndex + 1)); + if (higherOrderBits == -1 || 
lowerOrderBits == -1) { + throw new IllegalArgumentException( + "each byte must be represented by 2 valid hexadecimal characters"); + } + crc32cByteBuffer.put((byte) (higherOrderBits << 4 | lowerOrderBits)); + } + return setCrc32c(BaseEncoding.base64().encode(crc32cByteBuffer.array())); + } + + @Override + Builder setMediaLink(String mediaLink) { + this.mediaLink = mediaLink; + return this; + } + + @SuppressWarnings({"UnnecessaryLocalVariable"}) + @Override + public Builder setMetadata(@Nullable Map<@NonNull String, @Nullable String> metadata) { + Map left = this.metadata; + Map right = metadata; + if (!Objects.equals(left, right)) { + diffMaps(BlobField.METADATA, left, right, modifiedFields::add); + if (right != null) { + this.metadata = new HashMap<>(right); + } else { + this.metadata = (Map) Data.nullOf(ImmutableEmptyMap.class); + } + } + return this; + } + + @Override + public Builder setStorageClass(StorageClass storageClass) { + if (!Objects.equals(this.storageClass, storageClass)) { + modifiedFields.add(BlobField.STORAGE_CLASS); + } + this.storageClass = storageClass; + return this; + } + + /** + * @deprecated Use {@link #setTimeStorageClassUpdatedOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + @Override + public Builder setTimeStorageClassUpdated(Long timeStorageClassUpdated) { + return setTimeStorageClassUpdatedOffsetDateTime( + millisOffsetDateTimeCodec.encode(timeStorageClassUpdated)); + } + + @Override + public Builder setTimeStorageClassUpdatedOffsetDateTime( + OffsetDateTime timeStorageClassUpdated) { + if (!Objects.equals(this.timeStorageClassUpdated, timeStorageClassUpdated)) { + modifiedFields.add(BlobField.TIME_STORAGE_CLASS_UPDATED); + } + this.timeStorageClassUpdated = timeStorageClassUpdated; + return this; + } + + @Override + Builder setMetageneration(Long metageneration) { + this.metageneration = metageneration; + return this; + } + + /** + * @deprecated Use {@link #setDeleteTimeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + 
@Override + Builder setDeleteTime(Long deleteTime) { + return setDeleteTimeOffsetDateTime(millisOffsetDateTimeCodec.encode(deleteTime)); + } + + @Override + Builder setDeleteTimeOffsetDateTime(OffsetDateTime deleteTime) { + if (!Objects.equals(this.deleteTime, deleteTime)) { + modifiedFields.add(BlobField.TIME_DELETED); + } + this.deleteTime = deleteTime; + return this; + } + + /** + * @deprecated Use {@link #setUpdateTimeOffsetDateTime(OffsetDateTime)} + */ + @Override + Builder setUpdateTime(Long updateTime) { + return setUpdateTimeOffsetDateTime(millisOffsetDateTimeCodec.encode(updateTime)); + } + + @Override + Builder setUpdateTimeOffsetDateTime(OffsetDateTime updateTime) { + if (!Objects.equals(this.updateTime, updateTime)) { + modifiedFields.add(BlobField.UPDATED); + } + this.updateTime = updateTime; + return this; + } + + /** + * @deprecated Use {@link #setCreateTimeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + @Override + Builder setCreateTime(Long createTime) { + return setCreateTimeOffsetDateTime(millisOffsetDateTimeCodec.encode(createTime)); + } + + @Override + Builder setCreateTimeOffsetDateTime(OffsetDateTime createTime) { + if (!Objects.equals(this.createTime, createTime)) { + modifiedFields.add(BlobField.TIME_CREATED); + } + this.createTime = createTime; + return this; + } + + @Override + Builder setIsDirectory(boolean isDirectory) { + this.isDirectory = isDirectory; + return this; + } + + @Override + Builder setCustomerEncryption(CustomerEncryption customerEncryption) { + if (!Objects.equals(this.customerEncryption, customerEncryption)) { + modifiedFields.add(BlobField.CUSTOMER_ENCRYPTION); + } + this.customerEncryption = customerEncryption; + return this; + } + + @Override + Builder setKmsKeyName(String kmsKeyName) { + if (!Objects.equals(this.kmsKeyName, kmsKeyName)) { + modifiedFields.add(BlobField.KMS_KEY_NAME); + } + this.kmsKeyName = kmsKeyName; + return this; + } + + @Override + public Builder setEventBasedHold(Boolean eventBasedHold) 
{ + if (!Objects.equals(this.eventBasedHold, eventBasedHold)) { + modifiedFields.add(BlobField.EVENT_BASED_HOLD); + } + this.eventBasedHold = eventBasedHold; + return this; + } + + @Override + public Builder setTemporaryHold(Boolean temporaryHold) { + if (!Objects.equals(this.temporaryHold, temporaryHold)) { + modifiedFields.add(BlobField.TEMPORARY_HOLD); + } + this.temporaryHold = temporaryHold; + return this; + } + + /** + * @deprecated {@link #setRetentionExpirationTimeOffsetDateTime(OffsetDateTime)} + */ + @Override + @Deprecated + Builder setRetentionExpirationTime(Long retentionExpirationTime) { + return setRetentionExpirationTimeOffsetDateTime( + millisOffsetDateTimeCodec.encode(retentionExpirationTime)); + } + + @Override + Builder setRetentionExpirationTimeOffsetDateTime(OffsetDateTime retentionExpirationTime) { + if (!Objects.equals(this.retentionExpirationTime, retentionExpirationTime)) { + modifiedFields.add(BlobField.RETENTION_EXPIRATION_TIME); + } + this.retentionExpirationTime = retentionExpirationTime; + return this; + } + + @Override + Builder setSoftDeleteTime(OffsetDateTime softDeleteTime) { + if (!Objects.equals(this.softDeleteTime, softDeleteTime)) { + modifiedFields.add(BlobField.SOFT_DELETE_TIME); + } + this.softDeleteTime = softDeleteTime; + return this; + } + + @Override + Builder setHardDeleteTime(OffsetDateTime hardDeleteTime) { + if (!Objects.equals(this.hardDeleteTime, hardDeleteTime)) { + modifiedFields.add(BlobField.HARD_DELETE_TIME); + } + this.hardDeleteTime = hardDeleteTime; + return this; + } + + @Override + public Builder setRetention(Retention retention) { + // todo: b/308194853 + modifiedFields.add(BlobField.RETENTION); + this.retention = retention; + return this; + } + + @Override + public Builder setContexts(ObjectContexts contexts) { + // Maps.difference uses object equality to determine if a value is the same. We don't care + // about the timestamps when determining if a value needs to be patched. 
Create a new map + // where we remove the timestamps so equals is usable. + Map left = + this.contexts == null ? null : this.contexts.getCustom(); + Map right = + contexts == null ? null : contexts.getCustom(); + if (!Objects.equals(left, right)) { + if (right != null) { + diffMaps( + NamedField.nested(BlobField.OBJECT_CONTEXTS, NamedField.literal("custom")), + left, + right, + modifiedFields::add); + this.contexts = contexts; + } else { + modifiedFields.add(BlobField.OBJECT_CONTEXTS); + this.contexts = null; + } + } + return this; + } + + @Override + public BlobInfo build() { + checkNotNull(blobId); + return new BlobInfo(this); + } + + @Override + BlobId getBlobId() { + return blobId; + } + + @Override + Builder clearBlobId() { + this.blobId = null; + return this; + } + + @Override + Builder clearGeneratedId() { + this.generatedId = null; + return this; + } + + @Override + Builder clearContentType() { + this.contentType = null; + return this; + } + + @Override + Builder clearContentEncoding() { + this.contentEncoding = null; + return this; + } + + @Override + Builder clearContentDisposition() { + this.contentDisposition = null; + return this; + } + + @Override + Builder clearContentLanguage() { + this.contentLanguage = null; + return this; + } + + @Override + Builder clearComponentCount() { + this.componentCount = null; + return this; + } + + @Override + Builder clearCacheControl() { + this.cacheControl = null; + return this; + } + + @Override + Builder clearAcl() { + this.acl = null; + return this; + } + + @Override + Builder clearOwner() { + this.owner = null; + return this; + } + + @Override + Builder clearSize() { + this.size = null; + return this; + } + + @Override + Builder clearEtag() { + this.etag = null; + return this; + } + + @Override + Builder clearSelfLink() { + this.selfLink = null; + return this; + } + + @Override + Builder clearMd5() { + this.md5 = null; + return this; + } + + @Override + Builder clearCrc32c() { + this.crc32c = null; + return this; 
+ } + + @Override + Builder clearCustomTime() { + this.customTime = null; + return this; + } + + @Override + Builder clearMediaLink() { + this.mediaLink = null; + return this; + } + + @Override + Builder clearMetadata() { + this.metadata = null; + return this; + } + + @Override + Builder clearMetageneration() { + this.metageneration = null; + return this; + } + + @Override + Builder clearDeleteTime() { + this.deleteTime = null; + return this; + } + + @Override + Builder clearUpdateTime() { + this.updateTime = null; + return this; + } + + @Override + Builder clearCreateTime() { + this.createTime = null; + return this; + } + + @Override + Builder clearIsDirectory() { + this.isDirectory = null; + return this; + } + + @Override + Builder clearCustomerEncryption() { + this.customerEncryption = null; + return this; + } + + @Override + Builder clearStorageClass() { + this.storageClass = null; + return this; + } + + @Override + Builder clearTimeStorageClassUpdated() { + this.timeStorageClassUpdated = null; + return this; + } + + @Override + Builder clearKmsKeyName() { + this.kmsKeyName = null; + return this; + } + + @Override + Builder clearEventBasedHold() { + this.eventBasedHold = null; + return this; + } + + @Override + Builder clearTemporaryHold() { + this.temporaryHold = null; + return this; + } + + @Override + Builder clearRetentionExpirationTime() { + this.retentionExpirationTime = null; + return this; + } + + @Override + Builder clearContexts() { + this.contexts = null; + return this; + } + } + + BlobInfo(BuilderImpl builder) { + blobId = builder.blobId; + generatedId = builder.generatedId; + cacheControl = builder.cacheControl; + contentEncoding = builder.contentEncoding; + contentType = builder.contentType; + contentDisposition = builder.contentDisposition; + contentLanguage = builder.contentLanguage; + componentCount = builder.componentCount; + customerEncryption = builder.customerEncryption; + acl = builder.acl; + owner = builder.owner; + size = builder.size; + 
etag = builder.etag; + selfLink = builder.selfLink; + md5 = builder.md5; + crc32c = builder.crc32c; + customTime = builder.customTime; + mediaLink = builder.mediaLink; + metadata = builder.metadata; + metageneration = builder.metageneration; + deleteTime = builder.deleteTime; + updateTime = builder.updateTime; + createTime = builder.createTime; + isDirectory = firstNonNull(builder.isDirectory, Boolean.FALSE); + storageClass = builder.storageClass; + timeStorageClassUpdated = builder.timeStorageClassUpdated; + kmsKeyName = builder.kmsKeyName; + eventBasedHold = builder.eventBasedHold; + temporaryHold = builder.temporaryHold; + retentionExpirationTime = builder.retentionExpirationTime; + retention = builder.retention; + softDeleteTime = builder.softDeleteTime; + hardDeleteTime = builder.hardDeleteTime; + contexts = builder.contexts; + modifiedFields = builder.modifiedFields.build(); + } + + /** Returns the blob's identity. */ + public BlobId getBlobId() { + return blobId; + } + + /** Returns the name of the containing bucket. */ + public String getBucket() { + return getBlobId().getBucket(); + } + + /** Returns the service-generated id for the blob. */ + public String getGeneratedId() { + return generatedId; + } + + /** Returns the blob's name. */ + public String getName() { + return getBlobId().getName(); + } + + /** + * Returns the blob's data cache control. + * + * @see Cache-Control + */ + public String getCacheControl() { + return Data.isNull(cacheControl) ? null : cacheControl; + } + + /** + * Returns the blob's access control configuration. + * + * @see + * About Access Control Lists + */ + public List getAcl() { + return acl; + } + + /** Returns the blob's owner. This will always be the uploader of the blob. */ + public Acl.Entity getOwner() { + return owner; + } + + /** + * Returns the content length of the data in bytes. + * + * @see Content-Length + */ + public Long getSize() { + return size; + } + + /** + * Returns the blob's data content type. 
+ * + * @see Content-Type + */ + public String getContentType() { + return Data.isNull(contentType) ? null : contentType; + } + + /** + * Returns the blob's data content encoding. + * + * @see Content-Encoding + */ + public String getContentEncoding() { + return Data.isNull(contentEncoding) ? null : contentEncoding; + } + + /** + * Returns the blob's data content disposition. + * + * @see Content-Disposition + */ + public String getContentDisposition() { + return Data.isNull(contentDisposition) ? null : contentDisposition; + } + + /** + * Returns the blob's data content language. + * + * @see Content-Language + */ + public String getContentLanguage() { + return Data.isNull(contentLanguage) ? null : contentLanguage; + } + + /** + * Returns the number of components that make up this blob. Components are accumulated through the + * {@link Storage#compose(Storage.ComposeRequest)} operation and are limited to a count of 1024, + * counting 1 for each non-composite component blob and componentCount for each composite + * component blob. This value is set only for composite blobs. + * + * @see Component Count + * Property + */ + public Integer getComponentCount() { + return componentCount; + } + + /** + * Returns HTTP 1.1 Entity tag for the blob. + * + * @see Entity Tags + */ + public String getEtag() { + return etag; + } + + /** Returns the URI of this blob as a string. */ + public String getSelfLink() { + return selfLink; + } + + /** + * Returns the MD5 hash of blob's data encoded in base64. + * + * @see Hashes and ETags: + * Best Practices + */ + public String getMd5() { + return Data.isNull(md5) ? null : md5; + } + + /** + * Returns the MD5 hash of blob's data decoded to string. 
+ * + * @see Hashes and ETags: + * Best Practices + */ + public String getMd5ToHexString() { + if (md5 == null) { + return null; + } + byte[] decodedMd5 = BaseEncoding.base64().decode(md5); + StringBuilder stringBuilder = new StringBuilder(); + for (byte b : decodedMd5) { + stringBuilder.append(String.format(Locale.US, "%02x", b & 0xff)); + } + return stringBuilder.toString(); + } + + /** + * Returns the CRC32C checksum of blob's data as described in RFC 4960, Appendix B; encoded in + * base64 in big-endian order. + * + * @see Hashes and ETags: + * Best Practices + */ + public String getCrc32c() { + return Data.isNull(crc32c) ? null : crc32c; + } + + /** + * Returns the CRC32C checksum of blob's data as described in RFC 4960, Appendix B; decoded to + * string. + * + * @see Hashes and ETags: + * Best Practices + */ + public String getCrc32cToHexString() { + if (crc32c == null) { + return null; + } + byte[] decodeCrc32c = BaseEncoding.base64().decode(crc32c); + StringBuilder stringBuilder = new StringBuilder(); + for (byte b : decodeCrc32c) { + stringBuilder.append(String.format(Locale.US, "%02x", b & 0xff)); + } + return stringBuilder.toString(); + } + + /** Returns the blob's media download link. */ + public String getMediaLink() { + return mediaLink; + } + + /** Returns blob's user provided metadata. */ + @Nullable + public Map<@NonNull String, @Nullable String> getMetadata() { + return metadata == null || Data.isNull(metadata) ? null : Collections.unmodifiableMap(metadata); + } + + /** Returns blob's data generation. Used for blob versioning. */ + public Long getGeneration() { + return getBlobId().getGeneration(); + } + + /** + * Returns blob's metageneration. Used for preconditions and for detecting changes in metadata. A + * metageneration number is only meaningful in the context of a particular generation of a + * particular blob. 
+ */ + public Long getMetageneration() { + return metageneration; + } + + /** + * Returns the deletion time of the blob expressed as the number of milliseconds since the Unix + * epoch. + * + * @deprecated Use {@link #getDeleteTimeOffsetDateTime()} + */ + @Deprecated + public Long getDeleteTime() { + return millisOffsetDateTimeCodec.decode(deleteTime); + } + + /** Returns the deletion time of the blob. */ + public OffsetDateTime getDeleteTimeOffsetDateTime() { + return deleteTime; + } + + /** + * Returns the last modification time of the blob's metadata expressed as the number of + * milliseconds since the Unix epoch. + * + * @deprecated Use {@link #getUpdateTimeOffsetDateTime()} + */ + @Deprecated + public Long getUpdateTime() { + return millisOffsetDateTimeCodec.decode(updateTime); + } + + /** Returns the last modification time of the blob's metadata. */ + public OffsetDateTime getUpdateTimeOffsetDateTime() { + return updateTime; + } + + /** + * Returns the creation time of the blob expressed as the number of milliseconds since the Unix + * epoch. + * + * @deprecated Use {@link #getCreateTimeOffsetDateTime()} + */ + @Deprecated + public Long getCreateTime() { + return millisOffsetDateTimeCodec.decode(createTime); + } + + /** Returns the creation time of the blob. */ + public OffsetDateTime getCreateTimeOffsetDateTime() { + return createTime; + } + + /** + * Returns the custom time specified by the user for an object. + * + * @deprecated Use {@link #getCustomTimeOffsetDateTime()} + */ + @Deprecated + public Long getCustomTime() { + return millisOffsetDateTimeCodec.decode(customTime); + } + + /** Returns the custom time specified by the user for an object. */ + public OffsetDateTime getCustomTimeOffsetDateTime() { + return customTime; + } + + /** + * Returns {@code true} if the current blob represents a directory. 
This can only happen if the + * blob is returned by {@link Storage#list(String, Storage.BlobListOption...)} when the {@link + * Storage.BlobListOption#currentDirectory()} option is used. When this is the case only {@link + * #getBlobId()} and {@link #getSize()} are set for the current blob: {@link BlobId#getName()} + * ends with the '/' character, {@link BlobId#getGeneration()} returns {@code null} and {@link + * #getSize()} is {@code 0}. + */ + public boolean isDirectory() { + return isDirectory; + } + + /** + * Returns information on the customer-supplied encryption key, if the blob is encrypted using + * such a key. + */ + public CustomerEncryption getCustomerEncryption() { + return customerEncryption; + } + + /** Returns the storage class of the blob. */ + public StorageClass getStorageClass() { + return storageClass; + } + + /** + * Returns the time that the object's storage class was last changed or the time of the object + * creation. + * + * @deprecated Use {@link #getTimeStorageClassUpdatedOffsetDateTime()} + */ + @Deprecated + public Long getTimeStorageClassUpdated() { + return millisOffsetDateTimeCodec.decode(timeStorageClassUpdated); + } + + /** + * Returns the time that the object's storage class was last changed or the time of the object + * creation. + */ + public OffsetDateTime getTimeStorageClassUpdatedOffsetDateTime() { + return timeStorageClassUpdated; + } + + /** Returns the Cloud KMS key used to encrypt the blob, if any. */ + public String getKmsKeyName() { + return kmsKeyName; + } + + /** + * Returns a {@code Boolean} with either {@code true}, {@code null} and in certain cases {@code + * false}. + * + *

Case 1: {@code true} the field {@link + * com.google.cloud.storage.Storage.BlobField#EVENT_BASED_HOLD} is selected in a {@link + * Storage#get(BlobId, Storage.BlobGetOption...)} and event-based hold for the blob is enabled. + * + *

Case 2.1: {@code null} the field {@link + * com.google.cloud.storage.Storage.BlobField#EVENT_BASED_HOLD} is selected in a {@link + * Storage#get(BlobId, Storage.BlobGetOption...)}, but event-based hold for the blob is not + * enabled. This case can be considered implicitly {@code false}. + * + *

Case 2.2: {@code null} the field {@link + * com.google.cloud.storage.Storage.BlobField#EVENT_BASED_HOLD} is not selected in a {@link + * Storage#get(BlobId, Storage.BlobGetOption...)}, and the state for this field is unknown. + * + *

Case 3: {@code false} event-based hold is explicitly set to false using a {@link + * Builder#setEventBasedHold(Boolean)} client side for a follow-up request e.g. {@link + * Storage#update(BlobInfo, Storage.BlobTargetOption...)} in which case the value of event-based + * hold will remain {@code false} for the given instance. + */ + @BetaApi + public Boolean getEventBasedHold() { + return Data.isNull(eventBasedHold) ? null : eventBasedHold; + } + + /** + * Returns a {@code Boolean} with either {@code true}, {@code null} and in certain cases {@code + * false}. + * + *

Case 1: {@code true} the field {@link + * com.google.cloud.storage.Storage.BlobField#TEMPORARY_HOLD} is selected in a {@link + * Storage#get(BlobId, Storage.BlobGetOption...)} and temporary hold for the blob is enabled. + * + *

Case 2.1: {@code null} the field {@link + * com.google.cloud.storage.Storage.BlobField#TEMPORARY_HOLD} is selected in a {@link + * Storage#get(BlobId, Storage.BlobGetOption...)}, but temporary hold for the blob is not enabled. + * This case can be considered implicitly {@code false}. + * + *

Case 2.2: {@code null} the field {@link + * com.google.cloud.storage.Storage.BlobField#TEMPORARY_HOLD} is not selected in a {@link + * Storage#get(BlobId, Storage.BlobGetOption...)}, and the state for this field is unknown. + * + *

Case 3: {@code false} temporary hold is explicitly set to false using a {@link + * Builder#setTemporaryHold(Boolean)} client side for a follow-up request e.g. {@link + * Storage#update(BlobInfo, Storage.BlobTargetOption...)} in which case the value of temporary + * hold will remain {@code false} for the given instance. + */ + @BetaApi + public Boolean getTemporaryHold() { + return Data.isNull(temporaryHold) ? null : temporaryHold; + } + + /** + * Returns the retention expiration time of the blob as {@code Long}, if a retention period is + * defined. If retention period is not defined this value returns {@code null} + * + * @deprecated Use {@link #getRetentionExpirationTimeOffsetDateTime()} + */ + @BetaApi + @Deprecated + public Long getRetentionExpirationTime() { + return Data.isNull(retentionExpirationTime) + ? null + : millisOffsetDateTimeCodec.decode(retentionExpirationTime); + } + + /** + * Returns the retention expiration time of the blob, if a retention period is defined. If + * retention period is not defined this value returns {@code null} + */ + @BetaApi + public OffsetDateTime getRetentionExpirationTimeOffsetDateTime() { + return retentionExpirationTime; + } + + /** If this object has been soft-deleted, returns the time it was soft-deleted. */ + public OffsetDateTime getSoftDeleteTime() { + return softDeleteTime; + } + + /** + * If this object has been soft-deleted, returns the time at which it will be permanently deleted. + */ + public OffsetDateTime getHardDeleteTime() { + return hardDeleteTime; + } + + /** Returns the object's Retention policy. */ + public Retention getRetention() { + return retention; + } + + public ObjectContexts getContexts() { + return contexts; + } + + /** Returns a builder for the current blob. 
*/ + public Builder toBuilder() { + return new BuilderImpl(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("bucket", getBucket()) + .add("name", getName()) + .add("generation", getGeneration()) + .add("size", getSize()) + .add("content-type", getContentType()) + .add("metadata", getMetadata()) + .add("contexts", getContexts()) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash( + blobId, + generatedId, + selfLink, + cacheControl, + acl, + owner, + size, + etag, + md5, + crc32c, + customTime, + mediaLink, + metadata, + metageneration, + deleteTime, + updateTime, + createTime, + contentType, + contentEncoding, + contentDisposition, + contentLanguage, + storageClass, + timeStorageClassUpdated, + componentCount, + isDirectory, + customerEncryption, + kmsKeyName, + eventBasedHold, + temporaryHold, + retention, + retentionExpirationTime, + softDeleteTime, + hardDeleteTime, + contexts); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof BlobInfo)) { + return false; + } + BlobInfo blobInfo = (BlobInfo) o; + return isDirectory == blobInfo.isDirectory + && Objects.equals(blobId, blobInfo.blobId) + && Objects.equals(generatedId, blobInfo.generatedId) + && Objects.equals(selfLink, blobInfo.selfLink) + && Objects.equals(cacheControl, blobInfo.cacheControl) + && Objects.equals(acl, blobInfo.acl) + && Objects.equals(owner, blobInfo.owner) + && Objects.equals(size, blobInfo.size) + && Objects.equals(etag, blobInfo.etag) + && Objects.equals(md5, blobInfo.md5) + && Objects.equals(crc32c, blobInfo.crc32c) + && Objects.equals(customTime, blobInfo.customTime) + && Objects.equals(mediaLink, blobInfo.mediaLink) + && Objects.equals(metadata, blobInfo.metadata) + && Objects.equals(metageneration, blobInfo.metageneration) + && Objects.equals(deleteTime, blobInfo.deleteTime) + && Objects.equals(updateTime, blobInfo.updateTime) + && 
Objects.equals(createTime, blobInfo.createTime) + && Objects.equals(contentType, blobInfo.contentType) + && Objects.equals(contentEncoding, blobInfo.contentEncoding) + && Objects.equals(contentDisposition, blobInfo.contentDisposition) + && Objects.equals(contentLanguage, blobInfo.contentLanguage) + && Objects.equals(storageClass, blobInfo.storageClass) + && Objects.equals(timeStorageClassUpdated, blobInfo.timeStorageClassUpdated) + && Objects.equals(componentCount, blobInfo.componentCount) + && Objects.equals(customerEncryption, blobInfo.customerEncryption) + && Objects.equals(kmsKeyName, blobInfo.kmsKeyName) + && Objects.equals(eventBasedHold, blobInfo.eventBasedHold) + && Objects.equals(temporaryHold, blobInfo.temporaryHold) + && Objects.equals(retentionExpirationTime, blobInfo.retentionExpirationTime) + && Objects.equals(retention, blobInfo.retention) + && Objects.equals(softDeleteTime, blobInfo.softDeleteTime) + && Objects.equals(hardDeleteTime, blobInfo.hardDeleteTime) + && Objects.equals(contexts, blobInfo.contexts); + } + + ImmutableSet getModifiedFields() { + return modifiedFields; + } + + /** + * Attach this instance to an instance of {@link Storage} thereby allowing RPCs to be performed + * using the methods from the resulting {@link Blob} + */ + Blob asBlob(Storage storage) { + return new Blob(storage, new BuilderImpl(this)); + } + + /** Returns a {@code BlobInfo} builder where blob identity is set using the provided values. */ + public static Builder newBuilder(BucketInfo bucketInfo, String name) { + return newBuilder(bucketInfo.getName(), name); + } + + /** Returns a {@code BlobInfo} builder where blob identity is set using the provided values. */ + public static Builder newBuilder(String bucket, String name) { + return newBuilder(BlobId.of(bucket, name)); + } + + /** Returns a {@code BlobInfo} builder where blob identity is set using the provided values. 
*/ + public static Builder newBuilder(BucketInfo bucketInfo, String name, Long generation) { + return newBuilder(bucketInfo.getName(), name, generation); + } + + /** Returns a {@code BlobInfo} builder where blob identity is set using the provided values. */ + public static Builder newBuilder(String bucket, String name, Long generation) { + return newBuilder(BlobId.of(bucket, name, generation)); + } + + /** Returns a {@code BlobInfo} builder where blob identity is set using the provided value. */ + public static Builder newBuilder(BlobId blobId) { + return new BuilderImpl(blobId); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannel.java new file mode 100644 index 000000000000..d54c79ce1422 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannel.java @@ -0,0 +1,166 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.ReadChannel; +import com.google.cloud.RestorableState; +import com.google.cloud.storage.BlobReadChannelV2.BlobReadChannelContext; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import java.io.IOException; +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.Objects; + +/** + * Hierarchy retained for {@link RestorableState#restore()}. Will be removed in next major version! + */ +@Deprecated +class BlobReadChannel implements ReadChannel { + + private BlobReadChannel() {} + + @Override + public RestorableState capture() { + throw new IllegalStateException("Illegal method access"); + } + + @Override + public boolean isOpen() { + throw new IllegalStateException("Illegal method access"); + } + + @Override + public void close() { + throw new IllegalStateException("Illegal method access"); + } + + @Override + public void seek(long position) throws IOException { + throw new IllegalStateException("Illegal method access"); + } + + @Override + public void setChunkSize(int chunkSize) { + throw new IllegalStateException("Illegal method access"); + } + + @Override + public int read(ByteBuffer byteBuffer) throws IOException { + throw new IllegalStateException("Illegal method access"); + } + + @Override + public ReadChannel limit(long limit) { + throw new IllegalStateException("Illegal method access"); + } + + @Override + public long limit() { + throw new IllegalStateException("Illegal method access"); + } + + /** Retained for binary compatibility. Will be removed at next major version! 
*/ + @SuppressWarnings("unused") + @Deprecated + @VisibleForTesting + static class StateImpl implements RestorableState, Serializable { + + private static final long serialVersionUID = 7784852608213694645L; + + // the following fields are dangling as they are only set via object deserialization, and only + // read in #restore() + private HttpStorageOptions serviceOptions; + private BlobId blob; + private Map requestOptions; + private String lastEtag; + private long position; + private boolean isOpen; + private boolean endOfStream; + private int chunkSize; + private long limit; + + private StateImpl() {} + + @Override + public ReadChannel restore() { + StorageObject encode = Conversions.json().blobId().encode(blob); + BlobReadChannelV2 channel = + new BlobReadChannelV2( + encode, requestOptions, BlobReadChannelContext.from(serviceOptions)); + try { + channel.seek(position); + channel.limit(limit); + channel.setChunkSize(chunkSize); + if (!isOpen) { + channel.close(); + } + } catch (IOException e) { + throw StorageException.coalesce(e); + } + return channel; + } + + @Override + public int hashCode() { + return Objects.hash( + serviceOptions, + blob, + requestOptions, + lastEtag, + position, + isOpen, + endOfStream, + chunkSize, + limit); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (!(obj instanceof StateImpl)) { + return false; + } + final StateImpl other = (StateImpl) obj; + return Objects.equals(this.serviceOptions, other.serviceOptions) + && Objects.equals(this.blob, other.blob) + && Objects.equals(this.requestOptions, other.requestOptions) + && Objects.equals(this.lastEtag, other.lastEtag) + && this.position == other.position + && this.isOpen == other.isOpen + && this.endOfStream == other.endOfStream + && this.chunkSize == other.chunkSize + && this.limit == other.limit; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("blob", blob) + .add("position", position) 
+ .add("isOpen", isOpen) + .add("endOfStream", endOfStream) + .add("limit", limit) + .toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannelV2.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannelV2.java new file mode 100644 index 000000000000..8df640b7490a --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannelV2.java @@ -0,0 +1,244 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.services.storage.Storage; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.ReadChannel; +import com.google.cloud.RestorableState; +import com.google.cloud.storage.ApiaryUnbufferedReadableByteChannel.ApiaryReadRequest; +import com.google.cloud.storage.HttpDownloadSessionBuilder.ReadableByteChannelSessionBuilder; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.base.MoreObjects; +import java.io.Serializable; +import java.util.Map; +import java.util.Objects; + +final class BlobReadChannelV2 extends BaseStorageReadChannel { + + private final StorageObject storageObject; + private final Map opts; + private final BlobReadChannelContext blobReadChannelContext; + private final boolean autoGzipDecompression; + + BlobReadChannelV2( + StorageObject storageObject, + Map opts, + BlobReadChannelContext blobReadChannelContext) { + super(Conversions.json().blobInfo()); + this.storageObject = storageObject; + this.opts = opts; + this.blobReadChannelContext = blobReadChannelContext; + this.autoGzipDecompression = + // RETURN_RAW_INPUT_STREAM means do not add GZIPInputStream to the pipeline. Meaning, if + // RETURN_RAW_INPUT_STREAM is false, automatically attempt to decompress if Content-Encoding + // gzip. 
+ Boolean.FALSE.equals(opts.get(StorageRpc.Option.RETURN_RAW_INPUT_STREAM)); + } + + @Override + public RestorableState capture() { + lock.lock(); + try { + ApiaryReadRequest apiaryReadRequest = getApiaryReadRequest(); + return new BlobReadChannelV2State( + apiaryReadRequest, blobReadChannelContext.getStorageOptions(), getChunkSize()); + } finally { + lock.unlock(); + } + } + + protected LazyReadChannel newLazyReadChannel() { + return new LazyReadChannel<>( + () -> { + ReadableByteChannelSessionBuilder b = + ResumableMedia.http() + .read() + .byteChannel(blobReadChannelContext) + .setAutoGzipDecompression(autoGzipDecompression); + BufferHandle bufferHandle = getBufferHandle(); + // because we're erasing the specific type of channel, we need to declare it here. + // If we don't, the compiler complains we're not returning a compliant type. + ReadableByteChannelSession session; + if (bufferHandle.capacity() > 0) { + session = b.buffered(bufferHandle).setApiaryReadRequest(getApiaryReadRequest()).build(); + } else { + session = b.unbuffered().setApiaryReadRequest(getApiaryReadRequest()).build(); + } + return session; + }); + } + + private ApiaryReadRequest getApiaryReadRequest() { + StorageObject object = getResolvedObject() != null ? 
getResolvedObject() : storageObject; + return new ApiaryReadRequest(object, opts, getByteRangeSpec()); + } + + static final class BlobReadChannelV2State implements RestorableState, Serializable { + + private static final long serialVersionUID = -7595661593080505431L; + + private final ApiaryReadRequest request; + private final HttpStorageOptions options; + + private final Integer chunkSize; + + private BlobReadChannelV2State( + ApiaryReadRequest request, HttpStorageOptions options, Integer chunkSize) { + this.request = request; + this.options = options; + this.chunkSize = chunkSize; + } + + @Override + public ReadChannel restore() { + BlobReadChannelV2 channel = + new BlobReadChannelV2( + request.getObject(), request.getOptions(), BlobReadChannelContext.from(options)); + channel.setByteRangeSpec(request.getByteRangeSpec()); + if (chunkSize != null) { + channel.setChunkSize(chunkSize); + } + return channel; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof BlobReadChannelV2State)) { + return false; + } + BlobReadChannelV2State that = (BlobReadChannelV2State) o; + return Objects.equals(request, that.request) + && Objects.equals(options, that.options) + && Objects.equals(chunkSize, that.chunkSize); + } + + @Override + public int hashCode() { + return Objects.hash(request, options, chunkSize); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("request", request) + .add("options", options) + .add("chunkSize", chunkSize) + .toString(); + } + } + + static final class BlobReadChannelContext { + private final HttpStorageOptions storageOptions; + private final HttpRetryAlgorithmManager retryAlgorithmManager; + private final HttpClientContext httpClientContext; + private final Storage apiaryClient; + private final Retrier retrier; + + private BlobReadChannelContext( + HttpStorageOptions storageOptions, + HttpRetryAlgorithmManager retryAlgorithmManager, + 
HttpClientContext httpClientContext, + Storage apiaryClient, + Retrier retrier) { + this.storageOptions = storageOptions; + this.retryAlgorithmManager = retryAlgorithmManager; + this.httpClientContext = httpClientContext; + this.apiaryClient = apiaryClient; + this.retrier = retrier; + } + + public HttpStorageOptions getStorageOptions() { + return storageOptions; + } + + public HttpRetryAlgorithmManager getRetryAlgorithmManager() { + return retryAlgorithmManager; + } + + public HttpClientContext getHttpClientContext() { + return httpClientContext; + } + + public Storage getApiaryClient() { + return apiaryClient; + } + + public Retrier getRetrier() { + return retrier; + } + + /** + * This method is pretty unsafe, but so is all of the Capture/Restore API, and it leaks its + * sludge all over everything. In general, prefer {@link + * #from(com.google.cloud.storage.StorageImpl)} over this method. + * + *

Essentially, cause options to instantiate a StorageImpl if it hasn't done so already, then + * root around to try and find its retrier. + */ + static BlobReadChannelContext from(HttpStorageOptions options) { + com.google.cloud.storage.Storage storage = options.getService(); + if (storage instanceof OtelStorageDecorator) { + OtelStorageDecorator decorator = (OtelStorageDecorator) storage; + storage = decorator.delegate; + } + if (storage instanceof StorageImpl) { + StorageImpl impl = (StorageImpl) storage; + return from(impl); + } + throw new IllegalArgumentException( + "Unable to restore context from provided options instance"); + } + + static BlobReadChannelContext from(com.google.cloud.storage.StorageImpl s) { + HttpStorageOptions options = s.getOptions(); + return new BlobReadChannelContext( + options, + options.getRetryAlgorithmManager(), + HttpClientContext.from(options.getStorageRpcV1()), + options.getStorageRpcV1().getStorage(), + s.retrier); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof BlobReadChannelContext)) { + return false; + } + BlobReadChannelContext that = (BlobReadChannelContext) o; + return Objects.equals(storageOptions, that.storageOptions) + && Objects.equals(retryAlgorithmManager, that.retryAlgorithmManager); + } + + @Override + public int hashCode() { + return Objects.hash(storageOptions, retryAlgorithmManager); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("storageOptions", storageOptions).toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadSession.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadSession.java new file mode 100644 index 000000000000..eb424fcde05e --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadSession.java @@ -0,0 +1,74 @@ +/* + * Copyright 2024 Google LLC + * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.BetaApi; +import com.google.api.core.InternalExtensionOnly; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import java.io.Closeable; +import java.io.IOException; + +/** + * A session for reading bytes from a Blob + * + * @see Storage#blobReadSession(BlobId, BlobSourceOption...) + * @see ReadProjectionConfigs + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ +@BetaApi +@InternalExtensionOnly +@TransportCompatibility({Transport.GRPC}) +public interface BlobReadSession extends AutoCloseable, Closeable { + + /** + * The resolved metadata for the object being read + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @TransportCompatibility({Transport.GRPC}) + BlobInfo getBlobInfo(); + + /** + * Read from this session as a specific {@code Projection} as dictated by the provided {@code + * config} + * + * @see ReadProjectionConfig + * @see ReadProjectionConfigs + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @TransportCompatibility({Transport.GRPC}) + Projection readAs(ReadProjectionConfig config); + + /** + * Close this session and any {@code Projection}s produced by {@link + * #readAs(ReadProjectionConfig)}. + * + *

If a projection is not fully consumed/resolved it will be transitioned to a failed state. + * + *

This method MUST be called to ensure cleanup of any inflight buffers, and to avoid a memory + * leak. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @Override + @BetaApi + @TransportCompatibility({Transport.GRPC}) + void close() throws IOException; +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadSessionAdapter.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadSessionAdapter.java new file mode 100644 index 000000000000..1823a13283a1 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadSessionAdapter.java @@ -0,0 +1,62 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.util.concurrent.MoreExecutors; +import java.io.IOException; + +final class BlobReadSessionAdapter implements BlobReadSession { + + @VisibleForTesting final ObjectReadSession session; + + BlobReadSessionAdapter(ObjectReadSession session) { + this.session = session; + } + + @Override + public BlobInfo getBlobInfo() { + return Conversions.grpc().blobInfo().decode(session.getResource()); + } + + // ApiFutures type is erased, but that's okay. We're only decorating the errors. 
not changing + // the return type. + @SuppressWarnings({"rawtypes", "unchecked"}) + @Override + public Projection readAs(ReadProjectionConfig config) { + Projection projection = session.readAs(config); + if (projection instanceof ApiFuture) { + ApiFuture apiFuture = (ApiFuture) projection; + return (Projection) StorageException.coalesceAsync(apiFuture); + } + return projection; + } + + @Override + public void close() throws IOException { + session.close(); + } + + static ApiFuture wrap(ApiFuture session) { + return ApiFutures.transform( + StorageException.coalesceAsync(session), + BlobReadSessionAdapter::new, + MoreExecutors.directExecutor()); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteChannel.java new file mode 100644 index 000000000000..6494fc66a6d7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteChannel.java @@ -0,0 +1,95 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.BaseWriteChannel; +import com.google.cloud.RestorableState; +import com.google.cloud.WriteChannel; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableMap; + +/** + * Hierarchy retained for {@link RestorableState#restore()}. Will be removed in next major version! + */ +@Deprecated +class BlobWriteChannel extends BaseWriteChannel + implements StorageWriteChannel { + + private BlobWriteChannel() { + super(null, null, null); + throw new IllegalStateException("Illegal method access"); + } + + @Override + public ApiFuture getObject() { + throw new IllegalStateException("Illegal method access"); + } + + @Override + protected HttpStorageOptions getOptions() { + throw new IllegalStateException("Illegal method access"); + } + + @Override + protected void flushBuffer(final int length, final boolean lastChunk) { + throw new IllegalStateException("Illegal method access"); + } + + protected StateImpl.Builder stateBuilder() { + throw new IllegalStateException("Illegal method access"); + } + + @Override + public RestorableState capture() { + throw new IllegalStateException("Illegal method access"); + } + + /** Retained for binary compatibility. Will be removed at next major version! */ + @SuppressWarnings("unused") + @Deprecated + @VisibleForTesting + static class StateImpl extends BaseWriteChannel.BaseState { + + private static final long serialVersionUID = -6700378962714601115L; + + private ResultRetryAlgorithm algorithmForWrite; + + private StateImpl(Builder builder) { + super(builder); + } + + @Override + public WriteChannel restore() { + try { + StorageObject encode = entity != null ? 
Conversions.json().blobInfo().encode(entity) : null; + return new BlobWriteChannelV2.BlobWriteChannelV2State( + (HttpStorageOptions) serviceOptions, + JsonResumableWrite.of(encode, ImmutableMap.of(), uploadId, position), + position, + isOpen, + chunkSize, + buffer) + .restore(); + } catch (Exception e) { + throw StorageException.coalesce(e); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteChannelV2.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteChannelV2.java new file mode 100644 index 000000000000..edda3ce245e7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteChannelV2.java @@ -0,0 +1,165 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFutures; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.RestorableState; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.BlobReadChannelV2.BlobReadChannelContext; +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Objects; + +final class BlobWriteChannelV2 extends BaseStorageWriteChannel { + + private final BlobReadChannelContext blobChannelContext; + private final JsonResumableWrite start; + + BlobWriteChannelV2(BlobReadChannelContext blobChannelContext, JsonResumableWrite start) { + super(Conversions.json().blobInfo()); + this.start = start; + this.blobChannelContext = blobChannelContext; + } + + @Override + public RestorableState capture() { + lock.lock(); + try { + final byte[] bufferSnapshot; + BufferHandle handle = getBufferHandle(); + if (handle.position() > 0) { + ByteBuffer byteBuffer = handle.get(); + // duplicate so we don't actually modify the existing instance + ByteBuffer dup = byteBuffer.duplicate(); + dup.flip(); + int remaining = dup.remaining(); + bufferSnapshot = new byte[remaining]; + dup.get(bufferSnapshot); + } else { + bufferSnapshot = new byte[0]; + } + return new BlobWriteChannelV2State( + blobChannelContext.getStorageOptions(), + start, + getCommittedPosition(), + isOpen(), + getChunkSize(), + bufferSnapshot); + } finally { + lock.unlock(); + } + } + + @Override + protected LazyWriteChannel newLazyWriteChannel() { + return new LazyWriteChannel<>( + () -> + ResumableMedia.http() + .write() + .byteChannel(blobChannelContext.getHttpClientContext()) + .resumable() + .setCommittedBytesCallback(this::setCommittedPosition) + .withRetryConfig( + blobChannelContext + .getRetrier() + .withAlg(blobChannelContext.getRetryAlgorithmManager().idempotent())) + .setHasher(start.getHasher()) + .buffered(getBufferHandle()) + 
.setStartAsync(ApiFutures.immediateFuture(start)) + .build()); + } + + static final class BlobWriteChannelV2State + implements RestorableState, Serializable { + + private static final long serialVersionUID = -1901664719924133474L; + + private final HttpStorageOptions options; + private final JsonResumableWrite resumableWrite; + + private final Long position; + private final Boolean isOpen; + private final Integer chunkSize; + private final byte[] bufferSnapshot; + + BlobWriteChannelV2State( + HttpStorageOptions options, + JsonResumableWrite resumableWrite, + Long position, + Boolean isOpen, + Integer chunkSize, + byte[] bufferSnapshot) { + this.options = options; + this.resumableWrite = resumableWrite; + this.position = position; + this.isOpen = isOpen; + this.chunkSize = chunkSize; + this.bufferSnapshot = bufferSnapshot; + } + + @Override + public WriteChannel restore() { + JsonResumableWrite resumableWrite = this.resumableWrite; + if (position != null) { + resumableWrite = resumableWrite.withBeginOffset(position); + } + BlobWriteChannelV2 channel = + new BlobWriteChannelV2(BlobReadChannelContext.from(options), resumableWrite); + if (chunkSize != null) { + channel.setChunkSize(chunkSize); + } + if (bufferSnapshot != null && bufferSnapshot.length > 0) { + BufferHandle handle = channel.getBufferHandle(); + ByteBuffer byteBuffer = handle.get(); + byteBuffer.put(bufferSnapshot); + } + if (position != null) { + channel.setCommittedPosition(position); + } + if (isOpen != null) { + channel.setOpen(isOpen); + } + return channel; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof BlobWriteChannelV2State)) { + return false; + } + BlobWriteChannelV2State that = (BlobWriteChannelV2State) o; + return Objects.equals(options, that.options) + && Objects.equals(resumableWrite, that.resumableWrite) + && Objects.equals(position, that.position) + && Objects.equals(isOpen, that.isOpen) + && Objects.equals(chunkSize, 
that.chunkSize) + && Arrays.equals(bufferSnapshot, that.bufferSnapshot); + } + + @Override + public int hashCode() { + int result = Objects.hash(options, resumableWrite, position, isOpen, chunkSize); + result = 31 * result + Arrays.hashCode(bufferSnapshot); + return result; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteSession.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteSession.java new file mode 100644 index 000000000000..d22fa6e534f5 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteSession.java @@ -0,0 +1,85 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.api.core.InternalExtensionOnly; +import java.io.IOException; +import java.nio.channels.WritableByteChannel; +import java.util.concurrent.TimeUnit; + +/** + * A session to write an object to Google Cloud Storage. + * + *

A session can only write a single version of an object. If writing multiple versions of an + * object a new session must be created each time. + * + *

Provides an api that allows writing to and retrieving the resulting {@link BlobInfo} after + * write finalization. + * + *

The underlying implementation is dictated based upon the specified {@link + * BlobWriteSessionConfig} provided at {@link StorageOptions} creation time. + * + * @see GrpcStorageOptions.Builder#setBlobWriteSessionConfig(BlobWriteSessionConfig) + * @see HttpStorageOptions.Builder#setBlobWriteSessionConfig(BlobWriteSessionConfig) + * @see BlobWriteSessionConfig + * @see BlobWriteSessionConfigs + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ +@BetaApi +@InternalExtensionOnly +public interface BlobWriteSession { + + /** + * Open the {@link WritableByteChannel} for this session. + * + *

A session may only be {@code open}ed once. If multiple calls to open are made, an illegal + * state exception will be thrown + * + *

Upon calling {@link WritableByteChannel#close()} the object creation will be finalized, and + * {@link #getResult()}s future should resolve. + * + *

The returned {@code WritableByteChannel} can throw IOExceptions from any of its usual + * methods. Any {@link IOException} thrown can have a cause of a {@link StorageException}. + * However, not all {@code IOExceptions} will have {@code StorageException}s. + * + * @throws IOException When creating the {@link WritableByteChannel} if an unrecoverable + * underlying IOException occurs it can be rethrown + * @throws IllegalStateException if open is called more than once + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + WritableByteChannel open() throws IOException; + + /** + * Return an {@link ApiFuture}{@code } which will represent the state of the object upon + * finalization and success response from Google Cloud Storage. + * + *

This future will not resolve until: 1. The object is successfully finalized and created in + * Google Cloud Storage 2. A terminal failure occurs, the terminal failure will become the + * exception result + * + *

If a terminal failure is encountered, calling either {@link ApiFuture#get()} or {@link + * ApiFuture#get(long, TimeUnit)} will result in an {@link + * java.util.concurrent.ExecutionException} with a cause that is the {@link StorageException}. + * + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + ApiFuture getResult(); +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteSessionConfig.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteSessionConfig.java new file mode 100644 index 000000000000..7595822cef75 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteSessionConfig.java @@ -0,0 +1,79 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.InternalApi; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import java.io.IOException; +import java.io.Serializable; +import java.time.Clock; + +/** + * A sealed internal implementation only class which provides the means of configuring a {@link + * BlobWriteSession}. + * + *

A {@code BlobWriteSessionConfig} will be used to configure all {@link BlobWriteSession}s + * produced by an instance of {@link Storage}. + * + * @see BlobWriteSessionConfigs + * @see GrpcStorageOptions.Builder#setBlobWriteSessionConfig(BlobWriteSessionConfig) + * @see Storage#blobWriteSession(BlobInfo, BlobWriteOption...) + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ +// When we have java modules, actually seal this to internal extension only +@InternalApi +public abstract class BlobWriteSessionConfig implements Serializable { + + @InternalApi + BlobWriteSessionConfig() {} + + @Override + public abstract int hashCode(); + + @Override + public abstract boolean equals(Object obj); + + @InternalApi + abstract WriterFactory createFactory(Clock clock) throws IOException; + + @InternalApi + interface WriterFactory { + @InternalApi + WritableByteChannelSession writeSession( + StorageInternal s, BlobInfo info, Opts opts); + } + + /** + * Internal marker interface to signify an implementation of {@link BlobWriteSessionConfig} is + * compatible with {@link com.google.cloud.storage.TransportCompatibility.Transport#HTTP} + * + *

We could evaluate the annotations, but the code for that is more complicated and probably + * not worth the effort. + */ + interface HttpCompatible {} + + /** + * Internal marker interface to signify an implementation of {@link BlobWriteSessionConfig} is + * compatible with {@link com.google.cloud.storage.TransportCompatibility.Transport#GRPC} + * + *

We could evaluate the annotations, but the code for that is more complicated and probably + * not worth the effort. + */ + interface GrpcCompatible {} +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteSessionConfigs.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteSessionConfigs.java new file mode 100644 index 000000000000..9e9479e144e1 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteSessionConfigs.java @@ -0,0 +1,349 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.BetaApi; +import com.google.cloud.storage.GrpcStorageOptions.GrpcStorageDefaults; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartCleanupStrategy; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.common.collect.ImmutableList; +import java.io.IOException; +import java.nio.channels.WritableByteChannel; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Collection; + +/** + * Factory class to select and construct {@link BlobWriteSessionConfig}s. + * + *

There are several strategies which can be used to upload a {@link Blob} to Google Cloud + * Storage. This class provides factories which allow you to select the appropriate strategy for + * your workload. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Comparison of Strategies
StrategyFactory Method(s)DescriptionTransport(s) SupportedConsiderationsRetry SupportCloud Storage API used
Default (Chunk based upload){@link #getDefault()} + * Buffer up to a configurable amount of bytes in memory, write to Cloud Storage when + * full or close. Buffer size is configurable via + * {@link DefaultBlobWriteSessionConfig#withChunkSize(int)} + * gRPC, HTTPThe network will only be used for the following operations: + *
    + *
  1. Creating the Resumable Upload Session
  2. + *
  3. Transmitting zero or more incremental chunks
  4. + *
  5. Transmitting the final chunk and finalizing the Resumable Upload Session
  6. + *
  7. + * If any of the above are interrupted with a retryable error, the Resumable Upload Session + * will be queried to reconcile client side state with Cloud Storage + *
  8. + *
+ *
+ * Each chunk is retried up to the limitations specified in + * {@link StorageOptions#getRetrySettings()} + * Resumable Upload
Buffer to disk then upload + *
    + *
  • {@link #bufferToDiskThenUpload(Path)}
  • + *
  • {@link #bufferToDiskThenUpload(Collection) bufferToDiskThenUpload(Collection<Path>)}
  • + *
  • {@link #bufferToTempDirThenUpload()}
  • + *
+ *
+ * Buffer bytes to a temporary file on disk. On {@link WritableByteChannel#close() close()} + * upload the entire files contents to Cloud Storage. Delete the temporary file. + * gRPC, HTTP + *
    + *
  1. A Resumable Upload Session will be used to upload the file on disk.
  2. + *
  3. + * If the upload is interrupted with a retryable error, the Resumable Upload Session will + * be queried to restart the upload from Cloud Storage's last received byte + *
  4. + *
+ *
+ * Upload the file in the fewest number of RPC possible retrying within the limitations + * specified in {@link StorageOptions#getRetrySettings()} + * Resumable Upload
Journal to disk while uploading{@link #journaling(Collection) journaling(Collection<Path>)} + * Create a Resumable Upload Session, before transmitting bytes to Cloud Storage write + * to a recovery file on disk. If the stream to Cloud Storage is interrupted with a + * retryable error query the offset of the Resumable Upload Session, then open the recovery + * file from the offset and transmit the bytes to Cloud Storage. + * gRPC + *
    + *
  1. + * The stream to Cloud Storage will be held open until a) the write is complete + * b) the stream is interrupted + *
  2. + *
  3. + * Because the bytes are journaled to disk, the upload to Cloud Storage can only + * be as fast as the disk. + *
  4. + *
  5. + * The use of Compute + * Engine Local NVMe SSD is strongly encouraged compared to Compute Engine Persistent + * Disk. + *
  6. + *
+ *
+ * Opening the stream for upload will be retried up to the limitations specified in {@link StorageOptions#getRetrySettings()} + * All bytes are buffered to disk and allow for recovery from any arbitrary offset. + * Resumable Upload
Parallel Composite Upload{@link #parallelCompositeUpload()} + * Break the stream of bytes into smaller part objects uploading each part in parallel. Then + * composing the parts together to make the ultimate object. + * gRPC, HTTP + *
    + *
  1. + * Performing parallel composite uploads costs more money. + * Class A + * operations are performed to create each part and to perform each compose. If a storage + * tier other than + * STANDARD + * is used, early deletion fees apply to deletion of the parts. + *

    An illustrative example. Upload a 5GiB object using 64MiB as the max size per part. + *

      + *
    1. 80 Parts will be created (Class A)
    2. + *
    3. 3 compose calls will be performed (Class A)
    4. + *
    5. Delete 80 Parts along with 2 intermediary Compose objects (Free tier as long as {@code STANDARD} class)
    6. + *
    + * + * Once the parts and intermediary compose objects are deleted, there will be no storage charges related to those temporary objects. + *
  2. + *
  3. + * The service account/credentials used to perform the parallel composite upload require + * {@code storage.objects.delete} + * in order to cleanup the temporary part and intermediary compose objects. + *

    To handle handle part and intermediary compose object deletion out of band + * passing {@link PartCleanupStrategy#never()} to {@link ParallelCompositeUploadBlobWriteSessionConfig#withPartCleanupStrategy(PartCleanupStrategy)} + * will prevent automatic cleanup. + *

  4. + *
  5. + * Please see the + * Parallel composite uploads documentation for a more in depth explanation of the + * limitations of Parallel composite uploads. + *
  6. + *
  7. + * A failed upload can leave part and intermediary compose objects behind which will count + * as storage usage, and you will be billed for it. + *

    By default if an upload fails, an attempt to cleanup the part and intermediary compose + * will be made. However if the program were to crash there is no means for the client to + * perform the cleanup. + *

    Every part and intermediary compose object will be created with a name which ends in + * {@code .part}. An Object Lifecycle Management rule can be setup on your bucket to automatically + * cleanup objects with the suffix after some period of time. See + * Object Lifecycle Management + * for full details and a guide on how to setup a Delete + * rule with a suffix match condition. + *

  8. + *
  9. + * Using parallel composite uploads are not a one size fits all solution. They have very + * real overhead until uploading a large enough object. The inflection point is dependent + * upon many factors, and there is no one size fits all value. You will need to experiment + * with your deployment and workload to determine if parallel composite uploads are useful + * to you. + *
  10. + *
+ *
+ * Automatic retires will be applied for the following: + *
    + *
  1. Creation of each individual part
  2. + *
  3. Performing an intermediary compose
  4. + *
  5. Performing a delete to cleanup each part and intermediary compose object
  6. + *
+ * + * Retrying the creation of the final object is contingent upon if an appropriate precondition + * is supplied when calling {@link Storage#blobWriteSession(BlobInfo, BlobWriteOption...)}. + * Either {@link BlobTargetOption#doesNotExist()} or {@link Storage.BlobTargetOption#generationMatch(long)} + * should be specified in order to make the final request idempotent. + *

Each operation will be retried up to the limitations specified in {@link StorageOptions#getRetrySettings()} + *

+ * + *
+ * + * @see BlobWriteSessionConfig + * @see GrpcStorageOptions.Builder#setBlobWriteSessionConfig(BlobWriteSessionConfig) + * @see Storage#blobWriteSession(BlobInfo, BlobWriteOption...) + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ +@BetaApi +public final class BlobWriteSessionConfigs { + + private BlobWriteSessionConfigs() {} + + /** + * Factory to produce the default configuration for uploading an object to Cloud Storage. + * + *

Configuration of the chunk size can be performed via {@link + * DefaultBlobWriteSessionConfig#withChunkSize(int)}. + * + * @see GrpcStorageDefaults#getDefaultStorageWriterConfig() + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @TransportCompatibility({Transport.GRPC, Transport.HTTP}) + public static DefaultBlobWriteSessionConfig getDefault() { + return new DefaultBlobWriteSessionConfig(ByteSizeConstants._16MiB); + } + + /** + * Factory to produce a resumable upload using a bi-directional stream. This should provide a + * small performance increase compared to a regular resumable upload. + * + *

Configuration of the buffer size can be performed via {@link + * BidiBlobWriteSessionConfig#withBufferSize(int)}. + * + * @since 2.34.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @TransportCompatibility({Transport.GRPC}) + public static BidiBlobWriteSessionConfig bidiWrite() { + return new BidiBlobWriteSessionConfig(ByteSizeConstants._16MiB); + } + + /** + * Create a new {@link BlobWriteSessionConfig} which will first buffer the content of the object + * to a temporary file under {@code java.io.tmpdir}. + * + *

Once the file on disk is closed, the entire file will then be uploaded to Cloud Storage. + * + * @see Storage#blobWriteSession(BlobInfo, BlobWriteOption...) + * @see GrpcStorageOptions.Builder#setBlobWriteSessionConfig(BlobWriteSessionConfig) + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @TransportCompatibility({Transport.GRPC, Transport.HTTP}) + public static BlobWriteSessionConfig bufferToTempDirThenUpload() throws IOException { + return bufferToDiskThenUpload( + Paths.get(System.getProperty("java.io.tmpdir"), "google-cloud-storage")); + } + + /** + * Create a new {@link BlobWriteSessionConfig} which will first buffer the content of the object + * to a temporary file under the specified {@code path}. + * + *

Once the file on disk is closed, the entire file will then be uploaded to Cloud Storage. + * + * @see Storage#blobWriteSession(BlobInfo, BlobWriteOption...) + * @see GrpcStorageOptions.Builder#setBlobWriteSessionConfig(BlobWriteSessionConfig) + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @TransportCompatibility({Transport.GRPC, Transport.HTTP}) + public static BufferToDiskThenUpload bufferToDiskThenUpload(Path path) throws IOException { + return bufferToDiskThenUpload(ImmutableList.of(path)); + } + + /** + * Create a new {@link BlobWriteSessionConfig} which will first buffer the content of the object + * to a temporary file under one of the specified {@code paths}. + * + *

Once the file on disk is closed, the entire file will then be uploaded to Cloud Storage. + * + *

The specifics of how the work is spread across multiple paths is undefined and subject to + * change. + * + * @see Storage#blobWriteSession(BlobInfo, BlobWriteOption...) + * @see GrpcStorageOptions.Builder#setBlobWriteSessionConfig(BlobWriteSessionConfig) + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @TransportCompatibility({Transport.GRPC, Transport.HTTP}) + public static BufferToDiskThenUpload bufferToDiskThenUpload(Collection paths) + throws IOException { + return new BufferToDiskThenUpload(ImmutableList.copyOf(paths), false); + } + + /** + * Create a new {@link BlobWriteSessionConfig} which will journal writes to a temporary file under + * one of the specified {@code paths} before transmitting the bytes to Cloud Storage. + * + *

The specifics of how the work is spread across multiple paths is undefined and subject to + * change. + * + * @see Storage#blobWriteSession(BlobInfo, BlobWriteOption...) + * @see GrpcStorageOptions.Builder#setBlobWriteSessionConfig(BlobWriteSessionConfig) + * @since 2.27.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @TransportCompatibility(Transport.GRPC) + public static JournalingBlobWriteSessionConfig journaling(Collection paths) { + return new JournalingBlobWriteSessionConfig(ImmutableList.copyOf(paths), false); + } + + /** + * Create a new {@link BlobWriteSessionConfig} which will perform a Parallel Composite + * Upload by breaking the stream into parts and composing the parts together to make the + * ultimate object. + * + * @see Storage#blobWriteSession(BlobInfo, BlobWriteOption...) + * @see GrpcStorageOptions.Builder#setBlobWriteSessionConfig(BlobWriteSessionConfig) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @TransportCompatibility({Transport.GRPC, Transport.HTTP}) + public static ParallelCompositeUploadBlobWriteSessionConfig parallelCompositeUpload() { + return ParallelCompositeUploadBlobWriteSessionConfig.withDefaults(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteSessions.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteSessions.java new file mode 100644 index 000000000000..c9da9ee05c22 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobWriteSessions.java @@ -0,0 +1,55 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.common.base.Preconditions; +import java.io.IOException; +import java.nio.channels.WritableByteChannel; + +final class BlobWriteSessions { + + private BlobWriteSessions() {} + + static BlobWriteSession of(WritableByteChannelSession s) { + return new WritableByteChannelSessionAdapter(s); + } + + static final class WritableByteChannelSessionAdapter implements BlobWriteSession { + private final WritableByteChannelSession delegate; + private boolean open; + + private WritableByteChannelSessionAdapter(WritableByteChannelSession delegate) { + this.delegate = delegate; + open = false; + } + + @Override + public WritableByteChannel open() throws IOException { + synchronized (this) { + Preconditions.checkState(!open, "already open"); + open = true; + return delegate.open(); + } + } + + @Override + public ApiFuture getResult() { + return delegate.getResult(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Bucket.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Bucket.java new file mode 100644 index 000000000000..7581a38e002e --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Bucket.java @@ -0,0 +1,1619 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.Storage.BlobGetOption; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.Storage.BucketGetOption; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.UnifiedOpts.BucketOptExtractor; +import com.google.cloud.storage.UnifiedOpts.BucketSourceOpt; +import com.google.cloud.storage.UnifiedOpts.ObjectOptExtractor; +import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInputStream; +import java.io.Serializable; +import java.math.BigInteger; +import java.security.Key; +import java.time.Duration; +import java.time.OffsetDateTime; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * A Google cloud storage bucket. + * + *

Objects of this class are immutable. Operations that modify the bucket like {@link #update} + * return a new object. To get a {@code Bucket} object with the most recent information use {@link + * #reload}. {@code Bucket} adds a layer of service-related functionality over {@link BucketInfo}. + */ +@TransportCompatibility({Transport.HTTP, Transport.GRPC}) +public class Bucket extends BucketInfo { + + private static final long serialVersionUID = 3599706574671671516L; + + private final StorageOptions options; + private transient Storage storage; + + /** Class for specifying bucket source options when {@code Bucket} methods are used. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static class BucketSourceOption extends Option { + + private static final long serialVersionUID = 6765489853972162215L; + + private BucketSourceOption(BucketSourceOpt opt) { + super(opt); + } + + /** + * Returns an option for bucket's metageneration match. If this option is used the request will + * fail if metageneration does not match. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketSourceOption metagenerationMatch() { + return new BucketSourceOption(UnifiedOpts.metagenerationMatchExtractor()); + } + + /** + * Returns an option for bucket's metageneration mismatch. If this option is used the request + * will fail if metageneration matches. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketSourceOption metagenerationNotMatch() { + return new BucketSourceOption(UnifiedOpts.metagenerationNotMatchExtractor()); + } + + /** + * Returns an option for blob's billing user project. This option is only used by the buckets + * with 'requester_pays' flag. 
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketSourceOption userProject(@NonNull String userProject) { + return new BucketSourceOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static BucketSourceOption[] dedupe(BucketSourceOption... os) { + return Option.dedupe(BucketSourceOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BucketSourceOption[] dedupe( + Collection collection, BucketSourceOption... os) { + return Option.dedupe(BucketSourceOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BucketSourceOption[] dedupe( + BucketSourceOption[] array, BucketSourceOption... os) { + return Option.dedupe(BucketSourceOption[]::new, array, os); + } + + static Storage.BucketSourceOption[] toSourceOptions( + BucketInfo bucketInfo, BucketSourceOption... options) { + Storage.BucketSourceOption[] convertedOptions = + new Storage.BucketSourceOption[options.length]; + for (int i = 0; i < options.length; i++) { + BucketSourceOpt opt = options[i].getOpt(); + if (opt instanceof BucketOptExtractor) { + BucketOptExtractor ex = (BucketOptExtractor) opt; + BucketSourceOpt bucketSourceOpt = ex.extractFromBucketInfo(bucketInfo); + convertedOptions[i] = new Storage.BucketSourceOption(bucketSourceOpt); + } else { + convertedOptions[i] = new Storage.BucketSourceOption(options[i].getOpt()); + } + } + return convertedOptions; + } + + static Storage.BucketGetOption[] toGetOptions( + BucketInfo bucketInfo, BucketSourceOption... options) { + Storage.BucketGetOption[] convertedOptions = new Storage.BucketGetOption[options.length]; + for (int i = 0; i < options.length; i++) { + BucketSourceOpt opt = options[i].getOpt(); + if (opt instanceof BucketOptExtractor) { + BucketOptExtractor ex = (BucketOptExtractor) opt; + BucketSourceOpt bucketSourceOpt = ex.extractFromBucketInfo(bucketInfo); + convertedOptions[i] = new BucketGetOption(bucketSourceOpt); + } else { + convertedOptions[i] = new BucketGetOption(options[i].getOpt()); + } + } + return convertedOptions; + } + } + + /** Class for specifying blob target options when {@code Bucket} methods are used. 
*/ + public static class BlobTargetOption extends Option { + + private static final long serialVersionUID = -7203767045761758606L; + + private BlobTargetOption(ObjectTargetOpt opt) { + super(opt); + } + + /** Returns an option for specifying blob's predefined ACL configuration. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption predefinedAcl(Storage.@NonNull PredefinedAcl acl) { + return new BlobTargetOption(UnifiedOpts.predefinedAcl(acl)); + } + + /** + * Returns an option that causes an operation to succeed only if the target blob does not exist. + * This option can not be provided together with {@link #generationMatch(long)} or {@link + * #generationNotMatch(long)}. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption doesNotExist() { + return new BlobTargetOption(UnifiedOpts.doesNotExist()); + } + + /** + * Returns an option for blob's data generation match. If this option is used the request will + * fail if generation does not match the provided value. This option can not be provided + * together with {@link #generationNotMatch(long)} or {@link #doesNotExist()}. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption generationMatch(long generation) { + return new BlobTargetOption(UnifiedOpts.generationMatch(generation)); + } + + /** + * Returns an option for blob's data generation mismatch. If this option is used the request + * will fail if blob's generation matches the provided value. This option can not be provided + * together with {@link #generationMatch(long)} or {@link #doesNotExist()}. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption generationNotMatch(long generation) { + return new BlobTargetOption(UnifiedOpts.generationNotMatch(generation)); + } + + /** + * Returns an option for blob's metageneration match. 
If this option is used the request will + * fail if metageneration does not match the provided value. This option can not be provided + * together with {@link #metagenerationNotMatch(long)}. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption metagenerationMatch(long metageneration) { + return new BlobTargetOption(UnifiedOpts.metagenerationMatch(metageneration)); + } + + /** + * Returns an option for blob's metageneration mismatch. If this option is used the request will + * fail if metageneration matches the provided value. This option can not be provided together + * with {@link #metagenerationMatch(long)}. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption metagenerationNotMatch(long metageneration) { + return new BlobTargetOption(UnifiedOpts.metagenerationNotMatch(metageneration)); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side encryption of the + * blob. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption encryptionKey(@NonNull Key key) { + return new BlobTargetOption(UnifiedOpts.encryptionKey(key)); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side encryption of the + * blob. + * + * @param key the AES256 encoded in base64 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption encryptionKey(@NonNull String key) { + return new BlobTargetOption(UnifiedOpts.encryptionKey(key)); + } + + /** + * Returns an option to set a customer-managed KMS key for server-side encryption of the blob. + * + * @param kmsKeyName the KMS key resource id + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption kmsKeyName(@NonNull String kmsKeyName) { + return new BlobTargetOption(UnifiedOpts.kmsKeyName(kmsKeyName)); + } + + /** + * Returns an option for blob's billing user project. 
This option is only used by the buckets + * with 'requester_pays' flag. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption userProject(@NonNull String userProject) { + return new BlobTargetOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Returns an option for overriding an Unlocked Retention policy. This must be set to true in + * order to change a policy from Unlocked to Locked, to set it to null, or to reduce its + * retainUntilTime attribute. + */ + @TransportCompatibility({Transport.HTTP}) + public static BlobTargetOption overrideUnlockedRetention(boolean overrideUnlockedRetention) { + return new BlobTargetOption(UnifiedOpts.overrideUnlockedRetention(overrideUnlockedRetention)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static BlobTargetOption[] dedupe(BlobTargetOption... os) { + return Option.dedupe(BlobTargetOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobTargetOption[] dedupe( + Collection collection, BlobTargetOption... os) { + return Option.dedupe(BlobTargetOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobTargetOption[] dedupe(BlobTargetOption[] array, BlobTargetOption... os) { + return Option.dedupe(BlobTargetOption[]::new, array, os); + } + + static Storage.BlobTargetOption[] toTargetOptions( + BlobInfo blobInfo, BlobTargetOption... options) { + Storage.BlobTargetOption[] targetOptions = new Storage.BlobTargetOption[options.length]; + for (int i = 0; i < options.length; i++) { + ObjectTargetOpt opt = options[i].getOpt(); + if (opt instanceof ObjectOptExtractor) { + ObjectOptExtractor ex = (ObjectOptExtractor) opt; + ObjectTargetOpt objectTargetOpt = ex.extractFromBlobInfo(blobInfo); + targetOptions[i] = new Storage.BlobTargetOption(objectTargetOpt); + } else { + targetOptions[i] = new Storage.BlobTargetOption(options[i].getOpt()); + } + } + return targetOptions; + } + } + + /** Class for specifying blob write options when {@code Bucket} methods are used. */ + public static class BlobWriteOption extends Option implements Serializable { + + private static final long serialVersionUID = 59762268190041584L; + + private BlobWriteOption(ObjectTargetOpt opt) { + super(opt); + } + + /** Returns an option for specifying blob's predefined ACL configuration. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption predefinedAcl(Storage.@NonNull PredefinedAcl acl) { + return new BlobWriteOption(UnifiedOpts.predefinedAcl(acl)); + } + + /** + * Returns an option that causes an operation to succeed only if the target blob does not exist. + * This option can not be provided together with {@link #generationMatch(long)} or {@link + * #generationNotMatch(long)}. 
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption doesNotExist() { + return new BlobWriteOption(UnifiedOpts.doesNotExist()); + } + + /** + * Returns an option for blob's data generation match. If this option is used the request will + * fail if generation does not match the provided value. This option can not be provided + * together with {@link #generationNotMatch(long)} or {@link #doesNotExist()}. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption generationMatch(long generation) { + return new BlobWriteOption(UnifiedOpts.generationMatch(generation)); + } + + /** + * Returns an option for blob's data generation mismatch. If this option is used the request + * will fail if generation matches the provided value. This option can not be provided together + * with {@link #generationMatch(long)} or {@link #doesNotExist()}. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption generationNotMatch(long generation) { + return new BlobWriteOption(UnifiedOpts.generationNotMatch(generation)); + } + + /** + * Returns an option for blob's metageneration match. If this option is used the request will + * fail if metageneration does not match the provided value. This option can not be provided + * together with {@link #metagenerationNotMatch(long)}. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption metagenerationMatch(long metageneration) { + return new BlobWriteOption(UnifiedOpts.metagenerationMatch(metageneration)); + } + + /** + * Returns an option for blob's metageneration mismatch. If this option is used the request will + * fail if metageneration matches the provided value. This option can not be provided together + * with {@link #metagenerationMatch(long)}. 
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption metagenerationNotMatch(long metageneration) { + return new BlobWriteOption(UnifiedOpts.metagenerationNotMatch(metageneration)); + } + + /** + * Returns an option for blob's data MD5 hash match. If this option is used the request will + * fail if blobs' data MD5 hash does not match the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption md5Match(@NonNull String md5) { + return new BlobWriteOption(UnifiedOpts.md5Match(md5)); + } + + /** + * Returns an option for blob's data CRC32C checksum match. If this option is used the request + * will fail if blobs' data CRC32C checksum does not match the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption crc32cMatch(@NonNull String crc32c) { + return new BlobWriteOption(UnifiedOpts.crc32cMatch(crc32c)); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side encryption of the + * blob. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption encryptionKey(@NonNull Key key) { + return new BlobWriteOption(UnifiedOpts.encryptionKey(key)); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side encryption of the + * blob. + * + * @param key the AES256 encoded in base64 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption encryptionKey(@NonNull String key) { + return new BlobWriteOption(UnifiedOpts.encryptionKey(key)); + } + + /** + * Returns an option for blob's billing user project. This option is only used by the buckets + * with 'requester_pays' flag. 
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption userProject(@NonNull String userProject) { + return new BlobWriteOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static BlobWriteOption[] dedupe(BlobWriteOption... os) { + return Option.dedupe(BlobWriteOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobWriteOption[] dedupe( + Collection collection, BlobWriteOption... os) { + return Option.dedupe(BlobWriteOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobWriteOption[] dedupe(BlobWriteOption[] array, BlobWriteOption... os) { + return Option.dedupe(BlobWriteOption[]::new, array, os); + } + + static Storage.BlobWriteOption[] toWriteOptions(BlobInfo blobInfo, BlobWriteOption... options) { + Storage.BlobWriteOption[] convertedOptions = new Storage.BlobWriteOption[options.length]; + for (int i = 0; i < options.length; i++) { + ObjectTargetOpt opt = options[i].getOpt(); + if (opt instanceof ObjectOptExtractor) { + ObjectOptExtractor ex = (ObjectOptExtractor) opt; + ObjectTargetOpt objectTargetOpt = ex.extractFromBlobInfo(blobInfo); + convertedOptions[i] = new Storage.BlobWriteOption(objectTargetOpt); + } else { + convertedOptions[i] = new Storage.BlobWriteOption(options[i].getOpt()); + } + } + return convertedOptions; + } + } + + /** Builder for {@code Bucket}. 
*/ + public static class Builder extends BucketInfo.Builder { + private final Storage storage; + private final BucketInfo.BuilderImpl infoBuilder; + + Builder(Bucket bucket) { + this.storage = bucket.storage; + this.infoBuilder = new BucketInfo.BuilderImpl(bucket); + } + + @Override + public Builder setName(String name) { + infoBuilder.setName(name); + return this; + } + + @Override + Builder setProject(BigInteger project) { + infoBuilder.setProject(project); + return this; + } + + @Override + Builder setGeneratedId(String generatedId) { + infoBuilder.setGeneratedId(generatedId); + return this; + } + + @Override + Builder setOwner(Entity owner) { + infoBuilder.setOwner(owner); + return this; + } + + @Override + Builder setSelfLink(String selfLink) { + infoBuilder.setSelfLink(selfLink); + return this; + } + + @Override + public Builder setVersioningEnabled(Boolean enable) { + infoBuilder.setVersioningEnabled(enable); + return this; + } + + @Override + public Builder setRequesterPays(Boolean requesterPays) { + infoBuilder.setRequesterPays(requesterPays); + return this; + } + + @Override + public Builder setIndexPage(String indexPage) { + infoBuilder.setIndexPage(indexPage); + return this; + } + + @Override + public Builder setNotFoundPage(String notFoundPage) { + infoBuilder.setNotFoundPage(notFoundPage); + return this; + } + + /** + * @deprecated Use {@link #setLifecycleRules(Iterable)} instead, as in {@code + * setLifecycleRules(Collections.singletonList( new BucketInfo.LifecycleRule( + * LifecycleAction.newDeleteAction(), LifecycleCondition.newBuilder().setAge(5).build())));} + */ + @Override + @Deprecated + public Builder setDeleteRules(Iterable rules) { + infoBuilder.setDeleteRules(rules); + return this; + } + + @Override + public Builder setLifecycleRules(Iterable rules) { + infoBuilder.setLifecycleRules(rules); + return this; + } + + @Override + public Builder deleteLifecycleRules() { + infoBuilder.deleteLifecycleRules(); + return this; + } + + @Override + 
public Builder setRpo(Rpo rpo) { + infoBuilder.setRpo(rpo); + return this; + } + + @Override + public Builder setStorageClass(StorageClass storageClass) { + infoBuilder.setStorageClass(storageClass); + return this; + } + + @Override + public Builder setLocation(String location) { + infoBuilder.setLocation(location); + return this; + } + + @Override + Builder setEtag(String etag) { + infoBuilder.setEtag(etag); + return this; + } + + /** + * @deprecated Use {@link #setCreateTimeOffsetDateTime(OffsetDateTime)} + */ + @Override + @Deprecated + Builder setCreateTime(Long createTime) { + infoBuilder.setCreateTime(createTime); + return this; + } + + @Override + BucketInfo.Builder setCreateTimeOffsetDateTime(OffsetDateTime createTime) { + infoBuilder.setCreateTimeOffsetDateTime(createTime); + return this; + } + + /** + * @deprecated Use {@link #setUpdateTimeOffsetDateTime(OffsetDateTime)} + */ + @Override + @Deprecated + Builder setUpdateTime(Long updateTime) { + infoBuilder.setUpdateTime(updateTime); + return this; + } + + @Override + BucketInfo.Builder setUpdateTimeOffsetDateTime(OffsetDateTime updateTime) { + infoBuilder.setUpdateTimeOffsetDateTime(updateTime); + return this; + } + + @Override + Builder setMetageneration(Long metageneration) { + infoBuilder.setMetageneration(metageneration); + return this; + } + + @Override + public Builder setCors(Iterable cors) { + infoBuilder.setCors(cors); + return this; + } + + @Override + public Builder setAcl(Iterable acl) { + infoBuilder.setAcl(acl); + return this; + } + + @Override + public Builder setDefaultAcl(Iterable acl) { + infoBuilder.setDefaultAcl(acl); + return this; + } + + @Override + public Builder setLabels(@Nullable Map<@NonNull String, @Nullable String> labels) { + infoBuilder.setLabels(labels); + return this; + } + + @Override + public Builder setDefaultKmsKeyName(String defaultKmsKeyName) { + infoBuilder.setDefaultKmsKeyName(defaultKmsKeyName); + return this; + } + + @Override + public Builder 
setDefaultEventBasedHold(Boolean defaultEventBasedHold) { + infoBuilder.setDefaultEventBasedHold(defaultEventBasedHold); + return this; + } + + /** + * @deprecated {@link #setRetentionEffectiveTimeOffsetDateTime(OffsetDateTime)} + */ + @Override + @Deprecated + Builder setRetentionEffectiveTime(Long retentionEffectiveTime) { + infoBuilder.setRetentionEffectiveTime(retentionEffectiveTime); + return this; + } + + @Override + BucketInfo.Builder setRetentionEffectiveTimeOffsetDateTime( + OffsetDateTime retentionEffectiveTime) { + infoBuilder.setRetentionEffectiveTimeOffsetDateTime(retentionEffectiveTime); + return this; + } + + @Override + Builder setRetentionPolicyIsLocked(Boolean retentionIsLocked) { + infoBuilder.setRetentionPolicyIsLocked(retentionIsLocked); + return this; + } + + /** + * @deprecated Use {@link #setRetentionPeriodDuration(Duration)} + */ + @Override + @Deprecated + public Builder setRetentionPeriod(Long retentionPeriod) { + infoBuilder.setRetentionPeriod(retentionPeriod); + return this; + } + + @Override + public BucketInfo.Builder setRetentionPeriodDuration(Duration retentionPeriod) { + infoBuilder.setRetentionPeriodDuration(retentionPeriod); + return this; + } + + @Override + public Builder setIamConfiguration(IamConfiguration iamConfiguration) { + infoBuilder.setIamConfiguration(iamConfiguration); + return this; + } + + @Override + public Builder setAutoclass(Autoclass autoclass) { + infoBuilder.setAutoclass(autoclass); + return this; + } + + @Override + public Builder setLogging(Logging logging) { + infoBuilder.setLogging(logging); + return this; + } + + @Override + Builder setLocationType(String locationType) { + infoBuilder.setLocationType(locationType); + return this; + } + + @Override + public Builder setCustomPlacementConfig(CustomPlacementConfig customPlacementConfig) { + infoBuilder.setCustomPlacementConfig(customPlacementConfig); + return this; + } + + @Override + Builder setObjectRetention(ObjectRetention objectRetention) { + 
infoBuilder.setObjectRetention(objectRetention); + return this; + } + + @Override + public Builder setSoftDeletePolicy(SoftDeletePolicy softDeletePolicy) { + infoBuilder.setSoftDeletePolicy(softDeletePolicy); + return this; + } + + @Override + public Builder setHierarchicalNamespace(HierarchicalNamespace hierarchicalNamespace) { + infoBuilder.setHierarchicalNamespace(hierarchicalNamespace); + return this; + } + + @Override + public Builder setIpFilter(IpFilter ipFilter) { + infoBuilder.setIpFilter(ipFilter); + return this; + } + + @Override + public Builder setGoogleManagedEncryptionEnforcementConfig( + GoogleManagedEncryptionEnforcementConfig googleManagedEncryptionEnforcementConfig) { + infoBuilder.setGoogleManagedEncryptionEnforcementConfig( + googleManagedEncryptionEnforcementConfig); + return this; + } + + @Override + public Builder setCustomerManagedEncryptionEnforcementConfig( + CustomerManagedEncryptionEnforcementConfig customerManagedEncryptionEnforcementConfig) { + infoBuilder.setCustomerManagedEncryptionEnforcementConfig( + customerManagedEncryptionEnforcementConfig); + return this; + } + + @Override + public Builder setCustomerSuppliedEncryptionEnforcementConfig( + CustomerSuppliedEncryptionEnforcementConfig customerSuppliedEncryptionEnforcementConfig) { + infoBuilder.setCustomerSuppliedEncryptionEnforcementConfig( + customerSuppliedEncryptionEnforcementConfig); + return this; + } + + @Override + public Builder setIsUnreachable(Boolean isUnreachable) { + infoBuilder.setIsUnreachable(isUnreachable); + return this; + } + + @Override + public Bucket build() { + return new Bucket(storage, infoBuilder); + } + + @Override + Builder clearGeneratedId() { + infoBuilder.clearGeneratedId(); + return this; + } + + @Override + Builder clearProject() { + infoBuilder.clearProject(); + return this; + } + + @Override + Builder clearName() { + infoBuilder.clearName(); + return this; + } + + @Override + Builder clearOwner() { + infoBuilder.clearOwner(); + return this; + } 
+ + @Override + Builder clearSelfLink() { + infoBuilder.clearSelfLink(); + return this; + } + + @Override + Builder clearRequesterPays() { + infoBuilder.clearRequesterPays(); + return this; + } + + @Override + Builder clearVersioningEnabled() { + infoBuilder.clearVersioningEnabled(); + return this; + } + + @Override + Builder clearIndexPage() { + infoBuilder.clearIndexPage(); + return this; + } + + @Override + Builder clearNotFoundPage() { + infoBuilder.clearNotFoundPage(); + return this; + } + + @Override + Builder clearLifecycleRules() { + infoBuilder.clearLifecycleRules(); + return this; + } + + @Override + Builder clearRpo() { + infoBuilder.clearRpo(); + return this; + } + + @Override + Builder clearStorageClass() { + infoBuilder.clearStorageClass(); + return this; + } + + @Override + Builder clearLocation() { + infoBuilder.clearLocation(); + return this; + } + + @Override + Builder clearEtag() { + infoBuilder.clearEtag(); + return this; + } + + @Override + Builder clearCreateTime() { + infoBuilder.clearCreateTime(); + return this; + } + + @Override + Builder clearUpdateTime() { + infoBuilder.clearUpdateTime(); + return this; + } + + @Override + Builder clearMetageneration() { + infoBuilder.clearMetageneration(); + return this; + } + + @Override + Builder clearCors() { + infoBuilder.clearCors(); + return this; + } + + @Override + Builder clearAcl() { + infoBuilder.clearAcl(); + return this; + } + + @Override + Builder clearDefaultAcl() { + infoBuilder.clearDefaultAcl(); + return this; + } + + @Override + Builder clearLabels() { + infoBuilder.clearLabels(); + return this; + } + + @Override + Builder clearDefaultKmsKeyName() { + infoBuilder.clearDefaultKmsKeyName(); + return this; + } + + @Override + Builder clearDefaultEventBasedHold() { + infoBuilder.clearDefaultEventBasedHold(); + return this; + } + + @Override + Builder clearRetentionEffectiveTime() { + infoBuilder.clearRetentionEffectiveTime(); + return this; + } + + @Override + Builder 
clearRetentionPolicyIsLocked() { + infoBuilder.clearRetentionPolicyIsLocked(); + return this; + } + + @Override + Builder clearRetentionPeriod() { + infoBuilder.clearRetentionPeriod(); + return this; + } + + @Override + Builder clearIamConfiguration() { + infoBuilder.clearIamConfiguration(); + return this; + } + + @Override + Builder clearLocationType() { + infoBuilder.clearLocationType(); + return this; + } + + @Override + Builder clearLogging() { + infoBuilder.clearLogging(); + return this; + } + + @Override + Builder clearCustomPlacementConfig() { + infoBuilder.clearCustomPlacementConfig(); + return this; + } + + @Override + Builder clearIpFilter() { + infoBuilder.clearIpFilter(); + return this; + } + + @Override + public Builder clearGoogleManagedEncryptionEnforcementConfig() { + infoBuilder.clearGoogleManagedEncryptionEnforcementConfig(); + return this; + } + + @Override + public Builder clearCustomerManagedEncryptionEnforcementConfig() { + infoBuilder.clearCustomerManagedEncryptionEnforcementConfig(); + return this; + } + + @Override + public Builder clearCustomerSuppliedEncryptionEnforcementConfig() { + infoBuilder.clearCustomerSuppliedEncryptionEnforcementConfig(); + return this; + } + + @Override + Builder clearIsUnreachable() { + infoBuilder.clearIsUnreachable(); + return this; + } + } + + Bucket(Storage storage, BucketInfo.BuilderImpl infoBuilder) { + super(infoBuilder); + this.storage = checkNotNull(storage); + this.options = storage.getOptions(); + } + + /** + * Checks if this bucket exists. + * + *

Example of checking if the bucket exists. + * + *

{@code
+   * boolean exists = bucket.exists();
+   * if (exists) {
+   *   // the bucket exists
+   * } else {
+   *   // the bucket was not found
+   * }
+   * }
+ * + * @return true if this bucket exists, false otherwise + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public boolean exists(BucketSourceOption... options) { + int length = options.length; + // Don't use static imports of BlobSourceOption, it causes import resolution issues + // with the new UnifiedOpts shim interfaces + Storage.BucketGetOption[] getOptions = + Arrays.copyOf(BucketSourceOption.toGetOptions(this, options), length + 1); + getOptions[length] = Storage.BucketGetOption.fields(); + return storage.get(getName(), getOptions) != null; + } + + /** + * Fetches current bucket's latest information. Returns {@code null} if the bucket does not exist. + * + *

Example of getting the bucket's latest information, if its generation does not match the + * {@link Bucket#getMetageneration()} value, otherwise a {@link StorageException} is thrown. + * + *

{@code
+   * Bucket latestBucket = bucket.reload(BucketSourceOption.metagenerationMatch());
+   * if (latestBucket == null) {
+   *   // the bucket was not found
+   * }
+   * }
+ * + * @param options bucket read options + * @return a {@code Bucket} object with latest information or {@code null} if not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Bucket reload(BucketSourceOption... options) { + // Don't use static imports of BlobSourceOption, it causes import resolution issues + // with the new UnifiedOpts shim interfaces + return storage.get(getName(), BucketSourceOption.toGetOptions(this, options)); + } + + /** + * Updates the bucket's information. Bucket's name cannot be changed. A new {@code Bucket} object + * is returned. By default no checks are made on the metadata generation of the current bucket. If + * you want to update the information only if the current bucket metadata are at their latest + * version use the {@code metagenerationMatch} option: {@code + * bucket.update(BucketTargetOption.metagenerationMatch())} + * + *

Example of updating the bucket's information. + * + *

{@code
+   * Bucket updatedBucket = bucket.toBuilder().setVersioningEnabled(true).build().update();
+   * }
+ * + * @param options update options + * @return a {@code Bucket} object with updated information + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Bucket update(BucketTargetOption... options) { + return storage.update(this, options); + } + + /** + * Deletes this bucket. + * + *

Example of deleting the bucket, if its metageneration matches the {@link + * Bucket#getMetageneration()} value, otherwise a {@link StorageException} is thrown. + * + *

{@code
+   * boolean deleted = bucket.delete(BucketSourceOption.metagenerationMatch());
+   * if (deleted) {
+   *   // the bucket was deleted
+   * } else {
+   *   // the bucket was not found
+   * }
+   * }
+ * + * @param options bucket delete options + * @return {@code true} if bucket was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public boolean delete(BucketSourceOption... options) { + // Don't use static imports of BlobSourceOption, it causes import resolution issues + // with the new UnifiedOpts shim interfaces + return storage.delete(getName(), BucketSourceOption.toSourceOptions(this, options)); + } + + /** + * Returns the paginated list of {@code Blob} in this bucket. + * + *

Example of listing the blobs in the bucket. + * + *

{@code
+   * Page blobs = bucket.list();
+   * Iterator blobIterator = blobs.iterateAll();
+   * while (blobIterator.hasNext()) {
+   *   Blob blob = blobIterator.next();
+   *   // do something with the blob
+   * }
+   * }
+ * + * @param options options for listing blobs + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Page list(BlobListOption... options) { + return storage.list(getName(), options); + } + + /** + * Returns the requested blob in this bucket or {@code null} if not found. + * + *

Example of getting a blob in the bucket, only if its metageneration matches a value, + * otherwise a {@link StorageException} is thrown. + * + *

{@code
+   * String blobName = "my_blob_name";
+   * long generation = 42;
+   * Blob blob = bucket.get(blobName, BlobGetOption.generationMatch(generation));
+   * }
+ * + * @param blob name of the requested blob + * @param options blob search options + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Blob get(String blob, BlobGetOption... options) { + return storage.get(BlobId.of(getName(), blob), options); + } + + /** + * Returns the requested blob in this bucket of a specific generation or {@code null} if not + * found. + * + *

Example of getting a blob of a specific generation in the bucket. + * + *

{@code
+   * String blobName = "my_blob_name";
+   * long generation = 42;
+   * Blob blob = bucket.get(blobName, generation);
+   * }
+ * + * @param blob name of the requested blob + * @param generation the generation to get + * @param options blob search options + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Blob get(String blob, Long generation, BlobGetOption... options) { + return storage.get(BlobId.of(getName(), blob, generation), options); + } + + /** + * Returns a list of requested blobs in this bucket. Blobs that do not exist are null. + * + *

Example of getting some blobs in the bucket, using a batch request. + * + *

{@code
+   * String blobName1 = "my_blob_name1";
+   * String blobName2 = "my_blob_name2";
+   * List blobs = bucket.get(blobName1, blobName2);
+   * for (Blob blob : blobs) {
+   *   if (blob == null) {
+   *     // the blob was not found
+   *   }
+   * }
+   * }
+ * + * @param blobName1 first blob to get + * @param blobName2 second blob to get + * @param blobNames other blobs to get + * @return an immutable list of {@code Blob} objects + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + public List get(String blobName1, String blobName2, String... blobNames) { + List blobIds = Lists.newArrayListWithCapacity(blobNames.length + 2); + blobIds.add(BlobId.of(getName(), blobName1)); + blobIds.add(BlobId.of(getName(), blobName2)); + for (String blobName : blobNames) { + blobIds.add(BlobId.of(getName(), blobName)); + } + return storage.get(blobIds); + } + + /** + * Returns a list of requested blobs in this bucket. Blobs that do not exist are null. + * + *

Example of getting some blobs in the bucket, using a batch request. + * + *

{@code
+   * String blobName1 = "my_blob_name1";
+   * String blobName2 = "my_blob_name2";
+   * List blobNames = new LinkedList<>();
+   * blobNames.add(blobName1);
+   * blobNames.add(blobName2);
+   * List blobs = bucket.get(blobNames);
+   * for (Blob blob : blobs) {
+   *   if (blob == null) {
+   *     // the blob was not found
+   *   }
+   * }
+   * }
+ * + * @param blobNames blobs to get + * @return an immutable list of {@code Blob} objects + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + public List get(Iterable blobNames) { + ImmutableList.Builder builder = ImmutableList.builder(); + for (String blobName : blobNames) { + builder.add(BlobId.of(getName(), blobName)); + } + return storage.get(builder.build()); + } + + /** + * Creates a new blob in this bucket. Direct upload is used to upload {@code content}. For large + * content, {@link Blob#writer(com.google.cloud.storage.Storage.BlobWriteOption...)} is + * recommended as it uses resumable upload. MD5 and CRC32C hashes of {@code content} are computed + * and used for validating transferred data. + * + *

Example of creating a blob in the bucket from a byte array with a content type. + * + *

{@code
+   * String blobName = "my_blob_name";
+   * Blob blob = bucket.create(blobName, "Hello, World!".getBytes(UTF_8), "text/plain");
+   * }
+ * + * @param blob a blob name + * @param content the blob content + * @param contentType the blob content type + * @param options options for blob creation + * @return a complete blob information + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Blob create(String blob, byte[] content, String contentType, BlobTargetOption... options) { + BlobInfo blobInfo = + BlobInfo.newBuilder(BlobId.of(getName(), blob)).setContentType(contentType).build(); + Storage.BlobTargetOption[] targetOptions = BlobTargetOption.toTargetOptions(blobInfo, options); + return storage.create(blobInfo, content, targetOptions); + } + + /** + * Creates a new blob in this bucket. Direct upload is used to upload {@code content}. For large + * content, {@link Blob#writer(com.google.cloud.storage.Storage.BlobWriteOption...)} is + * recommended as it uses resumable upload. + * + *

Example of creating a blob in the bucket from an input stream with a content type. + * + *

{@code
+   * String blobName = "my_blob_name";
+   * InputStream content = new ByteArrayInputStream("Hello, World!".getBytes(UTF_8));
+   * Blob blob = bucket.create(blobName, content, "text/plain");
+   * }
+ * + * @param blob a blob name + * @param content the blob content as a stream + * @param contentType the blob content type + * @param options options for blob creation + * @return a complete blob information + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Blob create( + String blob, InputStream content, String contentType, BlobWriteOption... options) { + BlobInfo blobInfo = + BlobInfo.newBuilder(BlobId.of(getName(), blob)).setContentType(contentType).build(); + Storage.BlobWriteOption[] writeOptions = BlobWriteOption.toWriteOptions(blobInfo, options); + return storage.create(blobInfo, content, writeOptions); + } + + /** + * Creates a new blob in this bucket. Direct upload is used to upload {@code content}. For large + * content, {@link Blob#writer(com.google.cloud.storage.Storage.BlobWriteOption...)} is + * recommended as it uses resumable upload. MD5 and CRC32C hashes of {@code content} are computed + * and used for validating transferred data. + * + *

Example of creating a blob in the bucket from a byte array. + * + *

{@code
+   * String blobName = "my_blob_name";
+   * Blob blob = bucket.create(blobName, "Hello, World!".getBytes(UTF_8));
+   * }
+ * + * @param blob a blob name + * @param content the blob content + * @param options options for blob creation + * @return a complete blob information + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Blob create(String blob, byte[] content, BlobTargetOption... options) { + BlobInfo blobInfo = BlobInfo.newBuilder(BlobId.of(getName(), blob)).build(); + Storage.BlobTargetOption[] targetOptions = BlobTargetOption.toTargetOptions(blobInfo, options); + return storage.create(blobInfo, content, targetOptions); + } + + /** + * Creates a new blob in this bucket. Direct upload is used to upload {@code content}. For large + * content, {@link Blob#writer(com.google.cloud.storage.Storage.BlobWriteOption...)} is + * recommended as it uses resumable upload. + * + *

Example of creating a blob in the bucket from an input stream. + * + *

{@code
+   * String blobName = "my_blob_name";
+   * InputStream content = new ByteArrayInputStream("Hello, World!".getBytes(UTF_8));
+   * Blob blob = bucket.create(blobName, content);
+   * }
+ * + * @param blob a blob name + * @param content the blob content as a stream + * @param options options for blob creation + * @return a complete blob information + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Blob create(String blob, InputStream content, BlobWriteOption... options) { + BlobInfo blobInfo = BlobInfo.newBuilder(BlobId.of(getName(), blob)).build(); + Storage.BlobWriteOption[] write = BlobWriteOption.toWriteOptions(blobInfo, options); + return storage.create(blobInfo, content, write); + } + + /** + * Returns the ACL entry for the specified entity on this bucket or {@code null} if not found. + * + *

Example of getting the ACL entry for an entity. + * + *

{@code
+   * Acl acl = bucket.getAcl(User.ofAllAuthenticatedUsers());
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Acl getAcl(Entity entity) { + return storage.getAcl(getName(), entity); + } + + /** + * Deletes the ACL entry for the specified entity on this bucket. + * + *

Example of deleting the ACL entry for an entity. + * + *

{@code
+   * boolean deleted = bucket.deleteAcl(User.ofAllAuthenticatedUsers());
+   * if (deleted) {
+   *   // the acl entry was deleted
+   * } else {
+   *   // the acl entry was not found
+   * }
+   * }
+ * + * @return {@code true} if the ACL was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public boolean deleteAcl(Entity entity) { + return storage.deleteAcl(getName(), entity); + } + + /** + * Creates a new ACL entry on this bucket. + * + *

Example of creating a new ACL entry. + * + *

{@code
+   * Acl acl = bucket.createAcl(Acl.of(User.ofAllAuthenticatedUsers(), Acl.Role.READER));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Acl createAcl(Acl acl) { + return storage.createAcl(getName(), acl); + } + + /** + * Updates an ACL entry on this bucket. + * + *

Example of updating an ACL entry. + * + *

{@code
+   * Acl acl = bucket.updateAcl(Acl.of(User.ofAllAuthenticatedUsers(), Acl.Role.OWNER));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Acl updateAcl(Acl acl) { + return storage.updateAcl(getName(), acl); + } + + /** + * Lists the ACL entries for this bucket. + * + *

Example of listing the ACL entries. + * + *

{@code
+   * List acls = bucket.listAcls();
+   * for (Acl acl : acls) {
+   *   // do something with ACL entry
+   * }
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public List listAcls() { + return storage.listAcls(getName()); + } + + /** + * Returns the default object ACL entry for the specified entity on this bucket or {@code null} if + * not found. + * + *

Default ACLs are applied to a new blob within the bucket when no ACL was provided for that + * blob. + * + *

Example of getting the default ACL entry for an entity. + * + *

{@code
+   * Acl acl = bucket.getDefaultAcl(User.ofAllAuthenticatedUsers());
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Acl getDefaultAcl(Entity entity) { + return storage.getDefaultAcl(getName(), entity); + } + + /** + * Deletes the default object ACL entry for the specified entity on this bucket. + * + *

Default ACLs are applied to a new blob within the bucket when no ACL was provided for that + * blob. + * + *

Example of deleting the default ACL entry for an entity. + * + *

{@code
+   * boolean deleted = bucket.deleteDefaultAcl(User.ofAllAuthenticatedUsers());
+   * if (deleted) {
+   *   // the acl entry was deleted
+   * } else {
+   *   // the acl entry was not found
+   * }
+   * }
+ * + * @return {@code true} if the ACL was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public boolean deleteDefaultAcl(Entity entity) { + return storage.deleteDefaultAcl(getName(), entity); + } + + /** + * Creates a new default blob ACL entry on this bucket. + * + *

Default ACLs are applied to a new blob within the bucket when no ACL was provided for that + * blob. + * + *

Example of creating a new default ACL entry. + * + *

{@code
+   * Acl acl = bucket.createDefaultAcl(Acl.of(User.ofAllAuthenticatedUsers(), Acl.Role.READER));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Acl createDefaultAcl(Acl acl) { + return storage.createDefaultAcl(getName(), acl); + } + + /** + * Updates a default blob ACL entry on this bucket. + * + *

Default ACLs are applied to a new blob within the bucket when no ACL was provided for that + * blob. + * + *

Example of updating a default ACL entry. + * + *

{@code
+   * Acl acl = bucket.updateDefaultAcl(Acl.of(User.ofAllAuthenticatedUsers(), Acl.Role.OWNER));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Acl updateDefaultAcl(Acl acl) { + return storage.updateDefaultAcl(getName(), acl); + } + + /** + * Lists the default blob ACL entries for this bucket. + * + *

Default ACLs are applied to a new blob within the bucket when no ACL was provided for that + * blob. + * + *

Example of listing the default ACL entries. + * + *

{@code
+   * List acls = bucket.listDefaultAcls();
+   * for (Acl acl : acls) {
+   *   // do something with ACL entry
+   * }
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public List listDefaultAcls() { + return storage.listDefaultAcls(getName()); + } + + /** + * Locks bucket retention policy. Requires a local metageneration value in the request. Review + * example below. + * + *

Accepts an optional userProject {@link BucketTargetOption} option which defines the project + * id to assign operational costs. + * + *

Warning: Once a retention policy is locked, it can't be unlocked, removed, or shortened. + * + *

Example of locking a retention policy on a bucket, only if its local metageneration value + * matches the bucket's service metageneration otherwise a {@link StorageException} is thrown. + * + *

{@code
+   * String bucketName = "my_unique_bucket";
+   * Bucket bucket = storage.get(bucketName, BucketGetOption.fields(BucketField.METAGENERATION));
+   * storage.lockRetentionPolicy(bucket, BucketTargetOption.metagenerationMatch());
+   * }
+ * + * @return a {@code Bucket} object of the locked bucket + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public Bucket lockRetentionPolicy(BucketTargetOption... options) { + return storage.lockRetentionPolicy(this, options); + } + + /** Returns the bucket's {@code Storage} object used to issue requests. */ + public Storage getStorage() { + return storage; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public final boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || !obj.getClass().equals(Bucket.class)) { + return false; + } + Bucket other = (Bucket) obj; + return super.equals(other) && Objects.equals(options, other.options); + } + + @Override + public final int hashCode() { + return Objects.hash(super.hashCode(), options); + } + + /** + * Drop the held {@link Storage} instance. + * + * @since 2.14.0 + */ + public BucketInfo asBucketInfo() { + return this.toBuilder().infoBuilder.build(); + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + this.storage = options.getService(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BucketInfo.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BucketInfo.java new file mode 100644 index 000000000000..e6ec27cca423 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BucketInfo.java @@ -0,0 +1,4050 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.BackwardCompatibilityUtils.millisOffsetDateTimeCodec; +import static com.google.cloud.storage.BackwardCompatibilityUtils.millisUtcCodec; +import static com.google.cloud.storage.BackwardCompatibilityUtils.nullableDurationSecondsCodec; +import static com.google.cloud.storage.Utils.diffMaps; +import static com.google.common.base.MoreObjects.firstNonNull; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.collect.Lists.newArrayList; + +import com.google.api.client.json.jackson2.JacksonFactory; +import com.google.api.client.util.Data; +import com.google.api.client.util.DateTime; +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.services.storage.model.Bucket.Lifecycle.Rule; +import com.google.cloud.StringEnumType; +import com.google.cloud.StringEnumValue; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.BlobInfo.ImmutableEmptyMap; +import com.google.cloud.storage.Storage.BucketField; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.UnifiedOpts.NamedField; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Streams; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.math.BigInteger; 
+import java.time.Duration; +import java.time.OffsetDateTime; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Predicate; +import java.util.logging.Logger; +import java.util.stream.Stream; +import javax.annotation.concurrent.Immutable; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * Google Storage bucket metadata; + * + * @see Concepts and + * Terminology + */ +@TransportCompatibility({Transport.HTTP, Transport.GRPC}) +public class BucketInfo implements Serializable { + + // this class reference (LifecycleRule.DeleteLifecycleAction) must be long form. + // if it is not long form, and instead an import it creates a cycle of serializable classes + // which breaks the compiler. + // + // The error message looks like the following: + // java: cannot find symbol + // symbol: class Serializable + // location: class com.google.cloud.storage.BucketInfo + private static final Predicate IS_DELETE_LIFECYCLE_RULE = + r -> r.getAction().getActionType().equals(LifecycleRule.DeleteLifecycleAction.TYPE); + + private static final long serialVersionUID = 4793572058456298945L; + private final String generatedId; + private final BigInteger project; + private final String name; + private final Acl.Entity owner; + private final String selfLink; + private final Boolean requesterPays; + private final Boolean versioningEnabled; + private final String indexPage; + private final String notFoundPage; + + /** + * The getter for this property never returns null, however null awareness is critical for + * encoding to properly determine how to process rules conversion. 
+ * + * @see JsonConversions#bucketInfo() encoder + */ + final List lifecycleRules; + + private final String etag; + private final OffsetDateTime createTime; + private final OffsetDateTime updateTime; + private final Long metageneration; + private final List cors; + private final List acl; + private final List defaultAcl; + private final String location; + private final Rpo rpo; + private final StorageClass storageClass; + @Nullable private final Map labels; + private final String defaultKmsKeyName; + private final Boolean defaultEventBasedHold; + private final OffsetDateTime retentionEffectiveTime; + private final Boolean retentionPolicyIsLocked; + private final Duration retentionPeriod; + private final IamConfiguration iamConfiguration; + private final Autoclass autoclass; + private final String locationType; + private final Logging logging; + private final CustomPlacementConfig customPlacementConfig; + private final ObjectRetention objectRetention; + private final HierarchicalNamespace hierarchicalNamespace; + + private final SoftDeletePolicy softDeletePolicy; + private final @Nullable IpFilter ipFilter; + private final @Nullable GoogleManagedEncryptionEnforcementConfig + googleManagedEncryptionEnforcementConfig; + private final @Nullable CustomerManagedEncryptionEnforcementConfig + customerManagedEncryptionEnforcementConfig; + private final @Nullable CustomerSuppliedEncryptionEnforcementConfig + customerSuppliedEncryptionEnforcementConfig; + private final Boolean isUnreachable; + + private final transient ImmutableSet modifiedFields; + + /** + * non-private for backward compatibility on message class. log messages are now emitted from + * + * @see JsonConversions#lifecycleRule() + */ + static final Logger log = Logger.getLogger(BucketInfo.class.getName()); + + /** + * Public Access Prevention enum with expected values. 
+ * + * @see public-access-prevention + */ + public enum PublicAccessPrevention { + ENFORCED("enforced"), + /** + * Default value for Public Access Prevention + * + * @deprecated use {@link #INHERITED} + */ + @Deprecated + UNSPECIFIED("inherited"), + /** + * If the api returns a value that isn't defined in {@link PublicAccessPrevention} this value + * will be returned. + */ + UNKNOWN(null), + INHERITED("inherited"); + + private final String value; + + PublicAccessPrevention(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + public static PublicAccessPrevention parse(String value) { + String upper = value.toUpperCase(); + switch (upper) { + case "ENFORCED": + return ENFORCED; + case "UNSPECIFIED": + case "INHERITED": + return INHERITED; + default: + return UNKNOWN; + } + } + } + + /** + * The Bucket's IAM Configuration. + * + * @see uniform + * bucket-level access + * @see public-access-prevention + */ + public static class IamConfiguration implements Serializable { + private static final long serialVersionUID = -7564209362829587435L; + + private final Boolean isUniformBucketLevelAccessEnabled; + private final OffsetDateTime uniformBucketLevelAccessLockedTime; + private final PublicAccessPrevention publicAccessPrevention; + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof IamConfiguration)) { + return false; + } + IamConfiguration that = (IamConfiguration) o; + return Objects.equals( + isUniformBucketLevelAccessEnabled, that.isUniformBucketLevelAccessEnabled) + && Objects.equals( + uniformBucketLevelAccessLockedTime, that.uniformBucketLevelAccessLockedTime) + && publicAccessPrevention == that.publicAccessPrevention; + } + + @Override + public int hashCode() { + return Objects.hash( + isUniformBucketLevelAccessEnabled, + uniformBucketLevelAccessLockedTime, + publicAccessPrevention); + } + + private IamConfiguration(Builder builder) { + 
this.isUniformBucketLevelAccessEnabled = builder.isUniformBucketLevelAccessEnabled; + this.uniformBucketLevelAccessLockedTime = builder.uniformBucketLevelAccessLockedTime; + this.publicAccessPrevention = builder.publicAccessPrevention; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + Builder builder = new Builder(); + builder.isUniformBucketLevelAccessEnabled = isUniformBucketLevelAccessEnabled; + builder.uniformBucketLevelAccessLockedTime = uniformBucketLevelAccessLockedTime; + builder.publicAccessPrevention = publicAccessPrevention; + return builder; + } + + /** Deprecated in favor of isUniformBucketLevelAccessEnabled(). */ + @Deprecated + public Boolean isBucketPolicyOnlyEnabled() { + return isUniformBucketLevelAccessEnabled; + } + + /** Deprecated in favor of uniformBucketLevelAccessLockedTime(). */ + @Deprecated + public Long getBucketPolicyOnlyLockedTime() { + return getUniformBucketLevelAccessLockedTime(); + } + + public Boolean isUniformBucketLevelAccessEnabled() { + return isUniformBucketLevelAccessEnabled; + } + + /** + * @deprecated {@link #getUniformBucketLevelAccessLockedTimeOffsetDateTime()} + */ + @Deprecated + public Long getUniformBucketLevelAccessLockedTime() { + return millisOffsetDateTimeCodec.decode(uniformBucketLevelAccessLockedTime); + } + + public OffsetDateTime getUniformBucketLevelAccessLockedTimeOffsetDateTime() { + return uniformBucketLevelAccessLockedTime; + } + + /** Returns the Public Access Prevention. * */ + public PublicAccessPrevention getPublicAccessPrevention() { + return publicAccessPrevention; + } + + /** Builder for {@code IamConfiguration} */ + public static class Builder { + private Boolean isUniformBucketLevelAccessEnabled; + private OffsetDateTime uniformBucketLevelAccessLockedTime; + private PublicAccessPrevention publicAccessPrevention; + + /** Deprecated in favor of setIsUniformBucketLevelAccessEnabled(). 
*/ + @Deprecated + public Builder setIsBucketPolicyOnlyEnabled(Boolean isBucketPolicyOnlyEnabled) { + this.isUniformBucketLevelAccessEnabled = isBucketPolicyOnlyEnabled; + return this; + } + + /** + * @deprecated in favor of {@link #setUniformBucketLevelAccessLockedTime(Long)}. + */ + @Deprecated + Builder setBucketPolicyOnlyLockedTime(Long bucketPolicyOnlyLockedTime) { + return setUniformBucketLevelAccessLockedTime(bucketPolicyOnlyLockedTime); + } + + /** + * Sets whether uniform bucket-level access is enabled for this bucket. When this is enabled, + * access to the bucket will be configured through IAM, and legacy ACL policies will not work. + * When this is first enabled, {@code uniformBucketLevelAccess.lockedTime} will be set by the + * API automatically. This field can then be disabled until the time specified, after which it + * will become immutable and calls to change it will fail. If this is enabled, calls to access + * legacy ACL information will fail. + */ + public Builder setIsUniformBucketLevelAccessEnabled( + Boolean isUniformBucketLevelAccessEnabled) { + this.isUniformBucketLevelAccessEnabled = isUniformBucketLevelAccessEnabled; + return this; + } + + /** + * Sets the deadline for switching {@code uniformBucketLevelAccess.enabled} back to false. + * After this time passes, calls to do so will fail. This is package-private, since in general + * this field should never be set by a user--it's automatically set by the backend when {@code + * enabled} is set to true. + * + * @deprecated {@link #setUniformBucketLevelAccessLockedTimeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + Builder setUniformBucketLevelAccessLockedTime(Long uniformBucketLevelAccessLockedTime) { + return setUniformBucketLevelAccessLockedTimeOffsetDateTime( + millisOffsetDateTimeCodec.encode(uniformBucketLevelAccessLockedTime)); + } + + /** + * Sets the deadline for switching {@code uniformBucketLevelAccess.enabled} back to false. 
+ * After this time passes, calls to do so will fail. This is package-private, since in general + * this field should never be set by a user--it's automatically set by the backend when {@code + * enabled} is set to true. + */ + Builder setUniformBucketLevelAccessLockedTimeOffsetDateTime( + OffsetDateTime uniformBucketLevelAccessLockedTime) { + this.uniformBucketLevelAccessLockedTime = uniformBucketLevelAccessLockedTime; + return this; + } + + /** + * Sets the bucket's Public Access Prevention configuration. Currently supported options are + * {@link PublicAccessPrevention#INHERITED} or {@link PublicAccessPrevention#ENFORCED} + * + * @see public-access-prevention + */ + public Builder setPublicAccessPrevention(PublicAccessPrevention publicAccessPrevention) { + this.publicAccessPrevention = publicAccessPrevention; + return this; + } + + /** Builds an {@code IamConfiguration} object */ + public IamConfiguration build() { + return new IamConfiguration(this); + } + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("isUniformBucketLevelAccessEnabled", isUniformBucketLevelAccessEnabled) + .add("uniformBucketLevelAccessLockedTime", uniformBucketLevelAccessLockedTime) + .add("publicAccessPrevention", publicAccessPrevention) + .toString(); + } + } + + /** + * The bucket's soft delete policy. 
If this policy is set, any deleted objects will be + * soft-deleted according to the time specified in the policy + */ + public static class SoftDeletePolicy implements Serializable { + + private static final long serialVersionUID = -8100190443052242908L; + private Duration retentionDuration; + private OffsetDateTime effectiveTime; + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof SoftDeletePolicy)) { + return false; + } + SoftDeletePolicy that = (SoftDeletePolicy) o; + return Objects.equals(retentionDuration, that.retentionDuration) + && Objects.equals(effectiveTime, that.effectiveTime); + } + + @Override + public int hashCode() { + return Objects.hash(retentionDuration, effectiveTime); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("retentionDuration", retentionDuration) + .add("effectiveTime", effectiveTime) + .toString(); + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder().setRetentionDuration(retentionDuration).setEffectiveTime(effectiveTime); + } + + private SoftDeletePolicy() {} + + public SoftDeletePolicy(Builder builder) { + this.retentionDuration = builder.retentionDuration; + this.effectiveTime = builder.effectiveTime; + } + + public Duration getRetentionDuration() { + return retentionDuration; + } + + public OffsetDateTime getEffectiveTime() { + return effectiveTime; + } + + public static final class Builder { + private Duration retentionDuration; + private OffsetDateTime effectiveTime; + + /** Sets the length of time to retain soft-deleted objects for, expressed as a Duration */ + public Builder setRetentionDuration(Duration retentionDuration) { + this.retentionDuration = retentionDuration; + return this; + } + + /** + * Sets the time from which this soft-delete policy is effective. This is package-private + * because it can only be set by the backend. 
+ */ + Builder setEffectiveTime(OffsetDateTime effectiveTime) { + this.effectiveTime = effectiveTime; + return this; + } + + public SoftDeletePolicy build() { + return new SoftDeletePolicy(this); + } + } + } + + /** + * Configuration for the Autoclass settings of a bucket. + * + * @see https://cloud.google.com/storage/docs/autoclass + */ + public static final class Autoclass implements Serializable { + + private static final long serialVersionUID = -2378172222188072439L; + private final Boolean enabled; + private final OffsetDateTime toggleTime; + private final StorageClass terminalStorageClass; + private final OffsetDateTime terminalStorageClassUpdateTime; + + private Autoclass(Builder builder) { + this.enabled = builder.enabled; + this.toggleTime = builder.toggleTime; + this.terminalStorageClass = builder.terminalStorageClass; + this.terminalStorageClassUpdateTime = builder.terminalStorageClassUpdateTime; + } + + public Boolean getEnabled() { + return enabled; + } + + public OffsetDateTime getToggleTime() { + return toggleTime; + } + + public StorageClass getTerminalStorageClass() { + return terminalStorageClass; + } + + public OffsetDateTime getTerminalStorageClassUpdateTime() { + return terminalStorageClassUpdateTime; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Autoclass)) { + return false; + } + Autoclass autoclass = (Autoclass) o; + return Objects.equals(enabled, autoclass.enabled) + && Objects.equals(toggleTime, autoclass.toggleTime) + && Objects.equals(terminalStorageClass, autoclass.terminalStorageClass) + && Objects.equals( + terminalStorageClassUpdateTime, autoclass.terminalStorageClassUpdateTime); + } + + @Override + public int hashCode() { + return Objects.hash( + enabled, toggleTime, terminalStorageClass, terminalStorageClassUpdateTime); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("enabled", enabled) + .add("toggleTime", toggleTime) 
+ .add("terminalStorageClass", terminalStorageClass) + .add("terminalStorageClassUpdateTime", terminalStorageClassUpdateTime) + .toString(); + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return newBuilder() + .setEnabled(enabled) + .setToggleTime(toggleTime) + .setTerminalStorageClass(terminalStorageClass) + .setTerminalStorageClassUpdateTime(terminalStorageClassUpdateTime); + } + + public static final class Builder { + private Boolean enabled; + private OffsetDateTime toggleTime; + private StorageClass terminalStorageClass; + private OffsetDateTime terminalStorageClassUpdateTime; + + /** + * Sets whether Autoclass is enabled for this bucket. Currently, autoclass can only be enabled + * at bucket create time. Any calls to update an existing Autoclass configuration must be to + * disable it, calls to enable Autoclass on an existing bucket will fail. + */ + public Builder setEnabled(Boolean enabled) { + this.enabled = enabled; + return this; + } + + /** + * Sets the last time autoclass was toggled on or off. Set to package private because this + * should only be set by the backend. + */ + Builder setToggleTime(OffsetDateTime toggleTime) { + this.toggleTime = toggleTime; + return this; + } + + /** + * When set to {@link StorageClass#NEARLINE}, Autoclass restricts transitions between Standard + * and Nearline storage classes only. + * + *

When set to {@link StorageClass#ARCHIVE}, Autoclass allows transitions to Coldline and + * Archive as well. + * + *

Only valid values are {@code NEARLINE} and {@code ARCHIVE}. + */ + public Builder setTerminalStorageClass(StorageClass terminalStorageClass) { + this.terminalStorageClass = terminalStorageClass; + return this; + } + + /** + * The time at which Autoclass terminal storage class was last updated for this bucket. + * + *

This is auto populated when the feature is enabled. + */ + Builder setTerminalStorageClassUpdateTime(OffsetDateTime terminalStorageClassUpdateTime) { + this.terminalStorageClassUpdateTime = terminalStorageClassUpdateTime; + return this; + } + + public Autoclass build() { + return new Autoclass(this); + } + } + } + + public static final class ObjectRetention implements Serializable { + + private static final long serialVersionUID = 3948199339534287669L; + private Mode mode; + + public Mode getMode() { + return mode; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ObjectRetention)) { + return false; + } + ObjectRetention that = (ObjectRetention) o; + return Objects.equals(mode, that.mode); + } + + @Override + public int hashCode() { + return Objects.hash(mode); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("mode", mode).toString(); + } + + private ObjectRetention() {} + + private ObjectRetention(Builder builder) { + this.mode = builder.mode; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder().setMode(this.mode); + } + + public static final class Builder { + private Mode mode; + + /** Sets the object retention mode. Can be Enabled or Disabled. 
*/ + public Builder setMode(Mode mode) { + this.mode = mode; + return this; + } + + public ObjectRetention build() { + return new ObjectRetention(this); + } + } + + public static final class Mode extends StringEnumValue { + private static final long serialVersionUID = 1973143582659557184L; + + private Mode(String constant) { + super(constant); + } + + private static final ApiFunction CONSTRUCTOR = Mode::new; + + private static final StringEnumType type = + new StringEnumType<>(Mode.class, CONSTRUCTOR); + + public static final Mode ENABLED = type.createAndRegister("Enabled"); + + public static final Mode DISABLED = type.createAndRegister("Disabled"); + + public static Mode valueOfStrict(String constant) { + return type.valueOfStrict(constant); + } + + public static Mode valueOf(String constant) { + return type.valueOf(constant); + } + + public static Mode[] values() { + return type.values(); + } + } + } + + /** + * The bucket's custom placement configuration for Custom Dual Regions. If using `location` is + * also required. 
+ */ + public static class CustomPlacementConfig implements Serializable { + + private static final long serialVersionUID = 7284488308696895602L; + private List dataLocations; + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof CustomPlacementConfig)) { + return false; + } + CustomPlacementConfig that = (CustomPlacementConfig) o; + return Objects.equals(dataLocations, that.dataLocations); + } + + @Override + public int hashCode() { + return Objects.hash(dataLocations); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("dataLocations", dataLocations).toString(); + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + Builder builder = new Builder(); + builder.dataLocations = dataLocations; + return builder; + } + + public List getDataLocations() { + return dataLocations; + } + + private CustomPlacementConfig(Builder builder) { + this.dataLocations = builder.dataLocations; + } + + public static class Builder { + private List dataLocations; + + /** A list of regions for custom placement configurations. */ + public Builder setDataLocations(List dataLocations) { + this.dataLocations = dataLocations != null ? ImmutableList.copyOf(dataLocations) : null; + return this; + } + + public CustomPlacementConfig build() { + return new CustomPlacementConfig(this); + } + } + } + + /** + * The bucket's logging configuration, which defines the destination bucket and optional name + * prefix for the current bucket's logs. 
+ */ + public static class Logging implements Serializable { + + private static final long serialVersionUID = 5213268072569764596L; + private final String logBucket; + private final String logObjectPrefix; + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Logging)) { + return false; + } + Logging logging = (Logging) o; + return Objects.equals(logBucket, logging.logBucket) + && Objects.equals(logObjectPrefix, logging.logObjectPrefix); + } + + @Override + public int hashCode() { + return Objects.hash(logBucket, logObjectPrefix); + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + Builder builder = new Builder(); + builder.logBucket = logBucket; + builder.logObjectPrefix = logObjectPrefix; + return builder; + } + + public String getLogBucket() { + return logBucket; + } + + public String getLogObjectPrefix() { + return logObjectPrefix; + } + + private Logging(Builder builder) { + this.logBucket = builder.logBucket; + this.logObjectPrefix = builder.logObjectPrefix; + } + + public static class Builder { + private String logBucket; + private String logObjectPrefix; + + /** The destination bucket where the current bucket's logs should be placed. */ + public Builder setLogBucket(String logBucket) { + this.logBucket = logBucket; + return this; + } + + /** A prefix for log object names. */ + public Builder setLogObjectPrefix(String logObjectPrefix) { + this.logObjectPrefix = logObjectPrefix; + return this; + } + + /** Builds an {@code Logging} object */ + public Logging build() { + return new Logging(this); + } + } + } + + /** The bucket's hierarchical namespace (Folders) configuration. Enable this to use HNS. 
*/ + public static final class HierarchicalNamespace implements Serializable { + + private static final long serialVersionUID = 5932926691444613101L; + private Boolean enabled; + + public Boolean getEnabled() { + return enabled; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof HierarchicalNamespace)) { + return false; + } + HierarchicalNamespace that = (HierarchicalNamespace) o; + return Objects.equals(enabled, that.enabled); + } + + @Override + public int hashCode() { + return Objects.hash(enabled); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("enabled", enabled).toString(); + } + + private HierarchicalNamespace() {} + + private HierarchicalNamespace(Builder builder) { + this.enabled = builder.enabled; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return newBuilder().setEnabled(enabled); + } + + public static final class Builder { + private Boolean enabled; + + /** + * Sets whether Hierarchical Namespace (Folders) is enabled for this bucket. This can only be + * enabled at bucket create time. If this is enabled, Uniform Bucket-Level Access must also be + * enabled. + */ + public Builder setEnabled(Boolean enabled) { + this.enabled = enabled; + return this; + } + + public HierarchicalNamespace build() { + return new HierarchicalNamespace(this); + } + } + } + + /** + * Lifecycle rule for a bucket. Allows supported Actions, such as deleting and changing storage + * class, to be executed when certain Conditions are met. + * + *

Versions 1.50.0-1.111.2 of this library don’t support the CustomTimeBefore, + * DaysSinceCustomTime, DaysSinceNoncurrentTime and NoncurrentTimeBefore lifecycle conditions. To + * read GCS objects with those lifecycle conditions, update your Java client library to the latest + * version. + * + * @see Object Lifecycle + * Management + */ + public static class LifecycleRule implements Serializable { + + private static final long serialVersionUID = 8685745573894069326L; + private final LifecycleAction lifecycleAction; + private final LifecycleCondition lifecycleCondition; + + public LifecycleRule(LifecycleAction action, LifecycleCondition condition) { + if (condition.getIsLive() == null + && condition.getAge() == null + && condition.getCreatedBefore() == null + && condition.getMatchesStorageClass() == null + && condition.getNumberOfNewerVersions() == null + && condition.getDaysSinceNoncurrentTime() == null + && condition.getNoncurrentTimeBefore() == null + && condition.getCustomTimeBefore() == null + && condition.getDaysSinceCustomTime() == null + && condition.getMatchesPrefix() == null + && condition.getMatchesSuffix() == null) { + log.warning( + "Creating a lifecycle condition with no supported conditions:\n" + + this + + "\nAttempting to update with this rule may cause errors. 
Please update " + + " to the latest version of google-cloud-storage"); + } + + this.lifecycleAction = action; + this.lifecycleCondition = condition; + } + + public LifecycleAction getAction() { + return lifecycleAction; + } + + public LifecycleCondition getCondition() { + return lifecycleCondition; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("lifecycleAction", lifecycleAction) + .add("lifecycleCondition", lifecycleCondition) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(lifecycleAction, lifecycleCondition); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof LifecycleRule)) { + return false; + } + LifecycleRule that = (LifecycleRule) o; + return Objects.equals(lifecycleAction, that.lifecycleAction) + && Objects.equals(lifecycleCondition, that.lifecycleCondition); + } + + /** + * Condition for a Lifecycle rule, specifies under what criteria an Action should be executed. 
+ * + * @see Object Lifecycle + * Management + */ + public static class LifecycleCondition implements Serializable { + private static final long serialVersionUID = 7127585850045827932L; + private final Integer age; + private final OffsetDateTime createdBefore; + private final Integer numberOfNewerVersions; + private final Boolean isLive; + private final List matchesStorageClass; + private final Integer daysSinceNoncurrentTime; + private final OffsetDateTime noncurrentTimeBefore; + private final OffsetDateTime customTimeBefore; + private final Integer daysSinceCustomTime; + private final List matchesPrefix; + private final List matchesSuffix; + + private LifecycleCondition(Builder builder) { + this.age = builder.age; + this.createdBefore = builder.createdBefore; + this.numberOfNewerVersions = builder.numberOfNewerVersions; + this.isLive = builder.isLive; + this.matchesStorageClass = builder.matchesStorageClass; + this.daysSinceNoncurrentTime = builder.daysSinceNoncurrentTime; + this.noncurrentTimeBefore = builder.noncurrentTimeBefore; + this.customTimeBefore = builder.customTimeBefore; + this.daysSinceCustomTime = builder.daysSinceCustomTime; + this.matchesPrefix = builder.matchesPrefix; + this.matchesSuffix = builder.matchesSuffix; + } + + public Builder toBuilder() { + return newBuilder() + .setAge(this.age) + .setCreatedBeforeOffsetDateTime(this.createdBefore) + .setNumberOfNewerVersions(this.numberOfNewerVersions) + .setIsLive(this.isLive) + .setMatchesStorageClass(this.matchesStorageClass) + .setDaysSinceNoncurrentTime(this.daysSinceNoncurrentTime) + .setNoncurrentTimeBeforeOffsetDateTime(this.noncurrentTimeBefore) + .setCustomTimeBeforeOffsetDateTime(this.customTimeBefore) + .setDaysSinceCustomTime(this.daysSinceCustomTime) + .setMatchesPrefix(this.matchesPrefix) + .setMatchesSuffix(this.matchesSuffix); + } + + public static Builder newBuilder() { + return new Builder(); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + 
.add("age", age) + .add("createBefore", createdBefore) + .add("numberofNewerVersions", numberOfNewerVersions) + .add("isLive", isLive) + .add("matchesStorageClass", matchesStorageClass) + .add("daysSinceNoncurrentTime", daysSinceNoncurrentTime) + .add("noncurrentTimeBefore", noncurrentTimeBefore) + .add("customTimeBefore", customTimeBefore) + .add("daysSinceCustomTime", daysSinceCustomTime) + .add("matchesPrefix", matchesPrefix) + .add("matchesSuffix", matchesSuffix) + .toString(); + } + + public Integer getAge() { + return age; + } + + /** + * @deprecated Use {@link #getCreatedBeforeOffsetDateTime()} + */ + @Deprecated + public DateTime getCreatedBefore() { + return Utils.dateTimeCodec.nullable().encode(createdBefore); + } + + /** + * Returns the date and offset from UTC for this condition. If a time other than 00:00:00.000 + * is present in the value, GCS will truncate to 00:00:00.000. + */ + public OffsetDateTime getCreatedBeforeOffsetDateTime() { + return createdBefore; + } + + public Integer getNumberOfNewerVersions() { + return numberOfNewerVersions; + } + + public Boolean getIsLive() { + return isLive; + } + + public List getMatchesStorageClass() { + return matchesStorageClass; + } + + /** Returns the number of days elapsed since the noncurrent timestamp of an object. */ + public Integer getDaysSinceNoncurrentTime() { + return daysSinceNoncurrentTime; + } + + /** + * Returns the date in RFC 3339 format with only the date part (for instance, "2013-01-15"). + * + * @deprecated Use {@link #getNoncurrentTimeBeforeOffsetDateTime()} + */ + @Deprecated + public DateTime getNoncurrentTimeBefore() { + return Utils.dateTimeCodec.nullable().encode(noncurrentTimeBefore); + } + + /** + * Returns the date and offset from UTC for this condition. If a time other than 00:00:00.000 + * is present in the value, GCS will truncate to 00:00:00.000. 
+ */ + public OffsetDateTime getNoncurrentTimeBeforeOffsetDateTime() { + return noncurrentTimeBefore; + } + + /** + * Returns the date in RFC 3339 format with only the date part (for instance, "2013-01-15"). + * + * @deprecated Use {@link #getCustomTimeBeforeOffsetDateTime()} + */ + @Deprecated + public DateTime getCustomTimeBefore() { + return Utils.dateTimeCodec.nullable().encode(customTimeBefore); + } + + /** + * Returns the date and offset from UTC for this condition. If a time other than 00:00:00.000 + * is present in the value, GCS will truncate to 00:00:00.000. + */ + public OffsetDateTime getCustomTimeBeforeOffsetDateTime() { + return customTimeBefore; + } + + /** Returns the number of days elapsed since the user-specified timestamp set on an object. */ + public Integer getDaysSinceCustomTime() { + return daysSinceCustomTime; + } + + public List getMatchesPrefix() { + return matchesPrefix; + } + + public List getMatchesSuffix() { + return matchesSuffix; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof LifecycleCondition)) { + return false; + } + LifecycleCondition that = (LifecycleCondition) o; + return Objects.equals(age, that.age) + && Objects.equals(createdBefore, that.createdBefore) + && Objects.equals(numberOfNewerVersions, that.numberOfNewerVersions) + && Objects.equals(isLive, that.isLive) + && Objects.equals(matchesStorageClass, that.matchesStorageClass) + && Objects.equals(daysSinceNoncurrentTime, that.daysSinceNoncurrentTime) + && Objects.equals(noncurrentTimeBefore, that.noncurrentTimeBefore) + && Objects.equals(customTimeBefore, that.customTimeBefore) + && Objects.equals(daysSinceCustomTime, that.daysSinceCustomTime) + && Objects.equals(matchesPrefix, that.matchesPrefix) + && Objects.equals(matchesSuffix, that.matchesSuffix); + } + + @Override + public int hashCode() { + return Objects.hash( + age, + createdBefore, + numberOfNewerVersions, + isLive, + matchesStorageClass, + 
daysSinceNoncurrentTime, + noncurrentTimeBefore, + customTimeBefore, + daysSinceCustomTime, + matchesPrefix, + matchesSuffix); + } + + /** Builder for {@code LifecycleCondition}. */ + public static class Builder { + private Integer age; + private OffsetDateTime createdBefore; + private Integer numberOfNewerVersions; + private Boolean isLive; + private List matchesStorageClass; + private Integer daysSinceNoncurrentTime; + private OffsetDateTime noncurrentTimeBefore; + private OffsetDateTime customTimeBefore; + private Integer daysSinceCustomTime; + private List matchesPrefix; + private List matchesSuffix; + + private Builder() {} + + /** + * Sets the age in days. This condition is satisfied when a Blob reaches the specified age + * (in days). When you specify the Age condition, you are specifying a Time to Live (TTL) + * for objects in a bucket with lifecycle management configured. The time when the Age + * condition is considered to be satisfied is calculated by adding the specified value to + * the object creation time. + */ + public Builder setAge(Integer age) { + this.age = age; + return this; + } + + /** + * Sets the date a Blob should be created before for an Action to be executed. Note that + * only the date will be considered, if the time is specified it will be truncated. This + * condition is satisfied when an object is created before midnight of the specified date in + * UTC. + * + * @deprecated Use {@link #setCreatedBeforeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + public Builder setCreatedBefore(DateTime createdBefore) { + return setCreatedBeforeOffsetDateTime( + Utils.dateTimeCodec.nullable().decode(createdBefore)); + } + + /** + * Sets the date a Blob should be created before for an Action to be executed. Note that + * only the date will be considered, if the time is specified it will be truncated. This + * condition is satisfied when an object is created before midnight of the specified date in + * UTC. 
+ */ + public Builder setCreatedBeforeOffsetDateTime(OffsetDateTime createdBefore) { + this.createdBefore = createdBefore; + return this; + } + + /** + * Sets the number of newer versions a Blob should have for an Action to be executed. + * Relevant only when versioning is enabled on a bucket. * + */ + public Builder setNumberOfNewerVersions(Integer numberOfNewerVersions) { + this.numberOfNewerVersions = numberOfNewerVersions; + return this; + } + + /** + * Sets an isLive Boolean condition. If the value is true, this lifecycle condition matches + * only live Blobs; if the value is false, it matches only archived objects. For the + * purposes of this condition, Blobs in non-versioned buckets are considered live. + */ + public Builder setIsLive(Boolean live) { + this.isLive = live; + return this; + } + + /** + * Sets a list of Storage Classes for a objects that satisfy the condition to execute the + * Action. * + */ + public Builder setMatchesStorageClass(List matchesStorageClass) { + this.matchesStorageClass = matchesStorageClass; + return this; + } + + /** + * Sets the number of days elapsed since the noncurrent timestamp of an object. The + * condition is satisfied if the days elapsed is at least this number. This condition is + * relevant only for versioned objects. The value of the field must be a nonnegative + * integer. If it's zero, the object version will become eligible for Lifecycle action as + * soon as it becomes noncurrent. + */ + public Builder setDaysSinceNoncurrentTime(Integer daysSinceNoncurrentTime) { + this.daysSinceNoncurrentTime = daysSinceNoncurrentTime; + return this; + } + + /** + * Sets the date in RFC 3339 format with only the date part (for instance, "2013-01-15"). + * Note that only date part will be considered, if the time is specified it will be + * truncated. This condition is satisfied when the noncurrent time on an object is before + * this date. This condition is relevant only for versioned objects. 
+ * + * @deprecated Use {@link #setNoncurrentTimeBeforeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + public Builder setNoncurrentTimeBefore(DateTime noncurrentTimeBefore) { + return setNoncurrentTimeBeforeOffsetDateTime( + Utils.dateTimeCodec.nullable().decode(noncurrentTimeBefore)); + } + + /** + * Sets the date with only the date part (for instance, "2013-01-15"). Note that only date + * part will be considered, if the time is specified it will be truncated. This condition is + * satisfied when the noncurrent time on an object is before this date. This condition is + * relevant only for versioned objects. + */ + public Builder setNoncurrentTimeBeforeOffsetDateTime(OffsetDateTime noncurrentTimeBefore) { + this.noncurrentTimeBefore = noncurrentTimeBefore; + return this; + } + + /** + * Sets the date in RFC 3339 format with only the date part (for instance, "2013-01-15"). + * Note that only date part will be considered, if the time is specified it will be + * truncated. This condition is satisfied when the custom time on an object is before this + * date in UTC. + * + * @deprecated Use {@link #setCustomTimeBeforeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + public Builder setCustomTimeBefore(DateTime customTimeBefore) { + return setCustomTimeBeforeOffsetDateTime( + Utils.dateTimeCodec.nullable().decode(customTimeBefore)); + } + + /** + * Sets the date with only the date part (for instance, "2013-01-15"). Note that only date + * part will be considered, if the time is specified it will be truncated. This condition is + * satisfied when the custom time on an object is before this date in UTC. + */ + public Builder setCustomTimeBeforeOffsetDateTime(OffsetDateTime customTimeBefore) { + this.customTimeBefore = customTimeBefore; + return this; + } + + /** + * Sets the number of days elapsed since the user-specified timestamp set on an object. The + * condition is satisfied if the days elapsed is at least this number. 
If no custom + * timestamp is specified on an object, the condition does not apply. + */ + public Builder setDaysSinceCustomTime(Integer daysSinceCustomTime) { + this.daysSinceCustomTime = daysSinceCustomTime; + return this; + } + + /** + * Sets the list of prefixes. If any prefix matches the beginning of the object’s name, this + * portion of the condition is satisfied for that object. + */ + public Builder setMatchesPrefix(List matchesPrefix) { + this.matchesPrefix = matchesPrefix != null ? ImmutableList.copyOf(matchesPrefix) : null; + return this; + } + + /** + * Sets the list of suffixes. If any suffix matches the end of the object’s name, this + * portion of the condition is satisfied for that object. + */ + public Builder setMatchesSuffix(List matchesSuffix) { + this.matchesSuffix = matchesSuffix != null ? ImmutableList.copyOf(matchesSuffix) : null; + return this; + } + + /** Builds a {@code LifecycleCondition} object. * */ + public LifecycleCondition build() { + return new LifecycleCondition(this); + } + } + } + + /** + * Base class for the Action to take when a Lifecycle Condition is met. Supported Actions are + * expressed as subclasses of this class, accessed by static factory methods. 
     */
    public static class LifecycleAction implements Serializable {
      private static final long serialVersionUID = -816170697779323819L;

      // Wire identifier of the action (e.g. "Delete", "SetStorageClass"); this is also the
      // SOLE input to equals/hashCode below.
      private final String actionType;

      public LifecycleAction(String actionType) {
        this.actionType = actionType;
      }

      /** Returns the wire identifier of this action type. */
      public String getActionType() {
        return actionType;
      }

      @Override
      public String toString() {
        return MoreObjects.toStringHelper(this).add("actionType", getActionType()).toString();
      }

      // NOTE(review): equality is based only on actionType, so two SetStorageClassLifecycleAction
      // instances with DIFFERENT target storage classes compare equal. This matches shipped
      // behavior; do not "fix" without a deprecation plan — callers may rely on it.
      @Override
      public boolean equals(Object o) {
        if (this == o) {
          return true;
        }
        if (!(o instanceof LifecycleAction)) {
          return false;
        }
        LifecycleAction that = (LifecycleAction) o;
        return Objects.equals(actionType, that.actionType);
      }

      @Override
      public int hashCode() {
        return Objects.hash(actionType);
      }

      /**
       * Creates a new {@code DeleteLifecycleAction}. Blobs that meet the Condition associated with
       * this action will be deleted.
       */
      public static DeleteLifecycleAction newDeleteAction() {
        return new DeleteLifecycleAction();
      }

      /**
       * Creates a new {@code SetStorageClassLifecycleAction}. A Blob's storage class that meets
       * the action's conditions will be changed to the specified storage class.
       *
       * @param storageClass The new storage class to use when conditions are met for this action.
       */
      public static SetStorageClassLifecycleAction newSetStorageClassAction(
          @NonNull StorageClass storageClass) {
        return new SetStorageClassLifecycleAction(storageClass);
      }

      /**
       * Create a new {@code AbortIncompleteMPUAction}. An incomplete multipart upload will be
       * aborted when the multipart upload meets the specified condition. Age is the only condition
       * supported for this action. See: https://cloud.google.com/storage/docs/lifecycle##abort-mpu
       */
      public static LifecycleAction newAbortIncompleteMPUploadAction() {
        return new AbortIncompleteMPUAction();
      }

      /**
       * Creates a new {@code LifecycleAction}, with no specific supported action associated with
       * it. This is only intended as a "backup" for when the library doesn't recognize the type,
       * and should generally not be used, instead use the supported actions, and upgrade the
       * library if necessary to get new supported actions.
       */
      public static LifecycleAction newLifecycleAction(@NonNull String actionType) {
        return new LifecycleAction(actionType);
      }
    }

    /** Action that deletes an object when its lifecycle condition is met. */
    public static class DeleteLifecycleAction extends LifecycleAction {
      public static final String TYPE = "Delete";
      private static final long serialVersionUID = 4235058923106460876L;

      // Private: construct via LifecycleAction.newDeleteAction().
      private DeleteLifecycleAction() {
        super(TYPE);
      }
    }

    /** Action that changes an object's storage class when its lifecycle condition is met. */
    public static class SetStorageClassLifecycleAction extends LifecycleAction {
      public static final String TYPE = "SetStorageClass";
      private static final long serialVersionUID = 1235008830965208895L;

      // NOTE(review): not part of equals/hashCode (inherited from LifecycleAction); see note there.
      private final StorageClass storageClass;

      // Private: construct via LifecycleAction.newSetStorageClassAction(StorageClass).
      private SetStorageClassLifecycleAction(StorageClass storageClass) {
        super(TYPE);
        this.storageClass = storageClass;
      }

      @Override
      public String toString() {
        return MoreObjects.toStringHelper(this)
            .add("actionType", getActionType())
            .add("storageClass", storageClass.name())
            .toString();
      }

      /** Returns the storage class objects are transitioned to when the condition is met. */
      public StorageClass getStorageClass() {
        return storageClass;
      }
    }

    /** Action that aborts an incomplete multipart upload when its (Age) condition is met. */
    public static class AbortIncompleteMPUAction extends LifecycleAction {
      public static final String TYPE = "AbortIncompleteMultipartUpload";
      private static final long serialVersionUID = 8158049841366366988L;

      // Private: construct via LifecycleAction.newAbortIncompleteMPUploadAction().
      private AbortIncompleteMPUAction() {
        super(TYPE);
      }
    }
  }

  /**
   * Base class for bucket's delete rules. Allows to configure automatic deletion of blobs and blobs
   * versions.
   *
   * @see <a href="https://cloud.google.com/storage/docs/lifecycle">Object Lifecycle Management</a>
   * @deprecated Use a {@code LifecycleRule} with a {@code DeleteLifecycleAction} and a {@code
   *     LifecycleCondition} which is equivalent to a subclass of DeleteRule instead.
   */
  @Deprecated
  public abstract static class DeleteRule implements Serializable {

    private static final long serialVersionUID = -2831684017163653163L;
    // The only lifecycle action a legacy DeleteRule can express.
    static final String SUPPORTED_ACTION = "Delete";
    private final Type type;

    /** Kind of legacy delete condition; UNKNOWN wraps conditions this class predates. */
    public enum Type {
      AGE,
      CREATE_BEFORE,
      NUM_NEWER_VERSIONS,
      IS_LIVE,
      UNKNOWN
    }

    DeleteRule(Type type) {
      this.type = type;
    }

    public Type getType() {
      return type;
    }

    @Override
    public int hashCode() {
      return Objects.hash(type);
    }

    // NOTE(review): equality compares only the Type — e.g. two AgeDeleteRule instances with
    // different daysToLive compare equal. Longstanding (deprecated) behavior; left as-is.
    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (!(o instanceof DeleteRule)) {
        return false;
      }
      DeleteRule that = (DeleteRule) o;
      return type == that.type;
    }
  }

  /**
   * Delete rule class that sets a Time To Live for blobs in the bucket.
   *
   * @see <a href="https://cloud.google.com/storage/docs/lifecycle">Object Lifecycle Management</a>
   * @deprecated Use a {@code LifecycleRule} with a {@code DeleteLifecycleAction} and use {@code
   *     LifecycleCondition.Builder.setAge} instead.
   *     <p>For example, {@code new AgeDeleteRule(1)} is equivalent to {@code new
   *     LifecycleRule( LifecycleAction.newDeleteAction(),
   *     LifecycleCondition.newBuilder().setAge(1).build()))}
   */
  @Deprecated
  public static class AgeDeleteRule extends DeleteRule {

    private static final long serialVersionUID = 8655342969048652720L;
    private final int daysToLive;

    /**
     * Creates an {@code AgeDeleteRule} object.
     *
     * @param daysToLive blobs' Time To Live expressed in days. The time when the age condition is
     *     considered to be satisfied is computed by adding {@code daysToLive} days to the midnight
     *     following blob's creation time in UTC.
     */
    public AgeDeleteRule(int daysToLive) {
      super(Type.AGE);
      this.daysToLive = daysToLive;
    }

    public int getDaysToLive() {
      return daysToLive;
    }
  }

  /**
   * Fallback delete rule that carries the raw API {@code Rule} for conditions this legacy
   * hierarchy cannot represent (Type.UNKNOWN).
   */
  static class RawDeleteRule extends DeleteRule {

    private static final long serialVersionUID = -3490275955461147025L;

    // Transient because Rule itself is not Serializable; persisted as its JSON string via the
    // custom writeObject/readObject pair below.
    private transient Rule rule;

    RawDeleteRule(Rule rule) {
      super(Type.UNKNOWN);
      this.rule = rule;
    }

    private void writeObject(ObjectOutputStream out) throws IOException {
      out.defaultWriteObject();
      // Serialize the non-Serializable Rule as its JSON representation.
      out.writeUTF(rule.toString());
    }

    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
      in.defaultReadObject();
      // NOTE(review): JacksonFactory is deprecated in google-http-client in favor of GsonFactory;
      // consider migrating — TODO confirm the dependency is still desired.
      rule = new JacksonFactory().fromString(in.readUTF(), Rule.class);
    }

    Rule getRule() {
      return rule;
    }
  }

  /**
   * Delete rule class for blobs in the bucket that have been created before a certain date.
   *
   * @see <a href="https://cloud.google.com/storage/docs/lifecycle">Object Lifecycle Management</a>
   * @deprecated Use a {@code LifecycleRule} with an action {@code DeleteLifecycleAction} and a
   *     condition {@code LifecycleCondition.Builder.setCreatedBefore} instead.
   */
  @Deprecated
  public static class CreatedBeforeDeleteRule extends DeleteRule {

    private static final long serialVersionUID = -2941931783781989505L;
    private final OffsetDateTime time;

    /**
     * Creates an {@code CreatedBeforeDeleteRule} object.
     *
     * @param timeMillis a date in UTC. Blobs that have been created before midnight of the provided
     *     date meet the delete condition
     * @deprecated Use {@link #CreatedBeforeDeleteRule(OffsetDateTime)} instead
     */
    @Deprecated
    public CreatedBeforeDeleteRule(long timeMillis) {
      this(millisUtcCodec.encode(timeMillis));
    }

    /**
     * Creates an {@code CreatedBeforeDeleteRule} object.
     *
     * @param time Blobs that have been created before midnight of the provided date meet the delete
     *     condition
     */
    public CreatedBeforeDeleteRule(OffsetDateTime time) {
      super(Type.CREATE_BEFORE);
      this.time = time;
    }

    /**
     * @deprecated {@link #getTime()}
     */
    @Deprecated
    public long getTimeMillis() {
      return millisUtcCodec.decode(time);
    }

    public OffsetDateTime getTime() {
      return time;
    }
  }

  /**
   * Delete rule class for versioned blobs. Specifies when to delete a blob's version according to
   * the number of available newer versions for that blob.
   *
   * @see <a href="https://cloud.google.com/storage/docs/lifecycle">Object Lifecycle Management</a>
   * @deprecated Use a {@code LifecycleRule} with a {@code DeleteLifecycleAction} and a condition
   *     {@code LifecycleCondition.Builder.setNumberOfNewerVersions} instead.
   */
  @Deprecated
  public static class NumNewerVersionsDeleteRule extends DeleteRule {

    private static final long serialVersionUID = 8984956956307794724L;
    private final int numNewerVersions;

    /**
     * Creates an {@code NumNewerVersionsDeleteRule} object.
     *
     * @param numNewerVersions the number of newer versions. A blob's version meets the delete
     *     condition when {@code numNewerVersions} newer versions are available.
     */
    public NumNewerVersionsDeleteRule(int numNewerVersions) {
      super(Type.NUM_NEWER_VERSIONS);
      this.numNewerVersions = numNewerVersions;
    }

    public int getNumNewerVersions() {
      return numNewerVersions;
    }
  }

  /**
   * Delete rule class to distinguish between live and archived blobs.
+ * + * @see Object Lifecycle Management + * @deprecated Use a {@code LifecycleRule} with a {@code DeleteLifecycleAction} and a condition + * {@code LifecycleCondition.Builder.setIsLive} instead. + */ + @Deprecated + public static class IsLiveDeleteRule extends DeleteRule { + + private static final long serialVersionUID = 6769701586197631153L; + private final boolean isLive; + + /** + * Creates an {@code IsLiveDeleteRule} object. + * + * @param isLive if set to {@code true} live blobs meet the delete condition. If set to {@code + * false} delete condition is met by archived blobs. + */ + public IsLiveDeleteRule(boolean isLive) { + super(Type.IS_LIVE); + this.isLive = isLive; + } + + public boolean isLive() { + return isLive; + } + } + + /** + * A buckets IP + * filtering configuration. Specifies the network sources that can access the bucket, as well + * as its underlying objects. + * + * @since 2.54.0 + */ + @Immutable + public static final class IpFilter implements Serializable { + private static final long serialVersionUID = 3883696370256011372L; + private final @Nullable String mode; + private final @Nullable PublicNetworkSource publicNetworkSource; + private final @Nullable List vpcNetworkSources; + private final @Nullable Boolean allowCrossOrgVpcs; + private final @Nullable Boolean allowAllServiceAgentAccess; + + private IpFilter( + @Nullable String mode, + @Nullable PublicNetworkSource publicNetworkSource, + @Nullable List vpcNetworkSources, + @Nullable Boolean allowCrossOrgVpcs, + @Nullable Boolean allowAllServiceAgentAccess) { + this.mode = mode; + this.publicNetworkSource = publicNetworkSource; + this.vpcNetworkSources = vpcNetworkSources; + this.allowCrossOrgVpcs = allowCrossOrgVpcs; + this.allowAllServiceAgentAccess = allowAllServiceAgentAccess; + } + + /** + * The state of the IP filter configuration. Valid values are `Enabled` and `Disabled`. 
When set + * to `Enabled`, IP filtering rules are applied to a bucket and all incoming requests to the + * bucket are evaluated against these rules. When set to `Disabled`, IP filtering rules are not + * applied to a bucket. + * + * @since 2.54.0 + * @see Builder#setMode + */ + public @Nullable String getMode() { + return mode; + } + + /** + * Optional. Public IPs allowed to operate or access the bucket. + * + * @since 2.54.0 + * @see Builder#setPublicNetworkSource(PublicNetworkSource) + */ + public @Nullable PublicNetworkSource getPublicNetworkSource() { + return publicNetworkSource; + } + + /** + * Optional. The list of network sources that are allowed to access operations on the bucket or + * the underlying objects. + * + * @since 2.54.0 + * @see Builder#setVpcNetworkSources(List) + */ + public @Nullable List getVpcNetworkSources() { + return vpcNetworkSources; + } + + /** + * Optional. Whether or not to allow VPCs from orgs different than the bucket's parent org to + * access the bucket. When set to true, validations on the existence of the VPCs won't be + * performed. If set to false, each VPC network source will be checked to belong to the same org + * as the bucket as well as validated for existence. + * + * @since 2.54.0 + * @see Builder#setAllowCrossOrgVpcs(Boolean) + */ + public @Nullable Boolean getAllowCrossOrgVpcs() { + return allowCrossOrgVpcs; + } + + /** + * Whether or not to allow all P4SA access to the bucket. When set to true, IP filter config + * validation will not apply. 
+ * + * @since 2.54.0 + * @see Builder#setAllowAllServiceAgentAccess(Boolean) + */ + public @Nullable Boolean getAllowAllServiceAgentAccess() { + return allowAllServiceAgentAccess; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof IpFilter)) { + return false; + } + IpFilter ipFilter = (IpFilter) o; + return Objects.equals(mode, ipFilter.mode) + && Objects.equals(publicNetworkSource, ipFilter.publicNetworkSource) + && Objects.equals(vpcNetworkSources, ipFilter.vpcNetworkSources) + && Objects.equals(allowCrossOrgVpcs, ipFilter.allowCrossOrgVpcs) + && Objects.equals(allowAllServiceAgentAccess, ipFilter.allowAllServiceAgentAccess); + } + + @Override + public int hashCode() { + return Objects.hash( + mode, + publicNetworkSource, + vpcNetworkSources, + allowCrossOrgVpcs, + allowAllServiceAgentAccess); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("mode", mode) + .add("publicNetworkSource", publicNetworkSource) + .add("vpcNetworkSources", vpcNetworkSources) + .add("allowCrossOrgVpcs", allowCrossOrgVpcs) + .add("allowAllServiceAgentAccess", allowAllServiceAgentAccess) + .toString(); + } + + /** + * @since 2.54.0 + */ + public Builder toBuilder() { + return new Builder(this); + } + + /** + * @since 2.54.0 + */ + public static Builder newBuilder() { + return new Builder(); + } + + /** + * @since 2.54.0 + */ + public static final class Builder { + private @Nullable String mode; + private @Nullable PublicNetworkSource publicNetworkSource; + private @Nullable List vpcNetworkSources; + private @Nullable Boolean allowCrossOrgVpcs; + private @Nullable Boolean allowAllServiceAgentAccess; + + private Builder() {} + + private Builder(IpFilter ipFilter) { + this.mode = ipFilter.mode; + this.publicNetworkSource = ipFilter.publicNetworkSource; + this.vpcNetworkSources = ipFilter.vpcNetworkSources; + this.allowCrossOrgVpcs = ipFilter.allowCrossOrgVpcs; + 
this.allowAllServiceAgentAccess = ipFilter.allowAllServiceAgentAccess; + } + + /** + * The state of the IP filter configuration. Valid values are `Enabled` and `Disabled`. When + * set to `Enabled`, IP filtering rules are applied to a bucket and all incoming requests to + * the bucket are evaluated against these rules. When set to `Disabled`, IP filtering rules + * are not applied to a bucket. + * + * @since 2.54.0 + * @see IpFilter#getMode + */ + public Builder setMode(@Nullable String mode) { + this.mode = mode; + return this; + } + + /** + * Optional. Public IPs allowed to operate or access the bucket. + * + * @since 2.54.0 + * @see IpFilter#getPublicNetworkSource() + */ + public Builder setPublicNetworkSource(@Nullable PublicNetworkSource publicNetworkSource) { + this.publicNetworkSource = publicNetworkSource; + return this; + } + + /** + * Optional. The list of network sources that are allowed to access operations on the bucket + * or the underlying objects. + * + * @since 2.54.0 + * @see IpFilter#getVpcNetworkSources() + */ + public Builder setVpcNetworkSources(@Nullable List vpcNetworkSources) { + this.vpcNetworkSources = vpcNetworkSources; + return this; + } + + /** + * Optional. Whether or not to allow VPCs from orgs different than the bucket's parent org to + * access the bucket. When set to true, validations on the existence of the VPCs won't be + * performed. If set to false, each VPC network source will be checked to belong to the same + * org as the bucket as well as validated for existence. + * + * @since 2.54.0 + * @see IpFilter#getAllowCrossOrgVpcs() + */ + public Builder setAllowCrossOrgVpcs(@Nullable Boolean allowCrossOrgVpcs) { + this.allowCrossOrgVpcs = allowCrossOrgVpcs; + return this; + } + + /** + * Whether or not to allow all P4SA access to the bucket. When set to true, IP filter config + * validation will not apply. 
+ * + * @since 2.54.0 + * @see IpFilter#getAllowAllServiceAgentAccess() + */ + public Builder setAllowAllServiceAgentAccess(@Nullable Boolean allowAllServiceAgentAccess) { + this.allowAllServiceAgentAccess = allowAllServiceAgentAccess; + return this; + } + + /** + * @since 2.54.0 + */ + public IpFilter build() { + return new IpFilter( + mode, + publicNetworkSource, + vpcNetworkSources, + allowCrossOrgVpcs, + allowAllServiceAgentAccess); + } + } + + /** + * The public network IP address ranges that can access the bucket and its data. + * + * @since 2.54.0 + */ + @Immutable + public static final class PublicNetworkSource implements Serializable { + private static final long serialVersionUID = -5597599591237060501L; + + private final List allowedIpCidrRanges; + + private PublicNetworkSource(List allowedIpCidrRanges) { + this.allowedIpCidrRanges = allowedIpCidrRanges; + } + + /** + * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to operate or access the + * bucket and its underlying objects. + * + * @since 2.54.0 + */ + public List getAllowedIpCidrRanges() { + return allowedIpCidrRanges; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof PublicNetworkSource)) { + return false; + } + PublicNetworkSource that = (PublicNetworkSource) o; + return Objects.equals(allowedIpCidrRanges, that.allowedIpCidrRanges); + } + + @Override + public int hashCode() { + return Objects.hashCode(allowedIpCidrRanges); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("allowedIpCidrRanges", allowedIpCidrRanges) + .toString(); + } + + /** + * @since 2.54.0 + */ + public static PublicNetworkSource of(List allowedIpCidrRanges) { + return new PublicNetworkSource(allowedIpCidrRanges); + } + } + + /** + * The list of VPC networks that can access the bucket. 
+ * + * @since 2.54.0 + */ + @Immutable + public static final class VpcNetworkSource implements Serializable { + private static final long serialVersionUID = 9075759536253054803L; + private final @Nullable String network; + private final @Nullable List allowedIpCidrRanges; + + private VpcNetworkSource( + @Nullable String network, @Nullable List allowedIpCidrRanges) { + this.network = network; + this.allowedIpCidrRanges = allowedIpCidrRanges; + } + + /** + * Name of the network. + * + *

Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME` + * + * @since 2.54.0 + * @see Builder#setNetwork(String) + */ + public @Nullable String getNetwork() { + return network; + } + + /** + * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that can access the + * bucket. In the CIDR IP address block, the specified IP address must be properly truncated, + * meaning all the host bits must be zero or else the input is considered malformed. For + * example, `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for IPv6, + * `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not. + * + * @since 2.54.0 + * @see Builder#setAllowedIpCidrRanges(List) + */ + public @Nullable List getAllowedIpCidrRanges() { + return allowedIpCidrRanges; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof VpcNetworkSource)) { + return false; + } + VpcNetworkSource that = (VpcNetworkSource) o; + return Objects.equals(network, that.network) + && Objects.equals(allowedIpCidrRanges, that.allowedIpCidrRanges); + } + + @Override + public int hashCode() { + return Objects.hash(network, allowedIpCidrRanges); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("network", network) + .add("allowedIpCidrRanges", allowedIpCidrRanges) + .toString(); + } + + /** + * @since 2.54.0 + */ + public Builder toBuilder() { + return new Builder(this); + } + + /** + * @since 2.54.0 + */ + public static Builder newBuilder() { + return new Builder(); + } + + /** + * @since 2.54.0 + */ + public static final class Builder { + private @Nullable String network; + private @Nullable List allowedIpCidrRanges; + + private Builder() {} + + public Builder(VpcNetworkSource vpcNetworksource) { + this.network = vpcNetworksource.network; + this.allowedIpCidrRanges = vpcNetworksource.allowedIpCidrRanges; + } + + /** + * Name of the network. + * + *

Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME` + * + * @since 2.54.0 + * @see VpcNetworkSource#getNetwork() + */ + public Builder setNetwork(@Nullable String network) { + this.network = network; + return this; + } + + /** + * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that can access the + * bucket. In the CIDR IP address block, the specified IP address must be properly + * truncated, meaning all the host bits must be zero or else the input is considered + * malformed. For example, `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, + * for IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not. + * + * @since 2.54.0 + * @see VpcNetworkSource#getAllowedIpCidrRanges() + */ + public Builder setAllowedIpCidrRanges(@Nullable List allowedIpCidrRanges) { + this.allowedIpCidrRanges = allowedIpCidrRanges; + return this; + } + + /** + * @since 2.54.0 + */ + public VpcNetworkSource build() { + return new VpcNetworkSource(network, allowedIpCidrRanges); + } + } + } + } + + /** + * Google Managed Encryption (GMEK) enforcement config of a bucket. + * + * @since 2.55.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static final class GoogleManagedEncryptionEnforcementConfig { + @Nullable private final EncryptionEnforcementRestrictionMode restrictionMode; + @Nullable private final OffsetDateTime effectiveTime; + + private GoogleManagedEncryptionEnforcementConfig( + @Nullable EncryptionEnforcementRestrictionMode restrictionMode, + @Nullable OffsetDateTime effectiveTime) { + this.restrictionMode = restrictionMode; + this.effectiveTime = effectiveTime; + } + + /** + * Restriction mode for new objects within the bucket. If {@link + * EncryptionEnforcementRestrictionMode#NOT_RESTRICTED NotRestricted} or {@code null}, creation + * of new objects with google-managed encryption is allowed. If `FullyRestricted`, new objects + * can not be created using google-managed encryption. 
     *
     * @since 2.55.0 This new api is in preview and is subject to breaking changes.
     */
    @BetaApi
    public @Nullable EncryptionEnforcementRestrictionMode getRestrictionMode() {
      return restrictionMode;
    }

    /**
     * Output only. Time from which the config was effective.
     *
     * @since 2.55.0 This new api is in preview and is subject to breaking changes.
     */
    @BetaApi
    public @Nullable OffsetDateTime getEffectiveTime() {
      return effectiveTime;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (!(o instanceof GoogleManagedEncryptionEnforcementConfig)) {
        return false;
      }
      GoogleManagedEncryptionEnforcementConfig that = (GoogleManagedEncryptionEnforcementConfig) o;
      return Objects.equals(restrictionMode, that.restrictionMode)
          && Objects.equals(effectiveTime, that.effectiveTime);
    }

    @Override
    public int hashCode() {
      return Objects.hash(restrictionMode, effectiveTime);
    }

    @Override
    public String toString() {
      return MoreObjects.toStringHelper(this)
          .add("restrictionMode", restrictionMode)
          .add("effectiveTime", effectiveTime)
          .toString();
    }

    /**
     * @since 2.55.0 This new api is in preview and is subject to breaking changes.
     */
    @BetaApi
    public static GoogleManagedEncryptionEnforcementConfig of(
        @Nullable EncryptionEnforcementRestrictionMode restrictionMode) {
      return new GoogleManagedEncryptionEnforcementConfig(restrictionMode, null);
    }

    /**
     * @since 2.55.0 This new api is in preview and is subject to breaking changes.
     */
    @BetaApi
    public static GoogleManagedEncryptionEnforcementConfig of(
        @Nullable EncryptionEnforcementRestrictionMode restrictionMode,
        @Nullable OffsetDateTime effectiveTime) {
      return new GoogleManagedEncryptionEnforcementConfig(restrictionMode, effectiveTime);
    }
  }

  /**
   * Customer Managed Encryption (CMEK) enforcement config of a bucket.
   *
   * @since 2.55.0 This new api is in preview and is subject to breaking changes.
   */
  // NOTE(review): unlike the other nested config classes here, this one is not Serializable —
  // TODO confirm that is intentional given the enclosing class's serialization story.
  @BetaApi
  public static final class CustomerManagedEncryptionEnforcementConfig {
    @Nullable private final EncryptionEnforcementRestrictionMode restrictionMode;
    @Nullable private final OffsetDateTime effectiveTime;

    private CustomerManagedEncryptionEnforcementConfig(
        @Nullable EncryptionEnforcementRestrictionMode restrictionMode,
        @Nullable OffsetDateTime effectiveTime) {
      this.restrictionMode = restrictionMode;
      this.effectiveTime = effectiveTime;
    }

    /**
     * Restriction mode for new objects within the bucket. If {@link
     * EncryptionEnforcementRestrictionMode#NOT_RESTRICTED NotRestricted} or {@code null}, creation
     * of new objects with customer-managed encryption is allowed. If `FullyRestricted`, new objects
     * can not be created using customer-managed encryption.
     *
     * @since 2.55.0 This new api is in preview and is subject to breaking changes.
     */
    @BetaApi
    public @Nullable EncryptionEnforcementRestrictionMode getRestrictionMode() {
      return restrictionMode;
    }

    /**
     * Output only. Time from which the config was effective.
     *
     * @since 2.55.0 This new api is in preview and is subject to breaking changes.
     */
    @BetaApi
    public @Nullable OffsetDateTime getEffectiveTime() {
      return effectiveTime;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (!(o instanceof CustomerManagedEncryptionEnforcementConfig)) {
        return false;
      }
      CustomerManagedEncryptionEnforcementConfig that =
          (CustomerManagedEncryptionEnforcementConfig) o;
      return Objects.equals(restrictionMode, that.restrictionMode)
          && Objects.equals(effectiveTime, that.effectiveTime);
    }

    @Override
    public int hashCode() {
      return Objects.hash(restrictionMode, effectiveTime);
    }

    @Override
    public String toString() {
      return MoreObjects.toStringHelper(this)
          .add("restrictionMode", restrictionMode)
          .add("effectiveTime", effectiveTime)
          .toString();
    }

    /**
     * @since 2.55.0 This new api is in preview and is subject to breaking changes.
     */
    @BetaApi
    public static CustomerManagedEncryptionEnforcementConfig of(
        @Nullable EncryptionEnforcementRestrictionMode restrictionMode) {
      return new CustomerManagedEncryptionEnforcementConfig(restrictionMode, null);
    }

    /**
     * @since 2.55.0 This new api is in preview and is subject to breaking changes.
     */
    @BetaApi
    public static CustomerManagedEncryptionEnforcementConfig of(
        @Nullable EncryptionEnforcementRestrictionMode restrictionMode,
        @Nullable OffsetDateTime effectiveTime) {
      return new CustomerManagedEncryptionEnforcementConfig(restrictionMode, effectiveTime);
    }
  }

  /**
   * Customer Supplied Encryption (CSEK) enforcement config of a bucket.
   *
   * @since 2.55.0 This new api is in preview and is subject to breaking changes.
+ */ + @BetaApi + public static final class CustomerSuppliedEncryptionEnforcementConfig { + @Nullable private final EncryptionEnforcementRestrictionMode restrictionMode; + @Nullable private final OffsetDateTime effectiveTime; + + private CustomerSuppliedEncryptionEnforcementConfig( + @Nullable EncryptionEnforcementRestrictionMode restrictionMode, + @Nullable OffsetDateTime effectiveTime) { + this.restrictionMode = restrictionMode; + this.effectiveTime = effectiveTime; + } + + /** + * Restriction mode for new objects within the bucket. If {@link + * EncryptionEnforcementRestrictionMode#NOT_RESTRICTED NotRestricted} or {@code null}, creation + * of new objects with customer-supplied encryption is allowed. If `FullyRestricted`, new + * objects can not be created using customer-supplied encryption. + * + * @since 2.55.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public @Nullable EncryptionEnforcementRestrictionMode getRestrictionMode() { + return restrictionMode; + } + + /** + * Output only. Time from which the config was effective. + * + * @since 2.55.0 This new api is in preview and is subject to breaking changes. 
+ */ + @BetaApi + public @Nullable OffsetDateTime getEffectiveTime() { + return effectiveTime; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof CustomerSuppliedEncryptionEnforcementConfig)) { + return false; + } + CustomerSuppliedEncryptionEnforcementConfig that = + (CustomerSuppliedEncryptionEnforcementConfig) o; + return Objects.equals(restrictionMode, that.restrictionMode) + && Objects.equals(effectiveTime, that.effectiveTime); + } + + @Override + public int hashCode() { + return Objects.hash(restrictionMode, effectiveTime); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("restrictionMode", restrictionMode) + .add("effectiveTime", effectiveTime) + .toString(); + } + + /** + * @since 2.55.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static CustomerSuppliedEncryptionEnforcementConfig of( + @Nullable EncryptionEnforcementRestrictionMode restrictionMode) { + return new CustomerSuppliedEncryptionEnforcementConfig(restrictionMode, null); + } + + /** + * @since 2.55.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static CustomerSuppliedEncryptionEnforcementConfig of( + @Nullable EncryptionEnforcementRestrictionMode restrictionMode, + @Nullable OffsetDateTime effectiveTime) { + return new CustomerSuppliedEncryptionEnforcementConfig(restrictionMode, effectiveTime); + } + } + + /** + * @since 2.55.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static final class EncryptionEnforcementRestrictionMode extends StringEnumValue { + + /** + * @since 2.55.0 This new api is in preview and is subject to breaking changes. 
+ */ + @BetaApi + public static final EncryptionEnforcementRestrictionMode NOT_RESTRICTED = + new EncryptionEnforcementRestrictionMode("NotRestricted"); + + /** + * @since 2.55.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static final EncryptionEnforcementRestrictionMode FULLY_RESTRICTED = + new EncryptionEnforcementRestrictionMode("FullyRestricted"); + + private static final StringEnumType type = + new StringEnumType<>( + EncryptionEnforcementRestrictionMode.class, EncryptionEnforcementRestrictionMode::new); + + private EncryptionEnforcementRestrictionMode(String constant) { + super(constant); + } + + /** + * Get the {@link EncryptionEnforcementRestrictionMode} for the given String constant, and throw + * an exception if the constant is not recognized. + * + * @since 2.55.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static EncryptionEnforcementRestrictionMode valueOfStrict(String constant) { + return type.valueOfStrict(constant); + } + + /** + * Get the {@link EncryptionEnforcementRestrictionMode} for the given String constant, and allow + * unrecognized values. + * + * @since 2.55.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static EncryptionEnforcementRestrictionMode valueOf(String constant) { + return type.valueOf(constant); + } + + /** + * Return the known values + * + * @since 2.55.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static EncryptionEnforcementRestrictionMode[] values() { + return type.values(); + } + } + + /** Builder for {@code BucketInfo}. */ + public abstract static class Builder { + Builder() {} + + abstract Builder setProject(BigInteger project); + + /** Sets the bucket's name. 
*/ + public abstract Builder setName(String name); + + abstract Builder setGeneratedId(String generatedId); + + abstract Builder setOwner(Acl.Entity owner); + + abstract Builder setSelfLink(String selfLink); + + /** + * Sets whether a user accessing the bucket or an object it contains should assume the transit + * costs related to the access. + */ + public abstract Builder setRequesterPays(Boolean requesterPays); + + /** + * Sets whether versioning should be enabled for this bucket. When set to true, versioning is + * fully enabled. + */ + public abstract Builder setVersioningEnabled(Boolean enable); + + /** + * Sets the bucket's website index page. Behaves as the bucket's directory index where missing + * blobs are treated as potential directories. + */ + public abstract Builder setIndexPage(String indexPage); + + /** Sets the custom object to return when a requested resource is not found. */ + public abstract Builder setNotFoundPage(String notFoundPage); + + /** + * Sets the bucket's lifecycle configuration as a number of delete rules. + * + * @deprecated Use {@link #setLifecycleRules(Iterable)} instead, as in {@code + * setLifecycleRules(Collections.singletonList( new BucketInfo.LifecycleRule( + * LifecycleAction.newDeleteAction(), LifecycleCondition.newBuilder().setAge(5).build())));} + */ + @Deprecated + public abstract Builder setDeleteRules(Iterable rules); + + /** + * Sets the bucket's lifecycle configuration as a number of lifecycle rules, consisting of an + * action and a condition. + * + * @see Object Lifecycle + * Management + */ + public abstract Builder setLifecycleRules(Iterable rules); + + /** Deletes the lifecycle rules of this bucket. */ + public abstract Builder deleteLifecycleRules(); + + /** + * Sets the bucket's Recovery Point Objective (RPO). This can only be set for a dual-region + * bucket, and determines the speed at which data will be replicated between regions. 
See the + * {@code Rpo} class for supported values, and here for additional + * details. + */ + public abstract Builder setRpo(Rpo rpo); + + /** + * Sets the bucket's storage class. This defines how blobs in the bucket are stored and + * determines the SLA and the cost of storage. A list of supported values is available here. + */ + public abstract Builder setStorageClass(StorageClass storageClass); + + /** + * Sets the bucket's location. Data for blobs in the bucket resides in physical storage within + * this region or regions. A list of supported values is available here. + */ + public abstract Builder setLocation(String location); + + abstract Builder setEtag(String etag); + + /** + * @deprecated {@link #setCreateTimeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + abstract Builder setCreateTime(Long createTime); + + Builder setCreateTimeOffsetDateTime(OffsetDateTime createTime) { + // provide an implementation for source and binary compatibility which we override ourselves + setCreateTime(millisOffsetDateTimeCodec.decode(createTime)); + return this; + } + + /** + * @deprecated {@link #setUpdateTimeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + abstract Builder setUpdateTime(Long updateTime); + + Builder setUpdateTimeOffsetDateTime(OffsetDateTime updateTime) { + // provide an implementation for source and binary compatibility which we override ourselves + setCreateTime(millisOffsetDateTimeCodec.decode(updateTime)); + return this; + } + + abstract Builder setMetageneration(Long metageneration); + + abstract Builder setLocationType(String locationType); + + /** + * Sets the bucket's Cross-Origin Resource Sharing (CORS) configuration. + * + * @see Cross-Origin Resource + * Sharing (CORS) + */ + public abstract Builder setCors(Iterable cors); + + /** + * Sets the bucket's access control configuration. 
+ * + * @see + * About Access Control Lists + */ + public abstract Builder setAcl(Iterable acl); + + /** + * Sets the default access control configuration to apply to bucket's blobs when no other + * configuration is specified. + * + * @see + * About Access Control Lists + */ + public abstract Builder setDefaultAcl(Iterable acl); + + /** Sets the label of this bucket. */ + public abstract Builder setLabels(@Nullable Map<@NonNull String, @Nullable String> labels); + + /** Sets the default Cloud KMS key name for this bucket. */ + public abstract Builder setDefaultKmsKeyName(String defaultKmsKeyName); + + /** + * @since 2.55.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public abstract Builder setGoogleManagedEncryptionEnforcementConfig( + GoogleManagedEncryptionEnforcementConfig config); + + /** + * @since 2.55.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public abstract Builder setCustomerManagedEncryptionEnforcementConfig( + CustomerManagedEncryptionEnforcementConfig config); + + /** + * @since 2.55.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public abstract Builder setCustomerSuppliedEncryptionEnforcementConfig( + CustomerSuppliedEncryptionEnforcementConfig config); + + /** Sets the default event-based hold for this bucket. 
*/ + @BetaApi + public abstract Builder setDefaultEventBasedHold(Boolean defaultEventBasedHold); + + /** + * @deprecated {@link #setRetentionEffectiveTimeOffsetDateTime(OffsetDateTime)} + */ + @BetaApi + @Deprecated + abstract Builder setRetentionEffectiveTime(Long retentionEffectiveTime); + + @BetaApi + Builder setRetentionEffectiveTimeOffsetDateTime(OffsetDateTime retentionEffectiveTime) { + return setRetentionEffectiveTime(millisOffsetDateTimeCodec.decode(retentionEffectiveTime)); + } + + @BetaApi + abstract Builder setRetentionPolicyIsLocked(Boolean retentionPolicyIsLocked); + + /** + * If policy is not locked this value can be cleared, increased, and decreased. If policy is + * locked the retention period can only be increased. + * + * @deprecated Use {@link #setRetentionPeriodDuration(Duration)} + */ + @BetaApi + @Deprecated + public abstract Builder setRetentionPeriod(Long retentionPeriod); + + /** + * If policy is not locked this value can be cleared, increased, and decreased. If policy is + * locked the retention period can only be increased. + */ + @BetaApi + public Builder setRetentionPeriodDuration(Duration retentionPeriod) { + return setRetentionPeriod(nullableDurationSecondsCodec.encode(retentionPeriod)); + } + + /** + * Sets the IamConfiguration to specify whether IAM access should be enabled. 
+ * + * @see Bucket Policy + * Only + */ + @BetaApi + public abstract Builder setIamConfiguration(IamConfiguration iamConfiguration); + + public abstract Builder setAutoclass(Autoclass autoclass); + + public abstract Builder setLogging(Logging logging); + + public abstract Builder setCustomPlacementConfig(CustomPlacementConfig customPlacementConfig); + + public abstract Builder setHierarchicalNamespace(HierarchicalNamespace hierarchicalNamespace); + + abstract Builder setObjectRetention(ObjectRetention objectRetention); + + public abstract Builder setSoftDeletePolicy(SoftDeletePolicy softDeletePolicy); + + /** + * @since 2.54.0 + */ + public abstract Builder setIpFilter(IpFilter ipFilter); + + public abstract Builder setIsUnreachable(Boolean isUnreachable); + + /** Creates a {@code BucketInfo} object. */ + public abstract BucketInfo build(); + + abstract Builder clearGeneratedId(); + + abstract Builder clearProject(); + + abstract Builder clearName(); + + abstract Builder clearOwner(); + + abstract Builder clearSelfLink(); + + abstract Builder clearRequesterPays(); + + abstract Builder clearVersioningEnabled(); + + abstract Builder clearIndexPage(); + + abstract Builder clearNotFoundPage(); + + abstract Builder clearLifecycleRules(); + + abstract Builder clearRpo(); + + abstract Builder clearStorageClass(); + + abstract Builder clearLocation(); + + abstract Builder clearEtag(); + + abstract Builder clearCreateTime(); + + abstract Builder clearUpdateTime(); + + abstract Builder clearMetageneration(); + + abstract Builder clearCors(); + + abstract Builder clearAcl(); + + abstract Builder clearDefaultAcl(); + + abstract Builder clearLabels(); + + abstract Builder clearDefaultKmsKeyName(); + + abstract Builder clearDefaultEventBasedHold(); + + abstract Builder clearRetentionEffectiveTime(); + + abstract Builder clearRetentionPolicyIsLocked(); + + abstract Builder clearRetentionPeriod(); + + abstract Builder clearIamConfiguration(); + + abstract Builder 
clearLocationType(); + + abstract Builder clearLogging(); + + abstract Builder clearCustomPlacementConfig(); + + abstract Builder clearIpFilter(); + + abstract Builder clearGoogleManagedEncryptionEnforcementConfig(); + + abstract Builder clearCustomerManagedEncryptionEnforcementConfig(); + + abstract Builder clearCustomerSuppliedEncryptionEnforcementConfig(); + + abstract Builder clearIsUnreachable(); + } + + static final class BuilderImpl extends Builder { + + private String generatedId; + private BigInteger project; + private String name; + private Acl.Entity owner; + private String selfLink; + private Boolean requesterPays; + private Boolean versioningEnabled; + private String indexPage; + private String notFoundPage; + @Nullable private List lifecycleRules; + private Rpo rpo; + private StorageClass storageClass; + private String location; + private String etag; + private OffsetDateTime createTime; + private OffsetDateTime updateTime; + private Long metageneration; + private List cors; + private List acl; + private List defaultAcl; + private Map labels; + private String defaultKmsKeyName; + private Boolean defaultEventBasedHold; + private OffsetDateTime retentionEffectiveTime; + private Boolean retentionPolicyIsLocked; + private Duration retentionPeriod; + private IamConfiguration iamConfiguration; + private Autoclass autoclass; + private String locationType; + private Logging logging; + private CustomPlacementConfig customPlacementConfig; + private ObjectRetention objectRetention; + + private SoftDeletePolicy softDeletePolicy; + private HierarchicalNamespace hierarchicalNamespace; + private IpFilter ipFilter; + private GoogleManagedEncryptionEnforcementConfig googleManagedEncryptionEnforcementConfig; + private CustomerManagedEncryptionEnforcementConfig customerManagedEncryptionEnforcementConfig; + private CustomerSuppliedEncryptionEnforcementConfig customerSuppliedEncryptionEnforcementConfig; + private Boolean isUnreachable; + private final ImmutableSet.Builder 
modifiedFields = ImmutableSet.builder(); + + BuilderImpl(String name) { + this.name = name; + } + + BuilderImpl(BucketInfo bucketInfo) { + generatedId = bucketInfo.generatedId; + project = bucketInfo.project; + name = bucketInfo.name; + etag = bucketInfo.etag; + createTime = bucketInfo.createTime; + updateTime = bucketInfo.updateTime; + metageneration = bucketInfo.metageneration; + location = bucketInfo.location; + rpo = bucketInfo.rpo; + storageClass = bucketInfo.storageClass; + cors = bucketInfo.cors; + acl = bucketInfo.acl; + defaultAcl = bucketInfo.defaultAcl; + owner = bucketInfo.owner; + selfLink = bucketInfo.selfLink; + versioningEnabled = bucketInfo.versioningEnabled; + indexPage = bucketInfo.indexPage; + notFoundPage = bucketInfo.notFoundPage; + lifecycleRules = bucketInfo.lifecycleRules; + labels = bucketInfo.labels; + requesterPays = bucketInfo.requesterPays; + defaultKmsKeyName = bucketInfo.defaultKmsKeyName; + defaultEventBasedHold = bucketInfo.defaultEventBasedHold; + retentionEffectiveTime = bucketInfo.retentionEffectiveTime; + retentionPolicyIsLocked = bucketInfo.retentionPolicyIsLocked; + retentionPeriod = bucketInfo.retentionPeriod; + iamConfiguration = bucketInfo.iamConfiguration; + autoclass = bucketInfo.autoclass; + locationType = bucketInfo.locationType; + logging = bucketInfo.logging; + customPlacementConfig = bucketInfo.customPlacementConfig; + objectRetention = bucketInfo.objectRetention; + softDeletePolicy = bucketInfo.softDeletePolicy; + hierarchicalNamespace = bucketInfo.hierarchicalNamespace; + ipFilter = bucketInfo.ipFilter; + googleManagedEncryptionEnforcementConfig = + bucketInfo.googleManagedEncryptionEnforcementConfig; + customerManagedEncryptionEnforcementConfig = + bucketInfo.customerManagedEncryptionEnforcementConfig; + customerSuppliedEncryptionEnforcementConfig = + bucketInfo.customerSuppliedEncryptionEnforcementConfig; + isUnreachable = bucketInfo.isUnreachable; + } + + @Override + public Builder setName(String name) { + 
this.name = checkNotNull(name); + return this; + } + + @Override + Builder setProject(BigInteger project) { + if (!Objects.equals(this.project, project)) { + modifiedFields.add(BucketField.PROJECT); + } + this.project = project; + return this; + } + + @Override + Builder setGeneratedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + @Override + Builder setOwner(Acl.Entity owner) { + if (!Objects.equals(this.owner, owner)) { + modifiedFields.add(BucketField.OWNER); + } + this.owner = owner; + return this; + } + + @Override + Builder setSelfLink(String selfLink) { + this.selfLink = selfLink; + return this; + } + + @Override + public Builder setVersioningEnabled(Boolean enable) { + Boolean tmp = firstNonNull(enable, Data.nullOf(Boolean.class)); + if (!Objects.equals(this.versioningEnabled, tmp)) { + modifiedFields.add(BucketField.VERSIONING); + } + this.versioningEnabled = tmp; + return this; + } + + @Override + public Builder setRequesterPays(Boolean enable) { + Boolean tmp = firstNonNull(enable, Data.nullOf(Boolean.class)); + if (!Objects.equals(this.requesterPays, tmp)) { + modifiedFields.add(BucketField.BILLING); + } + this.requesterPays = tmp; + return this; + } + + @Override + public Builder setIndexPage(String indexPage) { + if (!Objects.equals(this.indexPage, indexPage)) { + modifiedFields.add(BucketField.WEBSITE); + } + this.indexPage = indexPage; + return this; + } + + @Override + public Builder setNotFoundPage(String notFoundPage) { + if (!Objects.equals(this.notFoundPage, notFoundPage)) { + modifiedFields.add(BucketField.WEBSITE); + } + this.notFoundPage = notFoundPage; + return this; + } + + /** + * @deprecated Use {@code setLifecycleRules} method instead. 
* + */ + @Override + @Deprecated + public Builder setDeleteRules(Iterable rules) { + // if the provided rules are null or empty clear all current delete rules + if (rules == null) { + return clearDeleteLifecycleRules(); + } else { + ArrayList deleteRules = newArrayList(rules); + if (deleteRules.isEmpty()) { + if (lifecycleRules != null) { + return clearDeleteLifecycleRules(); + } else { + lifecycleRules = ImmutableList.of(); + return this; + } + } else { + // if the provided rules are non-empty, replace all existing delete rules + + Stream newDeleteRules = + deleteRules.stream().map(BackwardCompatibilityUtils.deleteRuleCodec::encode); + + // if our current lifecycleRules are null, set to the newDeleteRules + if (lifecycleRules == null) { + return setLifecycleRules(newDeleteRules.collect(ImmutableList.toImmutableList())); + } else { + // if lifecycleRules is non-null, filter out existing delete rules, then add our new + // ones + ImmutableList newLifecycleRules = + Streams.concat( + lifecycleRules.stream().filter(IS_DELETE_LIFECYCLE_RULE.negate()), + newDeleteRules) + .collect(ImmutableList.toImmutableList()); + return setLifecycleRules(newLifecycleRules); + } + } + } + } + + @SuppressWarnings("unchecked") + @Override + public Builder setLifecycleRules(Iterable rules) { + final ImmutableList tmp; + if (rules != null) { + if (rules instanceof ImmutableList) { + tmp = (ImmutableList) rules; + } else { + tmp = ImmutableList.copyOf(rules); + } + } else { + tmp = ImmutableList.of(); + } + if (!Objects.equals(this.lifecycleRules, tmp)) { + modifiedFields.add(BucketField.LIFECYCLE); + } + this.lifecycleRules = tmp; + return this; + } + + @Override + public Builder deleteLifecycleRules() { + return setLifecycleRules(null); + } + + @Override + public Builder setRpo(Rpo rpo) { + if (!Objects.equals(this.rpo, rpo)) { + modifiedFields.add(BucketField.RPO); + } + this.rpo = rpo; + return this; + } + + @Override + public Builder setStorageClass(StorageClass storageClass) { + if 
(!Objects.equals(this.storageClass, storageClass)) { + modifiedFields.add(BucketField.STORAGE_CLASS); + } + this.storageClass = storageClass; + return this; + } + + @Override + public Builder setLocation(String location) { + if (!Objects.equals(this.location, location)) { + modifiedFields.add(BucketField.LOCATION); + } + this.location = location; + return this; + } + + @Override + Builder setEtag(String etag) { + if (!Objects.equals(this.etag, etag)) { + modifiedFields.add(BucketField.ETAG); + } + this.etag = etag; + return this; + } + + /** + * @deprecated {@link #setCreateTimeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + @Override + Builder setCreateTime(Long createTime) { + return setCreateTimeOffsetDateTime(millisOffsetDateTimeCodec.encode(createTime)); + } + + @Override + Builder setCreateTimeOffsetDateTime(OffsetDateTime createTime) { + if (!Objects.equals(this.createTime, createTime)) { + modifiedFields.add(BucketField.TIME_CREATED); + } + this.createTime = createTime; + return this; + } + + /** + * @deprecated {@link #setUpdateTimeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + @Override + Builder setUpdateTime(Long updateTime) { + return setUpdateTimeOffsetDateTime(millisOffsetDateTimeCodec.encode(updateTime)); + } + + @Override + Builder setUpdateTimeOffsetDateTime(OffsetDateTime updateTime) { + if (!Objects.equals(this.updateTime, updateTime)) { + modifiedFields.add(BucketField.UPDATED); + } + this.updateTime = updateTime; + return this; + } + + @Override + Builder setMetageneration(Long metageneration) { + this.metageneration = metageneration; + return this; + } + + @Override + public Builder setCors(Iterable cors) { + ImmutableList tmp = cors != null ? ImmutableList.copyOf(cors) : ImmutableList.of(); + if (!Objects.equals(this.cors, tmp)) { + modifiedFields.add(BucketField.CORS); + } + this.cors = tmp; + return this; + } + + @Override + public Builder setAcl(Iterable acl) { + List tmp = acl != null ? 
ImmutableList.copyOf(acl) : null; + if (!Objects.equals(this.acl, tmp)) { + modifiedFields.add(BucketField.ACL); + } + this.acl = tmp; + return this; + } + + @Override + public Builder setDefaultAcl(Iterable acl) { + List tmp = acl != null ? ImmutableList.copyOf(acl) : null; + if (!Objects.equals(this.defaultAcl, tmp)) { + modifiedFields.add(BucketField.DEFAULT_OBJECT_ACL); + } + this.defaultAcl = tmp; + return this; + } + + @SuppressWarnings("UnnecessaryLocalVariable") + @Override + public Builder setLabels(@Nullable Map<@NonNull String, @Nullable String> labels) { + Map left = this.labels; + Map right = labels; + if (!Objects.equals(left, right)) { + diffMaps(BucketField.LABELS, left, right, modifiedFields::add); + if (right != null) { + this.labels = new HashMap<>(right); + } else { + this.labels = (Map) Data.nullOf(ImmutableEmptyMap.class); + } + } + return this; + } + + @Override + public Builder setDefaultKmsKeyName(String defaultKmsKeyName) { + String tmp = defaultKmsKeyName != null ? 
defaultKmsKeyName : Data.nullOf(String.class); + if (!Objects.equals(this.defaultKmsKeyName, tmp)) { + modifiedFields.add(BucketField.ENCRYPTION); + } + this.defaultKmsKeyName = tmp; + return this; + } + + @Override + public BuilderImpl setGoogleManagedEncryptionEnforcementConfig( + GoogleManagedEncryptionEnforcementConfig googleManagedEncryptionEnforcementConfig) { + if (!Objects.equals( + this.googleManagedEncryptionEnforcementConfig, + googleManagedEncryptionEnforcementConfig)) { + modifiedFields.add(BucketField.ENCRYPTION); + } + this.googleManagedEncryptionEnforcementConfig = googleManagedEncryptionEnforcementConfig; + return this; + } + + @Override + public BuilderImpl setCustomerManagedEncryptionEnforcementConfig( + CustomerManagedEncryptionEnforcementConfig customerManagedEncryptionEnforcementConfig) { + if (!Objects.equals( + this.customerManagedEncryptionEnforcementConfig, + customerManagedEncryptionEnforcementConfig)) { + modifiedFields.add(BucketField.ENCRYPTION); + } + this.customerManagedEncryptionEnforcementConfig = customerManagedEncryptionEnforcementConfig; + return this; + } + + @Override + public BuilderImpl setCustomerSuppliedEncryptionEnforcementConfig( + CustomerSuppliedEncryptionEnforcementConfig customerSuppliedEncryptionEnforcementConfig) { + if (!Objects.equals( + this.customerSuppliedEncryptionEnforcementConfig, + customerSuppliedEncryptionEnforcementConfig)) { + modifiedFields.add(BucketField.ENCRYPTION); + } + this.customerSuppliedEncryptionEnforcementConfig = + customerSuppliedEncryptionEnforcementConfig; + return this; + } + + @Override + public Builder setDefaultEventBasedHold(Boolean defaultEventBasedHold) { + Boolean tmp = firstNonNull(defaultEventBasedHold, Data.nullOf(Boolean.class)); + if (!Objects.equals(this.defaultEventBasedHold, tmp)) { + modifiedFields.add(BucketField.DEFAULT_EVENT_BASED_HOLD); + } + this.defaultEventBasedHold = tmp; + return this; + } + + /** + * @deprecated Use {@link 
#setRetentionEffectiveTimeOffsetDateTime(OffsetDateTime)} + */ + @Override + @Deprecated + Builder setRetentionEffectiveTime(Long retentionEffectiveTime) { + return setRetentionEffectiveTimeOffsetDateTime( + millisOffsetDateTimeCodec.encode(retentionEffectiveTime)); + } + + @Override + Builder setRetentionEffectiveTimeOffsetDateTime(OffsetDateTime retentionEffectiveTime) { + if (!Objects.equals(this.retentionEffectiveTime, retentionEffectiveTime)) { + modifiedFields.add(BucketField.RETENTION_POLICY); + } + this.retentionEffectiveTime = retentionEffectiveTime; + return this; + } + + @Override + Builder setRetentionPolicyIsLocked(Boolean retentionPolicyIsLocked) { + Boolean tmp = firstNonNull(retentionPolicyIsLocked, Data.nullOf(Boolean.class)); + if (!Objects.equals(this.retentionPolicyIsLocked, retentionPolicyIsLocked)) { + modifiedFields.add(BucketField.RETENTION_POLICY); + } + this.retentionPolicyIsLocked = tmp; + return this; + } + + /** + * @deprecated Use {@link #setRetentionPeriodDuration(Duration)} + */ + @Override + public Builder setRetentionPeriod(Long retentionPeriod) { + return setRetentionPeriodDuration(nullableDurationSecondsCodec.decode(retentionPeriod)); + } + + @Override + public Builder setRetentionPeriodDuration(Duration retentionPeriod) { + if (!Objects.equals(this.retentionPeriod, retentionPeriod)) { + modifiedFields.add(BucketField.RETENTION_POLICY); + } + this.retentionPeriod = retentionPeriod; + return this; + } + + @Override + public Builder setIamConfiguration(IamConfiguration iamConfiguration) { + if (!Objects.equals(this.iamConfiguration, iamConfiguration)) { + modifiedFields.add(BucketField.IAMCONFIGURATION); + } + this.iamConfiguration = iamConfiguration; + return this; + } + + @Override + public Builder setAutoclass(Autoclass autoclass) { + if (!Objects.equals(this.autoclass, autoclass)) { + modifiedFields.add(BucketField.AUTOCLASS); + } + this.autoclass = autoclass; + return this; + } + + @Override + public Builder setLogging(Logging 
logging) { + Logging tmp = logging != null ? logging : Logging.newBuilder().build(); + if (!Objects.equals(this.logging, tmp)) { + modifiedFields.add(BucketField.LOGGING); + } + this.logging = tmp; + return this; + } + + @Override + public Builder setCustomPlacementConfig(CustomPlacementConfig customPlacementConfig) { + if (!Objects.equals(this.customPlacementConfig, customPlacementConfig)) { + modifiedFields.add(BucketField.CUSTOM_PLACEMENT_CONFIG); + } + this.customPlacementConfig = customPlacementConfig; + return this; + } + + @Override + Builder setObjectRetention(ObjectRetention objectRetention) { + if (!Objects.equals(this.objectRetention, objectRetention)) { + modifiedFields.add(BucketField.OBJECT_RETENTION); + } + this.objectRetention = objectRetention; + return this; + } + + @Override + public Builder setSoftDeletePolicy(SoftDeletePolicy softDeletePolicy) { + if (!Objects.equals(this.softDeletePolicy, softDeletePolicy)) { + modifiedFields.add(BucketField.SOFT_DELETE_POLICY); + } + this.softDeletePolicy = softDeletePolicy; + return this; + } + + @Override + public Builder setHierarchicalNamespace(HierarchicalNamespace hierarchicalNamespace) { + if (!Objects.equals(this.hierarchicalNamespace, hierarchicalNamespace)) { + modifiedFields.add(BucketField.HIERARCHICAL_NAMESPACE); + } + this.hierarchicalNamespace = hierarchicalNamespace; + return this; + } + + @Override + Builder setLocationType(String locationType) { + if (!Objects.equals(this.locationType, locationType)) { + modifiedFields.add(BucketField.LOCATION_TYPE); + } + this.locationType = locationType; + return this; + } + + @Override + public Builder setIpFilter(IpFilter ipFilter) { + if (!Objects.equals(this.ipFilter, ipFilter)) { + modifiedFields.add(BucketField.IP_FILTER); + } + this.ipFilter = ipFilter; + return this; + } + + @Override + public Builder setIsUnreachable(Boolean isUnreachable) { + Boolean tmp = firstNonNull(isUnreachable, Data.nullOf(Boolean.class)); + this.isUnreachable = tmp; + 
return this; + } + + @Override + public BucketInfo build() { + checkNotNull(name); + return new BucketInfo(this); + } + + @Override + BuilderImpl clearGeneratedId() { + this.generatedId = null; + return this; + } + + @Override + BuilderImpl clearProject() { + this.project = null; + return this; + } + + @Override + BuilderImpl clearName() { + this.name = null; + return this; + } + + @Override + BuilderImpl clearOwner() { + this.owner = null; + return this; + } + + @Override + BuilderImpl clearSelfLink() { + this.selfLink = null; + return this; + } + + @Override + BuilderImpl clearRequesterPays() { + this.requesterPays = null; + return this; + } + + @Override + BuilderImpl clearVersioningEnabled() { + this.versioningEnabled = null; + return this; + } + + @Override + BuilderImpl clearIndexPage() { + this.indexPage = null; + return this; + } + + @Override + BuilderImpl clearNotFoundPage() { + this.notFoundPage = null; + return this; + } + + @Override + BuilderImpl clearLifecycleRules() { + this.lifecycleRules = null; + return this; + } + + @Override + BuilderImpl clearRpo() { + this.rpo = null; + return this; + } + + @Override + BuilderImpl clearStorageClass() { + this.storageClass = null; + return this; + } + + @Override + BuilderImpl clearLocation() { + this.location = null; + return this; + } + + @Override + BuilderImpl clearEtag() { + this.etag = null; + return this; + } + + @Override + BuilderImpl clearCreateTime() { + this.createTime = null; + return this; + } + + @Override + BuilderImpl clearUpdateTime() { + this.updateTime = null; + return this; + } + + @Override + BuilderImpl clearMetageneration() { + this.metageneration = null; + return this; + } + + @Override + BuilderImpl clearCors() { + this.cors = null; + return this; + } + + @Override + BuilderImpl clearAcl() { + this.acl = null; + return this; + } + + @Override + BuilderImpl clearDefaultAcl() { + this.defaultAcl = null; + return this; + } + + @Override + BuilderImpl clearLabels() { + this.labels = null; 
+ return this; + } + + @Override + BuilderImpl clearDefaultKmsKeyName() { + this.defaultKmsKeyName = null; + return this; + } + + @Override + BuilderImpl clearDefaultEventBasedHold() { + this.defaultEventBasedHold = null; + return this; + } + + @Override + BuilderImpl clearRetentionEffectiveTime() { + this.retentionEffectiveTime = null; + return this; + } + + @Override + BuilderImpl clearRetentionPolicyIsLocked() { + this.retentionPolicyIsLocked = null; + return this; + } + + @Override + BuilderImpl clearRetentionPeriod() { + this.retentionPeriod = null; + return this; + } + + @Override + BuilderImpl clearIamConfiguration() { + this.iamConfiguration = null; + return this; + } + + @Override + BuilderImpl clearLocationType() { + this.locationType = null; + return this; + } + + @Override + BuilderImpl clearLogging() { + this.logging = null; + return this; + } + + @Override + BuilderImpl clearCustomPlacementConfig() { + this.customPlacementConfig = null; + return this; + } + + @Override + BuilderImpl clearIpFilter() { + this.ipFilter = null; + return this; + } + + @Override + BuilderImpl clearGoogleManagedEncryptionEnforcementConfig() { + this.googleManagedEncryptionEnforcementConfig = null; + return this; + } + + @Override + BuilderImpl clearCustomerManagedEncryptionEnforcementConfig() { + this.customerManagedEncryptionEnforcementConfig = null; + return this; + } + + @Override + BuilderImpl clearCustomerSuppliedEncryptionEnforcementConfig() { + this.customerSuppliedEncryptionEnforcementConfig = null; + return this; + } + + @Override + BuilderImpl clearIsUnreachable() { + this.isUnreachable = null; + return this; + } + + private Builder clearDeleteLifecycleRules() { + if (lifecycleRules != null && !lifecycleRules.isEmpty()) { + ImmutableList nonDeleteRules = + lifecycleRules.stream() + .filter(IS_DELETE_LIFECYCLE_RULE.negate()) + .collect(ImmutableList.toImmutableList()); + return setLifecycleRules(nonDeleteRules); + } else { + return this; + } + } + } + + 
BucketInfo(BuilderImpl builder) { + generatedId = builder.generatedId; + project = builder.project; + name = builder.name; + etag = builder.etag; + createTime = builder.createTime; + updateTime = builder.updateTime; + metageneration = builder.metageneration; + location = builder.location; + rpo = builder.rpo; + storageClass = builder.storageClass; + cors = builder.cors; + acl = builder.acl; + defaultAcl = builder.defaultAcl; + owner = builder.owner; + selfLink = builder.selfLink; + versioningEnabled = builder.versioningEnabled; + indexPage = builder.indexPage; + notFoundPage = builder.notFoundPage; + lifecycleRules = builder.lifecycleRules; + labels = builder.labels; + requesterPays = builder.requesterPays; + defaultKmsKeyName = builder.defaultKmsKeyName; + defaultEventBasedHold = builder.defaultEventBasedHold; + retentionEffectiveTime = builder.retentionEffectiveTime; + retentionPolicyIsLocked = builder.retentionPolicyIsLocked; + retentionPeriod = builder.retentionPeriod; + iamConfiguration = builder.iamConfiguration; + autoclass = builder.autoclass; + locationType = builder.locationType; + logging = builder.logging; + customPlacementConfig = builder.customPlacementConfig; + objectRetention = builder.objectRetention; + softDeletePolicy = builder.softDeletePolicy; + hierarchicalNamespace = builder.hierarchicalNamespace; + ipFilter = builder.ipFilter; + googleManagedEncryptionEnforcementConfig = builder.googleManagedEncryptionEnforcementConfig; + customerManagedEncryptionEnforcementConfig = builder.customerManagedEncryptionEnforcementConfig; + customerSuppliedEncryptionEnforcementConfig = + builder.customerSuppliedEncryptionEnforcementConfig; + isUnreachable = builder.isUnreachable; + modifiedFields = builder.modifiedFields.build(); + } + + /** The project number of the project the bucket belongs to */ + public BigInteger getProject() { + return project; + } + + /** Returns the service-generated id for the bucket. 
*/ + public String getGeneratedId() { + return generatedId; + } + + /** Returns the bucket's name. */ + public String getName() { + return name; + } + + /** Returns the bucket's owner. This is always the project team's owner group. */ + public Entity getOwner() { + return owner; + } + + /** Returns the URI of this bucket as a string. */ + public String getSelfLink() { + return selfLink; + } + + /** + * Returns a {@code Boolean} with either {@code true}, {@code null} and in certain cases {@code + * false}. + * + *

Case 1: {@code true} the field {@link + * com.google.cloud.storage.Storage.BucketField#VERSIONING} is selected in a {@link + * Storage#get(String, Storage.BucketGetOption...)} and versions for the bucket is enabled. + * + *

Case 2.1: {@code null} the field {@link + * com.google.cloud.storage.Storage.BucketField#VERSIONING} is selected in a {@link + * Storage#get(String, Storage.BucketGetOption...)}, but versions for the bucket is not enabled. + * This case can be considered implicitly {@code false}. + * + *

Case 2.2: {@code null} the field {@link + * com.google.cloud.storage.Storage.BucketField#VERSIONING} is not selected in a {@link + * Storage#get(String, Storage.BucketGetOption...)}, and the state for this field is unknown. + * + *

Case 3: {@code false} versions is explicitly set to false client side for a follow-up + * request for example {@link Storage#update(BucketInfo, Storage.BucketTargetOption...)} in which + * case the value of versions will remain {@code false} for for the given instance. + */ + public Boolean versioningEnabled() { + return Data.isNull(versioningEnabled) ? null : versioningEnabled; + } + + /** + * Returns a {@code Boolean} with either {@code true}, {@code false}, and in a specific case + * {@code null}. + * + *

Case 1: {@code true} the field {@link com.google.cloud.storage.Storage.BucketField#BILLING} + * is selected in a {@link Storage#get(String, Storage.BucketGetOption...)} and requester pays for + * the bucket is enabled. + * + *

Case 2: {@code false} the field {@link com.google.cloud.storage.Storage.BucketField#BILLING} + * in a {@link Storage#get(String, Storage.BucketGetOption...)} is selected and requester pays for + * the bucket is disable. + * + *

Case 3: {@code null} the field {@link com.google.cloud.storage.Storage.BucketField#BILLING} + * in a {@link Storage#get(String, Storage.BucketGetOption...)} is not selected, the value is + * unknown. + */ + public Boolean requesterPays() { + return Data.isNull(requesterPays) ? null : requesterPays; + } + + /** + * Returns bucket's website index page. Behaves as the bucket's directory index where missing + * blobs are treated as potential directories. + */ + public String getIndexPage() { + return indexPage; + } + + /** Returns the custom object to return when a requested resource is not found. */ + public String getNotFoundPage() { + return notFoundPage; + } + + /** + * Returns bucket's lifecycle configuration as a number of delete rules. + * + * @see Lifecycle Management + */ + @Deprecated + public List getDeleteRules() { + return getLifecycleRules().stream() + .filter(IS_DELETE_LIFECYCLE_RULE) + .map(BackwardCompatibilityUtils.deleteRuleCodec::decode) + .collect(ImmutableList.toImmutableList()); + } + + @NonNull + public List getLifecycleRules() { + return lifecycleRules != null ? lifecycleRules : ImmutableList.of(); + } + + /** + * Returns HTTP 1.1 Entity tag for the bucket. + * + * @see Entity Tags + */ + public String getEtag() { + return etag; + } + + /** + * Returns the time at which the bucket was created. + * + * @deprecated {@link #getCreateTimeOffsetDateTime()} + */ + @Deprecated + public Long getCreateTime() { + return millisOffsetDateTimeCodec.decode(createTime); + } + + public OffsetDateTime getCreateTimeOffsetDateTime() { + return createTime; + } + + /** + * Returns the last modification time of the bucket's metadata expressed as the number of + * milliseconds since the Unix epoch. 
+ * + * @deprecated {@link #getUpdateTimeOffsetDateTime()} + */ + @Deprecated + public Long getUpdateTime() { + return millisOffsetDateTimeCodec.decode(updateTime); + } + + public OffsetDateTime getUpdateTimeOffsetDateTime() { + return updateTime; + } + + /** Returns the metadata generation of this bucket. */ + public Long getMetageneration() { + return metageneration; + } + + /** + * Returns the bucket's location. Data for blobs in the bucket resides in physical storage within + * this region or regions. If specifying more than one region `customPlacementConfig` should be + * set in conjunction. + * + * @see Bucket Locations + */ + public String getLocation() { + return location; + } + + /** + * Returns the bucket's locationType. + * + * @see Bucket LocationType + */ + public String getLocationType() { + return locationType; + } + + /** + * Returns the bucket's recovery point objective (RPO). This defines how quickly data is + * replicated between regions in a dual-region bucket. Not defined for single-region buckets. + * + * @see Turbo Replication" + */ + public Rpo getRpo() { + return rpo; + } + + /** + * Returns the bucket's storage class. This defines how blobs in the bucket are stored and + * determines the SLA and the cost of storage. + * + * @see Storage Classes + */ + public StorageClass getStorageClass() { + return storageClass; + } + + /** + * Returns the bucket's Cross-Origin Resource Sharing (CORS) configuration. + * + * @see Cross-Origin Resource Sharing + * (CORS) + */ + public List getCors() { + return cors; + } + + /** + * Returns the bucket's access control configuration. + * + * @see + * About Access Control Lists + */ + public List getAcl() { + return acl; + } + + /** + * Returns the default access control configuration for this bucket's blobs. + * + * @see + * About Access Control Lists + */ + public List getDefaultAcl() { + return defaultAcl; + } + + /** Returns the labels for this bucket. 
*/ + @Nullable + public Map<@NonNull String, @Nullable String> getLabels() { + return labels; + } + + /** Returns the default Cloud KMS key to be applied to newly inserted objects in this bucket. */ + public String getDefaultKmsKeyName() { + return defaultKmsKeyName; + } + + /** + * Returns a {@code Boolean} with either {@code true}, {@code null} and in certain cases {@code + * false}. + * + *

Case 1: {@code true} the field {@link + * com.google.cloud.storage.Storage.BucketField#DEFAULT_EVENT_BASED_HOLD} is selected in a {@link + * Storage#get(String, Storage.BucketGetOption...)} and default event-based hold for the bucket is + * enabled. + * + *

Case 2.1: {@code null} the field {@link + * com.google.cloud.storage.Storage.BucketField#DEFAULT_EVENT_BASED_HOLD} is selected in a {@link + * Storage#get(String, Storage.BucketGetOption...)}, but default event-based hold for the bucket + * is not enabled. This case can be considered implicitly {@code false}. + * + *

Case 2.2: {@code null} the field {@link + * com.google.cloud.storage.Storage.BucketField#DEFAULT_EVENT_BASED_HOLD} is not selected in a + * {@link Storage#get(String, Storage.BucketGetOption...)}, and the state for this field is + * unknown. + * + *

Case 3: {@code false} default event-based hold is explicitly set to false using in a {@link + * Builder#setDefaultEventBasedHold(Boolean)} client side for a follow-up request e.g. {@link + * Storage#update(BucketInfo, Storage.BucketTargetOption...)} in which case the value of default + * event-based hold will remain {@code false} for the given instance. + */ + @BetaApi + public Boolean getDefaultEventBasedHold() { + return Data.isNull(defaultEventBasedHold) ? null : defaultEventBasedHold; + } + + /** + * Returns the retention effective time a policy took effect if a retention policy is defined as a + * {@code Long}. + * + * @deprecated Use {@link #getRetentionPeriodDuration()} + */ + @BetaApi + @Deprecated + public Long getRetentionEffectiveTime() { + return Data.isNull(retentionEffectiveTime) + ? null + : millisOffsetDateTimeCodec.decode(retentionEffectiveTime); + } + + /** Returns the retention effective time a policy took effect if a retention policy is defined. */ + @BetaApi + public OffsetDateTime getRetentionEffectiveTimeOffsetDateTime() { + return retentionEffectiveTime; + } + + /** + * Returns a {@code Boolean} with either {@code true} or {@code null}. + * + *

Case 1: {@code true} the field {@link + * com.google.cloud.storage.Storage.BucketField#RETENTION_POLICY} is selected in a {@link + * Storage#get(String, Storage.BucketGetOption...)} and retention policy for the bucket is locked. + * + *

Case 2.1: {@code null} the field {@link + * com.google.cloud.storage.Storage.BucketField#RETENTION_POLICY} is selected in a {@link + * Storage#get(String, Storage.BucketGetOption...)}, but retention policy for the bucket is not + * locked. This case can be considered implicitly {@code false}. + * + *

Case 2.2: {@code null} the field {@link + * com.google.cloud.storage.Storage.BucketField#RETENTION_POLICY} is not selected in a {@link + * Storage#get(String, Storage.BucketGetOption...)}, and the state for this field is unknown. + */ + @BetaApi + public Boolean retentionPolicyIsLocked() { + return Data.isNull(retentionPolicyIsLocked) ? null : retentionPolicyIsLocked; + } + + /** + * Returns the retention policy retention period. + * + * @deprecated Use {@link #getRetentionPeriodDuration()} + */ + @BetaApi + @Deprecated + public Long getRetentionPeriod() { + return nullableDurationSecondsCodec.encode(retentionPeriod); + } + + /** Returns the retention policy retention period. */ + @BetaApi + public Duration getRetentionPeriodDuration() { + return retentionPeriod; + } + + /** Returns the IAM configuration */ + @BetaApi + public IamConfiguration getIamConfiguration() { + return iamConfiguration; + } + + /** Returns the Autoclass configuration */ + public Autoclass getAutoclass() { + return autoclass; + } + + /** Returns the Logging */ + public Logging getLogging() { + return logging; + } + + /** Returns the Custom Placement Configuration */ + public CustomPlacementConfig getCustomPlacementConfig() { + return customPlacementConfig; + } + + /** returns the Object Retention configuration */ + public ObjectRetention getObjectRetention() { + return objectRetention; + } + + /** returns the Soft Delete policy */ + public SoftDeletePolicy getSoftDeletePolicy() { + return softDeletePolicy; + } + + /** Returns the Hierarchical Namespace (Folders) Configuration */ + public HierarchicalNamespace getHierarchicalNamespace() { + return hierarchicalNamespace; + } + + /** + * @since 2.54.0 + */ + public @Nullable IpFilter getIpFilter() { + return ipFilter; + } + + public @Nullable GoogleManagedEncryptionEnforcementConfig + getGoogleManagedEncryptionEnforcementConfig() { + return googleManagedEncryptionEnforcementConfig; + } + + public @Nullable 
CustomerManagedEncryptionEnforcementConfig + getCustomerManagedEncryptionEnforcementConfig() { + return customerManagedEncryptionEnforcementConfig; + } + + public @Nullable CustomerSuppliedEncryptionEnforcementConfig + getCustomerSuppliedEncryptionEnforcementConfig() { + return customerSuppliedEncryptionEnforcementConfig; + } + + /** + * Returns a {@code Boolean} with {@code true} if the bucket is unreachable, else {@code null} + * + *

A bucket may be unreachable if the region in which it resides is experiencing an outage or + * if there are other temporary access issues. + */ + public Boolean isUnreachable() { + return Data.isNull(isUnreachable) ? null : isUnreachable; + } + + /** Returns a builder for the current bucket. */ + public Builder toBuilder() { + return new BuilderImpl(this); + } + + @Override + public int hashCode() { + return Objects.hash( + generatedId, + project, + name, + owner, + selfLink, + requesterPays, + versioningEnabled, + indexPage, + notFoundPage, + lifecycleRules, + etag, + createTime, + updateTime, + metageneration, + cors, + acl, + defaultAcl, + location, + rpo, + storageClass, + labels, + defaultKmsKeyName, + defaultEventBasedHold, + retentionEffectiveTime, + retentionPolicyIsLocked, + retentionPeriod, + iamConfiguration, + autoclass, + locationType, + objectRetention, + softDeletePolicy, + customPlacementConfig, + hierarchicalNamespace, + logging, + ipFilter, + googleManagedEncryptionEnforcementConfig, + customerManagedEncryptionEnforcementConfig, + customerSuppliedEncryptionEnforcementConfig, + isUnreachable); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof BucketInfo)) { + return false; + } + BucketInfo that = (BucketInfo) o; + return Objects.equals(generatedId, that.generatedId) + && Objects.equals(project, that.project) + && Objects.equals(name, that.name) + && Objects.equals(owner, that.owner) + && Objects.equals(selfLink, that.selfLink) + && Objects.equals(requesterPays, that.requesterPays) + && Objects.equals(versioningEnabled, that.versioningEnabled) + && Objects.equals(indexPage, that.indexPage) + && Objects.equals(notFoundPage, that.notFoundPage) + && Objects.equals(lifecycleRules, that.lifecycleRules) + && Objects.equals(etag, that.etag) + && Objects.equals(createTime, that.createTime) + && Objects.equals(updateTime, that.updateTime) + && Objects.equals(metageneration, that.metageneration) + 
&& Objects.equals(cors, that.cors) + && Objects.equals(acl, that.acl) + && Objects.equals(defaultAcl, that.defaultAcl) + && Objects.equals(location, that.location) + && Objects.equals(rpo, that.rpo) + && Objects.equals(storageClass, that.storageClass) + && Objects.equals(labels, that.labels) + && Objects.equals(defaultKmsKeyName, that.defaultKmsKeyName) + && Objects.equals(defaultEventBasedHold, that.defaultEventBasedHold) + && Objects.equals(retentionEffectiveTime, that.retentionEffectiveTime) + && Objects.equals(retentionPolicyIsLocked, that.retentionPolicyIsLocked) + && Objects.equals(retentionPeriod, that.retentionPeriod) + && Objects.equals(iamConfiguration, that.iamConfiguration) + && Objects.equals(autoclass, that.autoclass) + && Objects.equals(locationType, that.locationType) + && Objects.equals(customPlacementConfig, that.customPlacementConfig) + && Objects.equals(objectRetention, that.objectRetention) + && Objects.equals(softDeletePolicy, that.softDeletePolicy) + && Objects.equals(hierarchicalNamespace, that.hierarchicalNamespace) + && Objects.equals(logging, that.logging) + && Objects.equals(ipFilter, that.ipFilter) + && Objects.equals( + googleManagedEncryptionEnforcementConfig, that.googleManagedEncryptionEnforcementConfig) + && Objects.equals( + customerManagedEncryptionEnforcementConfig, + that.customerManagedEncryptionEnforcementConfig) + && Objects.equals( + customerSuppliedEncryptionEnforcementConfig, + that.customerSuppliedEncryptionEnforcementConfig) + && Objects.equals(isUnreachable, that.isUnreachable); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("name", name).toString(); + } + + /** + * Attach this instance to an instance of {@link Storage} thereby allowing RPCs to be performed + * using the methods from the resulting {@link Bucket} + */ + Bucket asBucket(Storage storage) { + return new Bucket(storage, new BucketInfo.BuilderImpl(this)); + } + + ImmutableSet getModifiedFields() { + return 
modifiedFields; + } + + /** Creates a {@code BucketInfo} object for the provided bucket name. */ + public static BucketInfo of(String name) { + return newBuilder(name).build(); + } + + /** Returns a {@code BucketInfo} builder where the bucket's name is set to the provided name. */ + public static Builder newBuilder(String name) { + return new BuilderImpl(name); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BufferHandle.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BufferHandle.java new file mode 100644 index 000000000000..272f5061bd04 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BufferHandle.java @@ -0,0 +1,128 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.common.annotations.VisibleForTesting; +import java.nio.ByteBuffer; +import java.util.function.Function; +import java.util.function.Supplier; + +/** + * Sometimes, we need a handle to create a buffer but do not want to unnecessarily allocate it. This + * class can be though of as an enriched {@code Supplier} that still implements the + * common stateful methods of {@link ByteBuffer} without allocating the ByteBuffer until necessary. + * + *

{@link ByteBuffer} is a sealed class hierarchy, meaning we can't simply extend it to provide + * laziness without this new class. + */ +abstract class BufferHandle implements Supplier { + + @VisibleForTesting + BufferHandle() {} + + abstract int remaining(); + + abstract int capacity(); + + abstract int position(); + + static BufferHandle allocate(int capacity) { + return new LazyBufferHandle(capacity, Buffers::allocate); + } + + static BufferHandle handleOf(ByteBuffer buf) { + return new EagerBufferHandle(buf); + } + + static final class LazyBufferHandle extends BufferHandle { + + private final int capacity; + private final Function factory; + + // It is theoretically possible for this value to be null for any of the methods, while + // get is invoked. Whenever reading this value, always read into a local variable and then + // operate on that variable for the rest of the scope. + private volatile ByteBuffer buf; + + @VisibleForTesting + LazyBufferHandle(int capacity, Function factory) { + this.capacity = capacity; + this.factory = factory; + } + + @Override + int remaining() { + ByteBuffer buffer = buf; + return buffer == null ? capacity : buffer.remaining(); + } + + @Override + int capacity() { + ByteBuffer buffer = buf; + return buffer == null ? capacity : buffer.capacity(); + } + + @Override + int position() { + ByteBuffer buffer = buf; + return buffer == null ? 
0 : buffer.position(); + } + + @Override + public ByteBuffer get() { + ByteBuffer result = buf; + if (result != null) { + return result; + } else { + synchronized (this) { + if (buf == null) { + buf = factory.apply(capacity); + } + return buf; + } + } + } + } + + static final class EagerBufferHandle extends BufferHandle { + private final ByteBuffer buf; + + private EagerBufferHandle(ByteBuffer buf) { + this.buf = buf; + } + + @Override + int remaining() { + return buf.remaining(); + } + + @Override + int capacity() { + return buf.capacity(); + } + + @Override + int position() { + return buf.position(); + } + + @Override + public ByteBuffer get() { + return buf; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BufferHandlePool.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BufferHandlePool.java new file mode 100644 index 000000000000..74c7723f033b --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/BufferHandlePool.java @@ -0,0 +1,191 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.storage;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;

import com.google.common.annotations.VisibleForTesting;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.IntStream;

/**
 * A source of {@link BufferHandle}s. Implementations may allocate on demand or hand out buffers
 * from a bounded, blocking pool.
 */
interface BufferHandlePool {

  /** Acquire a buffer. For a bounded pool this blocks until a buffer becomes available. */
  PooledBuffer getBuffer();

  /**
   * Return a previously acquired buffer so it can be reused. For a bounded pool this may block
   * until there is room; returning the same buffer twice is a no-op.
   */
  void returnBuffer(PooledBuffer handle);

  /** A trivial "pool" which allocates a fresh buffer of {@code capacity} bytes per request. */
  static BufferHandlePool simple(int capacity) {
    return new SimpleBufferHandlePool(capacity);
  }

  /** A bounded pool of {@code bufferCount} buffers, each with {@code bufferCapacity} bytes. */
  static BufferHandlePool fixedPool(int bufferCount, int bufferCapacity) {
    return FixedBufferHandlePool.of(bufferCount, bufferCapacity);
  }

  final class SimpleBufferHandlePool implements BufferHandlePool {
    private final int capacity;

    private SimpleBufferHandlePool(int capacity) {
      this.capacity = capacity;
    }

    @Override
    public PooledBuffer getBuffer() {
      // Fresh allocation every time; nothing is retained, so returnBuffer is a no-op.
      return PooledBuffer.of(BufferHandle.allocate(capacity));
    }

    @Override
    public void returnBuffer(PooledBuffer handle) {
      // noop
    }
  }

  /**
   * Specialized and simplified {@link java.util.concurrent.BlockingQueue}. We don't need the
   * majority of methods/functionality, just blocking put/get.
   *
   * <p>Inspired by the BoundedBuffer example from the class javadocs of {@link Condition} (java8)
   */
  final class FixedBufferHandlePool implements BufferHandlePool {
    @VisibleForTesting final HashSet<PooledBuffer> pool;
    private final int poolMaxSize;

    private final ReentrantLock lock;
    private final Condition notEmpty;
    private final Condition notFull;

    @VisibleForTesting
    FixedBufferHandlePool(HashSet<PooledBuffer> pool) {
      checkArgument(!pool.isEmpty(), "provided pool must not start empty");
      this.pool = pool;
      this.poolMaxSize = pool.size();

      this.lock = new ReentrantLock();
      this.notEmpty = lock.newCondition();
      this.notFull = lock.newCondition();
    }

    @Override
    public PooledBuffer getBuffer() {
      try (AcquiredLock ignore = AcquiredLock.lock(this.lock)) {
        // Block until at least one buffer is available.
        while (pool.isEmpty()) {
          notEmpty.awaitUninterruptibly();
        }
        return dequeue();
      }
    }

    @Override
    public void returnBuffer(PooledBuffer handle) {
      checkNotNull(handle, "handle must be non null");
      try (AcquiredLock ignore = AcquiredLock.lock(this.lock)) {
        if (pool.contains(handle)) {
          // Guard against double-return of the same buffer.
          return;
        }
        // Block until there is room; should only occur with foreign buffers.
        while (poolMaxSize == pool.size()) {
          notFull.awaitUninterruptibly();
        }
        enqueue(handle);
      }
    }

    /** Must be called while holding {@code lock}. Resets the buffer before making it available. */
    private void enqueue(PooledBuffer pooled) {
      pooled.getBufferHandle().get().clear();
      pool.add(pooled);
      notEmpty.signal();
    }

    /** Must be called while holding {@code lock}. Removes and returns an arbitrary pool member. */
    private PooledBuffer dequeue() {
      Iterator<PooledBuffer> iterator = pool.iterator();
      checkState(iterator.hasNext(), "attempt to acquire pooled buffer failed");
      PooledBuffer poll = iterator.next();
      iterator.remove();
      notFull.signal();
      return poll;
    }

    @VisibleForTesting
    static FixedBufferHandlePool of(int bufferCount, int bufferCapacity) {
      // explicitly collect to a HashSet
      HashSet<PooledBuffer> buffers =
          IntStream.range(0, bufferCount)
              .mapToObj(i -> BufferHandle.allocate(bufferCapacity))
              .map(PooledBuffer::of)
              .collect(HashSet::new, Set::add, HashSet::addAll);
      return new FixedBufferHandlePool(buffers);
    }
  }

  /** Identity-equality wrapper around a {@link BufferHandle}, suitable as a HashSet member. */
  final class PooledBuffer {
    private final BufferHandle bufferHandle;

    private PooledBuffer(BufferHandle bufferHandle) {
      this.bufferHandle = bufferHandle;
    }

    BufferHandle getBufferHandle() {
      return bufferHandle;
    }

    @Override
    public boolean equals(Object o) {
      // Strict reference equality. The previous implementation compared
      // System.identityHashCode values, which can collide for distinct instances; a collision
      // would make two different buffers "equal", silently shrinking a FixedBufferHandlePool.
      return this == o;
    }

    @Override
    public int hashCode() {
      return System.identityHashCode(this);
    }

    @VisibleForTesting
    static PooledBuffer of(BufferHandle bufferHandle) {
      return new PooledBuffer(bufferHandle);
    }
  }

  /** RAII-style adapter so a {@link ReentrantLock} can be released via try-with-resources. */
  final class AcquiredLock implements AutoCloseable {
    private final ReentrantLock lock;

    private AcquiredLock(ReentrantLock lock) {
      this.lock = lock;
      lock.lock();
    }

    @Override
    public void close() {
      lock.unlock();
    }

    /** Acquire {@code lock}, blocking if necessary, and wrap it for automatic release. */
    private static AcquiredLock lock(ReentrantLock lock) {
      return new AcquiredLock(lock);
    }
  }
}
package com.google.cloud.storage;

import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.api.core.BetaApi;
import com.google.api.core.InternalApi;
import com.google.api.core.SettableApiFuture;
import com.google.cloud.storage.RecoveryFileManager.RecoveryVolumeSinkFactory;
import com.google.cloud.storage.Storage.BlobWriteOption;
import com.google.cloud.storage.TransportCompatibility.Transport;
import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt;
import com.google.cloud.storage.UnifiedOpts.Opts;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
import com.google.common.util.concurrent.MoreExecutors;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Clock;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Objects;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collector;
import javax.annotation.concurrent.Immutable;
import org.checkerframework.checker.nullness.qual.MonotonicNonNull;

/**
 * There are scenarios in which disk space is more plentiful than memory space. This {@link
 * BlobWriteSessionConfig} allows augmenting an instance of storage to produce {@link
 * BlobWriteSession}s which buffer to disk rather than holding bytes in memory.
 *
 * <p>Once the file on disk is closed, the entire file will then be uploaded to GCS.
 *
 * @see Storage#blobWriteSession(BlobInfo, BlobWriteOption...)
 * @see GrpcStorageOptions.Builder#setBlobWriteSessionConfig(BlobWriteSessionConfig)
 * @see BlobWriteSessionConfigs#bufferToDiskThenUpload(Path)
 * @see BlobWriteSessionConfigs#bufferToDiskThenUpload(Collection)
 */
@Immutable
@BetaApi
@TransportCompatibility({Transport.GRPC, Transport.HTTP})
public final class BufferToDiskThenUpload extends BlobWriteSessionConfig
    implements BlobWriteSessionConfig.HttpCompatible, BlobWriteSessionConfig.GrpcCompatible {

  private static final long serialVersionUID = 9059242302276891867L;

  /**
   * Non-final only because of {@link java.io.Serializable}; effectively final as it is immutable
   * and there is no reference mutator method.
   */
  @MonotonicNonNull private transient ImmutableList<Path> paths;

  private final boolean includeLoggingSink;

  /** Serialized stand-in for {@link #paths}; populated lazily during serialization. */
  @MonotonicNonNull private volatile ArrayList<String> absolutePaths;

  @InternalApi
  BufferToDiskThenUpload(ImmutableList<Path> paths, boolean includeLoggingSink) throws IOException {
    this.paths = paths;
    this.includeLoggingSink = includeLoggingSink;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof BufferToDiskThenUpload)) {
      return false;
    }
    // NOTE(review): absolutePaths is populated lazily on first serialization, so equals/hashCode
    // can change after writeObject; confirm this is acceptable for callers that key on instances.
    BufferToDiskThenUpload other = (BufferToDiskThenUpload) o;
    return includeLoggingSink == other.includeLoggingSink
        && Objects.equals(paths, other.paths)
        && Objects.equals(absolutePaths, other.absolutePaths);
  }

  @Override
  public int hashCode() {
    return Objects.hash(paths, includeLoggingSink, absolutePaths);
  }

  @VisibleForTesting
  @InternalApi
  BufferToDiskThenUpload withIncludeLoggingSink() throws IOException {
    return new BufferToDiskThenUpload(paths, true);
  }

  @InternalApi
  @Override
  WriterFactory createFactory(Clock clock) throws IOException {
    Duration window = Duration.ofMinutes(10);
    RecoveryFileManager recoveryFileManager =
        RecoveryFileManager.of(paths, getRecoverVolumeSinkFactory(clock, window));
    ThroughputSink gcs = ThroughputSink.windowed(ThroughputMovingWindow.of(window), clock);
    if (includeLoggingSink) {
      gcs = ThroughputSink.tee(ThroughputSink.logged("gcs", clock), gcs);
    }
    return new Factory(recoveryFileManager, clock, gcs);
  }

  /** Build the per-recovery-volume sink factory, optionally wrapping each sink with logging. */
  private RecoveryVolumeSinkFactory getRecoverVolumeSinkFactory(Clock clock, Duration window) {
    return path -> {
      ThroughputSink windowed = ThroughputSink.windowed(ThroughputMovingWindow.of(window), clock);
      if (!includeLoggingSink) {
        return windowed;
      }
      ThroughputSink logged = ThroughputSink.logged(path.toAbsolutePath().toString(), clock);
      return ThroughputSink.tee(logged, windowed);
    };
  }

  /** Lazily materialize {@link #absolutePaths} (double-checked on the volatile) before writing. */
  private void writeObject(ObjectOutputStream out) throws IOException {
    if (absolutePaths == null) {
      synchronized (this) {
        if (absolutePaths == null) {
          absolutePaths =
              paths.stream()
                  .map(Path::toAbsolutePath)
                  .map(Path::toString)
                  .collect(
                      Collector.of(
                          ArrayList::new,
                          ArrayList::add,
                          (l, r) -> {
                            l.addAll(r);
                            return l;
                          }));
        }
      }
    }
    out.defaultWriteObject();
  }

  /** Rebuild the transient {@link #paths} from the serialized absolute path strings. */
  private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    in.defaultReadObject();
    this.paths = absolutePaths.stream().map(Paths::get).collect(ImmutableList.toImmutableList());
  }

  private static final class Factory implements WriterFactory {

    private final RecoveryFileManager recoveryFileManager;
    private final Clock clock;
    private final ThroughputSink gcs;

    private Factory(RecoveryFileManager recoveryFileManager, Clock clock, ThroughputSink gcs) {
      this.recoveryFileManager = recoveryFileManager;
      this.clock = clock;
      this.gcs = gcs;
    }

    @InternalApi
    @Override
    public WritableByteChannelSession<?, BlobInfo> writeSession(
        StorageInternal storage, BlobInfo info, Opts<ObjectTargetOpt> opts) {
      RecoveryFile recoveryFile = recoveryFileManager.newRecoveryFile(info);
      return new WriteToFileThenUpload(storage, info, opts, recoveryFile);
    }

    /** Session that stages all bytes in a recovery file, then uploads the file on close. */
    private final class WriteToFileThenUpload
        implements WritableByteChannelSession<WritableByteChannel, BlobInfo> {

      private final StorageInternal storage;
      private final BlobInfo info;
      private final Opts<ObjectTargetOpt> opts;
      private final RecoveryFile rf;
      private final SettableApiFuture<BlobInfo> result;

      private WriteToFileThenUpload(
          StorageInternal storage, BlobInfo info, Opts<ObjectTargetOpt> opts, RecoveryFile rf) {
        this.info = info;
        this.opts = opts;
        this.rf = rf;
        this.storage = storage;
        this.result = SettableApiFuture.create();
      }

      @Override
      public ApiFuture<WritableByteChannel> openAsync() {
        try {
          ApiFuture<WritableByteChannel> opened = ApiFutures.immediateFuture(rf.writer());
          return ApiFutures.transform(opened, Flusher::new, MoreExecutors.directExecutor());
        } catch (IOException e) {
          throw StorageException.coalesce(e);
        }
      }

      @Override
      public ApiFuture<BlobInfo> getResult() {
        return result;
      }

      /**
       * Channel wrapper that mirrors every written byte into a running crc32c (when checksum
       * validation is enabled) and performs the upload when closed.
       */
      @SuppressWarnings("UnstableApiUsage")
      private final class Flusher implements WritableByteChannel {

        private final WritableByteChannel delegate;
        private final Hasher cumulativeCrc32c;
        private final ReentrantLock lock;

        private Flusher(WritableByteChannel delegate) {
          this.delegate = delegate;
          // Only track a checksum if the configured hasher actually has an initial value.
          this.cumulativeCrc32c =
              opts.getHasher().initialValue() == null ? null : Hashing.crc32c().newHasher();
          this.lock = new ReentrantLock();
        }

        @Override
        public int write(ByteBuffer src) throws IOException {
          lock.lock();
          try {
            if (cumulativeCrc32c != null) {
              // duplicate() so hashing does not disturb src's position.
              // NOTE(review): assumes delegate consumes all of src; a short write would desync
              // the checksum from the bytes on disk -- confirm the file writer writes fully.
              cumulativeCrc32c.putBytes(src.duplicate());
            }
            return delegate.write(src);
          } finally {
            lock.unlock();
          }
        }

        @Override
        public boolean isOpen() {
          lock.lock();
          try {
            return delegate.isOpen();
          } finally {
            lock.unlock();
          }
        }

        @Override
        public void close() throws IOException {
          lock.lock();
          try {
            delegate.close();
            // try-with-resources ensures the recovery file is cleaned up after the upload.
            try (RecoveryFile recovery = WriteToFileThenUpload.this.rf) {
              Path path = recovery.getPath();
              long size = Files.size(path);
              ThroughputSink.computeThroughput(
                  clock,
                  gcs,
                  size,
                  () -> {
                    BlobInfo pendingInfo = info;
                    Opts<ObjectTargetOpt> pendingOpts = opts;
                    if (cumulativeCrc32c != null) {
                      // Attach the locally computed crc32c and ask the service to validate it.
                      int hashCodeInt = cumulativeCrc32c.hash().asInt();
                      pendingInfo =
                          pendingInfo.toBuilder()
                              .clearMd5()
                              .clearCrc32c()
                              .setCrc32c(Utils.crc32cCodec.encode(hashCodeInt))
                              .build();
                      pendingOpts = opts.prepend(Opts.from(UnifiedOpts.crc32cMatch(hashCodeInt)));
                    }
                    BlobInfo blob = storage.internalCreateFrom(path, pendingInfo, pendingOpts);
                    result.set(blob);
                  });
            } catch (StorageException | IOException e) {
              result.setException(e);
              throw e;
            }
          } finally {
            lock.unlock();
          }
        }
      }
    }
  }
}
package com.google.cloud.storage;

import com.google.cloud.storage.BufferedReadableByteChannelSession.BufferedReadableByteChannel;
import java.nio.channels.ReadableByteChannel;

/**
 * A {@link ReadableByteChannelSession} whose channel performs internal buffering.
 *
 * @param <ResultT> the type of the session's eventual result
 */
interface BufferedReadableByteChannelSession<ResultT>
    extends ReadableByteChannelSession<BufferedReadableByteChannel, ResultT> {

  /** Marker type: a {@link ReadableByteChannel} whose reads are internally buffered. */
  interface BufferedReadableByteChannel extends ReadableByteChannel {}
}
package com.google.cloud.storage;

import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel;
import java.io.IOException;
import java.nio.channels.WritableByteChannel;

/**
 * A {@link WritableByteChannelSession} whose channel performs internal buffering.
 *
 * @param <ResultT> the type of the session's eventual result
 */
interface BufferedWritableByteChannelSession<ResultT>
    extends WritableByteChannelSession<BufferedWritableByteChannel, ResultT> {

  /** A {@link WritableByteChannel} whose writes are internally buffered. */
  interface BufferedWritableByteChannel extends WritableByteChannel {

    /** Block the invoking thread until all written bytes are accepted by the lower layer. */
    void flush() throws IOException;
  }
}

package com.google.cloud.storage;

import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.util.function.Consumer;

/**
 * Utility methods for working with ByteBuffers.
 *
 * <p>Several methods are copied from {@code com.google.common.base.Java8Compatibility} which is
 * package private. Each of these methods will need to be kept as long as we are supporting Java 8.
 * The list of applicable methods: {@link #clear}, {@link #flip}, {@link #limit}, {@link
 * #position(Buffer, int)}.
 */
final class Buffers {

  private Buffers() {}

  static void clear(Buffer b) {
    b.clear();
  }

  static void flip(Buffer b) {
    b.flip();
  }

  static void limit(Buffer b, int limit) {
    b.limit(limit);
  }

  static void position(Buffer b, int position) {
    b.position(position);
  }

  static int position(Buffer b) {
    return b.position();
  }

  static int remaining(Buffer b) {
    return b.remaining();
  }

  static boolean hasRemaining(Buffer b) {
    return b.hasRemaining();
  }

  static void compact(ByteBuffer b) {
    b.compact();
  }

  /** Attempt to drain all of {@code content} into {@code dst}. Returns bytes copied. */
  static long copy(ByteBuffer content, ByteBuffer dst) {
    return copy(content, new ByteBuffer[] {dst}, 0, 1);
  }

  /**
   * Attempt to drain all of {@code content} into {@code dsts} starting from {@code dsts[0]}
   * through {@code dsts[dsts.length - 1]}.
   */
  static long copy(ByteBuffer content, ByteBuffer[] dsts) {
    return copy(content, dsts, 0, dsts.length);
  }

  /**
   * Attempt to drain all of {@code content} into {@code dsts} starting from {@code dsts[offset]}
   * through {@code dsts[length - 1]}.
   *
   * <p>Note: {@code length} is an end-exclusive index into {@code dsts}, not a count of buffers.
   *
   * @return the total number of bytes copied
   */
  static long copy(ByteBuffer content, ByteBuffer[] dsts, int offset, int length) {
    long total = 0;
    for (int i = offset; i < length; i++) {
      int contentRemaining = content.remaining();
      if (contentRemaining <= 0) {
        break; // source exhausted
      }
      ByteBuffer buf = dsts[i];
      int bufRemaining = buf.remaining();
      if (bufRemaining == 0) {
        continue;
      } else if (bufRemaining < contentRemaining) {
        // dst can only accept part of what's left; copy exactly bufRemaining bytes
        sliceAndConsume(content, bufRemaining, buf::put);
      } else {
        buf.put(content);
      }
      int written = bufRemaining - buf.remaining();
      total += written;
    }
    return total;
  }

  /**
   * Slice the provided source with a limit of {@code limit}, consume the slice with {@code c},
   * then increment the position of {@code src} to reflect the consumed bytes.
   */
  static void sliceAndConsume(ByteBuffer src, int limit, Consumer<ByteBuffer> c) {
    ByteBuffer slice = src.slice();
    slice.limit(limit);
    c.accept(slice);
    Buffers.position(src, src.position() + limit);
  }

  /** Allocate a buffer of {@code l} bytes; throws ArithmeticException if {@code l} overflows int. */
  static ByteBuffer allocate(long l) {
    return ByteBuffer.allocate(Math.toIntExact(l));
  }

  static ByteBuffer allocate(int i) {
    return ByteBuffer.allocate(i);
  }

  /**
   * Given {@code size}, "snap" it to the next {@code alignmentMultiple} that is {@code >=}
   * {@code size}, and allocate a buffer of that aligned size.
   *
   * <p>i.e. given 344KiB size and 256KiB alignmentMultiple, expect a 512KiB buffer.
   */
  static ByteBuffer allocateAligned(int size, int alignmentMultiple) {
    int actualSize = alignSize(size, alignmentMultiple);
    return allocate(actualSize);
  }

  /** Round {@code size} up to the smallest multiple of {@code alignmentMultiple} that is >= it. */
  static int alignSize(int size, int alignmentMultiple) {
    int alignedSize = size;
    if (size < alignmentMultiple) {
      alignedSize = alignmentMultiple;
    } else if (size % alignmentMultiple != 0) {
      // TODO: this mod will cause two divisions to happen
      //  * try and measure how expensive two divisions is compared to one
      //  * also measure the case where size is a multiple, and how much the following calculation
      //    costs

      // add almost another full alignmentMultiple to the size
      // then integer divide it before multiplying it by the alignmentMultiple
      alignedSize = (size + alignmentMultiple - 1) / alignmentMultiple * alignmentMultiple;
    } // else size is already aligned
    return alignedSize;
  }

  /** Blocking fill of {@code buf} from {@code c}; delegates to the shared channel helper. */
  static int fillFrom(ByteBuffer buf, ReadableByteChannel c) throws IOException {
    return StorageChannelUtils.blockingFillFrom(buf, c);
  }

  /** Blocking drain of {@code buf} into {@code c}; delegates to the shared channel helper. */
  static int emptyTo(ByteBuffer buf, WritableByteChannel c) throws IOException {
    return StorageChannelUtils.blockingEmptyTo(buf, c);
  }

  /**
   * Sum of {@code remaining()} across {@code buffers[offset]} through {@code buffers[length - 1]}
   * ({@code length} is an end-exclusive index, matching {@link #copy(ByteBuffer, ByteBuffer[],
   * int, int)}).
   */
  static long totalRemaining(ByteBuffer[] buffers, int offset, int length) {
    long totalRemaining = 0; // fixed spelling of the accumulator (was "totalRemaning")
    for (int i = offset; i < length; i++) {
      ByteBuffer buffer = buffers[i];
      totalRemaining += buffer.remaining();
    }
    return totalRemaining;
  }

  /**
   * Copy all bytes from {@code r} to {@code w}, staging through {@code buf}, until {@code r}
   * reaches EOF. Returns the total number of bytes written.
   */
  static long copyUsingBuffer(ByteBuffer buf, ReadableByteChannel r, WritableByteChannel w)
      throws IOException {
    long total = 0;
    while (r.read(buf) != -1) {
      buf.flip();
      while (buf.hasRemaining()) {
        total += w.write(buf);
      }
      buf.clear();
    }
    return total;
  }
}
000000000000..a166c5ea2f20 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteRangeSpec.java @@ -0,0 +1,596 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.api.client.util.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.api.core.InternalApi; +import com.google.common.base.MoreObjects; +import com.google.common.base.MoreObjects.ToStringHelper; +import com.google.storage.v2.ReadObjectRequest; +import java.io.Serializable; +import java.util.Locale; +import java.util.Objects; +import java.util.function.BiFunction; +import javax.annotation.concurrent.Immutable; +import javax.annotation.concurrent.ThreadSafe; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * Typeful sealed class hierarchy for representing an HTTP + * Range Header There are certain subtleties when building these header values depending on + * whether explicit boundaries or relative lengths are used. This class encapsulates the edge cases + * as well as the concept of an effective infinity value for end of range. + * + *

This class does not currently support negative offsets, i.e. start from end of content. + */ +@InternalApi +@ThreadSafe +abstract class ByteRangeSpec implements Serializable { + + public static final long EFFECTIVE_INFINITY = Long.MAX_VALUE; + + @Nullable private volatile String httpRangeHeader; + + private ByteRangeSpec() {} + + abstract long beginOffset(); + + abstract long endOffset() throws ArithmeticException; + + abstract long endOffsetInclusive() throws ArithmeticException; + + abstract long length() throws ArithmeticException; + + // TODO: add validation to this if it ever becomes public + abstract ByteRangeSpec withNewBeginOffset(long beginOffset); + + abstract ByteRangeSpec withShiftBeginOffset(long beginOffsetIncrement) throws ArithmeticException; + + abstract ByteRangeSpec withNewEndOffset(long endOffsetExclusive); + + abstract ByteRangeSpec withNewEndOffsetClosed(long endOffsetInclusive); + + abstract ByteRangeSpec withNewRelativeLength(long relativeLength); + + /** + * If a range has no effectively declared beginning and end the string returned here will be null. 
+ */ + @Nullable + final String getHttpRangeHeader() throws ArithmeticException { + if (httpRangeHeader == null) { + synchronized (this) { + if (httpRangeHeader == null) { + httpRangeHeader = fmtAsHttpRangeHeader(); + } + } + } + return httpRangeHeader; + } + + abstract ReadObjectRequest.Builder seekReadObjectRequest(ReadObjectRequest.Builder b); + + @Nullable + protected abstract String fmtAsHttpRangeHeader() throws ArithmeticException; + + @Override + public int hashCode() { + return Objects.hash(getHttpRangeHeader()); + } + + @Override + public boolean equals(Object o) { + if (o instanceof ByteRangeSpec) { + ByteRangeSpec that = (ByteRangeSpec) o; + return Objects.equals(this.getHttpRangeHeader(), that.getHttpRangeHeader()); + } + return false; + } + + @Override + public String toString() { + return append(MoreObjects.toStringHelper("ByteRangeSpec")).toString(); + } + + protected abstract MoreObjects.ToStringHelper append(MoreObjects.ToStringHelper tsh); + + static ByteRangeSpec nullRange() { + return NullByteRangeSpec.INSTANCE; + } + + static ByteRangeSpec relativeLength(@Nullable Long beginOffset, @Nullable Long length) { + return create(beginOffset, length, RelativeByteRangeSpec::new); + } + + static ByteRangeSpec explicit(@Nullable Long beginOffset, @Nullable Long endOffsetExclusive) { + return create(beginOffset, endOffsetExclusive, LeftClosedRightOpenByteRangeSpec::new); + } + + static ByteRangeSpec explicitClosed( + @Nullable Long beginOffset, @Nullable Long endOffsetInclusive) { + return create(beginOffset, endOffsetInclusive, LeftClosedRightClosedByteRangeSpec::new); + } + + static ByteRangeSpec parse(String string) { + checkNotNull(string, "Range header is null"); + checkArgument(string.startsWith("bytes="), "malformed Range header value: %s", string); + + int i = string.indexOf('-'); + String minS = string.substring(6, i); + String maxS = string.substring(i + 1); + + long min = Long.parseLong(minS); + long max = Long.parseLong(maxS); + + return 
explicitClosed(min, max); + } + + private static ByteRangeSpec create( + @Nullable Long beginOffset, + @Nullable Long length, + BiFunction<@NonNull Long, @NonNull Long, ByteRangeSpec> f) { + if (beginOffset == null && length == null) { + return nullRange(); + } else if (beginOffset != null && length != null) { + if (beginOffset == 0 && length == EFFECTIVE_INFINITY) { + return nullRange(); + } else if (length == EFFECTIVE_INFINITY) { + return new LeftClosedByteRangeSpec(beginOffset); + } + return f.apply(beginOffset, length); + } else if (beginOffset == null /* && length != null*/) { + if (length == EFFECTIVE_INFINITY) { + return nullRange(); + } + return f.apply(0L, length); + } else { + return new LeftClosedByteRangeSpec(beginOffset); + } + } + + @Immutable + private static final class RelativeByteRangeSpec extends ByteRangeSpec { + private static final long serialVersionUID = -3910856417374881377L; + private final long beginOffset; + private final long length; + + private RelativeByteRangeSpec(long beginOffset, long length) { + this.beginOffset = beginOffset; + this.length = length; + } + + @Override + long beginOffset() { + return beginOffset; + } + + @Override + long endOffset() throws ArithmeticException { + return Math.addExact(beginOffset, length); + } + + @Override + long endOffsetInclusive() throws ArithmeticException { + return Math.addExact(beginOffset, length) - 1; + } + + @Override + long length() throws ArithmeticException { + return length; + } + + @Override + ByteRangeSpec withNewBeginOffset(long beginOffset) { + if (beginOffset != this.beginOffset) { + return new RelativeByteRangeSpec(beginOffset, length); + } else { + return this; + } + } + + @Override + ByteRangeSpec withShiftBeginOffset(long beginOffsetIncrement) throws ArithmeticException { + if (beginOffsetIncrement != 0) { + return new RelativeByteRangeSpec(Math.addExact(beginOffset, beginOffsetIncrement), length); + } else { + return this; + } + } + + @Override + ByteRangeSpec 
withNewEndOffset(long endOffsetExclusive) { + return new LeftClosedRightOpenByteRangeSpec(beginOffset, endOffsetExclusive); + } + + @Override + ByteRangeSpec withNewEndOffsetClosed(long endOffsetInclusive) { + return new LeftClosedRightClosedByteRangeSpec(beginOffset, endOffsetInclusive); + } + + @Override + ByteRangeSpec withNewRelativeLength(long relativeLength) { + if (relativeLength != this.length) { + return new RelativeByteRangeSpec(beginOffset, relativeLength); + } else { + return this; + } + } + + @Override + public ReadObjectRequest.Builder seekReadObjectRequest(ReadObjectRequest.Builder b) { + return b.setReadOffset(beginOffset()).setReadLimit(length()); + } + + @Override + protected String fmtAsHttpRangeHeader() throws ArithmeticException { + return String.format(Locale.US, "bytes=%d-%d", beginOffset, endOffsetInclusive()); + } + + @Override + protected ToStringHelper append(ToStringHelper tsh) { + return tsh.addValue(String.format(Locale.US, "%d + %d", beginOffset, length)); + } + } + + @Immutable + private static final class LeftClosedRightOpenByteRangeSpec extends ByteRangeSpec { + private static final long serialVersionUID = -2706235472358072141L; + private final long beginOffset; + private final long endOffsetExclusive; + + private LeftClosedRightOpenByteRangeSpec(long beginOffset, long endOffsetExclusive) { + this.beginOffset = beginOffset; + this.endOffsetExclusive = endOffsetExclusive; + } + + @Override + long beginOffset() { + return beginOffset; + } + + @Override + long endOffset() throws ArithmeticException { + return endOffsetExclusive; + } + + @Override + long endOffsetInclusive() throws ArithmeticException { + return Math.subtractExact(endOffsetExclusive, 1); + } + + @Override + long length() throws ArithmeticException { + return Math.subtractExact(endOffsetExclusive, beginOffset); + } + + @Override + ByteRangeSpec withNewBeginOffset(long beginOffset) { + if (beginOffset != this.beginOffset) { + return new 
LeftClosedRightOpenByteRangeSpec(beginOffset, endOffsetExclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withShiftBeginOffset(long beginOffsetIncrement) throws ArithmeticException { + if (beginOffsetIncrement != 0) { + return new LeftClosedRightOpenByteRangeSpec( + Math.addExact(beginOffset, beginOffsetIncrement), endOffsetExclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewEndOffset(long endOffsetExclusive) { + if (endOffsetExclusive != this.endOffsetExclusive) { + return new LeftClosedRightOpenByteRangeSpec(beginOffset, endOffsetExclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewEndOffsetClosed(long endOffsetInclusive) { + return new LeftClosedRightClosedByteRangeSpec(beginOffset, endOffsetInclusive); + } + + @Override + ByteRangeSpec withNewRelativeLength(long relativeLength) { + return new RelativeByteRangeSpec(beginOffset, relativeLength); + } + + @Override + public ReadObjectRequest.Builder seekReadObjectRequest(ReadObjectRequest.Builder b) { + return b.setReadOffset(beginOffset()).setReadLimit(length()); + } + + @Override + protected String fmtAsHttpRangeHeader() throws ArithmeticException { + return String.format(Locale.US, "bytes=%d-%d", beginOffset, endOffsetInclusive()); + } + + @Override + protected ToStringHelper append(ToStringHelper tsh) { + return tsh.addValue(String.format(Locale.US, "[%d, %d)", beginOffset, endOffsetExclusive)); + } + } + + @Immutable + private static final class LeftClosedRightClosedByteRangeSpec extends ByteRangeSpec { + private static final long serialVersionUID = -2706235472358072141L; + private final long beginOffset; + private final long endOffsetInclusive; + + private LeftClosedRightClosedByteRangeSpec(long beginOffset, long endOffsetInclusive) { + this.beginOffset = beginOffset; + this.endOffsetInclusive = endOffsetInclusive; + } + + @Override + long beginOffset() { + return beginOffset; + } + + @Override + long endOffset() throws 
ArithmeticException { + return Math.addExact(endOffsetInclusive, 1L); + } + + @Override + long endOffsetInclusive() throws ArithmeticException { + return endOffsetInclusive; + } + + @Override + long length() throws ArithmeticException { + return Math.addExact(Math.subtractExact(endOffsetInclusive, beginOffset), 1); + } + + @Override + ByteRangeSpec withNewBeginOffset(long beginOffset) { + if (beginOffset != this.beginOffset) { + return new LeftClosedRightClosedByteRangeSpec(beginOffset, endOffsetInclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withShiftBeginOffset(long beginOffsetIncrement) throws ArithmeticException { + if (beginOffsetIncrement != 0) { + return new LeftClosedRightClosedByteRangeSpec( + Math.addExact(beginOffset, beginOffsetIncrement), endOffsetInclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewEndOffset(long endOffsetExclusive) { + return new LeftClosedRightOpenByteRangeSpec(beginOffset, endOffsetExclusive); + } + + @Override + ByteRangeSpec withNewEndOffsetClosed(long endOffsetInclusive) { + if (endOffsetInclusive != this.endOffsetInclusive) { + return new LeftClosedRightClosedByteRangeSpec(beginOffset, endOffsetInclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewRelativeLength(long relativeLength) { + return new RelativeByteRangeSpec(beginOffset, relativeLength); + } + + @Override + public ReadObjectRequest.Builder seekReadObjectRequest(ReadObjectRequest.Builder b) { + return b.setReadOffset(beginOffset()).setReadLimit(length()); + } + + @Override + protected String fmtAsHttpRangeHeader() throws ArithmeticException { + return String.format(Locale.US, "bytes=%d-%d", beginOffset, endOffsetInclusive); + } + + @Override + protected ToStringHelper append(ToStringHelper tsh) { + return tsh.addValue(String.format(Locale.US, "[%d, %d]", beginOffset, endOffsetInclusive)); + } + } + + @Immutable + private static final class LeftClosedByteRangeSpec extends 
ByteRangeSpec { + private static final long serialVersionUID = 4732278479149027012L; + private final long beginOffset; + + private LeftClosedByteRangeSpec(long beginOffset) { + this.beginOffset = beginOffset; + } + + @Override + long beginOffset() { + return beginOffset; + } + + @Override + long endOffset() throws ArithmeticException { + return EFFECTIVE_INFINITY; + } + + @Override + long endOffsetInclusive() throws ArithmeticException { + return EFFECTIVE_INFINITY; + } + + @Override + long length() throws ArithmeticException { + return EFFECTIVE_INFINITY; + } + + @Override + ByteRangeSpec withNewBeginOffset(long beginOffset) { + if (beginOffset != this.beginOffset) { + return new LeftClosedByteRangeSpec(beginOffset); + } else { + return this; + } + } + + @Override + ByteRangeSpec withShiftBeginOffset(long beginOffsetIncrement) throws ArithmeticException { + if (beginOffsetIncrement != 0) { + return new LeftClosedByteRangeSpec(Math.addExact(beginOffset, beginOffsetIncrement)); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewEndOffset(long endOffsetExclusive) { + return new LeftClosedRightOpenByteRangeSpec(beginOffset, endOffsetExclusive); + } + + @Override + ByteRangeSpec withNewEndOffsetClosed(long endOffsetInclusive) { + return new LeftClosedRightClosedByteRangeSpec(beginOffset, endOffsetInclusive); + } + + @Override + ByteRangeSpec withNewRelativeLength(long relativeLength) { + return new RelativeByteRangeSpec(beginOffset, relativeLength); + } + + @Override + public ReadObjectRequest.Builder seekReadObjectRequest(ReadObjectRequest.Builder b) { + return b.setReadOffset(beginOffset()); + } + + @Override + protected String fmtAsHttpRangeHeader() throws ArithmeticException { + if (beginOffset > 0) { + return String.format(Locale.US, "bytes=%d-", beginOffset); + } else if (beginOffset < 0) { + return String.format(Locale.US, "bytes=%d", beginOffset); + } else { + return null; + } + } + + @Override + protected ToStringHelper 
append(ToStringHelper tsh) { + return tsh.addValue(String.format(Locale.US, "[%d, +INF)", beginOffset)); + } + } + + @Immutable + private static final class NullByteRangeSpec extends ByteRangeSpec { + private static final NullByteRangeSpec INSTANCE = new NullByteRangeSpec(); + private static final long serialVersionUID = 9110512497431639881L; + + @Override + long beginOffset() { + return 0; + } + + @Override + long endOffset() throws ArithmeticException { + return EFFECTIVE_INFINITY; + } + + @Override + long endOffsetInclusive() throws ArithmeticException { + return EFFECTIVE_INFINITY; + } + + @Override + long length() throws ArithmeticException { + return EFFECTIVE_INFINITY; + } + + @Override + ByteRangeSpec withNewBeginOffset(long beginOffset) { + if (beginOffset != 0) { + return new LeftClosedByteRangeSpec(beginOffset); + } else { + return this; + } + } + + @Override + ByteRangeSpec withShiftBeginOffset(long beginOffsetIncrement) throws ArithmeticException { + if (beginOffsetIncrement != 0) { + return new LeftClosedByteRangeSpec(beginOffsetIncrement); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewEndOffset(long endOffsetExclusive) { + if (endOffsetExclusive != EFFECTIVE_INFINITY) { + return new LeftClosedRightOpenByteRangeSpec(0, endOffsetExclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewEndOffsetClosed(long endOffsetInclusive) { + if (endOffsetInclusive != EFFECTIVE_INFINITY) { + return new LeftClosedRightClosedByteRangeSpec(0, endOffsetInclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewRelativeLength(long relativeLength) { + if (relativeLength != EFFECTIVE_INFINITY) { + return new RelativeByteRangeSpec(0, relativeLength); + } else { + return this; + } + } + + @Override + public ReadObjectRequest.Builder seekReadObjectRequest(ReadObjectRequest.Builder b) { + return b; + } + + @Override + protected String fmtAsHttpRangeHeader() throws ArithmeticException { + return null; + 
} + + @Override + protected ToStringHelper append(ToStringHelper tsh) { + return tsh.addValue("[0, INF]"); + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteSizeConstants.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteSizeConstants.java new file mode 100644 index 000000000000..c7110c1b1221 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteSizeConstants.java @@ -0,0 +1,43 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +final class ByteSizeConstants { + + static final int _1KiB = 1024; + static final int _128KiB = 128 * _1KiB; + static final int _256KiB = 256 * _1KiB; + static final int _384KiB = 384 * _1KiB; + static final int _512KiB = 512 * _1KiB; + static final int _768KiB = 768 * _1KiB; + static final int _1MiB = 1024 * _1KiB; + static final int _2MiB = 2 * _1MiB; + static final int _4MiB = 4 * _1MiB; + static final int _16MiB = 16 * _1MiB; + static final int _32MiB = 32 * _1MiB; + static final long _1GiB = 1024 * _1MiB; + static final long _1TiB = 1024 * _1GiB; + static final long _5TiB = 5 * _1TiB; + + static final long _128KiBL = 131072L; + static final long _256KiBL = 262144L; + static final long _512KiBL = 524288L; + static final long _768KiBL = 786432L; + static final long _1MiBL = 1048576L; + + private ByteSizeConstants() {} +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteStringStrategy.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteStringStrategy.java new file mode 100644 index 000000000000..a08118301622 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteStringStrategy.java @@ -0,0 +1,33 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.protobuf.ByteString; +import com.google.protobuf.UnsafeByteOperations; +import java.nio.ByteBuffer; +import java.util.function.Function; + +interface ByteStringStrategy extends Function { + + static ByteStringStrategy copy() { + return ByteString::copyFrom; + } + + static ByteStringStrategy noCopy() { + return UnsafeByteOperations::unsafeWrap; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/CanonicalExtensionHeadersSerializer.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/CanonicalExtensionHeadersSerializer.java new file mode 100644 index 000000000000..c13bcaf99dd4 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/CanonicalExtensionHeadersSerializer.java @@ -0,0 +1,124 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Canonical extension header serializer. 
+ * + * @see + * Canonical Extension Headers + */ +public class CanonicalExtensionHeadersSerializer { + + private static final char HEADER_SEPARATOR = ':'; + private static final char HEADER_NAME_SEPARATOR = ';'; + + private final Storage.SignUrlOption.SignatureVersion signatureVersion; + + public CanonicalExtensionHeadersSerializer( + Storage.SignUrlOption.SignatureVersion signatureVersion) { + this.signatureVersion = signatureVersion; + } + + public CanonicalExtensionHeadersSerializer() { + // TODO switch this when V4 becomes default + this.signatureVersion = Storage.SignUrlOption.SignatureVersion.V2; + } + + public StringBuilder serialize(Map canonicalizedExtensionHeaders) { + + StringBuilder serializedHeaders = new StringBuilder(); + + if (canonicalizedExtensionHeaders == null || canonicalizedExtensionHeaders.isEmpty()) { + return serializedHeaders; + } + + Map lowercaseHeaders = getLowercaseHeaders(canonicalizedExtensionHeaders); + + // Sort all custom headers by header name using a lexicographical sort by code point value. + List sortedHeaderNames = new ArrayList<>(lowercaseHeaders.keySet()); + Collections.sort(sortedHeaderNames); + + for (String headerName : sortedHeaderNames) { + serializedHeaders + .append(headerName) + .append(HEADER_SEPARATOR) + .append( + lowercaseHeaders + .get(headerName) + // Remove any whitespace around the colon that appears after the header name. + .trim() + // Replace any sequence of whitespace with a single space. + .replaceAll("\\s+", " ")) + // Append a newline (U+000A) to each custom header. 
+ .append(SignatureInfo.COMPONENT_SEPARATOR); + } + + // Concatenate all custom headers + return serializedHeaders; + } + + public StringBuilder serializeHeaderNames(Map canonicalizedExtensionHeaders) { + StringBuilder serializedHeaders = new StringBuilder(); + + if (canonicalizedExtensionHeaders == null || canonicalizedExtensionHeaders.isEmpty()) { + return serializedHeaders; + } + Map lowercaseHeaders = getLowercaseHeaders(canonicalizedExtensionHeaders); + + List sortedHeaderNames = new ArrayList<>(lowercaseHeaders.keySet()); + Collections.sort(sortedHeaderNames); + + for (String headerName : sortedHeaderNames) { + serializedHeaders.append(headerName).append(HEADER_NAME_SEPARATOR); + } + + serializedHeaders.setLength(serializedHeaders.length() - 1); // remove trailing semicolon + + return serializedHeaders; + } + + private Map getLowercaseHeaders( + Map canonicalizedExtensionHeaders) { + // Make all custom header names lowercase. + Map lowercaseHeaders = new HashMap<>(); + for (String headerName : new ArrayList<>(canonicalizedExtensionHeaders.keySet())) { + + String lowercaseHeaderName = headerName.toLowerCase(); + + // If present and we're V2, remove the x-goog-encryption-key and x-goog-encryption-key-sha256 + // headers. 
(CSEK headers are allowed for V4) + if (Storage.SignUrlOption.SignatureVersion.V2.equals(signatureVersion) + && ("x-goog-encryption-key".equals(lowercaseHeaderName) + || "x-goog-encryption-key-sha256".equals(lowercaseHeaderName))) { + + continue; + } + + lowercaseHeaders.put(lowercaseHeaderName, canonicalizedExtensionHeaders.get(headerName)); + } + + return lowercaseHeaders; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ChannelSession.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ChannelSession.java new file mode 100644 index 000000000000..532b561bce4a --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ChannelSession.java @@ -0,0 +1,124 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.storage.BufferedReadableByteChannelSession.BufferedReadableByteChannel; +import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.common.base.MoreObjects; +import com.google.common.util.concurrent.MoreExecutors; +import java.util.function.BiFunction; + +class ChannelSession { + private final Object channelInitSyncObj = new ChannelSessionInitLockObject(); + + private final ApiFuture startFuture; + private final ApiFunction f; + private final SettableApiFuture resultFuture; + + private volatile ApiFuture channelFuture; + + ChannelSession( + ApiFuture startFuture, BiFunction, ChannelT> f) { + this(startFuture, f, SettableApiFuture.create()); + } + + ChannelSession( + ApiFuture startFuture, + BiFunction, ChannelT> f, + SettableApiFuture resultFuture) { + this.startFuture = startFuture; + this.resultFuture = resultFuture; + this.f = (s) -> f.apply(s, this.resultFuture); + } + + public ApiFuture openAsync() { + ApiFuture result = channelFuture; + if (result != null) { + return result; + } + + synchronized (channelInitSyncObj) { + if (channelFuture == null) { + channelFuture = ApiFutures.transform(startFuture, f, MoreExecutors.directExecutor()); + } + return channelFuture; + } + } + + public ApiFuture getResult() { + return resultFuture; + } + + static final class UnbufferedReadSession + extends ChannelSession + implements UnbufferedReadableByteChannelSession { + + UnbufferedReadSession( + ApiFuture startFuture, + BiFunction, UnbufferedReadableByteChannel> f) { + super(startFuture, f); 
+ } + } + + static final class BufferedReadSession + extends ChannelSession + implements BufferedReadableByteChannelSession { + + BufferedReadSession( + ApiFuture startFuture, + BiFunction, BufferedReadableByteChannel> f) { + super(startFuture, f); + } + } + + static final class UnbufferedWriteSession + extends ChannelSession + implements UnbufferedWritableByteChannelSession { + + UnbufferedWriteSession( + ApiFuture startFuture, + BiFunction, UnbufferedWritableByteChannel> f) { + super(startFuture, f); + } + } + + static final class BufferedWriteSession + extends ChannelSession + implements BufferedWritableByteChannelSession { + + BufferedWriteSession( + ApiFuture startFuture, + BiFunction, BufferedWritableByteChannel> f) { + super(startFuture, f); + } + } + + private static final class ChannelSessionInitLockObject { + private ChannelSessionInitLockObject() {} + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ChecksumResponseParser.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ChecksumResponseParser.java new file mode 100644 index 000000000000..2c44565682c9 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ChecksumResponseParser.java @@ -0,0 +1,75 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.client.http.HttpResponse; +import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.UploadPartResponse; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** A utility class to parse checksums from an {@link HttpResponse}. */ +final class ChecksumResponseParser { + + private static final String X_GOOG_HASH = "x-goog-hash"; + + private ChecksumResponseParser() {} + + static UploadPartResponse parseUploadResponse(HttpResponse response) { + String eTag = response.getHeaders().getETag(); + Map hashes = extractHashesFromHeader(response); + return UploadPartResponse.builder() + .eTag(eTag) + .md5(hashes.get("md5")) + .crc32c(hashes.get("crc32c")) + .build(); + } + + static CompleteMultipartUploadResponse parseCompleteResponse(HttpResponse response) + throws IOException { + Map hashes = extractHashesFromHeader(response); + CompleteMultipartUploadResponse completeMpuResponse = + response.parseAs(CompleteMultipartUploadResponse.class); + return CompleteMultipartUploadResponse.builder() + .location(completeMpuResponse.location()) + .bucket(completeMpuResponse.bucket()) + .key(completeMpuResponse.key()) + .etag(completeMpuResponse.etag()) + .crc32c(hashes.get("crc32c")) + .build(); + } + + static Map extractHashesFromHeader(HttpResponse response) { + List hashHeaders = response.getHeaders().getHeaderStringValues(X_GOOG_HASH); + if (hashHeaders == null || hashHeaders.isEmpty()) { + return Collections.emptyMap(); + } + + return hashHeaders.stream() + .flatMap(h -> Arrays.stream(h.split(","))) + .map(String::trim) + .filter(s -> !s.isEmpty()) + .map(s -> s.split("=", 2)) + .filter(a -> a.length == 2) + .filter(a -> "crc32c".equalsIgnoreCase(a[0]) || "md5".equalsIgnoreCase(a[0])) + .collect(Collectors.toMap(a 
-> a[0].toLowerCase(), a -> a[1], (v1, v2) -> v1)); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ChunkSegmenter.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ChunkSegmenter.java new file mode 100644 index 000000000000..d2f4ea9fe35e --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ChunkSegmenter.java @@ -0,0 +1,264 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.common.math.IntMath; +import com.google.common.primitives.Ints; +import com.google.protobuf.ByteString; +import java.math.RoundingMode; +import java.nio.ByteBuffer; +import java.util.ArrayDeque; +import java.util.Deque; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * When processing a "chunk" of data to write to GCS, we must turn our logical chunk into N + * segments. Each resulting segment will then become an individual message. 
+ */ +final class ChunkSegmenter { + private final Hasher hasher; + private final ByteStringStrategy bss; + private final int maxSegmentSize; + private final int blockSize; + + ChunkSegmenter(Hasher hasher, ByteStringStrategy bss, int maxSegmentSize) { + this(hasher, bss, maxSegmentSize, ByteSizeConstants._256KiB); + } + + @VisibleForTesting + ChunkSegmenter(Hasher hasher, ByteStringStrategy bss, int maxSegmentSize, int blockSize) { + int mod = maxSegmentSize % blockSize; + Preconditions.checkArgument( + mod == 0, + "maxSegmentSize % blockSize == 0 (%s % %s == %s)", + maxSegmentSize, + blockSize, + mod); + this.hasher = hasher; + this.bss = bss; + this.maxSegmentSize = maxSegmentSize; + this.blockSize = blockSize; + } + + Hasher getHasher() { + return hasher; + } + + ChunkSegment[] segmentBuffer(ByteBuffer bb) { + return segmentBuffers(new ByteBuffer[] {bb}, 0, 1); + } + + /** + * Given {@code bbs}, yield N segments, where each segment is at most {@code maxSegmentSize} + * bytes. + * + *

An example: + * + *

+   * Given a "chunk" consisting of two ByteBuffers, A and B, where A contains 3 MiB and B contains 6.6 MiB
+   *    A: 3 MiB                       B: 6.6 MiB
+   * |-----------------------------|-----------------------------------------------------------------|
+   *
+   * Produce segments
+   *   S1: 2 MiB            S2: 2 MiB           S3: 2 MiB           S4: 2 MiB           S5: 1.6 MiB
+   * |-------------------|-------------------|-------------------|-------------------|---------------|
+   * 
+ * + * Each segment will conditionally compute a crc32c value depending upon {@code hasher}. + * + * @see #segmentBuffers(ByteBuffer[], int, int) + */ + ChunkSegment[] segmentBuffers(ByteBuffer[] bbs) { + return segmentBuffers(bbs, 0, bbs.length); + } + + ChunkSegment[] segmentBuffers(ByteBuffer[] bbs, int offset, int length) { + return segmentBuffers(bbs, offset, length, true); + } + + ChunkSegment[] segmentBuffers( + ByteBuffer[] bbs, int offset, int length, boolean allowUnalignedBlocks) { + // turn this into a single branch, rather than multiple that would need to be checked each + // element of the iteration + if (allowUnalignedBlocks) { + return segmentWithUnaligned(bbs, offset, length, Long.MAX_VALUE); + } else { + return segmentWithoutUnaligned(bbs, offset, length, Long.MAX_VALUE); + } + } + + ChunkSegment[] segmentBuffers( + ByteBuffer[] bbs, + int offset, + int length, + boolean allowUnalignedBlocks, + long maxBytesToConsume) { + // turn this into a single branch, rather than multiple that would need to be checked each + // element of the iteration + if (allowUnalignedBlocks) { + return segmentWithUnaligned(bbs, offset, length, maxBytesToConsume); + } else { + long misaligned = maxBytesToConsume % blockSize; + long alignedMaxBytesToConsume = maxBytesToConsume - misaligned; + return segmentWithoutUnaligned(bbs, offset, length, alignedMaxBytesToConsume); + } + } + + private ChunkSegment[] segmentWithUnaligned( + ByteBuffer[] bbs, int offset, int length, long maxBytesToConsume) { + Deque data = new ArrayDeque<>(); + + long consumed = 0; + for (int i = offset; i < length; i++) { + ByteBuffer buffer = bbs[i]; + int remaining; + while ((remaining = buffer.remaining()) > 0 && consumed < maxBytesToConsume) { + long remainingConsumable = maxBytesToConsume - consumed; + int toConsume = remaining; + if (remainingConsumable < remaining) { + toConsume = Math.toIntExact(remainingConsumable); + } + long consumeBytes = consumeBytes(data, toConsume, buffer); + consumed += 
consumeBytes; + } + } + + return data.toArray(new ChunkSegment[0]); + } + + private ChunkSegment[] segmentWithoutUnaligned( + ByteBuffer[] bbs, int offset, int length, long maxBytesToConsume) { + Deque data = new ArrayDeque<>(); + + long buffersTotalRemaining = Buffers.totalRemaining(bbs, offset, length); + final long totalRemaining = Math.min(maxBytesToConsume, buffersTotalRemaining); + long consumedSoFar = 0; + + int currentBlockPending = blockSize; + + outerloop: + for (int i = offset; i < length; i++) { + ByteBuffer buffer = bbs[i]; + int remaining; + while ((remaining = buffer.remaining()) > 0) { + long overallRemaining = totalRemaining - consumedSoFar; + if (overallRemaining < blockSize && currentBlockPending == blockSize) { + break outerloop; + } + + int numBytesConsumable; + if (remaining >= blockSize && currentBlockPending == blockSize) { + int blockCount = IntMath.divide(remaining, blockSize, RoundingMode.DOWN); + numBytesConsumable = blockCount * blockSize; + } else { + numBytesConsumable = Math.min(remaining, currentBlockPending); + } + if (numBytesConsumable <= 0) { + break outerloop; + } + + int consumed = consumeBytes(data, numBytesConsumable, buffer); + int currentBlockPendingLessConsumed = currentBlockPending - consumed; + currentBlockPending = currentBlockPendingLessConsumed % blockSize; + if (currentBlockPending == 0) { + currentBlockPending = blockSize; + } + consumedSoFar += consumed; + } + } + + return data.toArray(new ChunkSegment[0]); + } + + private int consumeBytes(Deque data, int numBytesConsumable, ByteBuffer buffer) { + // either no chunk or most recent chunk is full, start a new one + ChunkSegment peekLast = data.peekLast(); + if (peekLast == null || peekLast.b.size() == maxSegmentSize) { + int limit = Math.min(numBytesConsumable, maxSegmentSize); + ChunkSegment datum = newSegment(buffer, limit); + data.addLast(datum); + return limit; + } else { + ChunkSegment chunkSoFar = data.pollLast(); + //noinspection ConstantConditions -- covered 
by peekLast check above + int limit = + Ints.min(buffer.remaining(), numBytesConsumable, maxSegmentSize - chunkSoFar.b.size()); + ChunkSegment datum = newSegment(buffer, limit); + ChunkSegment plus = chunkSoFar.concat(datum); + data.addLast(plus); + return limit; + } + } + + private ChunkSegment newSegment(ByteBuffer buffer, int limit) { + final ByteBuffer slice = buffer.slice(); + slice.limit(limit); + + Crc32cLengthKnown hash = hasher.hash(slice::duplicate); + ByteString byteString = bss.apply(slice); + Buffers.position(buffer, buffer.position() + limit); + + return new ChunkSegment(byteString, hash); + } + + final class ChunkSegment { + private final ByteString b; + @Nullable private final Crc32cLengthKnown crc32c; + private final boolean onlyFullBlocks; + + private ChunkSegment(ByteString b, @Nullable Crc32cLengthKnown crc32c) { + this.b = b; + this.onlyFullBlocks = b.size() % blockSize == 0; + this.crc32c = crc32c; + } + + public ChunkSegment concat(ChunkSegment other) { + Crc32cLengthKnown newCrc = null; + if (crc32c != null && other.crc32c != null) { + newCrc = crc32c.concat(other.crc32c); + } + ByteString concat = b.concat(other.b); + return new ChunkSegment(concat, newCrc); + } + + public ByteString getB() { + return b; + } + + @Nullable + public Crc32cLengthKnown getCrc32c() { + return crc32c; + } + + public boolean isOnlyFullBlocks() { + return onlyFullBlocks; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("crc32c", crc32c) + .add("onlyFullBlocks", onlyFullBlocks) + .add("b", b) + .toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Conversions.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Conversions.java new file mode 100644 index 000000000000..b8e9c5718cb8 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Conversions.java @@ -0,0 +1,152 @@ +/* + * Copyright 2022 Google 
LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * Abstraction utilities for converting between two different types. + * + *

Primarily targeted at encapsulating the logic for conversion from our model classes and the + * respective transport specific models. + */ +final class Conversions { + + private Conversions() {} + + /** Entry point to the registry of Codecs for conversions with the JSON Api model */ + static JsonConversions json() { + return JsonConversions.INSTANCE; + } + + /** Entry point to the registry of Codecs for conversions with the gRPC Api model */ + static GrpcConversions grpc() { + return GrpcConversions.INSTANCE; + } + + /** + * Abstraction representing a conversion to a different model type. + * + *

This class is the inverse of {@link Decoder} + * + *

A symmetric {@link Encoder} {@link Decoder} pair can make a {@link Codec} + * + * @param + * @param + * @see Decoder + * @see Codec + */ + @FunctionalInterface + interface Encoder { + To encode(From f); + } + + /** + * Abstraction representing a conversion from a different model type. + * + *

This class is the inverse of {@link Encoder} + * + *

A symmetric {@link Encoder} {@link Decoder} pair can make a {@link Codec} + * + * @param + * @param + * @see Encoder + * @see Codec + */ + @FunctionalInterface + interface Decoder { + To decode(From f); + + default Decoder andThen(Decoder d) { + return f -> d.decode(this.decode(f)); + } + + default Decoder compose(Decoder before) { + return in -> this.decode(before.decode(in)); + } + + static Decoder identity() { + return (x) -> x; + } + } + + interface Codec extends Encoder, Decoder { + static Codec of(Encoder e, Decoder d) { + return new SimpleCodec<>(e, d); + } + + default Codec andThen(Codec c) { + Codec self = this; + return new Codec() { + @Override + public A decode(R f) { + return self.decode(c.decode(f)); + } + + @Override + public R encode(A f) { + return c.encode(self.encode(f)); + } + }; + } + + /** + * Create a new Codec which guards calling each method with a null check. + * + *

If the values provided to either {@link #decode(Object)} or {@link #encode(Object)} is + * null, null will be returned. + */ + default Codec<@Nullable A, @Nullable B> nullable() { + Codec self = this; + return new Codec() { + @Override + public A decode(B f) { + return f == null ? null : self.decode(f); + } + + @Override + public B encode(A f) { + return f == null ? null : self.encode(f); + } + }; + } + } + + /** + * Internal implementation detail, not to be opened if the containing class and interfaces are + * ever opened up for access. + */ + private static final class SimpleCodec implements Codec { + private final Encoder e; + private final Decoder d; + + private SimpleCodec(Encoder e, Decoder d) { + this.e = e; + this.d = d; + } + + @Override + public B encode(A f) { + return e.encode(f); + } + + @Override + public A decode(B f) { + return d.decode(f); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/CopyWriter.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/CopyWriter.java new file mode 100644 index 000000000000..feeb9ef7fd39 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/CopyWriter.java @@ -0,0 +1,76 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.cloud.Restorable; +import com.google.cloud.RestorableState; + +/** + * Google Storage blob copy writer. A {@code CopyWriter} object allows to copy both blob's data and + * information. To override source blob's information supply a {@code BlobInfo} to the {@code + * CopyRequest} using either {@link Storage.CopyRequest.Builder#setTarget(BlobInfo, + * Storage.BlobTargetOption...)} or {@link Storage.CopyRequest.Builder#setTarget(BlobInfo, + * Iterable)}. + * + *

This class holds the result of a copy request. If source and destination blobs share the same + * location and storage class the copy is completed in one RPC call otherwise one or more {@link + * #copyChunk} calls are necessary to complete the copy. In addition, {@link CopyWriter#getResult()} + * can be used to automatically complete the copy and return information on the newly created blob. + * + * @see Rewrite + */ +public abstract class CopyWriter implements Restorable { + + // keep this class only extendable within our package + CopyWriter() {} + + /** + * Returns the updated information for the written blob. Calling this method when {@code isDone()} + * is {@code false} will block until all pending chunks are copied. + * + *

This method has the same effect of doing: + * + *

{@code
+   * while (!copyWriter.isDone()) {
+   *    copyWriter.copyChunk();
+   * }
+   * }
+ * + * @throws StorageException upon failure + */ + public abstract Blob getResult(); + + /** Returns the size of the blob being copied. */ + public abstract long getBlobSize(); + + /** Returns {@code true} if blob copy has finished, {@code false} otherwise. */ + public abstract boolean isDone(); + + /** Returns the number of bytes copied. */ + public abstract long getTotalBytesCopied(); + + /** + * Copies the next chunk of the blob. An RPC is issued only if copy has not finished yet ({@link + * #isDone} returns {@code false}). + * + * @throws StorageException upon failure + */ + public abstract void copyChunk(); + + @Override + public abstract RestorableState capture(); +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Cors.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Cors.java new file mode 100644 index 000000000000..a3f9ca6615af --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Cors.java @@ -0,0 +1,208 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.collect.ImmutableList; +import java.io.Serializable; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.List; +import java.util.Objects; + +/** + * Cross-Origin Resource Sharing (CORS) configuration for a bucket. + * + * @see Cross-Origin Resource Sharing + * (CORS) + */ +public final class Cors implements Serializable { + + private static final long serialVersionUID = 3811576113627241235L; + + private final Integer maxAgeSeconds; + private final ImmutableList methods; + private final ImmutableList origins; + private final ImmutableList responseHeaders; + + /** Class for a CORS origin. */ + public static final class Origin implements Serializable { + + private static final long serialVersionUID = -3240120183350397818L; + private static final String ANY_URI = "*"; + private final String value; + + private static final Origin ANY = new Origin(ANY_URI); + + private Origin(String value) { + this.value = checkNotNull(value); + } + + /** Returns an {@code Origin} object for all possible origins. */ + public static Origin any() { + return ANY; + } + + /** Returns an {@code Origin} object for the given scheme, host and port. */ + public static Origin of(String scheme, String host, int port) { + try { + return of(new URI(scheme, null, host, port, null, null, null).toString()); + } catch (URISyntaxException e) { + throw new IllegalArgumentException(e); + } + } + + /** Creates an {@code Origin} object for the provided value. 
*/ + public static Origin of(String value) { + if (ANY_URI.equals(value)) { + return any(); + } + return new Origin(value); + } + + @Override + public int hashCode() { + return Objects.hash(value); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof Origin)) { + return false; + } + Origin origin = (Origin) obj; + return Objects.equals(value, origin.value); + } + + @Override + public String toString() { + return getValue(); + } + + public String getValue() { + return value; + } + } + + /** CORS configuration builder. */ + public static final class Builder { + + private Integer maxAgeSeconds; + private ImmutableList methods; + private ImmutableList origins; + private ImmutableList responseHeaders; + + private Builder() {} + + /** + * Sets the max time in seconds in which a client can issue requests before sending a new + * preflight request. + */ + public Builder setMaxAgeSeconds(Integer maxAgeSeconds) { + this.maxAgeSeconds = maxAgeSeconds; + return this; + } + + /** Sets the HTTP methods supported by this CORS configuration. */ + public Builder setMethods(Iterable methods) { + this.methods = methods != null ? ImmutableList.copyOf(methods) : null; + return this; + } + + /** Sets the origins for this CORS configuration. */ + public Builder setOrigins(Iterable origins) { + this.origins = origins != null ? ImmutableList.copyOf(origins) : null; + return this; + } + + /** Sets the response headers supported by this CORS configuration. */ + public Builder setResponseHeaders(Iterable headers) { + this.responseHeaders = headers != null ? ImmutableList.copyOf(headers) : null; + return this; + } + + /** Creates a CORS configuration. 
*/ + public Cors build() { + return new Cors(this); + } + } + + private Cors(Builder builder) { + this.maxAgeSeconds = builder.maxAgeSeconds; + this.methods = builder.methods; + this.origins = builder.origins; + this.responseHeaders = builder.responseHeaders; + } + + /** + * Returns the max time in seconds in which a client can issue requests before sending a new + * preflight request. + */ + public Integer getMaxAgeSeconds() { + return maxAgeSeconds; + } + + /** Returns the HTTP methods supported by this CORS configuration. */ + public List getMethods() { + return methods; + } + + /** Returns the origins in this CORS configuration. */ + public List getOrigins() { + return origins; + } + + /** Returns the response headers supported by this CORS configuration. */ + public List getResponseHeaders() { + return responseHeaders; + } + + /** Returns a builder for this CORS configuration. */ + public Builder toBuilder() { + return newBuilder() + .setMaxAgeSeconds(maxAgeSeconds) + .setMethods(methods) + .setOrigins(origins) + .setResponseHeaders(responseHeaders); + } + + @Override + public int hashCode() { + return Objects.hash(maxAgeSeconds, methods, origins, responseHeaders); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof Cors)) { + return false; + } + Cors other = (Cors) obj; + return Objects.equals(maxAgeSeconds, other.maxAgeSeconds) + && Objects.equals(methods, other.methods) + && Objects.equals(origins, other.origins) + && Objects.equals(responseHeaders, other.responseHeaders); + } + + /** Returns a CORS configuration builder. 
*/ + public static Builder newBuilder() { + return new Builder(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Crc32cUtility.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Crc32cUtility.java new file mode 100644 index 000000000000..9f91d3b84baa --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Crc32cUtility.java @@ -0,0 +1,88 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +class Crc32cUtility { + private Crc32cUtility() {} + + // Castagnoli polynomial and its degree. + private static final long CASTAGNOLI_POLY = 4812730177L; + private static final int DEGREE = 32; + + // Table storing values of x^(2^k) mod CASTANOLI_POLY for all k < 31. This is sufficient since + // x^(2^31) = x. + private static final long[] X_POW_2K_TABLE = { + 2L, 4L, 16L, 256L, 65536L, 517762881L, 984302966L, + 408362264L, 1503875210L, 2862076957L, 3884826397L, 1324787473L, 621200174L, 1758783527L, + 1416537776L, 1180494764L, 648569364L, 2521473789L, 994858823L, 1728245375L, 3498467999L, + 4059169852L, 3345064394L, 2828422810L, 2429203150L, 3336788029L, 860151998L, 2102628683L, + 1033187991L, 4243778976L, 1123580069L + }; + + // Multiplies two polynomials together modulo CASTAGNOLI_POLY. 
+ private static int multiply(int p, int q) { + long q64 = q; + int result = 0; + long topBit = (1L << DEGREE); + for (int i = 0; i < DEGREE; i++) { + if ((p & 1) != 0) { + result ^= (int) q64; + } + q64 <<= 1; // Multiply by x. + + // If multiplying by x gave q64 a non-zero 32nd coefficient, it no longer encodes the desired + // representative of that polynomial modulo CASTAGNOLI_POLY, so we subtract the generator. + if ((q64 & topBit) != 0) { + q64 ^= CASTAGNOLI_POLY; + } + p >>= 1; + } + return result; + } + + // Given crc representing polynomial P(x), compute P(x)*x^numBits. + private static int extendByZeros(int crc, long numBits) { + // Begin by reversing the bits to most-significant coefficient first for comprehensibility. + crc = Integer.reverse(crc); + int i = 0; + // Iterate over the binary representation of numBits, multiplying by x^(2^k) for numBits_k = 1. + while (numBits != 0) { + if ((numBits & 1) != 0) { + crc = multiply(crc, (int) X_POW_2K_TABLE[i % X_POW_2K_TABLE.length]); + } + i += 1; + numBits >>= 1; + } + crc = Integer.reverse(crc); // Return to the standard bit-order. + return crc; + } + + /** + * Efficiently computes CRC32C for concat(A, B) given crc(A), crc(B) and len(B). + * + * @param crcA A 32-bit integer representing crc(A) with least-significant coefficient first. + * @param crcB Same as crcA for B. + * @param numBytesInB Length of B in bytes. 
+ * @return CRC32C for concat(A, B) PiperOrigin-RevId: 158626905 + */ + public static int concatCrc32c(int crcA, int crcB, long numBytesInB) { + if (numBytesInB == 0) { + return crcA; + } + return extendByZeros(crcA, 8 * numBytesInB) ^ crcB; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Crc32cValue.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Crc32cValue.java new file mode 100644 index 000000000000..5da5a2037665 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Crc32cValue.java @@ -0,0 +1,185 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import java.nio.ByteBuffer; +import java.util.Locale; +import java.util.Objects; + +abstract class Crc32cValue> { + + private Crc32cValue() {} + + public abstract int getValue(); + + /** + * Concatenate {@code other} to {@code this} value. + * + *

The concat operation satisfies the Left Distributive property. + * + *

This means, given the following instances: + * + *

{@code
+   * var A = Crc32cValue.of(a);
+   * var B = Crc32cValue.of(b, 4);
+   * var C = Crc32cValue.of(c, 4);
+   * var D = Crc32cValue.of(d, 4);
+   * }
+ * + * Each of the following lines will all produce the same value: + * + *
{@code
+   * var ABCD1 = A.concat(B).concat(C).concat(D);
+   * var ABCD2 = A.concat(B.concat(C.concat(D)));
+   * var ABCD3 = A.concat(B.concat(C)).concat(D);
+   * }
+ */ + public abstract Res concat(Crc32cLengthKnown other); + + public abstract String debugString(); + + public boolean eqValue(Crc32cValue other) { + return this.getValue() == other.getValue(); + } + + static Crc32cLengthKnown zero() { + return Crc32cLengthKnown.ZERO; + } + + static Crc32cLengthUnknown of(int value) { + return new Crc32cLengthUnknown(value); + } + + static Crc32cLengthKnown of(int value, long length) { + return new Crc32cLengthKnown(value, length); + } + + static String fmtCrc32cValue(int value1) { + return String.format(Locale.US, "crc32c{0x%08x}", value1); + } + + static final class Crc32cLengthUnknown extends Crc32cValue { + private final int value; + + public Crc32cLengthUnknown(int value) { + this.value = value; + } + + public int getValue() { + return value; + } + + @Override + public Crc32cLengthUnknown concat(Crc32cLengthKnown other) { + if (other == Crc32cLengthKnown.ZERO) { + return this; + } + int combined = Crc32cUtility.concatCrc32c(value, other.value, other.length); + return new Crc32cLengthUnknown(combined); + } + + @Override + public String toString() { + return Crc32cLengthKnown.fmtCrc32cValue(value); + } + + @Override + public String debugString() { + return toString(); + } + + public Crc32cLengthKnown withLength(long length) { + return new Crc32cLengthKnown(value, length); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Crc32cLengthUnknown)) { + return false; + } + Crc32cLengthUnknown that = (Crc32cLengthUnknown) o; + return value == that.value; + } + + @Override + public int hashCode() { + return Objects.hash(value); + } + } + + static final class Crc32cLengthKnown extends Crc32cValue { + private static final Crc32cLengthKnown ZERO = Hasher.enabled().hash(ByteBuffer.allocate(0)); + private final int value; + private final long length; + + private Crc32cLengthKnown(int value, long length) { + this.value = value; + this.length = length; + } + + @Override + public 
int getValue() { + return value; + } + + public long getLength() { + return length; + } + + @Override + public Crc32cLengthKnown concat(Crc32cLengthKnown other) { + if (other == ZERO) { + return this; + } else if (this == ZERO) { + return other; + } + int combined = Crc32cUtility.concatCrc32c(value, other.value, other.length); + return new Crc32cLengthKnown(combined, length + other.length); + } + + @Override + public String toString() { + return String.format(Locale.US, "crc32c{0x%08x (length = %d)}", value, length); + } + + @Override + public String debugString() { + return fmtCrc32cValue(value); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Crc32cLengthKnown)) { + return false; + } + Crc32cLengthKnown that = (Crc32cLengthKnown) o; + return value == that.value && length == that.length; + } + + @Override + public int hashCode() { + return Objects.hash(value, length); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/CrossTransportUtils.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/CrossTransportUtils.java new file mode 100644 index 000000000000..d1390aa6aeaa --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/CrossTransportUtils.java @@ -0,0 +1,77 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.TransportCompatibility.Transport; +import java.util.Arrays; +import java.util.Locale; +import java.util.stream.Collectors; + +final class CrossTransportUtils { + + static T throwHttpJsonOnly(String methodName) { + return throwHttpJsonOnly(Storage.class, methodName); + } + + static T throwHttpJsonOnly(Class clazz, String methodName) { + return throwTransportOnly(clazz, methodName, Transport.HTTP); + } + + static T throwGrpcOnly(String methodName) { + return throwGrpcOnly(Storage.class, methodName); + } + + static T throwGrpcOnly(Class clazz, String methodName) { + return throwTransportOnly(clazz, methodName, Transport.GRPC); + } + + static T throwTransportOnly(Class clazz, String methodName, Transport transport) { + String builder; + switch (transport) { + case HTTP: + builder = "StorageOptions.http()"; + break; + case GRPC: + builder = "StorageOptions.grpc()"; + break; + default: + throw new IllegalStateException( + String.format( + Locale.US, + "Broken Java Enum: %s received value: '%s'", + Transport.class, + transport)); + } + String message = + String.format( + Locale.US, + "%s#%s is only supported for %s transport. Please use %s to construct a compatible" + + " instance.", + clazz.getName(), + methodName, + transport, + builder); + throw new UnsupportedOperationException(message); + } + + static String fmtMethodName(String name, Class... 
args) { + return name + + "(" + + Arrays.stream(args).map(Class::getName).collect(Collectors.joining(", ")) + + ")"; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBlobWriteSessionConfig.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBlobWriteSessionConfig.java new file mode 100644 index 000000000000..6d163f1d81fc --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBlobWriteSessionConfig.java @@ -0,0 +1,319 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.BetaApi; +import com.google.api.core.InternalApi; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.WritableByteChannel; +import java.time.Clock; +import java.util.Map; +import java.util.Objects; +import java.util.function.Supplier; +import javax.annotation.concurrent.Immutable; + +/** + * Default Configuration to represent uploading to Google Cloud Storage in a chunked manner. + * + *

Perform a resumable upload, uploading at most {@code chunkSize} bytes each PUT. + * + *

Configuration of chunk size can be performed via {@link + * DefaultBlobWriteSessionConfig#withChunkSize(int)}. + * + *

An instance of this class will provide a {@link BlobWriteSession} is logically equivalent to + * the following: + * + *

{@code
+ * Storage storage = ...;
+ * WriteChannel writeChannel = storage.writer(BlobInfo, BlobWriteOption);
+ * writeChannel.setChunkSize(chunkSize);
+ * }
+ * + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ +@Immutable +@BetaApi +@TransportCompatibility({Transport.GRPC, Transport.HTTP}) +public final class DefaultBlobWriteSessionConfig extends BlobWriteSessionConfig + implements BlobWriteSessionConfig.HttpCompatible, BlobWriteSessionConfig.GrpcCompatible { + private static final long serialVersionUID = -6873740918589930633L; + + private final int chunkSize; + + @InternalApi + DefaultBlobWriteSessionConfig(int chunkSize) { + this.chunkSize = chunkSize; + } + + /** + * The number of bytes each chunk can be. + * + *

Default: {@code 16777216 (16 MiB)} + * + * @see #withChunkSize(int) + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ + public int getChunkSize() { + return chunkSize; + } + + /** + * Create a new instance with the {@code chunkSize} set to the specified value. + * + *

Default: {@code 16777216 (16 MiB)} + * + * @param chunkSize The number of bytes each chunk should be. Must be >= {@code 262144 (256 + * KiB)} + * @return The new instance + * @see #getChunkSize() + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public DefaultBlobWriteSessionConfig withChunkSize(int chunkSize) { + Preconditions.checkArgument( + chunkSize >= ByteSizeConstants._256KiB, + "chunkSize must be >= %d", + ByteSizeConstants._256KiB); + return new DefaultBlobWriteSessionConfig(chunkSize); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof DefaultBlobWriteSessionConfig)) { + return false; + } + DefaultBlobWriteSessionConfig that = (DefaultBlobWriteSessionConfig) o; + return chunkSize == that.chunkSize; + } + + @Override + public int hashCode() { + return Objects.hashCode(chunkSize); + } + + @Override + @InternalApi + WriterFactory createFactory(Clock clock) { + return new Factory(chunkSize); + } + + @InternalApi + private static final class Factory implements WriterFactory { + + private final int chunkSize; + + private Factory(int chunkSize) { + this.chunkSize = chunkSize; + } + + @InternalApi + @Override + public WritableByteChannelSession writeSession( + StorageInternal s, BlobInfo info, Opts opts) { + if (s instanceof GrpcStorageImpl) { + return new DecoratedWritableByteChannelSession<>( + new LazySession<>( + new LazyWriteChannel<>( + () -> { + GrpcStorageImpl grpc = (GrpcStorageImpl) s; + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + WriteObjectRequest req = grpc.getWriteObjectRequest(info, opts); + + ApiFuture startResumableWrite = + grpc.startResumableWrite(grpcCallContext, req, opts); + return ResumableMedia.gapic() + .write() + .byteChannel( + grpc.storageClient + .writeObjectCallable() + .withDefaultCallContext(grpcCallContext)) + .setHasher(opts.getHasher()) + 
.setByteStringStrategy(ByteStringStrategy.copy()) + .resumable() + .withRetryConfig( + grpc.retrier.withAlg(grpc.retryAlgorithmManager.idempotent())) + .buffered(BufferHandle.allocate(chunkSize)) + .setStartAsync(startResumableWrite) + .build(); + })), + Conversions.grpc().blobInfo().compose(WriteObjectResponse::getResource)); + } else if (s instanceof StorageImpl) { + StorageImpl json = (StorageImpl) s; + + return new DecoratedWritableByteChannelSession<>( + new LazySession<>( + new LazyWriteChannel<>( + () -> { + final Map optionsMap = opts.getRpcOptions(); + BlobInfo.Builder builder = info.toBuilder().setMd5(null).setCrc32c(null); + BlobInfo updated = opts.blobInfoMapper().apply(builder).build(); + + StorageObject encode = Conversions.json().blobInfo().encode(updated); + Supplier uploadIdSupplier = + ResumableMedia.startUploadForBlobInfo( + json.getOptions(), + updated, + optionsMap, + json.retrier.withAlg( + json.retryAlgorithmManager.getForResumableUploadSessionCreate( + optionsMap))); + ApiFuture startAsync = + ApiFutures.immediateFuture( + JsonResumableWrite.of( + encode, + optionsMap, + uploadIdSupplier.get(), + 0L, + opts.getHasher(), + opts.getHasher().initialValue())); + + return ResumableMedia.http() + .write() + .byteChannel(HttpClientContext.from(json.storageRpc)) + .resumable() + .buffered(BufferHandle.allocate(chunkSize)) + .setStartAsync(startAsync) + .build(); + })), + Conversions.json().blobInfo()); + } else { + throw new IllegalStateException( + "Unknown Storage implementation: " + s.getClass().getName()); + } + } + } + + static final class DecoratedWritableByteChannelSession + implements WritableByteChannelSession { + + private final WritableByteChannelSession delegate; + private final Decoder decoder; + + DecoratedWritableByteChannelSession( + WritableByteChannelSession delegate, Decoder decoder) { + this.delegate = delegate; + this.decoder = decoder; + } + + @Override + public ApiFuture openAsync() { + return ApiFutures.catchingAsync( + 
delegate.openAsync(), + Throwable.class, + throwable -> ApiFutures.immediateFailedFuture(StorageException.coalesce(throwable)), + MoreExecutors.directExecutor()); + } + + @Override + public ApiFuture getResult() { + ApiFuture decodeResult = + ApiFutures.transform( + delegate.getResult(), decoder::decode, MoreExecutors.directExecutor()); + return ApiFutures.catchingAsync( + decodeResult, + Throwable.class, + throwable -> ApiFutures.immediateFailedFuture(StorageException.coalesce(throwable)), + MoreExecutors.directExecutor()); + } + } + + static final class LazySession + implements WritableByteChannelSession { + private final LazyWriteChannel lazy; + + LazySession(LazyWriteChannel lazy) { + this.lazy = lazy; + } + + @Override + public ApiFuture openAsync() { + // make sure the errors coming out of the BufferedWritableByteChannel are either IOException + // or StorageException + return ApiFutures.transform( + lazy.getSession().openAsync(), + delegate -> + new BufferedWritableByteChannel() { + @Override + public int write(ByteBuffer src) throws IOException { + try { + return delegate.write(src); + } catch (IOException e) { + throw e; + } catch (Exception e) { + throw StorageException.coalesce(e); + } + } + + @Override + public void flush() throws IOException { + try { + delegate.flush(); + } catch (IOException e) { + throw e; + } catch (Exception e) { + throw StorageException.coalesce(e); + } + } + + @Override + public boolean isOpen() { + try { + return delegate.isOpen(); + } catch (Exception e) { + throw StorageException.coalesce(e); + } + } + + @Override + public void close() throws IOException { + try { + delegate.close(); + } catch (IOException e) { + throw e; + } catch (Exception e) { + throw StorageException.coalesce(e); + } + } + }, + MoreExecutors.directExecutor()); + } + + @Override + public ApiFuture getResult() { + return lazy.getSession().getResult(); + } + } +} diff --git 
a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedReadableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedReadableByteChannel.java new file mode 100644 index 000000000000..af622f9a35ff --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedReadableByteChannel.java @@ -0,0 +1,147 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.BufferedReadableByteChannelSession.BufferedReadableByteChannel; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; + +final class DefaultBufferedReadableByteChannel implements BufferedReadableByteChannel { + + private final BufferHandle handle; + + private final UnbufferedReadableByteChannel channel; + + private boolean flipped = false; + private boolean retEOF = false; + + DefaultBufferedReadableByteChannel(BufferHandle handle, UnbufferedReadableByteChannel channel) { + this.handle = handle; + this.channel = channel; + } + + @Override + public int read(ByteBuffer dst) throws IOException { + if (retEOF) { + retEOF = false; + return -1; + } else if (!enqueuedBytes() && !channel.isOpen()) { + throw new ClosedChannelException(); + } + + int bytesConsumed = 0; + + while (dst.hasRemaining()) { + int bufferRemaining = handle.remaining(); + + int dstRemaining = dst.remaining(); + int dstPosition = dst.position(); + + final int tmpBytesCopied; + if (enqueuedBytes()) { + ByteBuffer buffer = handle.get(); + if (!flipped) { + buffer.flip(); + flipped = true; + } + long copy = Buffers.copy(buffer, new ByteBuffer[] {dst}); + if (buffer.remaining() == 0) { + Buffers.clear(buffer); + } + tmpBytesCopied = Math.toIntExact(copy); + } else { + if (bufferRemaining <= dstRemaining) { + ByteBuffer buf; + if (bufferRemaining == dstRemaining) { + // the available space in dst is the same as our buffer rather than reading into buffer + // before copying to dst, simply read directly into dst + buf = dst; + } else { + // the available space in dst is larger than our buffer rather than reading into buffer + // before copying to dst, simply read a buffer size worth of bytes directly into dst + buf = dst.slice(); + Buffers.limit(buf, bufferRemaining); + } + int 
read = channel.read(buf); + if (read == -1) { + if (bytesConsumed == 0) { + close(); + return -1; + } else { + retEOF = true; + close(); + break; + } + } + Buffers.position(dst, dstPosition + read); + tmpBytesCopied = read; + } else { + + // the amount of space remaining in dst is smaller than our buffer, + // create a slice of our buffer such that + // dst.remaning() + bufSlice.remaning() == buffer.capacity + + ByteBuffer buffer = handle.get(); + ByteBuffer slice = buffer.slice(); + int sliceCapacity = buffer.capacity() - dstRemaining; + Buffers.limit(slice, sliceCapacity); + + ByteBuffer[] dsts = {dst, slice}; + long read = channel.read(dsts); + if (read == -1) { + if (bytesConsumed == 0) { + close(); + return -1; + } else { + retEOF = true; + close(); + break; + } + } else if (read < dstRemaining) { + // we didn't read enough bytes to fill up dst, no need to advance buffer position + tmpBytesCopied = Math.toIntExact(read); + } else { + // we read some bytes into slice + // determine the position buffer needs to be set to + long bytesReadIntoBuffer = read - dstRemaining; + Buffers.position(buffer, Math.toIntExact(bytesReadIntoBuffer)); + flipped = false; + tmpBytesCopied = dstRemaining; + } + } + } + bytesConsumed += tmpBytesCopied; + } + return bytesConsumed; + } + + @Override + public boolean isOpen() { + return enqueuedBytes() || (!retEOF && channel.isOpen()); + } + + @Override + public void close() throws IOException { + channel.close(); + } + + private boolean enqueuedBytes() { + return handle.position() > 0; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedWritableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedWritableByteChannel.java new file mode 100644 index 000000000000..4e9a7c107ffd --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedWritableByteChannel.java @@ -0,0 +1,218 @@ +/* 
+ * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkState; + +import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; + +/** + * Buffering {@link java.nio.channels.WritableByteChannel} which attempts to maximize the amount of + * bytes written to the underlying {@link UnbufferedWritableByteChannel} while minimizing + * unnecessary copying of said bytes. + * + *

Our flushing strategy is "eager", meaning as soon as we have enough total bytes equal to the + * capacity of our buffer we will write to the underlying channel. + * + *

A few strategies are employed to meet the state goals. + * + *

    + *
  1. If we do not have any bytes in our buffer and {@code src} is the same size as our buffer, + * simply {@link UnbufferedWritableByteChannel#write(ByteBuffer) write(src)} to the the + * underlying channel + *
  2. If we do not have any bytes in our buffer and {@code src} is larger than the size of our + * buffer, take a slice of {@code src} the same size as our buffer and {@link + * UnbufferedWritableByteChannel#write(ByteBuffer[]) write(slice)} before enqueuing any + * outstanding bytes which are smaller than our buffer. + *
  3. If we do not have any bytes in our buffer and {@code src} is smaller than the size of our + * buffer, enqueue it in full + *
  4. If we do have enqueued bytes and {@code src} is the size of our remaining buffer space + * {@link UnbufferedWritableByteChannel#write(ByteBuffer[]) write([buffer, src])} to the + * underlying channel + *
  5. If we do have enqueued bytes and {@code src} is larger than the size of our remaining + * buffer space, take a slice of {@code src} the same size as the remaining space in our + * buffer and {@link UnbufferedWritableByteChannel#write(ByteBuffer[]) write([buffer, slice])} + * to the underlying channel before enqueuing any outstanding bytes which are smaller than our + * buffer. + *
  6. If we do have enqueued bytes and {@code src} is smaller than our remaining buffer space, + * enqueue it in full + *
+ */ +final class DefaultBufferedWritableByteChannel implements BufferedWritableByteChannel { + + private final BufferHandle handle; + + private final UnbufferedWritableByteChannel channel; + private final boolean blocking; + + DefaultBufferedWritableByteChannel(BufferHandle handle, UnbufferedWritableByteChannel channel) { + this(handle, channel, true); + } + + DefaultBufferedWritableByteChannel( + BufferHandle handle, UnbufferedWritableByteChannel channel, boolean blocking) { + this.handle = handle; + this.channel = channel; + this.blocking = blocking; + } + + @SuppressWarnings("UnnecessaryLocalVariable") + @Override + public int write(ByteBuffer src) throws IOException { + if (!channel.isOpen()) { + throw new ClosedChannelException(); + } + int bytesConsumed = 0; + + while (src.hasRemaining()) { + int srcRemaining = src.remaining(); + int srcPosition = src.position(); + + int capacity = handle.capacity(); + int bufferRemaining = handle.remaining(); + int bufferPending = capacity - bufferRemaining; + + boolean enqueuedBytes = enqueuedBytes(); + if (srcRemaining < bufferRemaining) { + // srcRemaining is smaller than the remaining space in our buffer, enqueue it in full + handle.get().put(src); + bytesConsumed += srcRemaining; + break; + } else if (enqueuedBytes) { + // between bufferPending and srcRemaining we have a full buffers worth of data + // Figure out what we need to do before flushing it + + ByteBuffer buf; + int sliceLimit = bufferRemaining; // alias for easier readability below + boolean usingSlice = false; + if (srcRemaining == bufferRemaining) { + // our buffer and all of src are equal to a full buffer, no need to slice src + buf = src; + } else { + // our buffer and all of src are larger than a full buffer, take a slice of src such that + // the total number of bytes are equal to capacity + ByteBuffer slice = src.slice(); + Buffers.limit(slice, sliceLimit); + usingSlice = true; + buf = slice; + } + + ByteBuffer buffer = handle.get(); + 
Buffers.flip(buffer); + ByteBuffer[] srcs = {buffer, buf}; + long write = channel.write(srcs); + checkState(write >= 0, "write >= 0 (%s > 0)", write); + if (write == capacity) { + // we successfully wrote all the bytes we wanted to + Buffers.clear(buffer); + if (usingSlice) { + Buffers.position(src, srcPosition + sliceLimit); + } + bytesConsumed += sliceLimit; + } else { + if (buffer.hasRemaining()) { + // we didn't write enough bytes to consume the whole buffer. Do not advance the + // position of src + buffer.compact(); + } else { + // we wrote enough to consume the buffer + Buffers.clear(buffer); + // we didn't write enough to consume the whole slice, determine how much of the slice + // was written and advance the position of src + int sliceWritten = Math.toIntExact(write - bufferPending); + Buffers.position(src, srcPosition + sliceWritten); + bytesConsumed += sliceWritten; + } + + if (!blocking) { + break; + } + } + } else { + // no enqueued data and src is at least as large as our buffer, see if we can simply write + // the provided src or a slice of it since our buffer is empty + if (bufferRemaining == srcRemaining) { + // the capacity of buffer and the bytes remaining in src are the same, directly + // write src + int write = channel.write(src); + checkState(write >= 0, "write >= 0 (%s > 0)", write); + bytesConsumed += write; + if (write < srcRemaining && !blocking) { + break; + } + } else { + // the src provided is larger than our buffer. 
rather than copying into the buffer, simply + // write a slice + ByteBuffer slice = src.slice(); + Buffers.limit(slice, bufferRemaining); + int write = channel.write(slice); + checkState(write >= 0, "write >= 0 (%s > 0)", write); + int newPosition = srcPosition + write; + Buffers.position(src, newPosition); + bytesConsumed += write; + if (write < bufferRemaining && !blocking) { + break; + } + } + } + } + return bytesConsumed; + } + + @Override + public boolean isOpen() { + return channel.isOpen(); + } + + @Override + public void close() throws IOException { + if (enqueuedBytes()) { + ByteBuffer buffer = handle.get(); + Buffers.flip(buffer); + channel.writeAndClose(buffer); + if (buffer.hasRemaining()) { + buffer.compact(); + } else { + Buffers.clear(buffer); + } + } else { + channel.close(); + } + } + + @Override + public void flush() throws IOException { + while (enqueuedBytes()) { + ByteBuffer buffer = handle.get(); + Buffers.flip(buffer); + channel.write(buffer); + if (buffer.hasRemaining()) { + buffer.compact(); + } else { + Buffers.clear(buffer); + } + } + } + + private boolean enqueuedBytes() { + return handle.position() > 0; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultRetryContext.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultRetryContext.java new file mode 100644 index 000000000000..095c01487b32 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultRetryContext.java @@ -0,0 +1,209 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.cloud.storage.Backoff.BackoffDuration; +import com.google.cloud.storage.Backoff.BackoffResult; +import com.google.cloud.storage.Backoff.BackoffResults; +import com.google.cloud.storage.Backoff.Jitterer; +import com.google.cloud.storage.Retrying.RetryingDependencies; +import com.google.common.annotations.VisibleForTesting; +import java.time.Duration; +import java.util.LinkedList; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantLock; +import org.checkerframework.checker.nullness.qual.Nullable; + +@SuppressWarnings("SizeReplaceableByIsEmpty") // allow elimination of a method call and a negation +final class DefaultRetryContext implements RetryContext { + private final ScheduledExecutorService scheduledExecutorService; + private final RetryingDependencies retryingDependencies; + private final ResultRetryAlgorithm algorithm; + private final Backoff backoff; + private final ReentrantLock lock; + + private List failures; + private long lastReset; + private long lastRecordedErrorNs; + @Nullable private BackoffResult lastBackoffResult; + @Nullable private ScheduledFuture pendingBackoff; + + DefaultRetryContext( + ScheduledExecutorService scheduledExecutorService, + RetryingDependencies 
retryingDependencies, + ResultRetryAlgorithm algorithm, + Jitterer jitterer) { + this.scheduledExecutorService = scheduledExecutorService; + this.retryingDependencies = retryingDependencies; + this.algorithm = algorithm; + this.backoff = + Backoff.from(retryingDependencies.getRetrySettings()).setJitterer(jitterer).build(); + this.lock = new ReentrantLock(); + this.failures = new LinkedList<>(); + this.lastReset = retryingDependencies.getClock().nanoTime(); + this.lastRecordedErrorNs = this.lastReset; + this.lastBackoffResult = null; + this.pendingBackoff = null; + } + + @Override + public boolean inBackoff() { + lock.lock(); + boolean b = pendingBackoff != null; + try { + return b; + } finally { + lock.unlock(); + } + } + + @Override + public void reset() { + lock.lock(); + try { + if (failures.size() > 0) { + failures = new LinkedList<>(); + } + long now = retryingDependencies.getClock().nanoTime(); + lastReset = now; + lastRecordedErrorNs = now; + clearPendingBackoff(); + backoff.reset(); + } finally { + lock.unlock(); + } + } + + @VisibleForTesting + void awaitBackoffComplete() { + while (inBackoff()) { + Thread.yield(); + } + } + + @Override + public void recordError(T t, OnSuccess onSuccess, OnFailure onFailure) { + lock.lock(); + try { + long now = retryingDependencies.getClock().nanoTime(); + Duration elapsed = Duration.ofNanos(now - lastReset); + Duration elapsedSinceLastRecordError = Duration.ofNanos(now - lastRecordedErrorNs); + if (pendingBackoff != null && pendingBackoff.isDone()) { + pendingBackoff = null; + lastBackoffResult = null; + } else if (pendingBackoff != null) { + pendingBackoff.cancel(true); + String message = + String.format( + "Previous backoff interrupted by this error (previousBackoff: %s, elapsed: %s)", + lastBackoffResult != null ? 
lastBackoffResult.errorString() : null, elapsed); + t.addSuppressed(BackoffComment.of(message)); + } + int failureCount = failures.size() + 1 /* include t in the count*/; + int maxAttempts = retryingDependencies.getRetrySettings().getMaxAttempts(); + if (maxAttempts <= 0) { + maxAttempts = Integer.MAX_VALUE; + } + boolean shouldRetry = algorithm.shouldRetry(t, null); + BackoffResult nextBackoff = backoff.nextBackoff(elapsedSinceLastRecordError); + String msgPrefix = null; + if (shouldRetry && failureCount >= maxAttempts) { + msgPrefix = "Operation failed to complete within attempt budget"; + } else if (nextBackoff == BackoffResults.EXHAUSTED) { + msgPrefix = "Operation failed to complete within backoff budget"; + } else if (!shouldRetry) { + msgPrefix = "Unretryable error"; + } + + lastRecordedErrorNs = now; + if (msgPrefix == null) { + t.addSuppressed(BackoffComment.fromResult(nextBackoff)); + failures.add(t); + + BackoffDuration backoffDuration = (BackoffDuration) nextBackoff; + + lastBackoffResult = nextBackoff; + try { + pendingBackoff = + scheduledExecutorService.schedule( + () -> { + try { + onSuccess.onSuccess(); + } finally { + clearPendingBackoff(); + } + }, + backoffDuration.getDuration().toNanos(), + TimeUnit.NANOSECONDS); + } catch (RejectedExecutionException e) { + InterruptedBackoffComment comment = + new InterruptedBackoffComment( + "Interrupted backoff -- unretryable error due to executor service shutdown"); + comment.addSuppressed(e); + t.addSuppressed(comment); + onFailure.onFailure(t); + } + } else { + String msg = + String.format( + Locale.US, + "%s (attempts: %d%s, elapsed: %s, nextBackoff: %s%s)%s", + msgPrefix, + failureCount, + maxAttempts == Integer.MAX_VALUE + ? "" + : String.format(", maxAttempts: %d", maxAttempts), + elapsed, + nextBackoff.errorString(), + Durations.eq(backoff.getTimeout(), Durations.EFFECTIVE_INFINITY) + ? "" + : ", timeout: " + backoff.getTimeout(), + failures.isEmpty() ? 
"" : " previous failures follow in order of occurrence"); + t.addSuppressed(new RetryBudgetExhaustedComment(msg)); + for (Throwable failure : failures) { + t.addSuppressed(failure); + } + onFailure.onFailure(t); + } + } finally { + lock.unlock(); + } + } + + private void clearPendingBackoff() { + lock.lock(); + try { + if (pendingBackoff != null) { + if (!pendingBackoff.isDone()) { + pendingBackoff.cancel(true); + } + pendingBackoff = null; + } + if (lastBackoffResult != null) { + lastBackoffResult = null; + } + } finally { + lock.unlock(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultStorageRetryStrategy.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultStorageRetryStrategy.java new file mode 100644 index 000000000000..b9a8df0b5217 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultStorageRetryStrategy.java @@ -0,0 +1,165 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.fasterxml.jackson.core.io.JsonEOFException; +import com.google.api.client.http.HttpResponseException; +import com.google.auth.Retryable; +import com.google.cloud.BaseServiceException; +import com.google.cloud.ExceptionHandler; +import com.google.cloud.ExceptionHandler.Interceptor; +import com.google.common.collect.ImmutableSet; +import com.google.gson.stream.MalformedJsonException; +import java.io.IOException; +import java.net.SocketException; +import java.net.UnknownHostException; +import java.util.Set; +import javax.net.ssl.SSLException; + +final class DefaultStorageRetryStrategy implements StorageRetryStrategy { + + static final DefaultStorageRetryStrategy INSTANCE = new DefaultStorageRetryStrategy(); + + private static final long serialVersionUID = 7928177703325504905L; + + private static final Interceptor INTERCEPTOR_IDEMPOTENT = + new InterceptorImpl(true, StorageException.RETRYABLE_ERRORS); + private static final Interceptor INTERCEPTOR_NON_IDEMPOTENT = + new InterceptorImpl(false, ImmutableSet.of()); + + private static final ExceptionHandler IDEMPOTENT_HANDLER = + newHandler(new EmptyJsonParsingExceptionInterceptor(), INTERCEPTOR_IDEMPOTENT); + private static final ExceptionHandler NON_IDEMPOTENT_HANDLER = + newHandler(INTERCEPTOR_NON_IDEMPOTENT); + + private DefaultStorageRetryStrategy() {} + + @Override + public ExceptionHandler getIdempotentHandler() { + return IDEMPOTENT_HANDLER; + } + + @Override + public ExceptionHandler getNonidempotentHandler() { + return NON_IDEMPOTENT_HANDLER; + } + + private static ExceptionHandler newHandler(Interceptor... 
interceptors) { + return ExceptionHandler.newBuilder().addInterceptors(interceptors).build(); + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + + private static class InterceptorImpl implements BaseInterceptor { + + private static final long serialVersionUID = 5283634944744417128L; + private final boolean idempotent; + private final ImmutableSet retryableErrors; + + private InterceptorImpl(boolean idempotent, Set retryableErrors) { + this.idempotent = idempotent; + this.retryableErrors = ImmutableSet.copyOf(retryableErrors); + } + + @Override + public RetryResult beforeEval(Exception exception) { + if (exception instanceof BaseServiceException) { + BaseServiceException baseServiceException = (BaseServiceException) exception; + return deepShouldRetry(baseServiceException); + } else if (exception instanceof HttpResponseException) { + int code = ((HttpResponseException) exception).getStatusCode(); + return shouldRetryCodeReason(code, null); + } else if (exception instanceof Retryable) { + Retryable retryable = (Retryable) exception; + return (idempotent && retryable.isRetryable()) ? 
RetryResult.RETRY : RetryResult.NO_RETRY; + } else if (exception instanceof IOException) { + IOException ioException = (IOException) exception; + return shouldRetryIOException(ioException); + } + return RetryResult.CONTINUE_EVALUATION; + } + + private RetryResult shouldRetryCodeReason(Integer code, String reason) { + if (BaseServiceException.isRetryable(code, reason, idempotent, retryableErrors)) { + return RetryResult.RETRY; + } else { + return RetryResult.NO_RETRY; + } + } + + private RetryResult shouldRetryIOException(IOException ioException) { + if (ioException instanceof JsonEOFException && idempotent) { // Jackson + return RetryResult.RETRY; + } else if (ioException instanceof MalformedJsonException && idempotent) { // Gson + return RetryResult.RETRY; + } else if (ioException instanceof SSLException && idempotent) { + Throwable cause = ioException.getCause(); + if (cause instanceof SocketException) { + SocketException se = (SocketException) cause; + return shouldRetryIOException(se); + } + } else if (ioException instanceof UnknownHostException && idempotent) { + return RetryResult.RETRY; + } + if (BaseServiceException.isRetryable(idempotent, ioException)) { + return RetryResult.RETRY; + } else { + return RetryResult.NO_RETRY; + } + } + + private RetryResult deepShouldRetry(BaseServiceException baseServiceException) { + if (baseServiceException.getCode() == BaseServiceException.UNKNOWN_CODE + && baseServiceException.getReason() == null) { + final Throwable cause = baseServiceException.getCause(); + if (cause instanceof IOException) { + IOException ioException = (IOException) cause; + return shouldRetryIOException(ioException); + } + } + + int code = baseServiceException.getCode(); + String reason = baseServiceException.getReason(); + return shouldRetryCodeReason(code, reason); + } + } + + private static final class EmptyJsonParsingExceptionInterceptor implements BaseInterceptor { + private static final long serialVersionUID = -3466977370399704805L; + + 
@Override + public RetryResult beforeEval(Exception exception) { + if (exception instanceof IllegalArgumentException) { + IllegalArgumentException illegalArgumentException = (IllegalArgumentException) exception; + if ("no JSON input found".equals(illegalArgumentException.getMessage())) { + return RetryResult.RETRY; + } + } + return RetryResult.CONTINUE_EVALUATION; + } + } + + private interface BaseInterceptor extends Interceptor { + @Override + default RetryResult afterEval(Exception exception, RetryResult retryResult) { + return RetryResult.CONTINUE_EVALUATION; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Durations.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Durations.java new file mode 100644 index 000000000000..396e91bb613f --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Durations.java @@ -0,0 +1,55 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/** Internal comparison helpers giving {@link Duration} relational checks readable names. */
final class Durations {

  /** {@code PT2562047H47M16.854775807S} ~ 106,751 days ~ 292.4 years */
  static final Duration EFFECTIVE_INFINITY = Duration.ofNanos(Long.MAX_VALUE);

  /** Static-only utility; never instantiated. */
  private Durations() {}

  /** {@code lhs == rhs} */
  static boolean eq(Duration lhs, Duration rhs) {
    return lhs.equals(rhs);
  }

  /** {@code lhs <= rhs} */
  static boolean ltEq(Duration lhs, Duration rhs) {
    return lhs.compareTo(rhs) <= 0;
  }

  /** {@code lhs >= rhs} */
  static boolean gtEq(Duration lhs, Duration rhs) {
    return lhs.compareTo(rhs) >= 0;
  }

  /** {@code lhs > rhs} */
  static boolean gt(Duration lhs, Duration rhs) {
    return lhs.compareTo(rhs) > 0;
  }

  /** The smaller of the two durations. */
  static Duration min(Duration a, Duration b) {
    return a.compareTo(b) < 0 ? a : b;
  }

  /** The smallest of the three durations. */
  static Duration min(Duration a, Duration b, Duration c) {
    return min(min(a, b), c);
  }
}
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._16MiB; +import static com.google.cloud.storage.ByteSizeConstants._2MiB; +import static com.google.cloud.storage.ByteSizeConstants._4MiB; + +import com.google.api.core.BetaApi; +import com.google.api.core.InternalExtensionOnly; +import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import java.util.Objects; +import javax.annotation.concurrent.Immutable; + +/** + * Base class used for flush policies which are responsible for configuring an upload channel's + * behavior with regard to flushes. + * + *

Instances of this class and all its subclasses are immutable and thread safe. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ +@BetaApi +@Immutable +@InternalExtensionOnly +public abstract class FlushPolicy { + + private FlushPolicy() {} + + /** + * Default instance factory method for {@link MaxFlushSizeFlushPolicy}. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static MaxFlushSizeFlushPolicy maxFlushSize() { + return MaxFlushSizeFlushPolicy.INSTANCE; + } + + /** + * Alias for {@link FlushPolicy#maxFlushSize() FlushPolicy.maxFlushSize()}{@code .}{@link + * MaxFlushSizeFlushPolicy#withMaxFlushSize(int) withMaxFlushSize(int)} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static MaxFlushSizeFlushPolicy maxFlushSize(int maxFlushSize) { + return maxFlushSize().withMaxFlushSize(maxFlushSize); + } + + /** + * Default instance factory method for {@link MinFlushSizeFlushPolicy}. + * + *

Default: logically equivalent to the following: + * + *

+   * {@link #minFlushSize(int) FlushPolicy.minFlushSize}(4 * 1024 * 1024)
+   *     .{@link MinFlushSizeFlushPolicy#withMaxPendingBytes(long) withMaxPendingBytes}(16 * 1024 * 1024)
+   * 
+ * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static MinFlushSizeFlushPolicy minFlushSize() { + return MinFlushSizeFlushPolicy.INSTANCE; + } + + /** + * Alias for {@link FlushPolicy#minFlushSize() FlushPolicy.minFlushSize()}{@code .}{@link + * MinFlushSizeFlushPolicy#withMinFlushSize(int) withMinFlushSize(int)} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static MinFlushSizeFlushPolicy minFlushSize(int minFlushSize) { + return minFlushSize().withMinFlushSize(minFlushSize); + } + + abstract BufferedWritableByteChannel createBufferedChannel( + UnbufferedWritableByteChannel unbuffered, boolean blocking); + + abstract long getMaxPendingBytes(); + + @Override + public abstract boolean equals(Object obj); + + @Override + public abstract int hashCode(); + + @Override + public abstract String toString(); + + /** + * Define a {@link FlushPolicy} where a max number of bytes will be flushed to GCS per flush. + * + *

If there are not enough bytes to trigger a flush, they will be held in memory until there + * are enough bytes, or an explicit flush is performed by closing the channel. If more bytes are + * provided than the configured {@code maxFlushSize}, multiple flushes will be performed. + * + *

Instances of this class are immutable and thread safe. + * + *

Instead of this, strategy use {@link FlushPolicy#minFlushSize()}{@code .}{@link + * MinFlushSizeFlushPolicy#withMaxPendingBytes(long) withMaxPendingBytes(long)} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @Immutable + @BetaApi + public static final class MaxFlushSizeFlushPolicy extends FlushPolicy { + private static final MaxFlushSizeFlushPolicy INSTANCE = new MaxFlushSizeFlushPolicy(_2MiB); + + private final int maxFlushSize; + + private MaxFlushSizeFlushPolicy(int maxFlushSize) { + this.maxFlushSize = maxFlushSize; + } + + /** + * The maximum number of bytes to include in each automatic flush + * + *

Default: {@code 2097152 (2 MiB)} + * + * @see #withMaxFlushSize(int) + */ + @BetaApi + public int getMaxFlushSize() { + return maxFlushSize; + } + + /** + * Return an instance with the {@code maxFlushSize} set to the specified value. + * + *

Default: {@code 2097152 (2 MiB)} + * + * @param maxFlushSize The number of bytes to buffer before flushing. + * @return The new instance + * @see #getMaxFlushSize() + */ + @BetaApi + public MaxFlushSizeFlushPolicy withMaxFlushSize(int maxFlushSize) { + Preconditions.checkArgument(maxFlushSize >= 0, "maxFlushSize >= 0 (%s >= 0)", maxFlushSize); + if (this.maxFlushSize == maxFlushSize) { + return this; + } + return new MaxFlushSizeFlushPolicy(maxFlushSize); + } + + @Override + BufferedWritableByteChannel createBufferedChannel( + UnbufferedWritableByteChannel unbuffered, boolean blocking) { + return new DefaultBufferedWritableByteChannel( + BufferHandle.allocate(maxFlushSize), unbuffered, blocking); + } + + @Override + long getMaxPendingBytes() { + return maxFlushSize; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof MaxFlushSizeFlushPolicy)) { + return false; + } + MaxFlushSizeFlushPolicy that = (MaxFlushSizeFlushPolicy) o; + return maxFlushSize == that.maxFlushSize; + } + + @Override + public int hashCode() { + return Objects.hashCode(maxFlushSize); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("maxFlushSize", maxFlushSize).toString(); + } + } + + /** + * Define a {@link FlushPolicy} where a min number of bytes will be required before a flush GCS + * happens. + * + *

If there are not enough bytes to trigger a flush, they will be held in memory until there + * are enough bytes, or an explicit flush is performed by closing the channel. + * + *

Instances of this class are immutable and thread safe. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @Immutable + @BetaApi + public static final class MinFlushSizeFlushPolicy extends FlushPolicy { + private static final MinFlushSizeFlushPolicy INSTANCE = + new MinFlushSizeFlushPolicy(_4MiB, _16MiB); + + private final int minFlushSize; + private final long maxPendingBytes; + + private MinFlushSizeFlushPolicy(int minFlushSize, long maxPendingBytes) { + this.minFlushSize = minFlushSize; + this.maxPendingBytes = maxPendingBytes; + } + + /** + * The minimum number of bytes to include in each automatic flush + * + *

Default: {@code 4194304 (4 MiB)} + * + * @see #withMinFlushSize(int) + */ + @BetaApi + public int getMinFlushSize() { + return minFlushSize; + } + + /** + * Return an instance with the {@code minFlushSize} set to the specified value. + * + *

Default: {@code 4194304 (4 MiB)} + * + * @param minFlushSize The number of bytes to buffer before flushing. + * @return The new instance + * @see #getMinFlushSize() + */ + @BetaApi + public MinFlushSizeFlushPolicy withMinFlushSize(int minFlushSize) { + Preconditions.checkArgument(minFlushSize >= 0, "minFlushSize >= 0 (%s >= 0)", minFlushSize); + if (this.minFlushSize == minFlushSize) { + return this; + } + return new MinFlushSizeFlushPolicy(minFlushSize, maxPendingBytes); + } + + @BetaApi + public long getMaxPendingBytes() { + return maxPendingBytes; + } + + @BetaApi + public MinFlushSizeFlushPolicy withMaxPendingBytes(long maxPendingBytes) { + Preconditions.checkArgument( + maxPendingBytes >= 0, "maxPendingBytes >= 0 (%s >= 0)", maxPendingBytes); + Preconditions.checkArgument( + maxPendingBytes >= minFlushSize, + "maxPendingBytes >= minFlushSize (%s >= %s", + maxPendingBytes, + minFlushSize); + if (this.maxPendingBytes == maxPendingBytes) { + return this; + } + return new MinFlushSizeFlushPolicy(minFlushSize, maxPendingBytes); + } + + @Override + BufferedWritableByteChannel createBufferedChannel( + UnbufferedWritableByteChannel unbuffered, boolean blocking) { + return new MinFlushBufferedWritableByteChannel( + BufferHandle.allocate(minFlushSize), unbuffered, blocking); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof MinFlushSizeFlushPolicy)) { + return false; + } + MinFlushSizeFlushPolicy that = (MinFlushSizeFlushPolicy) o; + return minFlushSize == that.minFlushSize && maxPendingBytes == that.maxPendingBytes; + } + + @Override + public int hashCode() { + return Objects.hash(minFlushSize, maxPendingBytes); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("minFlushSize", minFlushSize) + .add("maxPendingBytes", maxPendingBytes) + .toString(); + } + } +} diff --git 
a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiUnbufferedWritableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiUnbufferedWritableByteChannel.java new file mode 100644 index 000000000000..a5b4904a0f28 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiUnbufferedWritableByteChannel.java @@ -0,0 +1,477 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.GrpcUtils.contextWithBucketName; +import static com.google.cloud.storage.Utils.nullSafeList; + +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiStreamObserver; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ErrorDetails; +import com.google.api.gax.rpc.OutOfRangeException; +import com.google.cloud.storage.ChunkSegmenter.ChunkSegment; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.ByteString; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.ObjectChecksums; +import io.grpc.Status; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.Semaphore; +import java.util.function.Supplier; +import org.checkerframework.checker.nullness.qual.NonNull; + +final class GapicBidiUnbufferedWritableByteChannel implements UnbufferedWritableByteChannel { + private final BidiStreamingCallable write; + private final RetrierWithAlg retrier; + private final SettableApiFuture resultFuture; + private final ChunkSegmenter chunkSegmenter; + + private final BidiWriteCtx writeCtx; + private final GrpcCallContext context; + private final BidiObserver responseObserver; + + private volatile ApiStreamObserver stream; + private boolean open = true; + 
private boolean first = true; + private boolean finished = false; + private volatile BidiWriteObjectRequest lastWrittenRequest; + private volatile RewindableContent currentContent; + + GapicBidiUnbufferedWritableByteChannel( + BidiStreamingCallable write, + RetrierWithAlg retrier, + SettableApiFuture resultFuture, + ChunkSegmenter chunkSegmenter, + BidiWriteCtx writeCtx, + Supplier baseContextSupplier) { + this.write = write; + this.retrier = retrier; + this.resultFuture = resultFuture; + this.chunkSegmenter = chunkSegmenter; + + this.writeCtx = writeCtx; + this.responseObserver = new BidiObserver(); + String bucketName = writeCtx.getRequestFactory().bucketName(); + this.context = contextWithBucketName(bucketName, baseContextSupplier.get()); + } + + @Override + public long write(ByteBuffer[] srcs, int srcsOffset, int srcsLength) throws IOException { + return internalWrite(srcs, srcsOffset, srcsLength, false); + } + + @Override + public long writeAndClose(ByteBuffer[] srcs, int offset, int length) throws IOException { + long written = internalWrite(srcs, offset, length, true); + close(); + return written; + } + + @Override + public boolean isOpen() { + return open; + } + + @Override + public void close() throws IOException { + if (!open) { + return; + } + try { + if (!finished) { + BidiWriteObjectRequest message = finishMessage(); + lastWrittenRequest = message; + flush(Collections.singletonList(message)); + } else { + if (stream != null) { + stream.onCompleted(); + responseObserver.await(); + } + } + } finally { + open = false; + stream = null; + lastWrittenRequest = null; + } + } + + @VisibleForTesting + BidiWriteCtx getWriteCtx() { + return writeCtx; + } + + private long internalWrite(ByteBuffer[] srcs, int srcsOffset, int srcsLength, boolean finalize) + throws ClosedChannelException { + if (!open) { + throw new ClosedChannelException(); + } + + long begin = writeCtx.getConfirmedBytes().get(); + currentContent = RewindableContent.of(srcs, srcsOffset, srcsLength); 
+ ChunkSegment[] data = chunkSegmenter.segmentBuffers(srcs, srcsOffset, srcsLength, finalize); + if (data.length == 0) { + currentContent = null; + return 0; + } + + List messages = new ArrayList<>(); + + for (int i = 0; i < data.length; i++) { + ChunkSegment datum = data[i]; + Crc32cLengthKnown crc32c = datum.getCrc32c(); + ByteString b = datum.getB(); + int contentSize = b.size(); + long offset = writeCtx.getTotalSentBytes().getAndAdd(contentSize); + Crc32cLengthKnown cumulative = + writeCtx + .getCumulativeCrc32c() + .accumulateAndGet(crc32c, chunkSegmenter.getHasher()::nullSafeConcat); + ChecksummedData.Builder checksummedData = ChecksummedData.newBuilder().setContent(b); + if (crc32c != null) { + checksummedData.setCrc32C(crc32c.getValue()); + } + BidiWriteObjectRequest.Builder builder = writeCtx.newRequestBuilder(); + if (!first) { + builder.clearUploadId(); + builder.clearObjectChecksums(); + } else { + first = false; + } + builder.setWriteOffset(offset).setChecksummedData(checksummedData.build()); + if (!datum.isOnlyFullBlocks()) { + builder.setFinishWrite(true); + if (cumulative != null) { + builder.setObjectChecksums( + ObjectChecksums.newBuilder().setCrc32C(cumulative.getValue()).build()); + } + finished = true; + } + + if (i == data.length - 1 && !finished) { + if (finalize) { + builder.setFinishWrite(true); + finished = true; + } else { + builder.setFlush(true).setStateLookup(true); + } + } + + BidiWriteObjectRequest build = builder.build(); + messages.add(build); + } + if (finalize && !finished) { + messages.add(finishMessage()); + finished = true; + } + + try { + flush(messages); + } catch (RuntimeException e) { + open = false; + resultFuture.setException(e); + throw e; + } + + long end = writeCtx.getConfirmedBytes().get(); + + long bytesConsumed = end - begin; + return bytesConsumed; + } + + @NonNull + private BidiWriteObjectRequest finishMessage() { + long offset = writeCtx.getTotalSentBytes().get(); + Crc32cLengthKnown crc32cValue = 
writeCtx.getCumulativeCrc32c().get(); + + BidiWriteObjectRequest.Builder b = writeCtx.newRequestBuilder(); + if (!first) { + b.clearUploadId().clearObjectChecksums(); + } + b.setFinishWrite(true).setWriteOffset(offset); + if (crc32cValue != null) { + b.setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(crc32cValue.getValue()).build()); + } + BidiWriteObjectRequest message = b.build(); + return message; + } + + private ApiStreamObserver openedStream() { + if (stream == null) { + synchronized (this) { + if (stream == null) { + responseObserver.reset(); + stream = + new GracefulOutboundStream(this.write.bidiStreamingCall(responseObserver, context)); + } + } + } + return stream; + } + + private void flush(@NonNull List segments) { + retrier.run( + () -> { + try { + ApiStreamObserver opened = openedStream(); + for (BidiWriteObjectRequest message : segments) { + opened.onNext(message); + lastWrittenRequest = message; + } + if (lastWrittenRequest.getFinishWrite()) { + opened.onCompleted(); + } + responseObserver.await(); + return null; + } catch (Throwable t) { + stream = null; + first = true; + t.addSuppressed(new AsyncStorageTaskException()); + throw t; + } + }, + Decoder.identity()); + } + + private class BidiObserver implements ApiStreamObserver { + + private final Semaphore sem; + private volatile BidiWriteObjectResponse last; + private volatile StorageException clientDetectedError; + private volatile RuntimeException previousError; + + private BidiObserver() { + this.sem = new Semaphore(0); + } + + @Override + public void onNext(BidiWriteObjectResponse value) { + boolean finalizing = lastWrittenRequest.getFinishWrite(); + if (!finalizing && value.hasPersistedSize()) { // incremental + long totalSentBytes = writeCtx.getTotalSentBytes().get(); + long persistedSize = value.getPersistedSize(); + + if (totalSentBytes == persistedSize) { + writeCtx.getConfirmedBytes().set(persistedSize); + ok(value); + } else if (persistedSize < totalSentBytes) { + long delta = 
totalSentBytes - persistedSize; + // rewind our content and any state that my have run ahead of the actual ack'd bytes + currentContent.rewindTo(delta); + writeCtx.getTotalSentBytes().set(persistedSize); + writeCtx.getConfirmedBytes().set(persistedSize); + ok(value); + } else { + clientDetectedError( + UploadFailureScenario.SCENARIO_7.toStorageException( + nullSafeList(lastWrittenRequest), value, context, null)); + } + } else if (finalizing && value.hasResource()) { + long totalSentBytes = writeCtx.getTotalSentBytes().get(); + long finalSize = value.getResource().getSize(); + if (totalSentBytes == finalSize) { + writeCtx.getConfirmedBytes().set(finalSize); + ok(value); + } else if (finalSize < totalSentBytes) { + clientDetectedError( + UploadFailureScenario.SCENARIO_4_1.toStorageException( + nullSafeList(lastWrittenRequest), value, context, null)); + } else { + clientDetectedError( + UploadFailureScenario.SCENARIO_4_2.toStorageException( + nullSafeList(lastWrittenRequest), value, context, null)); + } + } else if (!finalizing && value.hasResource()) { + clientDetectedError( + UploadFailureScenario.SCENARIO_1.toStorageException( + nullSafeList(lastWrittenRequest), value, context, null)); + } else if (finalizing && value.hasPersistedSize()) { + long totalSentBytes = writeCtx.getTotalSentBytes().get(); + long persistedSize = value.getPersistedSize(); + // if a flush: true, state_lookup: true message is in the stream along with a + // finish_write: true, GCS can respond with the incremental update, gracefully handle this + // message + if (totalSentBytes == persistedSize) { + writeCtx.getConfirmedBytes().set(persistedSize); + } else if (persistedSize < totalSentBytes) { + clientDetectedError( + UploadFailureScenario.SCENARIO_3.toStorageException( + nullSafeList(lastWrittenRequest), value, context, null)); + } else { + clientDetectedError( + UploadFailureScenario.SCENARIO_2.toStorageException( + nullSafeList(lastWrittenRequest), value, context, null)); + } + } else { + 
clientDetectedError( + UploadFailureScenario.SCENARIO_0.toStorageException( + nullSafeList(lastWrittenRequest), value, context, null)); + } + } + + @Override + public void onError(Throwable t) { + if (t instanceof OutOfRangeException) { + OutOfRangeException oore = (OutOfRangeException) t; + ErrorDetails ed = oore.getErrorDetails(); + if (!(ed != null + && ed.getErrorInfo() != null + && ed.getErrorInfo().getReason().equals("GRPC_MISMATCHED_UPLOAD_SIZE"))) { + clientDetectedError( + UploadFailureScenario.SCENARIO_5.toStorageException( + nullSafeList(lastWrittenRequest), null, context, oore)); + return; + } + } + if (t instanceof ApiException) { + // use StorageExceptions logic to translate from ApiException to our status codes ensuring + // things fall in line with our retry handlers. + // This is suboptimal, as it will initialize a second exception, however this is the + // unusual case, and it should not cause a significant overhead given its rarity. + StorageException tmp = StorageException.asStorageException((ApiException) t); + previousError = + UploadFailureScenario.toStorageException( + tmp.getCode(), + tmp.getMessage(), + tmp.getReason(), + nullSafeList(lastWrittenRequest), + null, + context, + t); + sem.release(); + } else if (t instanceof RuntimeException) { + previousError = (RuntimeException) t; + sem.release(); + } + } + + @Override + public void onCompleted() { + if (last != null && last.hasResource()) { + resultFuture.set(last); + } + sem.release(); + } + + private void ok(BidiWriteObjectResponse value) { + last = value; + sem.release(); + } + + private void clientDetectedError(StorageException storageException) { + open = false; + clientDetectedError = storageException; + // yes, check that previousError is not the same instance as e + if (previousError != null && previousError != storageException) { + storageException.addSuppressed(previousError); + previousError = null; + } + if (previousError == null) { + previousError = storageException; + } + 
sem.release(); + } + + void await() { + try { + sem.acquire(); + } catch (InterruptedException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } else { + throw new RuntimeException(e); + } + } + StorageException e = clientDetectedError; + RuntimeException err = previousError; + clientDetectedError = null; + previousError = null; + if ((e != null || err != null) && stream != null) { + if (lastWrittenRequest != null && lastWrittenRequest.getFinishWrite()) { + stream.onCompleted(); + } else { + stream.onError(Status.CANCELLED.asRuntimeException()); + } + } + if (e != null) { + throw e; + } + if (err != null) { + throw err; + } + } + + public void reset() { + sem.drainPermits(); + last = null; + clientDetectedError = null; + previousError = null; + } + } + + /** + * Prevent "already half-closed" if we previously called onComplete but then detect an error and + * call onError + */ + private static final class GracefulOutboundStream + implements ApiStreamObserver { + + private final ApiStreamObserver delegate; + private volatile boolean closing; + + private GracefulOutboundStream(ApiStreamObserver delegate) { + this.delegate = delegate; + this.closing = false; + } + + @Override + public void onNext(BidiWriteObjectRequest value) { + delegate.onNext(value); + } + + @Override + public void onError(Throwable t) { + if (closing) { + return; + } + closing = true; + delegate.onError(t); + } + + @Override + public void onCompleted() { + if (closing) { + return; + } + closing = true; + delegate.onCompleted(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiWritableByteChannelSessionBuilder.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiWritableByteChannelSessionBuilder.java new file mode 100644 index 000000000000..84629f0efc7e --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiWritableByteChannelSessionBuilder.java @@ -0,0 +1,161 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.cloud.storage.ChannelSession.BufferedWriteSession; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.ServiceConstants.Values; +import java.nio.ByteBuffer; +import java.util.function.BiFunction; + +final class GapicBidiWritableByteChannelSessionBuilder { + + private final BidiStreamingCallable write; + private Hasher hasher; + private ByteStringStrategy byteStringStrategy; + + GapicBidiWritableByteChannelSessionBuilder( + BidiStreamingCallable write) { + this.write = write; + this.hasher = Hasher.noop(); + this.byteStringStrategy = ByteStringStrategy.copy(); + } + + /** + * Set the {@link Hasher} to apply to the bytes passing through the built session's channel. + * + *

Default: {@link Hasher#noop()} + * + * @see Hasher#enabled() + * @see Hasher#noop() + */ + GapicBidiWritableByteChannelSessionBuilder setHasher(Hasher hasher) { + this.hasher = requireNonNull(hasher, "hasher must be non null"); + return this; + } + + /** + * Set the {@link ByteStringStrategy} to be used when constructing {@link + * com.google.protobuf.ByteString ByteString}s from {@link ByteBuffer}s. + * + *

Default: {@link ByteStringStrategy#copy()} + * + *

Note: usage of {@link ByteStringStrategy#noCopy()} requires that any {@link ByteBuffer} + * passed to the session's channel not be modified while {@link + * java.nio.channels.WritableByteChannel#write(ByteBuffer)} is processing. + * + * @see ByteStringStrategy#copy() + * @see ByteStringStrategy#noCopy() + */ + GapicBidiWritableByteChannelSessionBuilder setByteStringStrategy( + ByteStringStrategy byteStringStrategy) { + this.byteStringStrategy = + requireNonNull(byteStringStrategy, "byteStringStrategy must be non null"); + return this; + } + + GapicBidiWritableByteChannelSessionBuilder.ResumableUploadBuilder resumable() { + return new GapicBidiWritableByteChannelSessionBuilder.ResumableUploadBuilder(); + } + + final class ResumableUploadBuilder { + + private RetrierWithAlg retrier; + + ResumableUploadBuilder() { + this.retrier = RetrierWithAlg.attemptOnce(); + } + + ResumableUploadBuilder withRetryConfig(RetrierWithAlg retrier) { + this.retrier = requireNonNull(retrier, "retrier must be non null"); + return this; + } + + /** + * Buffer using {@code byteBuffer} worth of space before attempting to flush. + * + *

The provided {@link ByteBuffer} should be aligned with GCSs block size of 256 + * KiB. + */ + BufferedResumableUploadBuilder buffered(ByteBuffer byteBuffer) { + return buffered(BufferHandle.handleOf(byteBuffer)); + } + + BufferedResumableUploadBuilder buffered(BufferHandle bufferHandle) { + return new BufferedResumableUploadBuilder(bufferHandle); + } + + final class BufferedResumableUploadBuilder { + + private final BufferHandle bufferHandle; + + private ApiFuture start; + + BufferedResumableUploadBuilder(BufferHandle bufferHandle) { + this.bufferHandle = bufferHandle; + } + + /** + * Set the Future which will contain the ResumableWrite information necessary to open the + * Write stream. + */ + BufferedResumableUploadBuilder setStartAsync(ApiFuture start) { + this.start = requireNonNull(start, "start must be non null"); + return this; + } + + BufferedWritableByteChannelSession build() { + // it is theoretically possible that the setter methods for the following variables could + // be called again between when this method is invoked and the resulting function is + // invoked. + // To ensure we are using the specified values at the point in time they are bound to the + // function read them into local variables which will be closed over rather than the class + // fields. 
+ ByteStringStrategy boundStrategy = byteStringStrategy; + Hasher boundHasher = hasher; + RetrierWithAlg boundRetrier = retrier; + return new BufferedWriteSession<>( + requireNonNull(start, "start must be non null"), + ((BiFunction< + BidiResumableWrite, + SettableApiFuture, + UnbufferedWritableByteChannel>) + (start, resultFuture) -> + new GapicBidiUnbufferedWritableByteChannel( + write, + boundRetrier, + resultFuture, + new ChunkSegmenter( + boundHasher, boundStrategy, Values.MAX_WRITE_CHUNK_BYTES_VALUE), + new BidiWriteCtx<>(start), + Retrying::newCallContext)) + .andThen(c -> new DefaultBufferedWritableByteChannel(bufferHandle, c)) + .andThen(StorageByteChannels.writable()::createSynchronized)); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicCopyWriter.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicCopyWriter.java new file mode 100644 index 000000000000..dff801871c51 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicCopyWriter.java @@ -0,0 +1,90 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.RestorableState; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.storage.v2.RewriteObjectRequest; +import com.google.storage.v2.RewriteResponse; + +final class GapicCopyWriter extends CopyWriter { + + // needed for #getResult + private final transient GrpcStorageImpl storage; + private final UnaryCallable callable; + private final RetrierWithAlg retrier; + private final RewriteObjectRequest originalRequest; + private final RewriteResponse initialResponse; + + private RewriteResponse mostRecentResponse; + + GapicCopyWriter( + GrpcStorageImpl storage, + UnaryCallable callable, + RetrierWithAlg retrier, + RewriteObjectRequest originalRequest, + RewriteResponse initialResponse) { + this.storage = storage; + this.callable = callable; + this.retrier = retrier; + this.initialResponse = initialResponse; + this.mostRecentResponse = initialResponse; + this.originalRequest = originalRequest; + } + + @Override + public Blob getResult() { + while (!isDone()) { + copyChunk(); + } + BlobInfo info = Conversions.grpc().blobInfo().decode(mostRecentResponse.getResource()); + return info.asBlob(storage); + } + + @Override + public long getBlobSize() { + return initialResponse.getObjectSize(); + } + + @Override + public boolean isDone() { + return mostRecentResponse.getDone(); + } + + @Override + public long getTotalBytesCopied() { + return mostRecentResponse.getTotalBytesRewritten(); + } + + @Override + public void copyChunk() { + if (!isDone()) { + RewriteObjectRequest req = + originalRequest.toBuilder().setRewriteToken(mostRecentResponse.getRewriteToken()).build(); + GrpcCallContext retryContext = Retrying.newCallContext(); + mostRecentResponse = retrier.run(() -> callable.call(req, retryContext), Decoder.identity()); + } + } + + @Override 
+ public RestorableState capture() { + return CrossTransportUtils.throwHttpJsonOnly(CopyWriter.class, "capture"); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicDownloadSessionBuilder.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicDownloadSessionBuilder.java new file mode 100644 index 000000000000..3bbba1d703ea --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicDownloadSessionBuilder.java @@ -0,0 +1,173 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.cloud.storage.BufferedReadableByteChannelSession.BufferedReadableByteChannel; +import com.google.cloud.storage.GrpcUtils.ZeroCopyServerStreamingCallable; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.storage.v2.Object; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; +import java.nio.ByteBuffer; +import java.util.function.BiFunction; +import javax.annotation.concurrent.Immutable; + +@Immutable +final class GapicDownloadSessionBuilder { + private static final GapicDownloadSessionBuilder INSTANCE = new GapicDownloadSessionBuilder(); + + private static final int DEFAULT_BUFFER_CAPACITY = ByteSizeConstants._16MiB; + + private GapicDownloadSessionBuilder() {} + + public static GapicDownloadSessionBuilder create() { + return INSTANCE; + } + + public ReadableByteChannelSessionBuilder byteChannel( + ZeroCopyServerStreamingCallable read, + Retrier retrier, + ResultRetryAlgorithm resultRetryAlgorithm) { + return new ReadableByteChannelSessionBuilder(read, retrier, resultRetryAlgorithm); + } + + public static final class ReadableByteChannelSessionBuilder { + + private final ZeroCopyServerStreamingCallable read; + private final Retrier retrier; + private final ResultRetryAlgorithm resultRetryAlgorithm; + private boolean autoGzipDecompression; + private Hasher hasher; + + private ReadableByteChannelSessionBuilder( + ZeroCopyServerStreamingCallable read, + Retrier retrier, + ResultRetryAlgorithm resultRetryAlgorithm) { + this.read = read; + this.retrier = retrier; + this.resultRetryAlgorithm = 
resultRetryAlgorithm; + this.hasher = Hasher.defaultHasher(); + this.autoGzipDecompression = false; + } + + public BufferedReadableByteChannelSessionBuilder buffered() { + return buffered(BufferHandle.allocate(DEFAULT_BUFFER_CAPACITY)); + } + + public ReadableByteChannelSessionBuilder setHasher(Hasher hasher) { + this.hasher = hasher; + return this; + } + + public ReadableByteChannelSessionBuilder setAutoGzipDecompression( + boolean autoGzipDecompression) { + this.autoGzipDecompression = autoGzipDecompression; + return this; + } + + public BufferedReadableByteChannelSessionBuilder buffered(BufferHandle bufferHandle) { + return new BufferedReadableByteChannelSessionBuilder(bufferHandle, bindFunction()); + } + + public BufferedReadableByteChannelSessionBuilder buffered(ByteBuffer buffer) { + return buffered(BufferHandle.handleOf(buffer)); + } + + public UnbufferedReadableByteChannelSessionBuilder unbuffered() { + return new UnbufferedReadableByteChannelSessionBuilder(bindFunction()); + } + + private BiFunction, UnbufferedReadableByteChannel> + bindFunction() { + // for any non-final value, create a reference to the value at this point in time + Hasher hasher = this.hasher; + boolean autoGzipDecompression = this.autoGzipDecompression; + return (object, resultFuture) -> { + if (autoGzipDecompression) { + return new GzipReadableByteChannel( + new GapicUnbufferedReadableByteChannel( + resultFuture, read, object, hasher, retrier, resultRetryAlgorithm), + ApiFutures.transform( + resultFuture, Object::getContentEncoding, MoreExecutors.directExecutor())); + } else { + return new GapicUnbufferedReadableByteChannel( + resultFuture, read, object, hasher, retrier, resultRetryAlgorithm); + } + }; + } + + public static final class BufferedReadableByteChannelSessionBuilder { + + private final BiFunction< + ReadObjectRequest, SettableApiFuture, BufferedReadableByteChannel> + f; + private ReadObjectRequest request; + + private BufferedReadableByteChannelSessionBuilder( + BufferHandle 
buffer, + BiFunction, UnbufferedReadableByteChannel> + f) { + this.f = f.andThen(c -> new DefaultBufferedReadableByteChannel(buffer, c)); + } + + public BufferedReadableByteChannelSessionBuilder setReadObjectRequest( + ReadObjectRequest request) { + this.request = requireNonNull(request, "request must be non null"); + return this; + } + + public BufferedReadableByteChannelSession build() { + return new ChannelSession.BufferedReadSession<>( + ApiFutures.immediateFuture(request), + f.andThen(StorageByteChannels.readable()::createSynchronized)); + } + } + + public static final class UnbufferedReadableByteChannelSessionBuilder { + + private final BiFunction< + ReadObjectRequest, SettableApiFuture, UnbufferedReadableByteChannel> + f; + private ReadObjectRequest request; + + private UnbufferedReadableByteChannelSessionBuilder( + BiFunction, UnbufferedReadableByteChannel> + f) { + this.f = f; + } + + public UnbufferedReadableByteChannelSessionBuilder setReadObjectRequest( + ReadObjectRequest request) { + this.request = requireNonNull(request, "request must be non null"); + return this; + } + + public UnbufferedReadableByteChannelSession build() { + return new ChannelSession.UnbufferedReadSession<>( + ApiFutures.immediateFuture(request), + f.andThen(StorageByteChannels.readable()::createSynchronized)); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedChunkedResumableWritableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedChunkedResumableWritableByteChannel.java new file mode 100644 index 000000000000..b24851390f16 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedChunkedResumableWritableByteChannel.java @@ -0,0 +1,361 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.GrpcUtils.contextWithBucketName; + +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiStreamObserver; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.ErrorDetails; +import com.google.api.gax.rpc.OutOfRangeException; +import com.google.cloud.storage.ChunkSegmenter.ChunkSegment; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ByteString; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.function.Supplier; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class 
GapicUnbufferedChunkedResumableWritableByteChannel + implements UnbufferedWritableByteChannel { + + private final SettableApiFuture resultFuture; + private final ChunkSegmenter chunkSegmenter; + private final ClientStreamingCallable write; + + private final String bucketName; + private final WriteCtx writeCtx; + private final RetrierWithAlg retrier; + private final Supplier baseContextSupplier; + + private volatile boolean open = true; + private boolean finished = false; + + GapicUnbufferedChunkedResumableWritableByteChannel( + SettableApiFuture resultFuture, + @NonNull ChunkSegmenter chunkSegmenter, + ClientStreamingCallable write, + WriteCtx writeCtx, + RetrierWithAlg retrier, + Supplier baseContextSupplier) { + this.resultFuture = resultFuture; + this.chunkSegmenter = chunkSegmenter; + this.write = write; + this.bucketName = writeCtx.getRequestFactory().bucketName(); + this.writeCtx = writeCtx; + this.retrier = retrier; + this.baseContextSupplier = baseContextSupplier; + } + + @Override + public long write(ByteBuffer[] srcs, int srcsOffset, int srcsLength) throws IOException { + return internalWrite(srcs, srcsOffset, srcsLength, false); + } + + @Override + public long writeAndClose(ByteBuffer[] srcs, int srcsOffset, int srcsLength) throws IOException { + long write = internalWrite(srcs, srcsOffset, srcsLength, true); + close(); + return write; + } + + @Override + public boolean isOpen() { + return open; + } + + @Override + public void close() throws IOException { + if (open && !finished) { + WriteObjectRequest message = finishMessage(true); + try { + flush(ImmutableList.of(message), null, true); + finished = true; + } catch (RuntimeException e) { + resultFuture.setException(e); + throw e; + } + } + open = false; + } + + private long internalWrite(ByteBuffer[] srcs, int srcsOffset, int srcsLength, boolean finalize) + throws ClosedChannelException { + if (!open) { + throw new ClosedChannelException(); + } + + long begin = writeCtx.getConfirmedBytes().get(); + 
RewindableContent content = RewindableContent.of(srcs, srcsOffset, srcsLength); + ChunkSegment[] data = chunkSegmenter.segmentBuffers(srcs, srcsOffset, srcsLength, finalize); + if (data.length == 0) { + return 0; + } + // we consumed some bytes from srcs, flag our content as dirty since we aren't writing + // those bytes to implicitly flag as dirty. + content.flagDirty(); + + List messages = new ArrayList<>(); + + boolean first = true; + for (ChunkSegment datum : data) { + Crc32cLengthKnown crc32c = datum.getCrc32c(); + ByteString b = datum.getB(); + int contentSize = b.size(); + long offset = writeCtx.getTotalSentBytes().getAndAdd(contentSize); + Crc32cLengthKnown cumulative = + writeCtx + .getCumulativeCrc32c() + .accumulateAndGet(crc32c, chunkSegmenter.getHasher()::nullSafeConcat); + ChecksummedData.Builder checksummedData = ChecksummedData.newBuilder().setContent(b); + if (crc32c != null) { + checksummedData.setCrc32C(crc32c.getValue()); + } + WriteObjectRequest.Builder builder = + writeCtx + .newRequestBuilder() + .clearWriteObjectSpec() + .clearObjectChecksums() + .setWriteOffset(offset) + .setChecksummedData(checksummedData.build()); + if (!first) { + builder.clearUploadId(); + } + if (!datum.isOnlyFullBlocks()) { + builder.setFinishWrite(true); + if (cumulative != null) { + builder.setObjectChecksums( + ObjectChecksums.newBuilder().setCrc32C(cumulative.getValue()).build()); + } + finished = true; + } + + WriteObjectRequest build = builder.build(); + first = false; + messages.add(build); + } + if (finalize && !finished) { + messages.add(finishMessage(first)); + finished = true; + } + + try { + flush(messages, content, finalize); + } catch (RuntimeException e) { + resultFuture.setException(e); + throw e; + } + + long end = writeCtx.getConfirmedBytes().get(); + + long bytesConsumed = end - begin; + return bytesConsumed; + } + + @NonNull + private WriteObjectRequest finishMessage(boolean first) { + long offset = writeCtx.getTotalSentBytes().get(); + 
Crc32cLengthKnown crc32cValue = writeCtx.getCumulativeCrc32c().get(); + + WriteObjectRequest.Builder b = + writeCtx.newRequestBuilder().setFinishWrite(true).setWriteOffset(offset); + if (!first) { + b.clearUploadId(); + } + if (crc32cValue != null) { + b.setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(crc32cValue.getValue()).build()); + } + WriteObjectRequest message = b.build(); + return message; + } + + private void flush( + @NonNull List segments, + @Nullable RewindableContent content, + boolean finalizing) { + GrpcCallContext internalContext = contextWithBucketName(bucketName, baseContextSupplier.get()); + ClientStreamingCallable callable = + write.withDefaultCallContext(internalContext); + + retrier.run( + () -> { + Observer observer = new Observer(content, finalizing, segments, internalContext); + ApiStreamObserver write = callable.clientStreamingCall(observer); + + for (WriteObjectRequest message : segments) { + write.onNext(message); + } + write.onCompleted(); + try { + observer.await(); + } catch (Throwable t) { + t.addSuppressed(new AsyncStorageTaskException()); + throw t; + } + return null; + }, + Decoder.identity()); + } + + @VisibleForTesting + WriteCtx getWriteCtx() { + return writeCtx; + } + + class Observer implements ApiStreamObserver { + + private final RewindableContent content; + private final boolean finalizing; + private final List segments; + private final GrpcCallContext context; + + private final SettableApiFuture invocationHandle; + private volatile WriteObjectResponse last; + + Observer( + @Nullable RewindableContent content, + boolean finalizing, + @NonNull List segments, + GrpcCallContext context) { + this.content = content; + this.finalizing = finalizing; + this.segments = segments; + this.context = context; + this.invocationHandle = SettableApiFuture.create(); + } + + @Override + public void onNext(WriteObjectResponse value) { + last = value; + } + + @Override + public void onError(Throwable t) { + if (t instanceof 
OutOfRangeException) { + OutOfRangeException oore = (OutOfRangeException) t; + open = false; + ErrorDetails ed = oore.getErrorDetails(); + if (!(ed != null + && ed.getErrorInfo() != null + && ed.getErrorInfo().getReason().equals("GRPC_MISMATCHED_UPLOAD_SIZE"))) { + StorageException storageException = + UploadFailureScenario.SCENARIO_5.toStorageException(segments, null, context, oore); + invocationHandle.setException(storageException); + return; + } + } + if (t instanceof ApiException) { + // use StorageExceptions logic to translate from ApiException to our status codes ensuring + // things fall in line with our retry handlers. + // This is suboptimal, as it will initialize a second exception, however this is the + // unusual case, and it should not cause a significant overhead given its rarity. + StorageException tmp = StorageException.asStorageException((ApiException) t); + StorageException storageException = + UploadFailureScenario.toStorageException( + tmp.getCode(), tmp.getMessage(), tmp.getReason(), segments, null, context, t); + invocationHandle.setException(storageException); + } + } + + @Override + public void onCompleted() { + try { + if (last == null) { + throw new StorageException( + 0, "onComplete without preceding onNext, unable to determine success."); + } else if (!finalizing && last.hasPersistedSize()) { // incremental + long totalSentBytes = writeCtx.getTotalSentBytes().get(); + long persistedSize = last.getPersistedSize(); + + if (totalSentBytes == persistedSize) { + writeCtx.getConfirmedBytes().set(persistedSize); + } else if (persistedSize < totalSentBytes) { + long delta = totalSentBytes - persistedSize; + // rewind our content and any state that my have run ahead of the actual ack'd bytes + content.rewindTo(delta); + writeCtx.getTotalSentBytes().set(persistedSize); + writeCtx.getConfirmedBytes().set(persistedSize); + } else { + throw UploadFailureScenario.SCENARIO_7.toStorageException( + segments, last, context, null); + } + } else if 
(finalizing && last.hasResource()) { + long totalSentBytes = writeCtx.getTotalSentBytes().get(); + long finalSize = last.getResource().getSize(); + if (totalSentBytes == finalSize) { + writeCtx.getConfirmedBytes().set(finalSize); + resultFuture.set(last); + } else if (finalSize < totalSentBytes) { + throw UploadFailureScenario.SCENARIO_4_1.toStorageException( + segments, last, context, null); + } else { + throw UploadFailureScenario.SCENARIO_4_2.toStorageException( + segments, last, context, null); + } + } else if (!finalizing && last.hasResource()) { + throw UploadFailureScenario.SCENARIO_1.toStorageException(segments, last, context, null); + } else if (finalizing && last.hasPersistedSize()) { + long totalSentBytes = writeCtx.getTotalSentBytes().get(); + long persistedSize = last.getPersistedSize(); + if (persistedSize < totalSentBytes) { + throw UploadFailureScenario.SCENARIO_3.toStorageException( + segments, last, context, null); + } else { + throw UploadFailureScenario.SCENARIO_2.toStorageException( + segments, last, context, null); + } + } else { + throw UploadFailureScenario.SCENARIO_0.toStorageException(segments, last, context, null); + } + } catch (Throwable se) { + open = false; + invocationHandle.setException(se); + } finally { + invocationHandle.set(null); + } + } + + void await() { + try { + invocationHandle.get(); + } catch (InterruptedException | ExecutionException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } else { + throw new RuntimeException(e); + } + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedDirectWritableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedDirectWritableByteChannel.java new file mode 100644 index 000000000000..95f6472acae7 --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedDirectWritableByteChannel.java @@ -0,0 +1,287 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.GrpcUtils.contextWithBucketName; + +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiStreamObserver; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.cloud.storage.ChunkSegmenter.ChunkSegment; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.cloud.storage.WriteCtx.SimpleWriteObjectRequestBuilderFactory; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ByteString; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.concurrent.ExecutionException; +import org.checkerframework.checker.nullness.qual.NonNull; + +final class GapicUnbufferedDirectWritableByteChannel implements 
UnbufferedWritableByteChannel { + + private final SettableApiFuture resultFuture; + private final ChunkSegmenter chunkSegmenter; + private final ClientStreamingCallable write; + + private final WriteCtx writeCtx; + + private final Observer responseObserver; + private volatile ApiStreamObserver stream; + + private boolean open = true; + private boolean first = true; + private boolean finished = false; + private volatile WriteObjectRequest lastWrittenRequest; + + GapicUnbufferedDirectWritableByteChannel( + SettableApiFuture resultFuture, + ChunkSegmenter chunkSegmenter, + ClientStreamingCallable write, + WriteCtx writeCtx) { + String bucketName = writeCtx.getRequestFactory().bucketName(); + this.resultFuture = resultFuture; + this.chunkSegmenter = chunkSegmenter; + + GrpcCallContext internalContext = + contextWithBucketName(bucketName, GrpcCallContext.createDefault()); + this.write = write.withDefaultCallContext(internalContext); + + this.writeCtx = writeCtx; + this.responseObserver = new Observer(internalContext); + } + + @Override + public long write(ByteBuffer[] srcs, int srcsOffset, int srcsLength) throws IOException { + if (!open) { + throw new ClosedChannelException(); + } + + ChunkSegment[] data = chunkSegmenter.segmentBuffers(srcs, srcsOffset, srcsLength); + if (data.length == 0) { + return 0; + } + + try { + ApiStreamObserver openedStream = openedStream(); + int bytesConsumed = 0; + for (ChunkSegment datum : data) { + Crc32cLengthKnown crc32c = datum.getCrc32c(); + ByteString b = datum.getB(); + int contentSize = b.size(); + long offset = writeCtx.getTotalSentBytes().getAndAdd(contentSize); + Crc32cLengthKnown cumulative = + writeCtx + .getCumulativeCrc32c() + .accumulateAndGet(crc32c, chunkSegmenter.getHasher()::nullSafeConcat); + ChecksummedData.Builder checksummedData = ChecksummedData.newBuilder().setContent(b); + if (crc32c != null) { + checksummedData.setCrc32C(crc32c.getValue()); + } + WriteObjectRequest.Builder builder = writeCtx.newRequestBuilder(); 
+ if (!first) { + builder.clearWriteObjectSpec(); + builder.clearObjectChecksums(); + } + builder.setWriteOffset(offset).setChecksummedData(checksummedData.build()); + if (!datum.isOnlyFullBlocks()) { + builder.setFinishWrite(true); + if (cumulative != null) { + builder.setObjectChecksums( + ObjectChecksums.newBuilder().setCrc32C(cumulative.getValue()).build()); + } + finished = true; + } + + WriteObjectRequest build = builder.build(); + first = false; + bytesConsumed += contentSize; + lastWrittenRequest = build; + openedStream.onNext(build); + } + return bytesConsumed; + } catch (RuntimeException e) { + resultFuture.setException(e); + throw e; + } + } + + @Override + public boolean isOpen() { + return open; + } + + @Override + public void close() throws IOException { + ApiStreamObserver openedStream = openedStream(); + if (!finished) { + WriteObjectRequest message = finishMessage(); + lastWrittenRequest = message; + try { + openedStream.onNext(message); + openedStream.onCompleted(); + finished = true; + } catch (RuntimeException e) { + resultFuture.setException(e); + throw e; + } + } else { + try { + openedStream.onCompleted(); + } catch (RuntimeException e) { + resultFuture.setException(e); + throw e; + } + } + open = false; + responseObserver.await(); + } + + @NonNull + private WriteObjectRequest finishMessage() { + long offset = writeCtx.getTotalSentBytes().get(); + Crc32cLengthKnown crc32cValue = writeCtx.getCumulativeCrc32c().get(); + + WriteObjectRequest.Builder b = writeCtx.newRequestBuilder(); + if (!first) { + b.clearWriteObjectSpec(); + b.clearObjectChecksums(); + first = false; + } + b.setFinishWrite(true).setWriteOffset(offset); + if (crc32cValue != null) { + b.setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(crc32cValue.getValue()).build()); + } + return b.build(); + } + + private ApiStreamObserver openedStream() { + if (stream == null) { + synchronized (this) { + if (stream == null) { + stream = write.clientStreamingCall(responseObserver); + 
} + } + } + return stream; + } + + class Observer implements ApiStreamObserver { + + private final GrpcCallContext context; + + private final SettableApiFuture invocationHandle; + private volatile WriteObjectResponse last; + + Observer(GrpcCallContext context) { + this.context = context; + this.invocationHandle = SettableApiFuture.create(); + } + + @Override + public void onNext(WriteObjectResponse value) { + last = value; + } + + /** + * observed exceptions so far + * + *
+     * <ol>
+     *   <li>{@link com.google.api.gax.rpc.OutOfRangeException}
+     *   <li>{@link com.google.api.gax.rpc.AlreadyExistsException}
+     *   <li>{@link io.grpc.StatusRuntimeException}
+     * </ol>
+ */ + @Override + public void onError(Throwable t) { + if (t instanceof ApiException) { + // use StorageExceptions logic to translate from ApiException to our status codes ensuring + // things fall in line with our retry handlers. + // This is suboptimal, as it will initialize a second exception, however this is the + // unusual case, and it should not cause a significant overhead given its rarity. + StorageException tmp = StorageException.asStorageException((ApiException) t); + StorageException storageException = + UploadFailureScenario.toStorageException( + tmp.getCode(), tmp.getMessage(), tmp.getReason(), getRequests(), null, context, t); + invocationHandle.setException(storageException); + } else { + invocationHandle.setException(t); + } + } + + @Override + public void onCompleted() { + try { + if (last == null) { + throw new StorageException( + 0, "onComplete without preceding onNext, unable to determine success."); + } else if (last.hasResource()) { + long totalSentBytes = writeCtx.getTotalSentBytes().get(); + long finalSize = last.getResource().getSize(); + if (totalSentBytes == finalSize) { + writeCtx.getConfirmedBytes().set(finalSize); + resultFuture.set(last); + } else if (finalSize < totalSentBytes) { + throw UploadFailureScenario.SCENARIO_4_1.toStorageException( + getRequests(), last, context, null); + } else { + throw UploadFailureScenario.SCENARIO_4_2.toStorageException( + getRequests(), last, context, null); + } + } else { + throw UploadFailureScenario.SCENARIO_0.toStorageException( + getRequests(), last, context, null); + } + } catch (Throwable se) { + open = false; + invocationHandle.setException(se); + } finally { + invocationHandle.set(null); + } + } + + private @NonNull ImmutableList<@NonNull WriteObjectRequest> getRequests() { + if (lastWrittenRequest == null) { + return ImmutableList.of(); + } else { + return ImmutableList.of(lastWrittenRequest); + } + } + + void await() { + try { + invocationHandle.get(); + } catch (InterruptedException | 
ExecutionException e) { + RuntimeException runtimeException; + if (e.getCause() instanceof RuntimeException) { + runtimeException = (RuntimeException) e.getCause(); + } else { + runtimeException = new RuntimeException(e); + } + runtimeException.addSuppressed(new AsyncStorageTaskException()); + throw runtimeException; + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel.java new file mode 100644 index 000000000000..32248227cd3e --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel.java @@ -0,0 +1,313 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.GrpcUtils.contextWithBucketName; +import static com.google.cloud.storage.Utils.nullSafeList; + +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiStreamObserver; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.cloud.storage.ChunkSegmenter.ChunkSegment; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.protobuf.ByteString; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.concurrent.ExecutionException; +import org.checkerframework.checker.nullness.qual.NonNull; + +final class GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel + implements UnbufferedWritableByteChannel { + + private final SettableApiFuture resultFuture; + private final ChunkSegmenter chunkSegmenter; + private final ClientStreamingCallable write; + + private final WriteCtx writeCtx; + + private final Observer responseObserver; + private volatile ApiStreamObserver stream; + + private boolean open = true; + private boolean first = true; + private boolean finished = false; + private volatile WriteObjectRequest lastWrittenRequest; + + GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel( + SettableApiFuture resultFuture, + ChunkSegmenter chunkSegmenter, + ClientStreamingCallable write, + WriteCtx writeCtx) { + String bucketName = writeCtx.getRequestFactory().bucketName(); + this.resultFuture = resultFuture; + this.chunkSegmenter = chunkSegmenter; + + 
GrpcCallContext internalContext = + contextWithBucketName(bucketName, GrpcCallContext.createDefault()); + this.write = write.withDefaultCallContext(internalContext); + + this.writeCtx = writeCtx; + this.responseObserver = new Observer(internalContext); + } + + @Override + public long write(ByteBuffer[] srcs, int srcsOffset, int srcsLength) throws IOException { + return internalWrite(srcs, srcsOffset, srcsLength, false); + } + + @Override + public long writeAndClose(ByteBuffer[] srcs, int srcsOffset, int srcsLength) throws IOException { + long write = internalWrite(srcs, srcsOffset, srcsLength, true); + close(); + return write; + } + + @Override + public boolean isOpen() { + return open; + } + + @Override + public void close() throws IOException { + if (!open) { + return; + } + open = false; + ApiStreamObserver openedStream = openedStream(); + try { + if (!finished) { + WriteObjectRequest message = finishMessage(); + lastWrittenRequest = message; + openedStream.onNext(message); + finished = true; + } + openedStream.onCompleted(); + responseObserver.await(); + } catch (RuntimeException e) { + resultFuture.setException(e); + throw e; + } + } + + private long internalWrite(ByteBuffer[] srcs, int srcsOffset, int srcsLength, boolean finalize) + throws ClosedChannelException { + if (!open) { + throw new ClosedChannelException(); + } + + ChunkSegment[] data = chunkSegmenter.segmentBuffers(srcs, srcsOffset, srcsLength); + if (data.length == 0) { + return 0; + } + + ApiStreamObserver openedStream = openedStream(); + int bytesConsumed = 0; + try { + for (int i = 0; i < data.length; i++) { + ChunkSegment datum = data[i]; + Crc32cLengthKnown crc32c = datum.getCrc32c(); + ByteString b = datum.getB(); + int contentSize = b.size(); + long offset = writeCtx.getTotalSentBytes().getAndAdd(contentSize); + Crc32cLengthKnown cumulative = + writeCtx + .getCumulativeCrc32c() + .accumulateAndGet(crc32c, chunkSegmenter.getHasher()::nullSafeConcat); + ChecksummedData.Builder checksummedData 
= ChecksummedData.newBuilder().setContent(b); + if (crc32c != null) { + checksummedData.setCrc32C(crc32c.getValue()); + } + WriteObjectRequest.Builder builder = writeCtx.newRequestBuilder(); + if (!first) { + builder.clearUploadId(); + builder.clearWriteObjectSpec(); + builder.clearObjectChecksums(); + } + builder.setWriteOffset(offset).setChecksummedData(checksummedData.build()); + if (!datum.isOnlyFullBlocks() || (finalize && i + 1 == data.length)) { + builder.setFinishWrite(true); + if (cumulative != null) { + builder.setObjectChecksums( + ObjectChecksums.newBuilder().setCrc32C(cumulative.getValue()).build()); + } + finished = true; + } + + WriteObjectRequest build = builder.build(); + first = false; + lastWrittenRequest = build; + openedStream.onNext(build); + bytesConsumed += contentSize; + } + if (finalize && !finished) { + WriteObjectRequest finishMessage = finishMessage(); + lastWrittenRequest = finishMessage; + openedStream.onNext(finishMessage); + finished = true; + } + } catch (RuntimeException e) { + resultFuture.setException(e); + throw e; + } + + return bytesConsumed; + } + + @NonNull + private WriteObjectRequest finishMessage() { + long offset = writeCtx.getTotalSentBytes().get(); + Crc32cLengthKnown crc32cValue = writeCtx.getCumulativeCrc32c().get(); + + WriteObjectRequest.Builder b = + writeCtx.newRequestBuilder().setFinishWrite(true).setWriteOffset(offset); + if (crc32cValue != null) { + b.setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(crc32cValue.getValue()).build()); + } + WriteObjectRequest message = b.build(); + return message; + } + + private ApiStreamObserver openedStream() { + if (stream == null) { + synchronized (this) { + if (stream == null) { + stream = write.clientStreamingCall(responseObserver); + } + } + } + return stream; + } + + class Observer implements ApiStreamObserver { + + private final GrpcCallContext context; + + private final SettableApiFuture invocationHandle; + private volatile WriteObjectResponse last; + + 
Observer(GrpcCallContext context) { + this.context = context; + this.invocationHandle = SettableApiFuture.create(); + } + + @Override + public void onNext(WriteObjectResponse value) { + last = value; + } + + @Override + public void onError(Throwable t) { + if (t instanceof ApiException) { + // use StorageExceptions logic to translate from ApiException to our status codes ensuring + // things fall in line with our retry handlers. + // This is suboptimal, as it will initialize a second exception, however this is the + // unusual case, and it should not cause a significant overhead given its rarity. + StorageException tmp = StorageException.asStorageException((ApiException) t); + StorageException storageException = + UploadFailureScenario.toStorageException( + tmp.getCode(), + tmp.getMessage(), + tmp.getReason(), + nullSafeList(lastWrittenRequest), + null, + context, + t); + resultFuture.setException(storageException); + invocationHandle.setException(storageException); + } else { + resultFuture.setException(t); + invocationHandle.setException(t); + } + } + + @Override + public void onCompleted() { + boolean finalizing = lastWrittenRequest.getFinishWrite(); + if (last == null) { + clientDetectedError( + UploadFailureScenario.toStorageException( + 0, + "onComplete without preceding onNext, unable to determine success.", + "invalid", + nullSafeList(lastWrittenRequest), + null, + context, + null)); + } else if (last.hasResource() /* && finalizing*/) { + long totalSentBytes = writeCtx.getTotalSentBytes().get(); + long finalSize = last.getResource().getSize(); + if (totalSentBytes == finalSize) { + ok(finalSize); + } else if (finalSize < totalSentBytes) { + clientDetectedError( + UploadFailureScenario.SCENARIO_4_1.toStorageException( + nullSafeList(lastWrittenRequest), last, context, null)); + } else { + clientDetectedError( + UploadFailureScenario.SCENARIO_4_2.toStorageException( + nullSafeList(lastWrittenRequest), last, context, null)); + } + } else if (!finalizing || 
last.hasPersistedSize()) { // unexpected incremental response + clientDetectedError( + UploadFailureScenario.toStorageException( + 0, + "Unexpected incremental response for finalizing request.", + "invalid", + nullSafeList(lastWrittenRequest), + last, + context, + null)); + } else { + clientDetectedError( + UploadFailureScenario.SCENARIO_0.toStorageException( + nullSafeList(lastWrittenRequest), last, context, null)); + } + } + + private void ok(long persistedSize) { + writeCtx.getConfirmedBytes().set(persistedSize); + resultFuture.set(last); + invocationHandle.set(null); + } + + private void clientDetectedError(StorageException storageException) { + open = false; + resultFuture.setException(storageException); + invocationHandle.setException(storageException); + } + + void await() { + try { + invocationHandle.get(); + } catch (InterruptedException | ExecutionException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } else { + throw new RuntimeException(e); + } + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedReadableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedReadableByteChannel.java new file mode 100644 index 000000000000..cef751213c6d --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedReadableByteChannel.java @@ -0,0 +1,485 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.client.http.HttpStatusCodes; +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.retrying.BasicResultRetryAlgorithm; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.rpc.ApiExceptions; +import com.google.api.gax.rpc.OutOfRangeException; +import com.google.api.gax.rpc.StateCheckingResponseObserver; +import com.google.api.gax.rpc.StreamController; +import com.google.api.gax.rpc.WatchdogTimeoutException; +import com.google.cloud.BaseServiceException; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.GrpcUtils.ZeroCopyServerStreamingCallable; +import com.google.cloud.storage.Hasher.UncheckedChecksumMismatchException; +import com.google.cloud.storage.ResponseContentLifecycleHandle.ChildRef; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import com.google.common.base.Suppliers; +import com.google.protobuf.ByteString; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.Object; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; +import io.grpc.Status.Code; +import java.io.Closeable; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ScatteringByteChannel; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import 
java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Supplier; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class GapicUnbufferedReadableByteChannel + implements UnbufferedReadableByteChannel, ScatteringByteChannel { + private static final java.lang.Object EOF_MARKER = new java.lang.Object(); + + private final SettableApiFuture result; + private final ZeroCopyServerStreamingCallable read; + private final ReadObjectRequest req; + private final Hasher hasher; + private final Retrier retrier; + private final ResultRetryAlgorithm alg; + private final SimpleBlockingQueue queue; + + private final AtomicLong fetchOffset; + private volatile ReadObjectObserver readObjectObserver; + private volatile boolean open = true; + private volatile boolean complete = false; + + private long blobOffset; + private Object metadata; + + private ReadObjectResponseChildRef leftovers; + + GapicUnbufferedReadableByteChannel( + SettableApiFuture result, + ZeroCopyServerStreamingCallable read, + ReadObjectRequest req, + Hasher hasher, + Retrier retrier, + ResultRetryAlgorithm alg) { + this.result = result; + this.read = read; + this.req = req; + this.hasher = hasher; + this.fetchOffset = new AtomicLong(req.getReadOffset()); + this.blobOffset = req.getReadOffset(); + this.retrier = retrier; + this.alg = + new BasicResultRetryAlgorithm() { + @Override + public boolean shouldRetry( + Throwable previousThrowable, java.lang.Object previousResponse) { + // unfortunately we can't unit test this as this time, because WatchdogTimeoutException + // does not have a publicly accessible way of constructing it. 
+ boolean isWatchdogTimeout = + previousThrowable instanceof StorageException + && previousThrowable.getCause() instanceof WatchdogTimeoutException; + boolean isChecksumMismatch = + previousThrowable instanceof StorageException + && previousThrowable.getCause() instanceof UncheckedChecksumMismatchException; + boolean shouldRetry = + isWatchdogTimeout || isChecksumMismatch || alg.shouldRetry(previousThrowable, null); + if (previousThrowable != null && !shouldRetry) { + result.setException(previousThrowable); + } + return shouldRetry; + } + }; + // The reasoning for 2 elements below allow for a single response and the EOF/error signal + // from onComplete or onError. Same thing com.google.api.gax.rpc.QueuingResponseObserver does. + this.queue = new SimpleBlockingQueue<>(2); + } + + @Override + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + if (complete && open) { + close(); + return -1; + } + if (!open) { + throw new ClosedChannelException(); + } + + long totalBufferCapacity = Buffers.totalRemaining(dsts, offset, length); + ReadCursor c = new ReadCursor(blobOffset, blobOffset + totalBufferCapacity); + while (c.hasRemaining()) { + if (leftovers != null) { + leftovers.copy(c, dsts, offset, length); + if (!leftovers.hasRemaining()) { + leftovers.close(); + leftovers = null; + } + continue; + } + + ensureStreamOpen(); + java.lang.Object take; + try { + take = queue.poll(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new InterruptedIOException(); + } + if (take instanceof IOException) { + IOException ioe = (IOException) take; + if (alg.shouldRetry(ioe, null)) { + readObjectObserver = null; + continue; + } else { + ioe.addSuppressed(new AsyncStorageTaskException()); + throw ioe; + } + } + if (take instanceof Throwable) { + Throwable throwable = (Throwable) take; + BaseServiceException coalesce = StorageException.coalesce(throwable); + if (alg.shouldRetry(coalesce, null)) { + readObjectObserver = 
null; + continue; + } else { + close(); + throw new IOException(coalesce); + } + } + if (take == EOF_MARKER) { + complete = true; + break; + } + + ReadObjectResponseChildRef ref = (ReadObjectResponseChildRef) take; + ref.copy(c, dsts, offset, length); + if (ref.hasRemaining()) { + leftovers = ref; + } else { + ref.close(); + } + } + long read = c.read(); + + blobOffset += read; + + return read; + } + + @Override + public boolean isOpen() { + return open; + } + + @Override + public void close() throws IOException { + open = false; + try { + if (leftovers != null) { + leftovers.close(); + } + ReadObjectObserver obs = readObjectObserver; + if (obs != null && !obs.cancellation.isDone()) { + obs.cancel(); + drainQueue(); + try { + // make sure our waiting doesn't lockup permanently + obs.cancellation.get(1, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + InterruptedIOException ioe = new InterruptedIOException(); + ioe.initCause(e); + ioe.addSuppressed(new AsyncStorageTaskException()); + throw ioe; + } catch (ExecutionException e) { + Throwable cause = e; + if (e.getCause() != null) { + cause = e.getCause(); + } + IOException ioException = new IOException(cause); + ioException.addSuppressed(new AsyncStorageTaskException()); + throw ioException; + } catch (TimeoutException ignore) { + } + } + } finally { + drainQueue(); + } + } + + private void drainQueue() throws IOException { + boolean shouldInterupt = false; + try { + IOException ioException = null; + while (queue.nonEmpty()) { + try { + java.lang.Object queueValue = queue.poll(); + if (queueValue instanceof ReadObjectResponse) { + ReadObjectResponse resp = (ReadObjectResponse) queueValue; + ResponseContentLifecycleHandle handle = + read.getResponseContentLifecycleManager().get(resp); + handle.close(); + } else if (queueValue == EOF_MARKER || queueValue instanceof Throwable) { + break; + } + } catch (IOException e) { + if (ioException == null) { + ioException = e; + } 
else if (ioException != e) { + ioException.addSuppressed(e); + } + } catch (InterruptedException e) { + shouldInterupt = true; + if (ioException == null) { + ioException = new InterruptedIOException(); + } else { + ioException.addSuppressed(e); + } + } + } + if (ioException != null) { + throw ioException; + } + } finally { + if (shouldInterupt) { + Thread.currentThread().interrupt(); + } + } + } + + ApiFuture getResult() { + return result; + } + + private void ensureStreamOpen() { + if (readObjectObserver == null) { + java.lang.Object peek = queue.peek(); + if (peek instanceof Throwable || peek == EOF_MARKER) { + // If our queue has an error or EOF, do not send another request + return; + } + readObjectObserver = + retrier.run( + alg, + () -> { + ReadObjectObserver tmp = new ReadObjectObserver(); + ReadObjectRequest.Builder builder = req.toBuilder(); + long currentFetchOffset = fetchOffset.get(); + if (req.getReadOffset() != currentFetchOffset) { + builder.setReadOffset(currentFetchOffset); + } + if (metadata != null && req.getGeneration() == 0) { + builder.setGeneration(metadata.getGeneration()); + } + read.call(builder.build(), tmp); + ApiExceptions.callAndTranslateApiException(tmp.open); + return tmp; + }, + Decoder.identity()); + } + } + + private IOException createError(String message) throws IOException { + StorageException cause = + new StorageException(HttpStatusCodes.STATUS_CODE_PRECONDITION_FAILED, message); + return new IOException(message, cause); + } + + private final class ReadObjectObserver extends StateCheckingResponseObserver { + + private final SettableApiFuture open = SettableApiFuture.create(); + private final SettableApiFuture cancellation = SettableApiFuture.create(); + + private volatile StreamController controller; + + void cancel() { + controller.cancel(); + } + + @Override + protected void onStartImpl(StreamController controller) { + this.controller = controller; + controller.disableAutoInboundFlowControl(); + controller.request(1); + } + 
+ @Override + protected void onResponseImpl(ReadObjectResponse response) { + controller.request(1); + open.set(null); + try (ResponseContentLifecycleHandle handle = + read.getResponseContentLifecycleManager().get(response)) { + ChecksummedData checksummedData = response.getChecksummedData(); + ByteString content = checksummedData.getContent(); + int contentSize = content.size(); + // Very important to know whether a crc32c value is set. Without checking, protobuf will + // happily return 0, which is a valid crc32c value. + if (checksummedData.hasCrc32C()) { + Crc32cLengthKnown expected = Crc32cValue.of(checksummedData.getCrc32C(), contentSize); + try { + hasher.validateUnchecked(expected, content); + } catch (UncheckedChecksumMismatchException e) { + queue.offer(e); + return; + } + } + if (response.hasMetadata()) { + Object respMetadata = response.getMetadata(); + if (metadata == null) { + metadata = respMetadata; + } else if (metadata.getGeneration() != respMetadata.getGeneration()) { + IOException exception = + createError( + String.format( + Locale.US, + "Mismatch Generation between subsequent reads. 
Expected %d but received %d", + metadata.getGeneration(), + respMetadata.getGeneration())); + queue.offer(exception); + return; + } + } + queue.offer(ReadObjectResponseChildRef.from(handle)); + fetchOffset.addAndGet(contentSize); + if (response.hasMetadata() && !result.isDone()) { + result.set(response.getMetadata()); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw Code.ABORTED.toStatus().withCause(e).asRuntimeException(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + protected void onErrorImpl(Throwable t) { + if (t instanceof OutOfRangeException) { + try { + queue.offer(EOF_MARKER); + open.set(null); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw Code.ABORTED.toStatus().withCause(e).asRuntimeException(); + } + } + if (t instanceof CancellationException) { + cancellation.set(t); + } + if (!open.isDone()) { + open.setException(t); + } + try { + queue.offer(t); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw Code.ABORTED.toStatus().withCause(e).asRuntimeException(); + } + } + + @Override + protected void onCompleteImpl() { + try { + cancellation.set(null); + queue.offer(EOF_MARKER); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw Code.ABORTED.toStatus().withCause(e).asRuntimeException(); + } + } + } + + /** + * Simplified wrapper around an {@link java.util.concurrent.ArrayBlockingQueue}. We don't need the + * majority of methods/functionality just blocking offer/poll. 
+ */ + static final class SimpleBlockingQueue { + + private final ArrayBlockingQueue queue; + + SimpleBlockingQueue(int poolMaxSize) { + this.queue = new ArrayBlockingQueue<>(poolMaxSize); + } + + public boolean nonEmpty() { + return !queue.isEmpty(); + } + + @Nullable + public T peek() { + return queue.peek(); + } + + @NonNull + public T poll() throws InterruptedException { + return queue.take(); + } + + public void offer(@NonNull T element) throws InterruptedException { + queue.put(element); + } + } + + private static final class ReadObjectResponseChildRef implements Closeable { + private final ChildRef ref; + private final Supplier> lazyBuffers; + + ReadObjectResponseChildRef(ChildRef ref) { + this.ref = ref; + this.lazyBuffers = Suppliers.memoize(() -> ref.byteString().asReadOnlyByteBufferList()); + } + + static ReadObjectResponseChildRef from( + ResponseContentLifecycleHandle handle) { + return new ReadObjectResponseChildRef( + handle.borrow(response -> response.getChecksummedData().getContent())); + } + + void copy(ReadCursor c, ByteBuffer[] dsts, int offset, int length) { + List buffers = lazyBuffers.get(); + for (ByteBuffer b : buffers) { + long copiedBytes = Buffers.copy(b, dsts, offset, length); + c.advance(copiedBytes); + if (b.hasRemaining()) break; + } + } + + boolean hasRemaining() { + List buffers = lazyBuffers.get(); + for (ByteBuffer b : buffers) { + if (b.hasRemaining()) return true; + } + return false; + } + + public void close() throws IOException { + ref.close(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUploadSessionBuilder.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUploadSessionBuilder.java new file mode 100644 index 000000000000..1e4001fcaece --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUploadSessionBuilder.java @@ -0,0 +1,123 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the 
package com.google.cloud.storage;

import com.google.api.core.ApiFuture;
import com.google.api.core.SettableApiFuture;
import com.google.api.gax.rpc.BidiStreamingCallable;
import com.google.api.gax.rpc.ClientStreamingCallable;
import com.google.api.gax.rpc.UnaryCallable;
import com.google.cloud.storage.Conversions.Decoder;
import com.google.cloud.storage.Retrying.RetrierWithAlg;
import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt;
import com.google.cloud.storage.UnifiedOpts.Opts;
import com.google.storage.v2.BidiWriteObjectRequest;
import com.google.storage.v2.BidiWriteObjectResponse;
import com.google.storage.v2.StartResumableWriteRequest;
import com.google.storage.v2.StartResumableWriteResponse;
import com.google.storage.v2.WriteObjectRequest;
import com.google.storage.v2.WriteObjectResponse;
import java.util.function.Function;

/**
 * Entry point for constructing gRPC upload sessions: unary/bidi byte-channel builders and
 * helpers that initiate resumable writes (obtaining an upload id from the service).
 */
final class GapicUploadSessionBuilder {

  private GapicUploadSessionBuilder() {}

  static GapicUploadSessionBuilder create() {
    return new GapicUploadSessionBuilder();
  }

  GapicWritableByteChannelSessionBuilder byteChannel(
      ClientStreamingCallable<WriteObjectRequest, WriteObjectResponse> write) {
    return new GapicWritableByteChannelSessionBuilder(write);
  }

  GapicBidiWritableByteChannelSessionBuilder bidiByteChannel(
      BidiStreamingCallable<BidiWriteObjectRequest, BidiWriteObjectResponse> write) {
    return new GapicBidiWritableByteChannelSessionBuilder(write);
  }

  /**
   * Starts a resumable write by calling StartResumableWrite (with retries) and returns a future
   * {@link ResumableWrite} carrying the upload id bound back into {@code writeObjectRequest}.
   *
   * <p>Any {@link StorageException} raised after retries are exhausted is reported through the
   * returned future rather than thrown.
   */
  ApiFuture<ResumableWrite> resumableWrite(
      UnaryCallable<StartResumableWriteRequest, StartResumableWriteResponse> callable,
      WriteObjectRequest writeObjectRequest,
      Opts<ObjectTargetOpt> opts,
      RetrierWithAlg retrier) {
    StartResumableWriteRequest.Builder b = StartResumableWriteRequest.newBuilder();
    // Carry over only the fields StartResumableWrite understands from the write request.
    if (writeObjectRequest.hasWriteObjectSpec()) {
      b.setWriteObjectSpec(writeObjectRequest.getWriteObjectSpec());
    }
    if (writeObjectRequest.hasCommonObjectRequestParams()) {
      b.setCommonObjectRequestParams(writeObjectRequest.getCommonObjectRequestParams());
    }
    if (writeObjectRequest.hasObjectChecksums()) {
      b.setObjectChecksums(writeObjectRequest.getObjectChecksums());
    }
    StartResumableWriteRequest req = opts.startResumableWriteRequest().apply(b).build();
    // Once an upload id is assigned, subsequent writes reference it instead of the object spec.
    Function<String, WriteObjectRequest> f =
        uploadId ->
            writeObjectRequest.toBuilder().clearWriteObjectSpec().setUploadId(uploadId).build();
    SettableApiFuture<ResumableWrite> future = SettableApiFuture.create();
    try {
      ResumableWrite resumableWrite =
          retrier.run(
              () -> {
                StartResumableWriteResponse resp = callable.call(req);
                return new ResumableWrite(req, resp, f);
              },
              Decoder.identity());
      future.set(resumableWrite);
    } catch (StorageException e) {
      future.setException(e);
    }
    return future;
  }

  /**
   * Bidi variant of {@link #resumableWrite}: identical start semantics, but the upload id is
   * bound into a {@link BidiWriteObjectRequest} for use on the bidi write stream.
   */
  ApiFuture<BidiResumableWrite> bidiResumableWrite(
      UnaryCallable<StartResumableWriteRequest, StartResumableWriteResponse> x,
      BidiWriteObjectRequest writeObjectRequest,
      Opts<ObjectTargetOpt> opts,
      RetrierWithAlg retrier) {
    StartResumableWriteRequest.Builder b = StartResumableWriteRequest.newBuilder();
    if (writeObjectRequest.hasWriteObjectSpec()) {
      b.setWriteObjectSpec(writeObjectRequest.getWriteObjectSpec());
    }
    if (writeObjectRequest.hasCommonObjectRequestParams()) {
      b.setCommonObjectRequestParams(writeObjectRequest.getCommonObjectRequestParams());
    }
    if (writeObjectRequest.hasObjectChecksums()) {
      b.setObjectChecksums(writeObjectRequest.getObjectChecksums());
    }
    StartResumableWriteRequest req = opts.startResumableWriteRequest().apply(b).build();
    Function<String, BidiWriteObjectRequest> f =
        uploadId ->
            writeObjectRequest.toBuilder().clearWriteObjectSpec().setUploadId(uploadId).build();
    SettableApiFuture<BidiResumableWrite> future = SettableApiFuture.create();
    try {
      BidiResumableWrite resumableWrite =
          retrier.run(
              () -> {
                StartResumableWriteResponse resp = x.call(req);
                return new BidiResumableWrite(req, resp, f);
              },
              Decoder.identity());
      future.set(resumableWrite);
    } catch (StorageException e) {
      future.setException(e);
    }
    return future;
  }
}
package com.google.cloud.storage;

import static java.util.Objects.requireNonNull;

import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.api.core.InternalApi;
import com.google.api.core.SettableApiFuture;
import com.google.api.gax.retrying.ResultRetryAlgorithm;
import com.google.api.gax.rpc.ClientStreamingCallable;
import com.google.api.gax.rpc.UnaryCallable;
import com.google.cloud.storage.ChannelSession.BufferedWriteSession;
import com.google.cloud.storage.ChannelSession.UnbufferedWriteSession;
import com.google.cloud.storage.Retrying.Retrier;
import com.google.cloud.storage.Retrying.RetrierWithAlg;
import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel;
import com.google.cloud.storage.WriteCtx.WriteObjectRequestBuilderFactory;
import com.google.storage.v2.QueryWriteStatusRequest;
import com.google.storage.v2.QueryWriteStatusResponse;
import com.google.storage.v2.ServiceConstants.Values;
import com.google.storage.v2.WriteObjectRequest;
import com.google.storage.v2.WriteObjectResponse;
import java.nio.ByteBuffer;
import java.util.function.BiFunction;
import org.checkerframework.checker.nullness.qual.NonNull;

/**
 * Fluent builder hierarchy for gRPC {@link WritableByteChannelSession}s: direct, resumable, and
 * journaling uploads, each in unbuffered or buffered form.
 */
final class GapicWritableByteChannelSessionBuilder {

  private static final int DEFAULT_BUFFER_CAPACITY = ByteSizeConstants._16MiB;
  private final ClientStreamingCallable<WriteObjectRequest, WriteObjectResponse> write;
  private Hasher hasher;
  private ByteStringStrategy byteStringStrategy;

  GapicWritableByteChannelSessionBuilder(
      ClientStreamingCallable<WriteObjectRequest, WriteObjectResponse> write) {
    this.write = write;
    this.hasher = Hasher.defaultHasher();
    this.byteStringStrategy = ByteStringStrategy.copy();
  }

  /**
   * Set the {@link Hasher} to apply to the bytes passing through the built session's channel.
   *
   * <p>Default: {@link Hasher#defaultHasher()}
   *
   * @see Hasher#enabled()
   * @see Hasher#noop()
   */
  GapicWritableByteChannelSessionBuilder setHasher(Hasher hasher) {
    this.hasher = requireNonNull(hasher, "hasher must be non null");
    return this;
  }

  /**
   * Set the {@link ByteStringStrategy} to be used when constructing {@link
   * com.google.protobuf.ByteString ByteString}s from {@link ByteBuffer}s.
   *
   * <p>Default: {@link ByteStringStrategy#copy()}
   *
   * <p>Note: usage of {@link ByteStringStrategy#noCopy()} requires that any {@link ByteBuffer}
   * passed to the session's channel not be modified while {@link
   * java.nio.channels.WritableByteChannel#write(ByteBuffer)} is processing.
   *
   * @see ByteStringStrategy#copy()
   * @see ByteStringStrategy#noCopy()
   */
  GapicWritableByteChannelSessionBuilder setByteStringStrategy(
      ByteStringStrategy byteStringStrategy) {
    this.byteStringStrategy =
        requireNonNull(byteStringStrategy, "byteStringStrategy must be non null");
    return this;
  }

  /**
   * The built {@link WritableByteChannelSession} will perform a "Direct" upload.
   *
   * <p>A "Direct" upload will only sync the transmitted data with GCS when the channel is closed.
   * If an error is returned the entire upload will need to be attempted again.
   */
  DirectUploadBuilder direct() {
    return new DirectUploadBuilder();
  }

  /**
   * The built {@link WritableByteChannelSession} will perform a "Resumable" upload.
   *
   * <p>A "Resumable" upload will sync the transmitted data with GCS upon each individual flush and
   * when the channel is closed.
   *
   * <p>If an error is returned the individual flush can be transparently retried.
   */
  ResumableUploadBuilder resumable() {
    return new ResumableUploadBuilder();
  }

  JournalingResumableUploadBuilder journaling() {
    return new JournalingResumableUploadBuilder();
  }

  private @NonNull ChunkSegmenter getChunkSegmenter() {
    // it is theoretically possible that the setter methods for the following variables could
    // be called again between when this method is invoked and the resulting function is invoked.
    // To ensure we are using the specified values at the point in time they are bound to the
    // function read them into local variables which will be closed over rather than the class
    // fields.
    ByteStringStrategy boundStrategy = byteStringStrategy;
    Hasher boundHasher = hasher;
    return new ChunkSegmenter(boundHasher, boundStrategy, Values.MAX_WRITE_CHUNK_BYTES_VALUE);
  }

  // Identity helper that pins the generic type so lambda-typed session factories can have
  // .andThen(...) applied to them.
  private static <StartT>
      BiFunction<StartT, SettableApiFuture<WriteObjectResponse>, UnbufferedWritableByteChannel>
          lift(
              BiFunction<
                      StartT, SettableApiFuture<WriteObjectResponse>, UnbufferedWritableByteChannel>
                  func) {
    return func;
  }

  final class DirectUploadBuilder {

    /**
     * Do not apply any intermediate buffering. Any call to {@link
     * java.nio.channels.WritableByteChannel#write(ByteBuffer)} will be segmented as is and sent to
     * GCS.
     *
     * <p>Note: this is considered an advanced API, and should not be used in circumstances in which
     * control of {@link ByteBuffer}s sent to {@code write} is not self-contained.
     */
    UnbufferedDirectUploadBuilder unbuffered() {
      return new UnbufferedDirectUploadBuilder();
    }

    /** Buffer up to {@link #DEFAULT_BUFFER_CAPACITY} worth of bytes before attempting to flush */
    BufferedDirectUploadBuilder buffered() {
      return buffered(BufferHandle.allocate(DEFAULT_BUFFER_CAPACITY));
    }

    /**
     * Buffer using {@code byteBuffer} worth of space before attempting to flush.
     *
     * <p>The provided {@link ByteBuffer} should be aligned with GCS's block size of 256 KiB.
     */
    BufferedDirectUploadBuilder buffered(ByteBuffer byteBuffer) {
      return buffered(BufferHandle.handleOf(byteBuffer));
    }

    BufferedDirectUploadBuilder buffered(BufferHandle bufferHandle) {
      return new BufferedDirectUploadBuilder(bufferHandle);
    }

    final class UnbufferedDirectUploadBuilder {

      private WriteObjectRequest req;

      /** Specify the {@link WriteObjectRequest} which will be used to start the Write stream. */
      UnbufferedDirectUploadBuilder setRequest(WriteObjectRequest req) {
        this.req = requireNonNull(req, "req must be non null");
        return this;
      }

      UnbufferedWritableByteChannelSession<WriteObjectResponse> build() {
        ChunkSegmenter chunkSegmenter = getChunkSegmenter();
        return new UnbufferedWriteSession<>(
            ApiFutures.immediateFuture(requireNonNull(req, "req must be non null")),
            lift(
                    (WriteObjectRequest start,
                        SettableApiFuture<WriteObjectResponse> resultFuture) ->
                        new GapicUnbufferedDirectWritableByteChannel(
                            resultFuture,
                            chunkSegmenter,
                            write,
                            WriteCtx.of(
                                WriteObjectRequestBuilderFactory.simple(start),
                                chunkSegmenter.getHasher())))
                .andThen(StorageByteChannels.writable()::createSynchronized));
      }
    }

    final class BufferedDirectUploadBuilder {

      private final BufferHandle bufferHandle;
      private WriteObjectRequest req;

      BufferedDirectUploadBuilder(BufferHandle bufferHandle) {
        this.bufferHandle = bufferHandle;
      }

      /** Specify the {@link WriteObjectRequest} which will be used to start the Write stream. */
      BufferedDirectUploadBuilder setRequest(WriteObjectRequest req) {
        this.req = requireNonNull(req, "req must be non null");
        return this;
      }

      BufferedWritableByteChannelSession<WriteObjectResponse> build() {
        ChunkSegmenter chunkSegmenter = getChunkSegmenter();
        return new BufferedWriteSession<>(
            ApiFutures.immediateFuture(requireNonNull(req, "req must be non null")),
            lift(
                    (WriteObjectRequest start,
                        SettableApiFuture<WriteObjectResponse> resultFuture) ->
                        new GapicUnbufferedDirectWritableByteChannel(
                            resultFuture,
                            chunkSegmenter,
                            write,
                            WriteCtx.of(
                                WriteObjectRequestBuilderFactory.simple(start),
                                chunkSegmenter.getHasher())))
                .andThen(c -> new DefaultBufferedWritableByteChannel(bufferHandle, c))
                .andThen(StorageByteChannels.writable()::createSynchronized));
      }
    }
  }

  final class ResumableUploadBuilder {

    private RetrierWithAlg retrier;
    // when true, every flush is fsync'd to GCS (chunked channel); when false, only finalize
    // on close
    private boolean fsyncEvery;

    ResumableUploadBuilder() {
      this.retrier = RetrierWithAlg.attemptOnce();
      this.fsyncEvery = true;
    }

    ResumableUploadBuilder withRetryConfig(RetrierWithAlg retrier) {
      this.retrier = requireNonNull(retrier, "retrier must be non null");
      return this;
    }

    @InternalApi
    ResumableUploadBuilder setFsyncEvery(boolean fsyncEvery) {
      this.fsyncEvery = fsyncEvery;
      return this;
    }

    /**
     * Do not apply any intermediate buffering. Any call to {@link
     * java.nio.channels.WritableByteChannel#write(ByteBuffer)} will be segmented as is and sent to
     * GCS.
     *
     * <p>Note: this is considered an advanced API, and should not be used in circumstances in which
     * control of {@link ByteBuffer}s sent to {@code write} is not self-contained.
     */
    UnbufferedResumableUploadBuilder unbuffered() {
      return new UnbufferedResumableUploadBuilder();
    }

    /** Buffer up to {@link #DEFAULT_BUFFER_CAPACITY} worth of bytes before attempting to flush */
    BufferedResumableUploadBuilder buffered() {
      return buffered(BufferHandle.allocate(DEFAULT_BUFFER_CAPACITY));
    }

    /**
     * Buffer using {@code byteBuffer} worth of space before attempting to flush.
     *
     * <p>The provided {@link ByteBuffer} should be aligned with GCS's block size of 256 KiB.
     */
    BufferedResumableUploadBuilder buffered(ByteBuffer byteBuffer) {
      return buffered(BufferHandle.handleOf(byteBuffer));
    }

    BufferedResumableUploadBuilder buffered(BufferHandle bufferHandle) {
      return new BufferedResumableUploadBuilder(bufferHandle);
    }

    final class UnbufferedResumableUploadBuilder {

      private ApiFuture<ResumableWrite> start;

      /**
       * Set the Future which will contain the ResumableWrite information necessary to open the
       * Write stream.
       */
      UnbufferedResumableUploadBuilder setStartAsync(ApiFuture<ResumableWrite> start) {
        this.start = requireNonNull(start, "start must be non null");
        return this;
      }

      UnbufferedWritableByteChannelSession<WriteObjectResponse> build() {
        RetrierWithAlg boundRetrier = retrier;
        ChunkSegmenter chunkSegmenter = getChunkSegmenter();
        return new UnbufferedWriteSession<>(
            requireNonNull(start, "start must be non null"),
            lift(
                    (ResumableWrite start, SettableApiFuture<WriteObjectResponse> result) -> {
                      if (fsyncEvery) {
                        return new GapicUnbufferedChunkedResumableWritableByteChannel(
                            result,
                            chunkSegmenter,
                            write,
                            WriteCtx.of(start, chunkSegmenter.getHasher()),
                            boundRetrier,
                            Retrying::newCallContext);
                      } else {
                        return new GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel(
                            result,
                            chunkSegmenter,
                            write,
                            WriteCtx.of(start, chunkSegmenter.getHasher()));
                      }
                    })
                .andThen(StorageByteChannels.writable()::createSynchronized));
      }
    }

    final class BufferedResumableUploadBuilder {

      private final BufferHandle bufferHandle;

      private ApiFuture<ResumableWrite> start;

      BufferedResumableUploadBuilder(BufferHandle bufferHandle) {
        this.bufferHandle = bufferHandle;
      }

      /**
       * Set the Future which will contain the ResumableWrite information necessary to open the
       * Write stream.
       */
      BufferedResumableUploadBuilder setStartAsync(ApiFuture<ResumableWrite> start) {
        this.start = requireNonNull(start, "start must be non null");
        return this;
      }

      BufferedWritableByteChannelSession<WriteObjectResponse> build() {
        ChunkSegmenter chunkSegmenter = getChunkSegmenter();
        return new BufferedWriteSession<>(
            requireNonNull(start, "start must be non null"),
            lift(
                    (ResumableWrite start, SettableApiFuture<WriteObjectResponse> result) -> {
                      if (fsyncEvery) {
                        return new GapicUnbufferedChunkedResumableWritableByteChannel(
                            result,
                            chunkSegmenter,
                            write,
                            WriteCtx.of(start, chunkSegmenter.getHasher()),
                            retrier,
                            Retrying::newCallContext);
                      } else {
                        return new GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel(
                            result,
                            chunkSegmenter,
                            write,
                            WriteCtx.of(start, chunkSegmenter.getHasher()));
                      }
                    })
                .andThen(c -> new DefaultBufferedWritableByteChannel(bufferHandle, c))
                .andThen(StorageByteChannels.writable()::createSynchronized));
      }
    }
  }

  final class JournalingResumableUploadBuilder {

    private Retrier retrier;
    private ResultRetryAlgorithm<?> alg;
    private BufferHandle bufferHandle;
    private BufferHandle recoveryBuffer;
    private RecoveryFile recoveryFile;
    private UnaryCallable<QueryWriteStatusRequest, QueryWriteStatusResponse> query;

    JournalingResumableUploadBuilder() {
      this.retrier = Retrier.attemptOnce();
      this.alg = Retrying.neverRetry();
    }

    JournalingResumableUploadBuilder withRetryConfig(
        Retrier retrier,
        ResultRetryAlgorithm<?> alg,
        UnaryCallable<QueryWriteStatusRequest, QueryWriteStatusResponse> query) {
      this.retrier = requireNonNull(retrier, "retrier must be non null");
      this.alg = requireNonNull(alg, "alg must be non null");
      this.query = requireNonNull(query, "query must be non null");
      return this;
    }

    JournalingResumableUploadBuilder withBuffer(BufferHandle bufferHandle) {
      this.bufferHandle = requireNonNull(bufferHandle, "bufferHandle must be non null");
      return this;
    }

    JournalingResumableUploadBuilder withRecoveryBuffer(BufferHandle bufferHandle) {
      this.recoveryBuffer = requireNonNull(bufferHandle, "bufferHandle must be non null");
      return this;
    }

    JournalingResumableUploadBuilder withRecoveryFile(RecoveryFile recoveryFile) {
      this.recoveryFile = requireNonNull(recoveryFile, "recoveryFile must be non null");
      return this;
    }

    /**
     * Set the Future which will contain the ResumableWrite information necessary to open the Write
     * stream.
     */
    BuildableJournalingResumableUploadBuilder setStartAsync(
        ApiFuture<WriteCtx<ResumableWrite>> start) {
      requireNonNull(start, "start must be non null");
      return new BuildableJournalingResumableUploadBuilder(start);
    }

    final class BuildableJournalingResumableUploadBuilder {
      private final ApiFuture<WriteCtx<ResumableWrite>> start;

      private BuildableJournalingResumableUploadBuilder(ApiFuture<WriteCtx<ResumableWrite>> start) {
        this.start = start;
      }

      BufferedWritableByteChannelSession<WriteObjectResponse> build() {
        return new BufferedWriteSession<>(
            requireNonNull(start, "start must be non null"),
            bindFunction()
                .andThen(c -> new DefaultBufferedWritableByteChannel(bufferHandle, c))
                .andThen(StorageByteChannels.writable()::createSynchronized));
      }

      private BiFunction<
              WriteCtx<ResumableWrite>,
              SettableApiFuture<WriteObjectResponse>,
              UnbufferedWritableByteChannel>
          bindFunction() {
        // it is theoretically possible that the setter methods for the following variables could
        // be called again between when this method is invoked and the resulting function is
        // invoked.
        // To ensure we are using the specified values at the point in time they are bound to the
        // function read them into local variables which will be closed over rather than the class
        // fields.
        Retrier boundRetrier = JournalingResumableUploadBuilder.this.retrier;
        ResultRetryAlgorithm<?> alg = JournalingResumableUploadBuilder.this.alg;
        BufferHandle recoveryBuffer = JournalingResumableUploadBuilder.this.recoveryBuffer;
        RecoveryFile recoveryFile = JournalingResumableUploadBuilder.this.recoveryFile;
        UnaryCallable<QueryWriteStatusRequest, QueryWriteStatusResponse> query =
            JournalingResumableUploadBuilder.this.query;
        ByteStringStrategy boundStrategy = byteStringStrategy;
        Hasher boundHasher = hasher;
        return (writeCtx, resultFuture) ->
            new SyncAndUploadUnbufferedWritableByteChannel(
                write,
                query,
                resultFuture,
                new ChunkSegmenter(boundHasher, boundStrategy, Values.MAX_WRITE_CHUNK_BYTES_VALUE),
                boundRetrier,
                alg,
                writeCtx,
                recoveryFile,
                recoveryBuffer);
      }
    }
  }
}
+ */ + +package com.google.cloud.storage; + +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.cloud.ReadChannel; +import com.google.cloud.RestorableState; +import com.google.cloud.storage.GapicDownloadSessionBuilder.ReadableByteChannelSessionBuilder; +import com.google.cloud.storage.GrpcUtils.ZeroCopyServerStreamingCallable; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.storage.v2.Object; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; +import org.checkerframework.checker.nullness.qual.NonNull; + +final class GrpcBlobReadChannel extends BaseStorageReadChannel { + + private final ZeroCopyServerStreamingCallable read; + private final Retrier retrier; + private final ResultRetryAlgorithm resultRetryAlgorithm; + private final ReadObjectRequest request; + private final boolean autoGzipDecompression; + + GrpcBlobReadChannel( + ZeroCopyServerStreamingCallable read, + Retrier retrier, + ResultRetryAlgorithm resultRetryAlgorithm, + ReadObjectRequest request, + boolean autoGzipDecompression) { + super(Conversions.grpc().blobInfo()); + this.read = read; + this.retrier = retrier; + this.resultRetryAlgorithm = resultRetryAlgorithm; + this.request = request; + this.autoGzipDecompression = autoGzipDecompression; + } + + @Override + public RestorableState capture() { + return CrossTransportUtils.throwHttpJsonOnly(ReadChannel.class, "capture"); + } + + @Override + protected LazyReadChannel newLazyReadChannel() { + return new LazyReadChannel<>( + () -> { + ReadableByteChannelSessionBuilder b = + ResumableMedia.gapic() + .read() + .byteChannel(read, retrier, resultRetryAlgorithm) + .setHasher(Hasher.defaultHasher()) + .setAutoGzipDecompression(autoGzipDecompression); + BufferHandle bufferHandle = getBufferHandle(); + // because we're erasing the specific type of channel, we need to declare it here. + // If we don't, the compiler complains we're not returning a compliant type. 
+ ReadableByteChannelSession session; + if (bufferHandle.capacity() > 0) { + session = + b.buffered(getBufferHandle()).setReadObjectRequest(getReadObjectRequest()).build(); + } else { + session = b.unbuffered().setReadObjectRequest(getReadObjectRequest()).build(); + } + return session; + }); + } + + @NonNull + private ReadObjectRequest getReadObjectRequest() { + ByteRangeSpec rangeSpec = getByteRangeSpec(); + ReadObjectRequest.Builder b = request.toBuilder(); + if (request.getGeneration() == 0) { + Object resolvedObject = getResolvedObject(); + if (resolvedObject != null) { + b.setGeneration(resolvedObject.getGeneration()); + } + } + return rangeSpec.seekReadObjectRequest(b).build(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcBlobWriteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcBlobWriteChannel.java new file mode 100644 index 000000000000..3a7bc2a790c1 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcBlobWriteChannel.java @@ -0,0 +1,67 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.cloud.RestorableState; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import java.util.function.Supplier; + +final class GrpcBlobWriteChannel extends BaseStorageWriteChannel { + + private final ClientStreamingCallable write; + private final RetrierWithAlg retrier; + private final Supplier> start; + private final Hasher hasher; + + GrpcBlobWriteChannel( + ClientStreamingCallable write, + RetrierWithAlg retrier, + Supplier> start, + Hasher hasher) { + super(Conversions.grpc().blobInfo().compose(WriteObjectResponse::getResource)); + this.write = write; + this.retrier = retrier; + this.start = start; + this.hasher = hasher; + } + + @Override + public RestorableState capture() { + return CrossTransportUtils.throwHttpJsonOnly(WriteChannel.class, "capture"); + } + + @Override + protected LazyWriteChannel newLazyWriteChannel() { + return new LazyWriteChannel<>( + () -> + ResumableMedia.gapic() + .write() + .byteChannel(write) + .setHasher(hasher) + .setByteStringStrategy(ByteStringStrategy.copy()) + .resumable() + .withRetryConfig(retrier) + .buffered(getBufferHandle()) + .setStartAsync(start.get()) + .build()); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcConversions.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcConversions.java new file mode 100644 index 000000000000..93100059614e --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcConversions.java @@ -0,0 +1,1316 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.Storage.BucketField.IP_FILTER; +import static com.google.cloud.storage.Storage.BucketField.SOFT_DELETE_POLICY; +import static com.google.cloud.storage.Utils.bucketNameCodec; +import static com.google.cloud.storage.Utils.ifNonNull; +import static com.google.cloud.storage.Utils.lift; +import static com.google.cloud.storage.Utils.projectNumberResourceCodec; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.cloud.Binding; +import com.google.cloud.Condition; +import com.google.cloud.Policy; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.BlobInfo.CustomerEncryption; +import com.google.cloud.storage.BucketInfo.CustomPlacementConfig; +import com.google.cloud.storage.BucketInfo.CustomerManagedEncryptionEnforcementConfig; +import com.google.cloud.storage.BucketInfo.CustomerSuppliedEncryptionEnforcementConfig; +import com.google.cloud.storage.BucketInfo.EncryptionEnforcementRestrictionMode; +import com.google.cloud.storage.BucketInfo.GoogleManagedEncryptionEnforcementConfig; +import com.google.cloud.storage.BucketInfo.IpFilter; +import com.google.cloud.storage.BucketInfo.LifecycleRule; +import com.google.cloud.storage.BucketInfo.LifecycleRule.AbortIncompleteMPUAction; +import com.google.cloud.storage.BucketInfo.Logging; +import com.google.cloud.storage.BucketInfo.PublicAccessPrevention; +import com.google.cloud.storage.Conversions.Codec; +import 
com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+import com.google.common.io.BaseEncoding;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.ProtocolStringList;
+import com.google.protobuf.Timestamp;
+import com.google.storage.v2.Bucket;
+import com.google.storage.v2.Bucket.Billing;
+import com.google.storage.v2.Bucket.Encryption;
+import com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource;
+import com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource;
+import com.google.storage.v2.Bucket.Website;
+import com.google.storage.v2.BucketAccessControl;
+import com.google.storage.v2.CryptoKeyName;
+import com.google.storage.v2.Object;
+import com.google.storage.v2.ObjectAccessControl;
+import com.google.storage.v2.ObjectChecksums;
+import com.google.storage.v2.ObjectContexts;
+import com.google.storage.v2.ObjectCustomContextPayload;
+import com.google.storage.v2.Owner;
+import com.google.type.Date;
+import com.google.type.Expr;
+import java.nio.charset.StandardCharsets;
+import java.time.Duration;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.OffsetDateTime;
+import java.time.ZoneOffset;
+import java.util.Base64;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.function.Function;
+import java.util.stream.Stream;
+import org.checkerframework.checker.nullness.qual.Nullable;
+
+final class GrpcConversions {
+  static final GrpcConversions INSTANCE = new GrpcConversions();
+
+  private final Codec<Acl.Entity, String> entityCodec =
+      Codec.of(this::entityEncode, this::entityDecode);
+  private final Codec<Acl, ObjectAccessControl> objectAclCodec =
+      Codec.of(this::objectAclEncode, this::objectAclDecode);
+  private final Codec<Acl, BucketAccessControl> bucketAclCodec =
+      Codec.of(this::bucketAclEncode, this::bucketAclDecode);
+  private final Codec<Cors, Bucket.Cors> corsCodec =
Codec.of(this::corsEncode, this::corsDecode); + private final Codec loggingCodec = + Codec.of(this::loggingEncode, this::loggingDecode); + private final Codec iamConfigurationCodec = + Codec.of(this::iamConfigEncode, this::iamConfigDecode); + private final Codec autoclassCodec = + Codec.of(this::autoclassEncode, this::autoclassDecode); + + private final Codec softDeletePolicyCodec = + Codec.of(this::softDeletePolicyEncode, this::softDeletePolicyDecode); + private final Codec lifecycleRuleCodec = + Codec.of(this::lifecycleRuleEncode, this::lifecycleRuleDecode); + private final Codec bucketInfoCodec = + Codec.of(this::bucketInfoEncode, this::bucketInfoDecode); + private final Codec + customerEncryptionCodec = + Codec.of(this::customerEncryptionEncode, this::customerEncryptionDecode); + private final Codec blobIdCodec = + Codec.of(this::blobIdEncode, this::blobIdDecode); + private final Codec blobInfoCodec = + Codec.of(this::blobInfoEncode, this::blobInfoDecode); + private final Codec policyCodec = + Codec.of(this::policyEncode, this::policyDecode); + private final Codec bindingCodec = + Codec.of(this::bindingEncode, this::bindingDecode); + private final Codec iamConditionCodec = + Codec.of(this::conditionEncode, this::conditionDecode); + + private final Codec + hierarchicalNamespaceCodec = + Codec.of(this::hierarchicalNamespaceEncode, this::hierarchicalNamespaceDecode); + + private final Codec ipFilterCodec = + Codec.of(this::ipFilterEncode, this::ipFilterDecode); + private final Codec + publicNetworkSourceCodec = + Codec.of(this::publicNetworkSourceEncode, this::publicNetworkSourceDecode); + private final Codec + vpcNetworkSourceCodec = Codec.of(this::vpcNetworkSourceEncode, this::vpcNetworkSourceDecode); + + private final Codec byteStringB64StringCodec = + Codec.of( + bs -> Base64.getEncoder().encodeToString(bs.toByteArray()), + s -> ByteString.copyFrom(Base64.getDecoder().decode(s.getBytes(StandardCharsets.UTF_8)))); + + private final Codec objectContextsCodec = + 
Codec.of(this::objectContextsEncode, this::objectContextsDecode); + private final Codec + customContextPayloadCodec = + Codec.of(this::objectCustomContextPayloadEncode, this::objectCustomContextPayloadDecode); + + @VisibleForTesting + final Codec timestampCodec = + Codec.of( + odt -> + Timestamp.newBuilder() + .setSeconds(odt.toEpochSecond()) + .setNanos(odt.getNano()) + .build(), + t -> + Instant.ofEpochSecond(t.getSeconds()) + .plusNanos(t.getNanos()) + .atOffset(ZoneOffset.UTC)); + + @VisibleForTesting + final Codec durationCodec = + Codec.of( + javaDuration -> + com.google.protobuf.Duration.newBuilder() + .setSeconds(javaDuration.getSeconds()) + .setNanos(javaDuration.getNano()) + .build(), + protoDuration -> + Duration.ofSeconds(protoDuration.getSeconds()).plusNanos(protoDuration.getNanos())); + + @VisibleForTesting + final Codec odtDateCodec = + Codec.of( + odt -> { + OffsetDateTime utc = odt.withOffsetSameInstant(ZoneOffset.UTC); + return Date.newBuilder() + .setYear(utc.getYear()) + .setMonth(utc.getMonthValue()) + .setDay(utc.getDayOfMonth()) + .build(); + }, + d -> + LocalDate.of(d.getYear(), d.getMonth(), d.getDay()) + .atStartOfDay() + .atOffset(ZoneOffset.UTC)); + + private final Codec + encryptionEnforcementRestrictionModeCodec = + Codec.of( + EncryptionEnforcementRestrictionMode::toString, + EncryptionEnforcementRestrictionMode::valueOf); + private final Codec< + GoogleManagedEncryptionEnforcementConfig, + Encryption.GoogleManagedEncryptionEnforcementConfig> + googleManagedEncryptionEnforcementConfigCodec = + Codec.of( + from -> { + Encryption.GoogleManagedEncryptionEnforcementConfig.Builder to = + Encryption.GoogleManagedEncryptionEnforcementConfig.newBuilder(); + ifNonNull( + from.getRestrictionMode(), + encryptionEnforcementRestrictionModeCodec::encode, + to::setRestrictionMode); + ifNonNull(from.getEffectiveTime(), timestampCodec::encode, to::setEffectiveTime); + return to.build(); + }, + from -> { + @Nullable EncryptionEnforcementRestrictionMode 
mode = null; + if (from.hasRestrictionMode()) { + mode = + encryptionEnforcementRestrictionModeCodec.decode(from.getRestrictionMode()); + } + if (from.hasEffectiveTime()) { + return GoogleManagedEncryptionEnforcementConfig.of( + mode, timestampCodec.decode(from.getEffectiveTime())); + } + return GoogleManagedEncryptionEnforcementConfig.of(mode); + }); + private final Codec< + CustomerManagedEncryptionEnforcementConfig, + Encryption.CustomerManagedEncryptionEnforcementConfig> + customerManagedEncryptionEnforcementConfigCodec = + Codec.of( + from -> { + Encryption.CustomerManagedEncryptionEnforcementConfig.Builder to = + Encryption.CustomerManagedEncryptionEnforcementConfig.newBuilder(); + ifNonNull( + from.getRestrictionMode(), + encryptionEnforcementRestrictionModeCodec::encode, + to::setRestrictionMode); + ifNonNull(from.getEffectiveTime(), timestampCodec::encode, to::setEffectiveTime); + return to.build(); + }, + from -> { + @Nullable EncryptionEnforcementRestrictionMode mode = null; + if (from.hasRestrictionMode()) { + mode = + encryptionEnforcementRestrictionModeCodec.decode(from.getRestrictionMode()); + } + if (from.hasEffectiveTime()) { + return CustomerManagedEncryptionEnforcementConfig.of( + mode, timestampCodec.decode(from.getEffectiveTime())); + } + return CustomerManagedEncryptionEnforcementConfig.of(mode); + }); + private final Codec< + CustomerSuppliedEncryptionEnforcementConfig, + Encryption.CustomerSuppliedEncryptionEnforcementConfig> + customerSuppliedEncryptionEnforcementConfigCodec = + Codec.of( + from -> { + Encryption.CustomerSuppliedEncryptionEnforcementConfig.Builder to = + Encryption.CustomerSuppliedEncryptionEnforcementConfig.newBuilder(); + ifNonNull( + from.getRestrictionMode(), + encryptionEnforcementRestrictionModeCodec::encode, + to::setRestrictionMode); + ifNonNull(from.getEffectiveTime(), timestampCodec::encode, to::setEffectiveTime); + return to.build(); + }, + from -> { + @Nullable EncryptionEnforcementRestrictionMode mode = null; + 
if (from.hasRestrictionMode()) {
+                mode =
+                    encryptionEnforcementRestrictionModeCodec.decode(from.getRestrictionMode());
+              }
+              if (from.hasEffectiveTime()) {
+                return CustomerSuppliedEncryptionEnforcementConfig.of(
+                    mode, timestampCodec.decode(from.getEffectiveTime()));
+              }
+              return CustomerSuppliedEncryptionEnforcementConfig.of(mode);
+            });
+
+  private GrpcConversions() {}
+
+  Codec<Entity, String> entity() {
+    return entityCodec;
+  }
+
+  Codec<Acl, ObjectAccessControl> objectAcl() {
+    return objectAclCodec;
+  }
+
+  Codec<Acl, BucketAccessControl> bucketAcl() {
+    return bucketAclCodec;
+  }
+
+  Codec<Cors, Bucket.Cors> cors() {
+    return corsCodec;
+  }
+
+  Codec<Logging, Bucket.Logging> logging() {
+    return loggingCodec;
+  }
+
+  Codec<BucketInfo.IamConfiguration, Bucket.IamConfig> iamConfiguration() {
+    return iamConfigurationCodec;
+  }
+
+  Codec<LifecycleRule, Bucket.Lifecycle.Rule> lifecycleRule() {
+    return lifecycleRuleCodec;
+  }
+
+  Codec<BucketInfo, Bucket> bucketInfo() {
+    return bucketInfoCodec;
+  }
+
+  Codec<CustomerEncryption, com.google.storage.v2.CustomerEncryption> customerEncryption() {
+    return customerEncryptionCodec;
+  }
+
+  Codec<BlobId, Object> blobId() {
+    return blobIdCodec;
+  }
+
+  Codec<BlobInfo, Object> blobInfo() {
+    return blobInfoCodec;
+  }
+
+  Codec<Policy, com.google.iam.v1.Policy> policyCodec() {
+    return policyCodec;
+  }
+
+  private BucketInfo bucketInfoDecode(Bucket from) {
+    BucketInfo.Builder to = new BucketInfo.BuilderImpl(bucketNameCodec.decode(from.getName()));
+    if (!from.getProject().isEmpty()) {
+      to.setProject(projectNumberResourceCodec.decode(from.getProject()));
+    }
+    to.setGeneratedId(from.getBucketId());
+    maybeDecodeRetentionPolicy(from, to);
+    ifNonNull(from.getLocation(), to::setLocation);
+    ifNonNull(from.getLocationType(), to::setLocationType);
+    ifNonNull(from.getMetageneration(), to::setMetageneration);
+    if (from.hasBilling()) {
+      Billing billing = from.getBilling();
+      to.setRequesterPays(billing.getRequesterPays());
+    }
+    if (from.hasCreateTime()) {
+      to.setCreateTimeOffsetDateTime(timestampCodec.decode(from.getCreateTime()));
+    }
+    if (from.hasUpdateTime()) {
+      to.setUpdateTimeOffsetDateTime(timestampCodec.decode(from.getUpdateTime()));
+    }
+    if (from.hasEncryption()) {
+      Encryption e = from.getEncryption();
+      if (!e.getDefaultKmsKey().isEmpty()) {
to.setDefaultKmsKeyName(e.getDefaultKmsKey()); + } + if (e.hasGoogleManagedEncryptionEnforcementConfig()) { + to.setGoogleManagedEncryptionEnforcementConfig( + googleManagedEncryptionEnforcementConfigCodec.decode( + e.getGoogleManagedEncryptionEnforcementConfig())); + } + if (e.hasCustomerManagedEncryptionEnforcementConfig()) { + to.setCustomerManagedEncryptionEnforcementConfig( + customerManagedEncryptionEnforcementConfigCodec.decode( + e.getCustomerManagedEncryptionEnforcementConfig())); + } + if (e.hasCustomerSuppliedEncryptionEnforcementConfig()) { + to.setCustomerSuppliedEncryptionEnforcementConfig( + customerSuppliedEncryptionEnforcementConfigCodec.decode( + e.getCustomerSuppliedEncryptionEnforcementConfig())); + } + } + if (!from.getRpo().isEmpty()) { + to.setRpo(Rpo.valueOf(from.getRpo())); + } + if (!from.getStorageClass().isEmpty()) { + to.setStorageClass(StorageClass.valueOf(from.getStorageClass())); + } + if (from.hasVersioning()) { + to.setVersioningEnabled(from.getVersioning().getEnabled()); + } + ifNonNull(from.getDefaultEventBasedHold(), to::setDefaultEventBasedHold); + Map labelsMap = from.getLabelsMap(); + if (!labelsMap.isEmpty()) { + to.setLabels(labelsMap); + } + if (from.hasWebsite()) { + Website website = from.getWebsite(); + String mainPageSuffix = website.getMainPageSuffix(); + if (!mainPageSuffix.isEmpty()) { + to.setIndexPage(mainPageSuffix); + } + String notFoundPage = website.getNotFoundPage(); + if (!notFoundPage.isEmpty()) { + to.setNotFoundPage(notFoundPage); + } + } + if (from.hasLifecycle()) { + to.setLifecycleRules( + toImmutableListOf(lifecycleRuleCodec::decode).apply(from.getLifecycle().getRuleList())); + } + List corsList = from.getCorsList(); + if (!corsList.isEmpty()) { + to.setCors(toImmutableListOf(corsCodec::decode).apply(corsList)); + } + if (from.hasLogging()) { + Bucket.Logging logging = from.getLogging(); + if (!logging.getLogBucket().isEmpty() || !logging.getLogObjectPrefix().isEmpty()) { + 
to.setLogging(loggingCodec.decode(logging)); + } + } + if (from.hasOwner()) { + to.setOwner(entityCodec.decode(from.getOwner().getEntity())); + } + + List defaultObjectAclList = from.getDefaultObjectAclList(); + if (!defaultObjectAclList.isEmpty()) { + to.setDefaultAcl(toImmutableListOf(objectAclCodec::decode).apply(defaultObjectAclList)); + } + List bucketAclList = from.getAclList(); + if (!bucketAclList.isEmpty()) { + to.setAcl(toImmutableListOf(bucketAclCodec::decode).apply(bucketAclList)); + } + if (from.hasIamConfig()) { + to.setIamConfiguration(iamConfigurationCodec.decode(from.getIamConfig())); + } + if (from.hasAutoclass()) { + to.setAutoclass(autoclassCodec.decode(from.getAutoclass())); + } + if (from.hasSoftDeletePolicy()) { + to.setSoftDeletePolicy(softDeletePolicyCodec.decode(from.getSoftDeletePolicy())); + } + if (from.hasCustomPlacementConfig()) { + Bucket.CustomPlacementConfig customPlacementConfig = from.getCustomPlacementConfig(); + to.setCustomPlacementConfig( + CustomPlacementConfig.newBuilder() + .setDataLocations(customPlacementConfig.getDataLocationsList()) + .build()); + } + if (from.hasHierarchicalNamespace()) { + to.setHierarchicalNamespace( + hierarchicalNamespaceCodec.decode(from.getHierarchicalNamespace())); + } + if (from.hasIpFilter()) { + to.setIpFilter(ipFilterCodec.decode(from.getIpFilter())); + } + if (!from.getEtag().isEmpty()) { + to.setEtag(from.getEtag()); + } + return to.build(); + } + + private Bucket bucketInfoEncode(BucketInfo from) { + Bucket.Builder to = Bucket.newBuilder(); + to.setName(bucketNameCodec.encode(from.getName())); + ifNonNull(from.getProject(), projectNumberResourceCodec::encode, to::setProject); + ifNonNull(from.getGeneratedId(), to::setBucketId); + maybeEncodeRetentionPolicy(from, to); + ifNonNull(from.getLocation(), to::setLocation); + ifNonNull(from.getLocationType(), to::setLocationType); + ifNonNull(from.getMetageneration(), to::setMetageneration); + if (from.requesterPays() != null) { + 
Bucket.Billing.Builder billingBuilder = Billing.newBuilder(); + ifNonNull(from.requesterPays(), billingBuilder::setRequesterPays); + to.setBilling(billingBuilder.build()); + } + ifNonNull(from.getCreateTimeOffsetDateTime(), timestampCodec::encode, to::setCreateTime); + ifNonNull(from.getUpdateTimeOffsetDateTime(), timestampCodec::encode, to::setUpdateTime); + if (Stream.of( + from.getDefaultKmsKeyName(), + from.getGoogleManagedEncryptionEnforcementConfig(), + from.getCustomerManagedEncryptionEnforcementConfig(), + from.getCustomerSuppliedEncryptionEnforcementConfig()) + .anyMatch(Objects::nonNull)) { + Bucket.Encryption.Builder encryptionBuilder = Bucket.Encryption.newBuilder(); + ifNonNull(from.getDefaultKmsKeyName(), encryptionBuilder::setDefaultKmsKey); + ifNonNull( + from.getGoogleManagedEncryptionEnforcementConfig(), + googleManagedEncryptionEnforcementConfigCodec::encode, + encryptionBuilder::setGoogleManagedEncryptionEnforcementConfig); + ifNonNull( + from.getCustomerManagedEncryptionEnforcementConfig(), + customerManagedEncryptionEnforcementConfigCodec::encode, + encryptionBuilder::setCustomerManagedEncryptionEnforcementConfig); + ifNonNull( + from.getCustomerSuppliedEncryptionEnforcementConfig(), + customerSuppliedEncryptionEnforcementConfigCodec::encode, + encryptionBuilder::setCustomerSuppliedEncryptionEnforcementConfig); + to.setEncryption(encryptionBuilder.build()); + } + if (from.getIndexPage() != null || from.getNotFoundPage() != null) { + Bucket.Website.Builder websiteBuilder = Bucket.Website.newBuilder(); + ifNonNull(from.getIndexPage(), websiteBuilder::setMainPageSuffix); + ifNonNull(from.getNotFoundPage(), websiteBuilder::setNotFoundPage); + to.setWebsite(websiteBuilder.build()); + } + ifNonNull(from.getRpo(), Rpo::toString, to::setRpo); + ifNonNull(from.getStorageClass(), StorageClass::toString, to::setStorageClass); + if (from.versioningEnabled() != null) { + Bucket.Versioning.Builder versioningBuilder = Bucket.Versioning.newBuilder(); + 
ifNonNull(from.versioningEnabled(), versioningBuilder::setEnabled); + to.setVersioning(versioningBuilder.build()); + } + ifNonNull(from.getDefaultEventBasedHold(), to::setDefaultEventBasedHold); + ifNonNull(from.getLabels(), this::removeNullValues, to::putAllLabels); + // Do not use, #getLifecycleRules, it can not return null, which is important to our logic here + List lifecycleRules = from.lifecycleRules; + if (lifecycleRules != null) { + Bucket.Lifecycle.Builder lifecycleBuilder = Bucket.Lifecycle.newBuilder(); + if (!lifecycleRules.isEmpty()) { + ImmutableSet set = + from.getLifecycleRules().stream() + .map(lifecycleRuleCodec::encode) + .collect(ImmutableSet.toImmutableSet()); + lifecycleBuilder.addAllRule(ImmutableList.copyOf(set)); + } + to.setLifecycle(lifecycleBuilder.build()); + } + + Logging logging = from.getLogging(); + if (logging != null) { + // an empty bucket name is invalid, don't even attempt to encode if neither name or prefix + // are both empty + if ((logging.getLogBucket() != null && !logging.getLogBucket().isEmpty()) + || (logging.getLogObjectPrefix() != null && !logging.getLogObjectPrefix().isEmpty())) { + to.setLogging(loggingCodec.encode(logging)); + } + } + ifNonNull(from.getCors(), toImmutableListOf(corsCodec::encode), to::addAllCors); + ifNonNull( + from.getOwner(), + lift(entity()::encode).andThen(o -> Owner.newBuilder().setEntity(o).build()), + to::setOwner); + ifNonNull( + from.getDefaultAcl(), + toImmutableListOf(objectAclCodec::encode), + to::addAllDefaultObjectAcl); + ifNonNull(from.getAcl(), toImmutableListOf(bucketAclCodec::encode), to::addAllAcl); + ifNonNull(from.getIamConfiguration(), iamConfigurationCodec::encode, to::setIamConfig); + ifNonNull(from.getAutoclass(), autoclassCodec::encode, to::setAutoclass); + ifNonNull(from.getSoftDeletePolicy(), softDeletePolicyCodec::encode, to::setSoftDeletePolicy); + if (from.getModifiedFields().contains(SOFT_DELETE_POLICY) + && from.getSoftDeletePolicy() == null) { + 
to.clearSoftDeletePolicy(); + } + CustomPlacementConfig customPlacementConfig = from.getCustomPlacementConfig(); + if (customPlacementConfig != null && customPlacementConfig.getDataLocations() != null) { + to.setCustomPlacementConfig( + Bucket.CustomPlacementConfig.newBuilder() + .addAllDataLocations(customPlacementConfig.getDataLocations()) + .build()); + } + ifNonNull( + from.getHierarchicalNamespace(), + hierarchicalNamespaceCodec::encode, + to::setHierarchicalNamespace); + ifNonNull(from.getIpFilter(), ipFilterCodec::encode, to::setIpFilter); + if (from.getModifiedFields().contains(IP_FILTER) && from.getIpFilter() == null) { + to.clearIpFilter(); + } + ifNonNull(from.getEtag(), to::setEtag); + return to.build(); + } + + private void maybeEncodeRetentionPolicy(BucketInfo from, Bucket.Builder to) { + if (from.getRetentionPeriodDuration() != null + || from.retentionPolicyIsLocked() != null + || from.getRetentionEffectiveTimeOffsetDateTime() != null) { + Bucket.RetentionPolicy.Builder retentionPolicyBuilder = to.getRetentionPolicyBuilder(); + ifNonNull( + from.getRetentionPeriodDuration(), + durationCodec::encode, + retentionPolicyBuilder::setRetentionDuration); + ifNonNull(from.retentionPolicyIsLocked(), retentionPolicyBuilder::setIsLocked); + ifNonNull( + from.getRetentionEffectiveTimeOffsetDateTime(), + timestampCodec::encode, + retentionPolicyBuilder::setEffectiveTime); + to.setRetentionPolicy(retentionPolicyBuilder.build()); + } + } + + private void maybeDecodeRetentionPolicy(Bucket from, BucketInfo.Builder to) { + if (from.hasRetentionPolicy()) { + Bucket.RetentionPolicy retentionPolicy = from.getRetentionPolicy(); + to.setRetentionPolicyIsLocked(retentionPolicy.getIsLocked()); + if (retentionPolicy.hasRetentionDuration()) { + to.setRetentionPeriodDuration(durationCodec.decode(retentionPolicy.getRetentionDuration())); + } + if (retentionPolicy.hasEffectiveTime()) { + to.setRetentionEffectiveTimeOffsetDateTime( + 
timestampCodec.decode(retentionPolicy.getEffectiveTime())); + } + } + } + + private Bucket.Logging loggingEncode(BucketInfo.Logging from) { + Bucket.Logging.Builder to = Bucket.Logging.newBuilder(); + if (from.getLogObjectPrefix() != null && !from.getLogObjectPrefix().isEmpty()) { + to.setLogObjectPrefix(from.getLogObjectPrefix()); + } + ifNonNull(from.getLogBucket(), bucketNameCodec::encode, to::setLogBucket); + return to.build(); + } + + private BucketInfo.Logging loggingDecode(Bucket.Logging from) { + BucketInfo.Logging.Builder to = BucketInfo.Logging.newBuilder(); + String logObjectPrefix = from.getLogObjectPrefix(); + if (!logObjectPrefix.isEmpty()) { + to.setLogObjectPrefix(logObjectPrefix); + } + String logBucket = from.getLogBucket(); + if (!logBucket.isEmpty()) { + to.setLogBucket(bucketNameCodec.decode(logBucket)); + } + return to.build(); + } + + private Bucket.Cors corsEncode(Cors from) { + Bucket.Cors.Builder to = Bucket.Cors.newBuilder(); + to.setMaxAgeSeconds(from.getMaxAgeSeconds()); + to.addAllResponseHeader(from.getResponseHeaders()); + ifNonNull(from.getMethods(), toImmutableListOf(java.lang.Object::toString), to::addAllMethod); + ifNonNull(from.getOrigins(), toImmutableListOf(java.lang.Object::toString), to::addAllOrigin); + return to.build(); + } + + private Cors corsDecode(Bucket.Cors from) { + Cors.Builder to = Cors.newBuilder().setMaxAgeSeconds(from.getMaxAgeSeconds()); + ifNonNull( + from.getMethodList(), + m -> + m.stream() + .map(String::toUpperCase) + .map(HttpMethod::valueOf) + .collect(ImmutableList.toImmutableList()), + to::setMethods); + ifNonNull(from.getOriginList(), toImmutableListOf(Cors.Origin::of), to::setOrigins); + to.setResponseHeaders(from.getResponseHeaderList()); + return to.build(); + } + + private String entityEncode(Acl.Entity from) { + if (from instanceof Acl.RawEntity) { + return from.getValue(); + } else if (from instanceof Acl.User) { + switch (from.getValue()) { + case Acl.User.ALL_AUTHENTICATED_USERS: + return 
Acl.User.ALL_AUTHENTICATED_USERS; + case Acl.User.ALL_USERS: + return Acl.User.ALL_USERS; + default: + break; + } + } + // intentionally not an else so that if the default is hit above it will fall through to here + return from.getType().name().toLowerCase() + "-" + from.getValue(); + } + + private Acl.Entity entityDecode(String from) { + if (from.startsWith("user-")) { + return new Acl.User(from.substring(5)); + } + if (from.equals(Acl.User.ALL_USERS)) { + return Acl.User.ofAllUsers(); + } + if (from.equals(Acl.User.ALL_AUTHENTICATED_USERS)) { + return Acl.User.ofAllAuthenticatedUsers(); + } + if (from.startsWith("group-")) { + return new Acl.Group(from.substring(6)); + } + if (from.startsWith("domain-")) { + return new Acl.Domain(from.substring(7)); + } + if (from.startsWith("project-")) { + int idx = from.indexOf('-', 8); + String team = from.substring(8, idx); + String projectId = from.substring(idx + 1); + return new Acl.Project(Acl.Project.ProjectRole.valueOf(team.toUpperCase()), projectId); + } + return new Acl.RawEntity(from); + } + + private Acl objectAclDecode(ObjectAccessControl from) { + Acl.Role role = Acl.Role.valueOf(from.getRole()); + Acl.Entity entity = entityCodec.decode(from.getEntity()); + Acl.Builder to = Acl.newBuilder(entity, role); + if (!from.getId().isEmpty()) { + to.setId(from.getId()); + } + if (!from.getEtag().isEmpty()) { + to.setEtag(from.getEtag()); + } + return to.build(); + } + + private ObjectAccessControl objectAclEncode(Acl from) { + ObjectAccessControl.Builder to = ObjectAccessControl.newBuilder(); + ifNonNull(from.getEntity(), entityCodec::encode, to::setEntity); + ifNonNull(from.getRole(), Role::name, to::setRole); + ifNonNull(from.getId(), to::setId); + ifNonNull(from.getEtag(), to::setEtag); + return to.build(); + } + + private Acl bucketAclDecode(com.google.storage.v2.BucketAccessControl from) { + Role role = Role.valueOf(from.getRole()); + Entity entity = entityDecode(from.getEntity()); + Acl.Builder to = 
Acl.newBuilder(entity, role).setId(from.getId()); + if (!from.getEtag().isEmpty()) { + to.setEtag(from.getEtag()); + } + return to.build(); + } + + private com.google.storage.v2.BucketAccessControl bucketAclEncode(Acl from) { + BucketAccessControl.Builder to = BucketAccessControl.newBuilder(); + ifNonNull(from.getEntity(), entityCodec::encode, to::setEntity); + ifNonNull(from.getRole(), Role::toString, to::setRole); + ifNonNull(from.getId(), to::setId); + ifNonNull(from.getEtag(), to::setEtag); + return to.build(); + } + + private Bucket.IamConfig.UniformBucketLevelAccess ublaEncode(BucketInfo.IamConfiguration from) { + Bucket.IamConfig.UniformBucketLevelAccess.Builder to = + Bucket.IamConfig.UniformBucketLevelAccess.newBuilder(); + ifNonNull(from.isUniformBucketLevelAccessEnabled(), to::setEnabled); + if (from.isUniformBucketLevelAccessEnabled() == Boolean.TRUE) { + ifNonNull( + from.getUniformBucketLevelAccessLockedTimeOffsetDateTime(), + timestampCodec::encode, + to::setLockTime); + } + return to.build(); + } + + private BucketInfo.Autoclass autoclassDecode(Bucket.Autoclass from) { + BucketInfo.Autoclass.Builder to = BucketInfo.Autoclass.newBuilder(); + to.setEnabled(from.getEnabled()); + ifNonNull(from.getToggleTime(), timestampCodec::decode, to::setToggleTime); + + String terminalStorageClass = from.getTerminalStorageClass(); + if (!terminalStorageClass.isEmpty()) { + to.setTerminalStorageClass(StorageClass.valueOf(terminalStorageClass)); + } + ifNonNull( + from.getTerminalStorageClassUpdateTime(), + timestampCodec::decode, + to::setTerminalStorageClassUpdateTime); + return to.build(); + } + + private Bucket.Autoclass autoclassEncode(BucketInfo.Autoclass from) { + Bucket.Autoclass.Builder to = Bucket.Autoclass.newBuilder(); + ifNonNull(from.getEnabled(), to::setEnabled); + ifNonNull(from.getToggleTime(), timestampCodec::encode, to::setToggleTime); + ifNonNull(from.getTerminalStorageClass(), StorageClass::toString, to::setTerminalStorageClass); + ifNonNull( + 
from.getTerminalStorageClassUpdateTime(), + timestampCodec::encode, + to::setTerminalStorageClassUpdateTime); + return to.build(); + } + + private BucketInfo.SoftDeletePolicy softDeletePolicyDecode(Bucket.SoftDeletePolicy from) { + BucketInfo.SoftDeletePolicy.Builder to = BucketInfo.SoftDeletePolicy.newBuilder(); + ifNonNull(from.getRetentionDuration(), durationCodec::decode, to::setRetentionDuration); + ifNonNull(from.getEffectiveTime(), timestampCodec::decode, to::setEffectiveTime); + return to.build(); + } + + private Bucket.SoftDeletePolicy softDeletePolicyEncode(BucketInfo.SoftDeletePolicy from) { + Bucket.SoftDeletePolicy.Builder to = Bucket.SoftDeletePolicy.newBuilder(); + ifNonNull(from.getRetentionDuration(), durationCodec::encode, to::setRetentionDuration); + return to.build(); + } + + private Bucket.HierarchicalNamespace hierarchicalNamespaceEncode( + BucketInfo.HierarchicalNamespace from) { + Bucket.HierarchicalNamespace.Builder to = Bucket.HierarchicalNamespace.newBuilder(); + ifNonNull(from.getEnabled(), to::setEnabled); + return to.build(); + } + + private BucketInfo.HierarchicalNamespace hierarchicalNamespaceDecode( + Bucket.HierarchicalNamespace from) { + BucketInfo.HierarchicalNamespace.Builder to = BucketInfo.HierarchicalNamespace.newBuilder(); + to.setEnabled(from.getEnabled()); + return to.build(); + } + + private Bucket.IamConfig iamConfigEncode(BucketInfo.IamConfiguration from) { + Bucket.IamConfig.Builder to = Bucket.IamConfig.newBuilder(); + to.setUniformBucketLevelAccess(ublaEncode(from)); + if (from.getPublicAccessPrevention() != null) { + ifNonNull(from.getPublicAccessPrevention().getValue(), to::setPublicAccessPrevention); + } + return to.build(); + } + + private BucketInfo.IamConfiguration iamConfigDecode(Bucket.IamConfig from) { + Bucket.IamConfig.UniformBucketLevelAccess ubla = from.getUniformBucketLevelAccess(); + + BucketInfo.IamConfiguration.Builder to = BucketInfo.IamConfiguration.newBuilder(); + ifNonNull(ubla.getEnabled(), 
to::setIsUniformBucketLevelAccessEnabled); + if (ubla.hasLockTime()) { + to.setUniformBucketLevelAccessLockedTimeOffsetDateTime( + timestampCodec.decode(ubla.getLockTime())); + } + if (!from.getPublicAccessPrevention().isEmpty()) { + to.setPublicAccessPrevention(PublicAccessPrevention.parse(from.getPublicAccessPrevention())); + } + return to.build(); + } + + private Bucket.Lifecycle.Rule lifecycleRuleEncode(BucketInfo.LifecycleRule from) { + Bucket.Lifecycle.Rule.Builder to = Bucket.Lifecycle.Rule.newBuilder(); + to.setAction(ruleActionEncode(from.getAction())); + to.setCondition(ruleConditionEncode(from.getCondition())); + return to.build(); + } + + private Bucket.Lifecycle.Rule.Condition ruleConditionEncode( + BucketInfo.LifecycleRule.LifecycleCondition from) { + Bucket.Lifecycle.Rule.Condition.Builder to = Bucket.Lifecycle.Rule.Condition.newBuilder(); + if (from.getAge() != null) { + to.setAgeDays(from.getAge()); + } + if (from.getIsLive() != null) { + to.setIsLive(from.getIsLive()); + } + if (from.getNumberOfNewerVersions() != null) { + to.setNumNewerVersions(from.getNumberOfNewerVersions()); + } + if (from.getDaysSinceNoncurrentTime() != null) { + to.setDaysSinceNoncurrentTime(from.getDaysSinceNoncurrentTime()); + } + if (from.getDaysSinceCustomTime() != null) { + to.setDaysSinceCustomTime(from.getDaysSinceCustomTime()); + } + ifNonNull(from.getCreatedBeforeOffsetDateTime(), odtDateCodec::encode, to::setCreatedBefore); + ifNonNull( + from.getNoncurrentTimeBeforeOffsetDateTime(), + odtDateCodec::encode, + to::setNoncurrentTimeBefore); + ifNonNull( + from.getCustomTimeBeforeOffsetDateTime(), odtDateCodec::encode, to::setCustomTimeBefore); + ifNonNull( + from.getMatchesStorageClass(), + toImmutableListOf(StorageClass::toString), + to::addAllMatchesStorageClass); + ifNonNull(from.getMatchesPrefix(), to::addAllMatchesPrefix); + ifNonNull(from.getMatchesSuffix(), to::addAllMatchesSuffix); + return to.build(); + } + + private Bucket.Lifecycle.Rule.Action 
ruleActionEncode( + BucketInfo.LifecycleRule.LifecycleAction from) { + Bucket.Lifecycle.Rule.Action.Builder to = + Bucket.Lifecycle.Rule.Action.newBuilder().setType(from.getActionType()); + if (from.getActionType().equals(BucketInfo.LifecycleRule.SetStorageClassLifecycleAction.TYPE)) { + to.setStorageClass( + ((BucketInfo.LifecycleRule.SetStorageClassLifecycleAction) from) + .getStorageClass() + .toString()); + } + return to.build(); + } + + private BucketInfo.LifecycleRule lifecycleRuleDecode(Bucket.Lifecycle.Rule from) { + BucketInfo.LifecycleRule.LifecycleAction lifecycleAction; + + Bucket.Lifecycle.Rule.Action action = from.getAction(); + + switch (action.getType()) { + case BucketInfo.LifecycleRule.DeleteLifecycleAction.TYPE: + lifecycleAction = BucketInfo.LifecycleRule.LifecycleAction.newDeleteAction(); + break; + case BucketInfo.LifecycleRule.SetStorageClassLifecycleAction.TYPE: + lifecycleAction = + BucketInfo.LifecycleRule.LifecycleAction.newSetStorageClassAction( + StorageClass.valueOf(action.getStorageClass())); + break; + case AbortIncompleteMPUAction.TYPE: + lifecycleAction = + BucketInfo.LifecycleRule.LifecycleAction.newAbortIncompleteMPUploadAction(); + break; + default: + BucketInfo.log.warning( + "The lifecycle action " + + action.getType() + + " is not supported by this version of the library. " + + "Attempting to update with this rule may cause errors. 
Please " + + "update to the latest version of google-cloud-storage."); + lifecycleAction = + BucketInfo.LifecycleRule.LifecycleAction.newLifecycleAction("Unknown action"); + } + + Bucket.Lifecycle.Rule.Condition condition = from.getCondition(); + + BucketInfo.LifecycleRule.LifecycleCondition.Builder conditionBuilder = + BucketInfo.LifecycleRule.LifecycleCondition.newBuilder(); + if (condition.hasAgeDays()) { + conditionBuilder.setAge(condition.getAgeDays()); + } + if (condition.hasCreatedBefore()) { + conditionBuilder.setCreatedBeforeOffsetDateTime( + odtDateCodec.nullable().decode(condition.getCreatedBefore())); + } + if (condition.hasIsLive()) { + conditionBuilder.setIsLive(condition.getIsLive()); + } + if (condition.hasNumNewerVersions()) { + conditionBuilder.setNumberOfNewerVersions(condition.getNumNewerVersions()); + } + if (condition.hasDaysSinceNoncurrentTime()) { + conditionBuilder.setDaysSinceNoncurrentTime(condition.getDaysSinceNoncurrentTime()); + } + if (condition.hasNoncurrentTimeBefore()) { + conditionBuilder.setNoncurrentTimeBeforeOffsetDateTime( + odtDateCodec.decode(condition.getNoncurrentTimeBefore())); + } + if (condition.hasCustomTimeBefore()) { + conditionBuilder.setCustomTimeBeforeOffsetDateTime( + odtDateCodec.decode(condition.getCustomTimeBefore())); + } + if (condition.hasDaysSinceCustomTime()) { + conditionBuilder.setDaysSinceCustomTime(condition.getDaysSinceCustomTime()); + } + if (!condition.getMatchesStorageClassList().isEmpty()) { + ImmutableList collect = + condition.getMatchesStorageClassList().stream() + .map(StorageClass::valueOf) + .collect(ImmutableList.toImmutableList()); + conditionBuilder.setMatchesStorageClass(collect); + } + if (!condition.getMatchesPrefixList().isEmpty()) { + conditionBuilder.setMatchesPrefix(condition.getMatchesPrefixList()); + } + if (!condition.getMatchesSuffixList().isEmpty()) { + conditionBuilder.setMatchesSuffix(condition.getMatchesSuffixList()); + } + return new 
BucketInfo.LifecycleRule(lifecycleAction, conditionBuilder.build()); + } + + private com.google.storage.v2.CustomerEncryption customerEncryptionEncode( + CustomerEncryption from) { + return com.google.storage.v2.CustomerEncryption.newBuilder() + .setEncryptionAlgorithm(from.getEncryptionAlgorithm()) + .setKeySha256Bytes(ByteString.copyFrom(BaseEncoding.base64().decode(from.getKeySha256()))) + .build(); + } + + private CustomerEncryption customerEncryptionDecode( + com.google.storage.v2.CustomerEncryption from) { + return new CustomerEncryption( + from.getEncryptionAlgorithm(), + BaseEncoding.base64().encode(from.getKeySha256Bytes().toByteArray())); + } + + private Object blobIdEncode(BlobId from) { + Object.Builder to = Object.newBuilder(); + ifNonNull(from.getBucket(), bucketNameCodec::encode, to::setBucket); + ifNonNull(from.getName(), to::setName); + ifNonNull(from.getGeneration(), to::setGeneration); + return to.build(); + } + + private BlobId blobIdDecode(Object from) { + String bucketName = bucketNameCodec.decode(from.getBucket()); + return BlobId.of(bucketName, from.getName(), from.getGeneration()); + } + + private Object blobInfoEncode(BlobInfo from) { + Object.Builder toBuilder = Object.newBuilder(); + ifNonNull(from.getBucket(), bucketNameCodec::encode, toBuilder::setBucket); + ifNonNull(from.getName(), toBuilder::setName); + ifNonNull(from.getGeneration(), toBuilder::setGeneration); + ifNonNull(from.getCacheControl(), toBuilder::setCacheControl); + ifNonNull(from.getSize(), toBuilder::setSize); + ifNonNull(from.getContentType(), toBuilder::setContentType); + ifNonNull(from.getContentEncoding(), toBuilder::setContentEncoding); + ifNonNull(from.getContentDisposition(), toBuilder::setContentDisposition); + ifNonNull(from.getContentLanguage(), toBuilder::setContentLanguage); + ifNonNull(from.getComponentCount(), toBuilder::setComponentCount); + if (from.getMd5() != null || from.getCrc32c() != null) { + ObjectChecksums.Builder objectChecksums = 
ObjectChecksums.newBuilder(); + if (from.getMd5() != null) { + objectChecksums.setMd5Hash( + ByteString.copyFrom(BaseEncoding.base64().decode(from.getMd5()))); + } + if (from.getCrc32c() != null) { + objectChecksums.setCrc32C(Utils.crc32cCodec.decode(from.getCrc32c())); + } + toBuilder.setChecksums(objectChecksums.build()); + } + ifNonNull(from.getMetageneration(), toBuilder::setMetageneration); + ifNonNull(from.getDeleteTimeOffsetDateTime(), timestampCodec::encode, toBuilder::setDeleteTime); + ifNonNull(from.getUpdateTimeOffsetDateTime(), timestampCodec::encode, toBuilder::setUpdateTime); + ifNonNull(from.getCreateTimeOffsetDateTime(), timestampCodec::encode, toBuilder::setCreateTime); + ifNonNull(from.getCustomTimeOffsetDateTime(), timestampCodec::encode, toBuilder::setCustomTime); + ifNonNull(from.getSoftDeleteTime(), timestampCodec::encode, toBuilder::setSoftDeleteTime); + ifNonNull(from.getHardDeleteTime(), timestampCodec::encode, toBuilder::setHardDeleteTime); + ifNonNull( + from.getCustomerEncryption(), + customerEncryptionCodec::encode, + toBuilder::setCustomerEncryption); + ifNonNull(from.getStorageClass(), StorageClass::toString, toBuilder::setStorageClass); + ifNonNull( + from.getTimeStorageClassUpdatedOffsetDateTime(), + timestampCodec::encode, + toBuilder::setUpdateStorageClassTime); + ifNonNull(from.getKmsKeyName(), this::removeKmsVersion, toBuilder::setKmsKey); + ifNonNull(from.getEventBasedHold(), toBuilder::setEventBasedHold); + ifNonNull(from.getTemporaryHold(), toBuilder::setTemporaryHold); + ifNonNull( + from.getRetentionExpirationTimeOffsetDateTime(), + timestampCodec::encode, + toBuilder::setRetentionExpireTime); + // TODO(sydmunro): Add Selflink when available + ifNonNull(from.getEtag(), toBuilder::setEtag); + Entity entity = from.getOwner(); + if (entity != null) { + toBuilder.setOwner(Owner.newBuilder().setEntity(entityEncode(entity)).build()); + } + ifNonNull(from.getMetadata(), this::removeNullValues, toBuilder::putAllMetadata); + 
ifNonNull(from.getAcl(), toImmutableListOf(objectAcl()::encode), toBuilder::addAllAcl); + ifNonNull(from.getContexts(), objectContextsCodec::encode, toBuilder::setContexts); + return toBuilder.build(); + } + + private BlobInfo blobInfoDecode(Object from) { + BlobInfo.Builder toBuilder = + BlobInfo.newBuilder( + BlobId.of( + bucketNameCodec.decode(from.getBucket()), from.getName(), from.getGeneration())); + ifNonNull(from.getCacheControl(), toBuilder::setCacheControl); + ifNonNull(from.getSize(), toBuilder::setSize); + ifNonNull(from.getContentType(), toBuilder::setContentType); + ifNonNull(from.getContentEncoding(), toBuilder::setContentEncoding); + ifNonNull(from.getContentDisposition(), toBuilder::setContentDisposition); + ifNonNull(from.getContentLanguage(), toBuilder::setContentLanguage); + ifNonNull(from.getComponentCount(), toBuilder::setComponentCount); + if (from.hasChecksums()) { + ObjectChecksums checksums = from.getChecksums(); + if (checksums.hasCrc32C()) { + toBuilder.setCrc32c(Utils.crc32cCodec.encode(checksums.getCrc32C())); + } + ByteString md5Hash = checksums.getMd5Hash(); + if (!md5Hash.isEmpty()) { + toBuilder.setMd5(BaseEncoding.base64().encode(md5Hash.toByteArray())); + } + } + ifNonNull(from.getMetageneration(), toBuilder::setMetageneration); + if (from.hasDeleteTime()) { + toBuilder.setDeleteTimeOffsetDateTime(timestampCodec.decode(from.getDeleteTime())); + } + if (from.hasUpdateTime()) { + toBuilder.setUpdateTimeOffsetDateTime(timestampCodec.decode(from.getUpdateTime())); + } + if (from.hasCreateTime()) { + toBuilder.setCreateTimeOffsetDateTime(timestampCodec.decode(from.getCreateTime())); + } + if (from.hasCustomTime()) { + toBuilder.setCustomTimeOffsetDateTime(timestampCodec.decode(from.getCustomTime())); + } + if (from.hasCustomerEncryption()) { + toBuilder.setCustomerEncryption(customerEncryptionCodec.decode(from.getCustomerEncryption())); + } + if (from.hasSoftDeleteTime()) { + 
toBuilder.setSoftDeleteTime(timestampCodec.decode(from.getSoftDeleteTime())); + } + if (from.hasHardDeleteTime()) { + toBuilder.setHardDeleteTime(timestampCodec.decode(from.getHardDeleteTime())); + } + String storageClass = from.getStorageClass(); + if (!storageClass.isEmpty()) { + toBuilder.setStorageClass(StorageClass.valueOf(storageClass)); + } + if (from.hasUpdateStorageClassTime()) { + toBuilder.setTimeStorageClassUpdatedOffsetDateTime( + timestampCodec.decode(from.getUpdateStorageClassTime())); + } + if (!from.getKmsKey().isEmpty()) { + toBuilder.setKmsKeyName(from.getKmsKey()); + } + if (from.hasEventBasedHold()) { + toBuilder.setEventBasedHold(from.getEventBasedHold()); + } + toBuilder.setTemporaryHold(from.getTemporaryHold()); + if (from.hasRetentionExpireTime()) { + toBuilder.setRetentionExpirationTimeOffsetDateTime( + timestampCodec.decode(from.getRetentionExpireTime())); + } + if (!from.getMetadataMap().isEmpty()) { + toBuilder.setMetadata(from.getMetadataMap()); + } + if (from.hasOwner()) { + Owner owner = from.getOwner(); + if (!owner.getEntity().isEmpty()) { + toBuilder.setOwner(entityDecode(owner.getEntity())); + } + } + if (!from.getEtag().isEmpty()) { + toBuilder.setEtag(from.getEtag()); + } + ifNonNull(from.getAclList(), toImmutableListOf(objectAcl()::decode), toBuilder::setAcl); + if (from.hasContexts()) { + toBuilder.setContexts(objectContextsCodec.decode(from.getContexts())); + } + return toBuilder.build(); + } + + private com.google.iam.v1.Policy policyEncode(Policy from) { + com.google.iam.v1.Policy.Builder to = com.google.iam.v1.Policy.newBuilder(); + ifNonNull(from.getEtag(), byteStringB64StringCodec::decode, to::setEtag); + ifNonNull(from.getVersion(), to::setVersion); + from.getBindingsList().stream().map(bindingCodec::encode).forEach(to::addBindings); + return to.build(); + } + + private Policy policyDecode(com.google.iam.v1.Policy from) { + Policy.Builder to = Policy.newBuilder(); + ByteString etag = from.getEtag(); + if 
(!etag.isEmpty()) { + to.setEtag(byteStringB64StringCodec.encode(etag)); + } + to.setVersion(from.getVersion()); + List bindingsList = from.getBindingsList(); + if (!bindingsList.isEmpty()) { + ImmutableList bindings = + bindingsList.stream().map(bindingCodec::decode).collect(ImmutableList.toImmutableList()); + to.setBindings(bindings); + } + return to.build(); + } + + private com.google.iam.v1.Binding bindingEncode(Binding from) { + com.google.iam.v1.Binding.Builder to = com.google.iam.v1.Binding.newBuilder(); + ifNonNull(from.getRole(), to::setRole); + ImmutableList members = from.getMembers(); + if (!members.isEmpty()) { + to.addAllMembers(members); + } + ifNonNull(from.getCondition(), iamConditionCodec::encode, to::setCondition); + return to.build(); + } + + private Binding bindingDecode(com.google.iam.v1.Binding from) { + Binding.Builder to = Binding.newBuilder(); + String role = from.getRole(); + if (!role.isEmpty()) { + to.setRole(role); + } + ProtocolStringList membersList = from.getMembersList(); + if (!membersList.isEmpty()) { + to.setMembers(membersList); + } + if (from.hasCondition()) { + to.setCondition(iamConditionCodec.decode(from.getCondition())); + } + return to.build(); + } + + private Expr conditionEncode(Condition from) { + Expr.Builder to = Expr.newBuilder(); + ifNonNull(from.getExpression(), to::setExpression); + ifNonNull(from.getTitle(), to::setTitle); + ifNonNull(from.getDescription(), to::setDescription); + // apiary doesn't have a "location" field like grpc does + return to.build(); + } + + private Condition conditionDecode(Expr from) { + Condition.Builder to = Condition.newBuilder(); + String expression = from.getExpression(); + if (!expression.isEmpty()) { + to.setExpression(expression); + } + String title = from.getTitle(); + if (!title.isEmpty()) { + to.setTitle(title); + } + String description = from.getDescription(); + if (!description.isEmpty()) { + to.setDescription(description); + } + return to.build(); + } + + private Map 
removeNullValues(Map from) { + Map to = new HashMap<>(from); + to.values().removeAll(Collections.singleton(null)); + return to; + } + + private String removeKmsVersion(String from) { + PathTemplate versionedKmsTemplate = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{version}"); + if (versionedKmsTemplate.matches(from)) { + Map res = versionedKmsTemplate.match(from); + return CryptoKeyName.format( + res.get("project"), res.get("location"), res.get("key_ring"), res.get("crypto_key")); + } + return from; + } + + private Bucket.IpFilter ipFilterEncode(IpFilter from) { + Bucket.IpFilter.Builder to = Bucket.IpFilter.newBuilder(); + ifNonNull(from.getMode(), to::setMode); + ifNonNull( + from.getPublicNetworkSource(), + publicNetworkSourceCodec::encode, + to::setPublicNetworkSource); + ifNonNull( + from.getVpcNetworkSources(), + toImmutableListOf(vpcNetworkSourceCodec::encode), + to::addAllVpcNetworkSources); + ifNonNull(from.getAllowCrossOrgVpcs(), to::setAllowCrossOrgVpcs); + ifNonNull(from.getAllowAllServiceAgentAccess(), to::setAllowAllServiceAgentAccess); + return to.build(); + } + + private IpFilter ipFilterDecode(Bucket.IpFilter from) { + IpFilter.Builder to = IpFilter.newBuilder(); + if (!from.getMode().isEmpty()) { + to.setMode(from.getMode()); + } + ifNonNull( + from.getPublicNetworkSource(), + publicNetworkSourceCodec::decode, + to::setPublicNetworkSource); + ifNonNull( + from.getVpcNetworkSourcesList(), + toImmutableListOf(vpcNetworkSourceCodec::decode), + to::setVpcNetworkSources); + ifNonNull(from.getAllowCrossOrgVpcs(), to::setAllowCrossOrgVpcs); + if (from.hasAllowAllServiceAgentAccess()) { + to.setAllowAllServiceAgentAccess(from.getAllowAllServiceAgentAccess()); + } + return to.build(); + } + + private PublicNetworkSource publicNetworkSourceEncode(IpFilter.PublicNetworkSource from) { + PublicNetworkSource.Builder to = PublicNetworkSource.newBuilder(); + 
ifNonNull(from.getAllowedIpCidrRanges(), to::addAllAllowedIpCidrRanges); + return to.build(); + } + + private IpFilter.PublicNetworkSource publicNetworkSourceDecode(PublicNetworkSource from) { + return IpFilter.PublicNetworkSource.of(from.getAllowedIpCidrRangesList()); + } + + private VpcNetworkSource vpcNetworkSourceEncode(IpFilter.VpcNetworkSource from) { + VpcNetworkSource.Builder to = VpcNetworkSource.newBuilder(); + ifNonNull(from.getNetwork(), to::setNetwork); + ifNonNull(from.getAllowedIpCidrRanges(), to::addAllAllowedIpCidrRanges); + return to.build(); + } + + private IpFilter.VpcNetworkSource vpcNetworkSourceDecode(VpcNetworkSource from) { + IpFilter.VpcNetworkSource.Builder to = IpFilter.VpcNetworkSource.newBuilder(); + if (from.hasNetwork()) { + to.setNetwork(from.getNetwork()); + } + if (!from.getAllowedIpCidrRangesList().isEmpty()) { + to.setAllowedIpCidrRanges(from.getAllowedIpCidrRangesList()); + } + return to.build(); + } + + private ObjectContexts objectContextsEncode(BlobInfo.ObjectContexts from) { + if (from == null) { + return null; + } + ObjectContexts.Builder to = ObjectContexts.newBuilder(); + if (from.getCustom() != null) { + to.putAllCustom( + Maps.transformValues( + Maps.filterValues(from.getCustom(), Objects::nonNull), + customContextPayloadCodec::encode)); + } + return to.build(); + } + + private BlobInfo.ObjectContexts objectContextsDecode(ObjectContexts from) { + return BlobInfo.ObjectContexts.newBuilder() + .setCustom(Maps.transformValues(from.getCustomMap(), customContextPayloadCodec::decode)) + .build(); + } + + private ObjectCustomContextPayload objectCustomContextPayloadEncode( + BlobInfo.ObjectCustomContextPayload from) { + ObjectCustomContextPayload.Builder to = ObjectCustomContextPayload.newBuilder(); + ifNonNull(from.getValue(), to::setValue); + ifNonNull(from.getCreateTime(), timestampCodec::encode, to::setCreateTime); + ifNonNull(from.getUpdateTime(), timestampCodec::encode, to::setUpdateTime); + return to.build(); + } + + 
private BlobInfo.ObjectCustomContextPayload objectCustomContextPayloadDecode( + ObjectCustomContextPayload from) { + BlobInfo.ObjectCustomContextPayload.Builder to = + BlobInfo.ObjectCustomContextPayload.newBuilder(); + to.setValue(from.getValue()); + + if (from.hasCreateTime()) { + to.setCreateTime(timestampCodec.decode(from.getCreateTime())); + } + if (from.hasUpdateTime()) { + to.setUpdateTime(timestampCodec.decode(from.getUpdateTime())); + } + return to.build(); + } + + /** + * Several properties are translating lists of one type to another. This convenience method allows + * specifying a mapping function and composing as part of an {@code #isNonNull} definition. + */ + private static Function, ImmutableList> toImmutableListOf( + Function f) { + return l -> l.stream().map(f).collect(ImmutableList.toImmutableList()); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcResumableSession.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcResumableSession.java new file mode 100644 index 000000000000..83664d95719f --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcResumableSession.java @@ -0,0 +1,118 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFutures; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.storage.v2.Object; +import com.google.storage.v2.QueryWriteStatusRequest; +import com.google.storage.v2.QueryWriteStatusResponse; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import java.util.concurrent.atomic.AtomicBoolean; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class GrpcResumableSession { + + private final RetrierWithAlg retrier; + private final ClientStreamingCallable writeCallable; + private final UnaryCallable + queryWriteStatusCallable; + private final ResumableWrite resumableWrite; + private final Hasher hasher; + + GrpcResumableSession( + RetrierWithAlg retrier, + ClientStreamingCallable writeCallable, + UnaryCallable queryWriteStatusCallable, + ResumableWrite resumableWrite, + Hasher hasher) { + this.retrier = retrier; + this.writeCallable = writeCallable; + this.queryWriteStatusCallable = queryWriteStatusCallable; + this.resumableWrite = resumableWrite; + this.hasher = hasher; + } + + ResumableOperationResult<@Nullable Object> query() { + QueryWriteStatusRequest.Builder b = + QueryWriteStatusRequest.newBuilder().setUploadId(resumableWrite.getRes().getUploadId()); + if (resumableWrite.getReq().hasCommonObjectRequestParams()) { + b.setCommonObjectRequestParams(resumableWrite.getReq().getCommonObjectRequestParams()); + } + QueryWriteStatusRequest req = b.build(); + try { + QueryWriteStatusResponse response = queryWriteStatusCallable.call(req); + if (response.hasResource()) { + return 
ResumableOperationResult.complete( + response.getResource(), response.getResource().getSize()); + } else { + return ResumableOperationResult.incremental(response.getPersistedSize()); + } + } catch (Exception e) { + throw StorageException.coalesce(e); + } + } + + ResumableOperationResult<@Nullable Object> put(RewindableContent content) { + AtomicBoolean dirty = new AtomicBoolean(false); + GrpcCallContext retryingCallContext = Retrying.newCallContext(); + BufferHandle handle = BufferHandle.allocate(ByteSizeConstants._2MiB); + + return retrier.run( + () -> { + if (dirty.getAndSet(true)) { + ResumableOperationResult<@Nullable Object> query = query(); + if (query.getObject() != null) { + return query; + } else { + handle.get().clear(); + content.rewindTo(query.getPersistedSize()); + } + } + WritableByteChannelSession session = + ResumableMedia.gapic() + .write() + .byteChannel(writeCallable.withDefaultCallContext(retryingCallContext)) + .setByteStringStrategy(ByteStringStrategy.copy()) + .setHasher(hasher) + .resumable() + .setFsyncEvery(false) + .buffered(handle) + .setStartAsync(ApiFutures.immediateFuture(resumableWrite)) + .build(); + + try (BufferedWritableByteChannel channel = session.open()) { + content.writeTo(channel); + } + + WriteObjectResponse response = session.getResult().get(); + if (response.hasResource()) { + return ResumableOperationResult.complete( + response.getResource(), response.getResource().getSize()); + } else { + return ResumableOperationResult.incremental(response.getPersistedSize()); + } + }, + Decoder.identity()); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcRetryAlgorithmManager.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcRetryAlgorithmManager.java new file mode 100644 index 000000000000..ef125fa4e22e --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcRetryAlgorithmManager.java @@ -0,0 +1,200 @@ +/* + * 
Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.common.base.MoreObjects; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.protobuf.ByteString; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.ComposeObjectRequest; +import com.google.storage.v2.CreateBucketRequest; +import com.google.storage.v2.DeleteBucketRequest; +import com.google.storage.v2.DeleteObjectRequest; +import com.google.storage.v2.GetBucketRequest; +import com.google.storage.v2.GetObjectRequest; +import com.google.storage.v2.ListBucketsRequest; +import com.google.storage.v2.ListObjectsRequest; +import com.google.storage.v2.LockBucketRetentionPolicyRequest; +import com.google.storage.v2.MoveObjectRequest; +import com.google.storage.v2.QueryWriteStatusRequest; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.RestoreObjectRequest; +import com.google.storage.v2.RewriteObjectRequest; +import com.google.storage.v2.StartResumableWriteRequest; +import com.google.storage.v2.UpdateBucketRequest; +import com.google.storage.v2.UpdateObjectRequest; +import com.google.storage.v2.WriteObjectRequest; +import java.io.Serializable; +import java.util.Objects; + +final class 
GrpcRetryAlgorithmManager implements Serializable { + + private static final long serialVersionUID = 3084833873820431477L; + final StorageRetryStrategy retryStrategy; + + GrpcRetryAlgorithmManager(StorageRetryStrategy retryStrategy) { + this.retryStrategy = retryStrategy; + } + + /** + * Some operations are inherently idempotent after they're started (Resumable uploads, rewrites) + * provide access to the idempotent {@link ResultRetryAlgorithm} for those uses. + */ + ResultRetryAlgorithm idempotent() { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getFor(ComposeObjectRequest req) { + return req.hasIfGenerationMatch() + ? retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getFor(CreateBucketRequest req) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getFor(DeleteBucketRequest req) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getFor(DeleteObjectRequest req) { + if (req.getGeneration() > 0 || req.hasIfGenerationMatch()) { + return retryStrategy.getIdempotentHandler(); + } + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getFor(GetBucketRequest req) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getFor(GetIamPolicyRequest req) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getFor(GetObjectRequest req) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getFor(RestoreObjectRequest req) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getFor(ListBucketsRequest req) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getFor(ListObjectsRequest req) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getFor(LockBucketRetentionPolicyRequest req) { + 
return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getFor(QueryWriteStatusRequest req) { + // unique upload Id, always idempotent + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getFor(ReadObjectRequest req) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getFor(RewriteObjectRequest req) { + return req.hasIfGenerationMatch() + ? retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getFor(MoveObjectRequest req) { + return req.hasIfGenerationMatch() + ? retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getFor(SetIamPolicyRequest req) { + if (req.getPolicy().getEtag().equals(ByteString.empty())) { + return retryStrategy.getNonidempotentHandler(); + } else { + return retryStrategy.getIdempotentHandler(); + } + } + + public ResultRetryAlgorithm getFor(StartResumableWriteRequest req) { + return req.getWriteObjectSpec().hasIfGenerationMatch() + ? retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getFor(TestIamPermissionsRequest req) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getFor(UpdateBucketRequest req) { + // TODO: account for acl "patch" + // TODO: etag + return req.hasIfMetagenerationMatch() + ? retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getFor(UpdateObjectRequest req) { + // TODO: account for acl "patch" + return req.hasIfMetagenerationMatch() + ? retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getFor(WriteObjectRequest req) { + return req.getWriteObjectSpec().hasIfGenerationMatch() + ? 
retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getFor(BidiWriteObjectRequest req) { + return req.getWriteObjectSpec().hasIfGenerationMatch() + ? retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof GrpcRetryAlgorithmManager)) { + return false; + } + GrpcRetryAlgorithmManager that = (GrpcRetryAlgorithmManager) o; + return Objects.equals(retryStrategy, that.retryStrategy); + } + + @Override + public int hashCode() { + return Objects.hashCode(retryStrategy); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("retryStrategy", retryStrategy).toString(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageImpl.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageImpl.java new file mode 100644 index 000000000000..b536340e9c23 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageImpl.java @@ -0,0 +1,2149 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._16MiB; +import static com.google.cloud.storage.ByteSizeConstants._1MiB; +import static com.google.cloud.storage.ByteSizeConstants._256KiB; +import static com.google.cloud.storage.CrossTransportUtils.fmtMethodName; +import static com.google.cloud.storage.CrossTransportUtils.throwHttpJsonOnly; +import static com.google.cloud.storage.StorageV2ProtoUtils.bucketAclEntityOrAltEq; +import static com.google.cloud.storage.StorageV2ProtoUtils.objectAclEntityOrAltEq; +import static com.google.cloud.storage.Utils.bucketNameCodec; +import static com.google.cloud.storage.Utils.ifNonNull; +import static com.google.cloud.storage.Utils.projectNameCodec; +import static com.google.common.base.MoreObjects.firstNonNull; +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.BetaApi; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.paging.AbstractPage; +import com.google.api.gax.paging.Page; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptions; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.NotFoundException; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.BaseService; +import com.google.cloud.Policy; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.BidiUploadState.AppendableUploadState; +import com.google.cloud.storage.BlobWriteSessionConfig.WriterFactory; +import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.GrpcUtils.ZeroCopyServerStreamingCallable; +import 
com.google.cloud.storage.HmacKey.HmacKeyMetadata; +import com.google.cloud.storage.HmacKey.HmacKeyState; +import com.google.cloud.storage.PostPolicyV4.PostConditionsV4; +import com.google.cloud.storage.PostPolicyV4.PostFieldsV4; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.Storage.ComposeRequest.SourceBlob; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.cloud.storage.UnifiedOpts.BucketListOpt; +import com.google.cloud.storage.UnifiedOpts.BucketSourceOpt; +import com.google.cloud.storage.UnifiedOpts.BucketTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Fields; +import com.google.cloud.storage.UnifiedOpts.Mapper; +import com.google.cloud.storage.UnifiedOpts.NamedField; +import com.google.cloud.storage.UnifiedOpts.ObjectListOpt; +import com.google.cloud.storage.UnifiedOpts.ObjectSourceOpt; +import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.cloud.storage.UnifiedOpts.ProjectId; +import com.google.cloud.storage.UnifiedOpts.UserProject; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Suppliers; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Streams; +import com.google.common.io.ByteStreams; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.storage.v2.AppendObjectSpec; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.BidiReadObjectSpec; +import com.google.storage.v2.BidiWriteObjectRequest; 
+import com.google.storage.v2.BucketAccessControl; +import com.google.storage.v2.ComposeObjectRequest; +import com.google.storage.v2.ComposeObjectRequest.SourceObject; +import com.google.storage.v2.CreateBucketRequest; +import com.google.storage.v2.DeleteBucketRequest; +import com.google.storage.v2.DeleteObjectRequest; +import com.google.storage.v2.GetBucketRequest; +import com.google.storage.v2.GetObjectRequest; +import com.google.storage.v2.ListBucketsRequest; +import com.google.storage.v2.ListObjectsRequest; +import com.google.storage.v2.ListObjectsResponse; +import com.google.storage.v2.LockBucketRetentionPolicyRequest; +import com.google.storage.v2.MoveObjectRequest; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectAccessControl; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; +import com.google.storage.v2.RestoreObjectRequest; +import com.google.storage.v2.RewriteObjectRequest; +import com.google.storage.v2.RewriteResponse; +import com.google.storage.v2.StorageClient; +import com.google.storage.v2.UpdateBucketRequest; +import com.google.storage.v2.UpdateObjectRequest; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import com.google.storage.v2.WriteObjectSpec; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.WritableByteChannel; +import java.nio.file.Files; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.Spliterator; +import 
java.util.Spliterators.AbstractSpliterator; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.function.UnaryOperator; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class GrpcStorageImpl extends BaseService + implements Storage, StorageInternal { + + private static final byte[] ZERO_BYTES = new byte[0]; + private static final Set READ_OPS = ImmutableSet.of(StandardOpenOption.READ); + private static final Set WRITE_OPS = + ImmutableSet.of( + StandardOpenOption.WRITE, + StandardOpenOption.CREATE, + StandardOpenOption.TRUNCATE_EXISTING); + private static final BucketSourceOption[] EMPTY_BUCKET_SOURCE_OPTIONS = new BucketSourceOption[0]; + + private static final Opts ALL_BLOB_FIELDS = + Opts.from(UnifiedOpts.fields(ImmutableSet.copyOf(BlobField.values()))); + private static final Opts ALL_BUCKET_FIELDS = + // todo: b/308194853 + Opts.from( + UnifiedOpts.fields( + Arrays.stream(BucketField.values()) + .filter(f -> !f.equals(BucketField.OBJECT_RETENTION)) + .collect(ImmutableSet.toImmutableSet()))); + + final StorageClient storageClient; + final StorageDataClient storageDataClient; + final ResponseContentLifecycleManager responseContentLifecycleManager; + final WriterFactory writerFactory; + final GrpcConversions codecs; + final GrpcRetryAlgorithmManager retryAlgorithmManager; + final SyntaxDecoders syntaxDecoders; + final Retrier retrier; + + // workaround for https://github.com/googleapis/java-storage/issues/1736 + private final Opts defaultOpts; + @Deprecated private final Supplier defaultProjectId; + + GrpcStorageImpl( + GrpcStorageOptions options, + StorageClient storageClient, + StorageDataClient 
storageDataClient, + ResponseContentLifecycleManager responseContentLifecycleManager, + WriterFactory writerFactory, + Retrier retrier, + Opts defaultOpts) { + super(options); + this.storageClient = storageClient; + this.storageDataClient = storageDataClient; + this.responseContentLifecycleManager = responseContentLifecycleManager; + this.writerFactory = writerFactory; + this.retrier = retrier; + this.defaultOpts = defaultOpts; + this.codecs = Conversions.grpc(); + this.retryAlgorithmManager = options.getRetryAlgorithmManager(); + this.syntaxDecoders = new SyntaxDecoders(); + this.defaultProjectId = Suppliers.memoize(() -> UnifiedOpts.projectId(options.getProjectId())); + } + + @Override + public void close() throws Exception { + try (StorageClient s = storageClient; + StorageDataClient ignore = storageDataClient) { + s.shutdownNow(); + java.time.Duration terminationAwaitDuration = getOptions().getTerminationAwaitDuration(); + s.awaitTermination(terminationAwaitDuration.toMillis(), TimeUnit.MILLISECONDS); + } + } + + @Override + public Bucket create(BucketInfo bucketInfo, BucketTargetOption... 
options) { + Opts opts = Opts.unwrap(options).resolveFrom(bucketInfo).prepend(defaultOpts); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + com.google.storage.v2.Bucket bucket = codecs.bucketInfo().encode(bucketInfo); + CreateBucketRequest.Builder builder = + CreateBucketRequest.newBuilder() + .setBucket(bucket) + .setBucketId(bucketInfo.getName()) + .setParent("projects/_"); + if (bucketInfo.getProject() == null) { + builder.getBucketBuilder().setProject(projectNameCodec.encode(getOptions().getProjectId())); + } + CreateBucketRequest req = opts.createBucketsRequest().apply(builder).build(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.createBucketCallable().call(req, merge), + syntaxDecoders.bucket); + } + + @Override + public Blob create(BlobInfo blobInfo, BlobTargetOption... options) { + return create(blobInfo, null, options); + } + + @Override + public Blob create(BlobInfo blobInfo, byte[] content, BlobTargetOption... options) { + content = firstNonNull(content, ZERO_BYTES); + return create(blobInfo, content, 0, content.length, options); + } + + @Override + public Blob create( + BlobInfo blobInfo, byte[] content, int offset, int length, BlobTargetOption... options) { + Opts opts = Opts.unwrap(options).resolveFrom(blobInfo); + return internalDirectUpload(blobInfo, opts, ByteBuffer.wrap(content, offset, length)) + .asBlob(this); + } + + @Override + public Blob create(BlobInfo blobInfo, InputStream content, BlobWriteOption... 
options) { + try { + requireNonNull(blobInfo, "blobInfo must be non null"); + InputStream inputStreamParam = firstNonNull(content, new ByteArrayInputStream(ZERO_BYTES)); + + Opts optsWithDefaults = Opts.unwrap(options).prepend(defaultOpts); + GrpcCallContext grpcCallContext = + optsWithDefaults.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + WriteObjectRequest req = getWriteObjectRequest(blobInfo, optsWithDefaults); + Hasher hasher = optsWithDefaults.getHasher(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + UnbufferedWritableByteChannelSession session = + ResumableMedia.gapic() + .write() + .byteChannel(storageClient.writeObjectCallable().withDefaultCallContext(merge)) + .setByteStringStrategy(ByteStringStrategy.noCopy()) + .setHasher(hasher) + .direct() + .unbuffered() + .setRequest(req) + .build(); + + try (UnbufferedWritableByteChannel c = session.open()) { + ByteStreams.copy(Channels.newChannel(inputStreamParam), c); + } + ApiFuture responseApiFuture = session.getResult(); + return this.getBlob(responseApiFuture); + } catch (IOException | ApiException e) { + throw StorageException.coalesce(e); + } + } + + @Override + public Blob createFrom(BlobInfo blobInfo, Path path, BlobWriteOption... options) + throws IOException { + return createFrom(blobInfo, path, _16MiB, options); + } + + @Override + public Blob createFrom(BlobInfo blobInfo, Path path, int bufferSize, BlobWriteOption... 
options) + throws IOException { + Opts opts = Opts.unwrap(options).resolveFrom(blobInfo).prepend(defaultOpts); + return internalCreateFrom(path, blobInfo, opts); + } + + @Override + public Blob internalCreateFrom(Path path, BlobInfo info, Opts opts) + throws IOException { + requireNonNull(path, "path must be non null"); + if (Files.isDirectory(path)) { + throw new StorageException(0, path + " is a directory"); + } + + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + WriteObjectRequest req = getWriteObjectRequest(info, opts); + + ClientStreamingCallable write = + storageClient.writeObjectCallable().withDefaultCallContext(grpcCallContext); + + ApiFuture start = startResumableWrite(grpcCallContext, req, opts); + ApiFuture session2 = + ApiFutures.transform( + start, + rw -> + ResumableSession.grpc( + retrier.withAlg(retryAlgorithmManager.idempotent()), + write, + storageClient.queryWriteStatusCallable(), + rw, + opts.getHasher()), + MoreExecutors.directExecutor()); + try { + GrpcResumableSession got = session2.get(); + ResumableOperationResult<@Nullable Object> put = got.put(RewindableContent.of(path)); + Object object = put.getObject(); + if (object == null) { + // if by some odd chance the put didn't get the Object, query for it + ResumableOperationResult<@Nullable Object> query = got.query(); + object = query.getObject(); + } + return codecs.blobInfo().decode(object).asBlob(this); + } catch (InterruptedException | ExecutionException e) { + throw StorageException.coalesce(e); + } + } + + @Override + public Blob createFrom(BlobInfo blobInfo, InputStream content, BlobWriteOption... options) + throws IOException { + return createFrom(blobInfo, content, _16MiB, options); + } + + @Override + public Blob createFrom( + BlobInfo blobInfo, InputStream in, int bufferSize, BlobWriteOption... 
options) + throws IOException { + requireNonNull(blobInfo, "blobInfo must be non null"); + + Opts opts = Opts.unwrap(options).resolveFrom(blobInfo).prepend(defaultOpts); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + WriteObjectRequest req = getWriteObjectRequest(blobInfo, opts); + + ApiFuture start = startResumableWrite(grpcCallContext, req, opts); + + BufferedWritableByteChannelSession session = + ResumableMedia.gapic() + .write() + .byteChannel( + storageClient.writeObjectCallable().withDefaultCallContext(grpcCallContext)) + .setHasher(opts.getHasher()) + .setByteStringStrategy(ByteStringStrategy.noCopy()) + .resumable() + .withRetryConfig(retrier.withAlg(retryAlgorithmManager.idempotent())) + .buffered(Buffers.allocateAligned(bufferSize, _256KiB)) + .setStartAsync(start) + .build(); + + // Specifically not in the try-with, so we don't close the provided stream + ReadableByteChannel src = + Channels.newChannel(firstNonNull(in, new ByteArrayInputStream(ZERO_BYTES))); + try (BufferedWritableByteChannel dst = session.open()) { + ByteStreams.copy(src, dst); + } catch (Exception e) { + throw StorageException.coalesce(e); + } + return getBlob(session.getResult()); + } + + @Override + public Bucket get(String bucket, BucketGetOption... options) { + Opts unwrap = Opts.unwrap(options); + return internalBucketGet(bucket, unwrap); + } + + @Override + public Bucket lockRetentionPolicy(BucketInfo bucket, BucketTargetOption... 
options) { + Opts opts = Opts.unwrap(options).resolveFrom(bucket).prepend(defaultOpts); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + LockBucketRetentionPolicyRequest.Builder builder = + LockBucketRetentionPolicyRequest.newBuilder() + .setBucket(bucketNameCodec.encode(bucket.getName())); + LockBucketRetentionPolicyRequest req = + opts.lockBucketRetentionPolicyRequest().apply(builder).build(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.lockBucketRetentionPolicyCallable().call(req, merge), + syntaxDecoders.bucket); + } + + @Override + public Blob get(String bucket, String blob, BlobGetOption... options) { + return get(BlobId.of(bucket, blob), options); + } + + @Override + public Blob get(BlobId blob, BlobGetOption... options) { + Opts unwrap = Opts.unwrap(options); + return internalBlobGet(blob, unwrap); + } + + @Override + public Blob get(BlobId blob) { + return get(blob, new BlobGetOption[0]); + } + + @Override + public Blob restore(BlobId blob, BlobRestoreOption... 
options) { + Opts unwrap = Opts.unwrap(options); + return internalObjectRestore(blob, unwrap); + } + + private Blob internalObjectRestore(BlobId blobId, Opts opts) { + Opts finalOpts = opts.prepend(defaultOpts).prepend(ALL_BLOB_FIELDS); + GrpcCallContext grpcCallContext = + finalOpts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + RestoreObjectRequest.Builder builder = + RestoreObjectRequest.newBuilder() + .setBucket(bucketNameCodec.encode(blobId.getBucket())) + .setObject(blobId.getName()); + ifNonNull(blobId.getGeneration(), builder::setGeneration); + RestoreObjectRequest req = finalOpts.restoreObjectRequest().apply(builder).build(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.restoreObjectCallable().call(req, merge), + resp -> { + BlobInfo tmp = codecs.blobInfo().decode(resp); + return finalOpts.clearBlobFields().decode(tmp).asBlob(this); + }); + } + + @Override + public Page list(BucketListOption... 
options) { + Opts opts = Opts.unwrap(options).prepend(defaultOpts).prepend(ALL_BUCKET_FIELDS); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + + ListBucketsRequest request = + defaultProjectId + .get() + .listBuckets() + .andThen(opts.listBucketsRequest()) + .apply(ListBucketsRequest.newBuilder()) + .build(); + + if (!request.getReturnPartialSuccess()) { + try { + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(request), + () -> storageClient.listBucketsPagedCallable().call(request, merge), + resp -> + new TransformingPageDecorator<>( + resp.getPage(), + syntaxDecoders.bucket.andThen(opts.clearBucketFields()), + retrier, + retryAlgorithmManager.getFor(request))); + } catch (Exception e) { + throw StorageException.coalesce(e); + } + } else { + try { + com.google.storage.v2.ListBucketsResponse response = listBuckets(grpcCallContext, request); + return new ListBucketsWithPartialSuccessPage(grpcCallContext, request, response, opts); + } catch (Exception e) { + throw StorageException.coalesce(e); + } + } + } + + @Override + public Page list(String bucket, BlobListOption... 
options) { + Opts opts = Opts.unwrap(options).prepend(defaultOpts).prepend(ALL_BLOB_FIELDS); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + ListObjectsRequest.Builder builder = + ListObjectsRequest.newBuilder().setParent(bucketNameCodec.encode(bucket)); + ListObjectsRequest req = opts.listObjectsRequest().apply(builder).build(); + try { + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.listObjectsCallable().call(req, merge), + resp -> new ListObjectsWithSyntheticDirectoriesPage(grpcCallContext, req, resp)); + } catch (Exception e) { + throw StorageException.coalesce(e); + } + } + + @Override + public Bucket update(BucketInfo bucketInfo, BucketTargetOption... options) { + Opts unwrap = Opts.unwrap(options); + if (bucketInfo.getModifiedFields().isEmpty()) { + return internalBucketGet(bucketInfo.getName(), unwrap.constrainTo(BucketSourceOpt.class)); + } + Opts opts = unwrap.resolveFrom(bucketInfo).prepend(defaultOpts); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + com.google.storage.v2.Bucket bucket = codecs.bucketInfo().encode(bucketInfo); + UpdateBucketRequest.Builder builder = + opts.updateBucketsRequest().apply(UpdateBucketRequest.newBuilder().setBucket(bucket)); + builder + .getUpdateMaskBuilder() + .addAllPaths( + bucketInfo.getModifiedFields().stream() + .map(NamedField::getGrpcName) + .collect(ImmutableList.toImmutableList())); + UpdateBucketRequest req = builder.build(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.updateBucketCallable().call(req, merge), + syntaxDecoders.bucket); + } + + @Override + public Blob update(BlobInfo blobInfo, BlobTargetOption... 
options) { + Opts unwrap = Opts.unwrap(options); + if (blobInfo.getModifiedFields().isEmpty()) { + return internalBlobGet(blobInfo.getBlobId(), unwrap.constrainTo(ObjectSourceOpt.class)); + } + Opts opts = unwrap.resolveFrom(blobInfo).prepend(defaultOpts); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + Object object = codecs.blobInfo().encode(blobInfo); + UpdateObjectRequest.Builder builder = + opts.updateObjectsRequest().apply(UpdateObjectRequest.newBuilder().setObject(object)); + builder + .getUpdateMaskBuilder() + .addAllPaths( + blobInfo.getModifiedFields().stream() + .map(NamedField::getGrpcName) + .collect(ImmutableList.toImmutableList())); + UpdateObjectRequest req = builder.build(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.updateObjectCallable().call(req, merge), + syntaxDecoders.blob); + } + + @Override + public Blob update(BlobInfo blobInfo) { + return update(blobInfo, new BlobTargetOption[0]); + } + + @Override + public boolean delete(String bucket, BucketSourceOption... options) { + Opts opts = Opts.unwrap(options).prepend(defaultOpts); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + DeleteBucketRequest.Builder builder = + DeleteBucketRequest.newBuilder().setName(bucketNameCodec.encode(bucket)); + DeleteBucketRequest req = opts.deleteBucketsRequest().apply(builder).build(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return Boolean.TRUE.equals( + retrier.run( + retryAlgorithmManager.getFor(req), + () -> { + try { + storageClient.deleteBucketCallable().call(req, merge); + return true; + } catch (NotFoundException e) { + return false; + } + }, + Decoder.identity())); + } + + @Override + public boolean delete(String bucket, String blob, BlobSourceOption... 
options) { + return delete(BlobId.of(bucket, blob), options); + } + + @Override + public boolean delete(BlobId blob, BlobSourceOption... options) { + Opts opts = Opts.unwrap(options); + try { + internalObjectDelete(blob, opts); + return true; + } catch (NotFoundException e) { + return false; + } catch (StorageException e) { + if (e.getCode() == 404) { + return false; + } + throw e; + } + } + + @Override + public boolean delete(BlobId blob) { + return delete(blob, new BlobSourceOption[0]); + } + + @Override + public Void internalObjectDelete(BlobId id, Opts opts) { + Opts finalOpts = opts.resolveFrom(id).prepend(defaultOpts); + GrpcCallContext grpcCallContext = + finalOpts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + DeleteObjectRequest.Builder builder = + DeleteObjectRequest.newBuilder() + .setBucket(bucketNameCodec.encode(id.getBucket())) + .setObject(id.getName()); + ifNonNull(id.getGeneration(), builder::setGeneration); + DeleteObjectRequest req = finalOpts.deleteObjectsRequest().apply(builder).build(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> { + storageClient.deleteObjectCallable().call(req, merge); + return null; + }, + Decoder.identity()); + } + + @Override + public Blob compose(ComposeRequest composeRequest) { + Opts opts = composeRequest.getTargetOpts().prepend(defaultOpts); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + ComposeObjectRequest.Builder builder = ComposeObjectRequest.newBuilder(); + composeRequest.getSourceBlobs().stream() + .map(src -> sourceObjectEncode(src)) + .forEach(builder::addSourceObjects); + final Object target = codecs.blobInfo().encode(composeRequest.getTarget()); + builder.setDestination(target); + ComposeObjectRequest req = opts.composeObjectsRequest().apply(builder).build(); + GrpcCallContext merge = Utils.merge(grpcCallContext, 
Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.composeObjectCallable().call(req, merge), + syntaxDecoders.blob); + } + + @Override + public CopyWriter copy(CopyRequest copyRequest) { + BlobId src = copyRequest.getSource(); + BlobInfo dst = copyRequest.getTarget(); + Opts srcOpts = + Opts.unwrap(copyRequest.getSourceOptions()) + .projectAsSource() + .resolveFrom(src) + .prepend(defaultOpts); + Opts dstOpts = + Opts.unwrap(copyRequest.getTargetOptions()).resolveFrom(dst).prepend(defaultOpts); + + Mapper requestBuilderMapper = + srcOpts.rewriteObjectsRequest().andThen(dstOpts.rewriteObjectsRequest()); + Mapper grpcCallContextMapper = + srcOpts.grpcMetadataMapper().andThen(dstOpts.grpcMetadataMapper()); + + Object srcProto = codecs.blobId().encode(src); + Object dstProto = codecs.blobInfo().encode(dst); + + RewriteObjectRequest.Builder b = + RewriteObjectRequest.newBuilder() + .setDestinationName(dstProto.getName()) + .setDestinationBucket(dstProto.getBucket()) + .setSourceBucket(srcProto.getBucket()) + .setSourceObject(srcProto.getName()); + + // according to the docs in the protos, it is illegal to populate the following fields, + // clear them out if they are set + // * destination_kms_key comes from dstOpts + // * destination_predefined_acl comes from dstOpts + // * if_*_match come from srcOpts and dstOpts + // * copy_source_encryption_* come from srcOpts + // * common_object_request_params come from dstOpts + Object cleanedDst = dstProto.toBuilder().clearName().clearBucket().clearKmsKey().build(); + // only set the destination if it is not equal to the default instance + // otherwise we will clobber default values populated in the gcs server side for the object + // metadata + if (!cleanedDst.equals(Object.getDefaultInstance())) { + b.setDestination(cleanedDst); + } + + if (src.getGeneration() != null) { + b.setSourceGeneration(src.getGeneration()); + } + + if (copyRequest.getMegabytesCopiedPerChunk() 
!= null) { + b.setMaxBytesRewrittenPerCall(copyRequest.getMegabytesCopiedPerChunk() * _1MiB); + } + + RewriteObjectRequest req = requestBuilderMapper.apply(b).build(); + GrpcCallContext grpcCallContext = grpcCallContextMapper.apply(GrpcCallContext.createDefault()); + UnaryCallable callable = + storageClient.rewriteObjectCallable().withDefaultCallContext(grpcCallContext); + GrpcCallContext retryContext = Retrying.newCallContext(); + RetrierWithAlg retrierWithAlg = retrier.withAlg(retryAlgorithmManager.idempotent()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> callable.call(req, retryContext), + (resp) -> new GapicCopyWriter(this, callable, retrierWithAlg, req, resp)); + } + + @Override + public byte[] readAllBytes(String bucket, String blob, BlobSourceOption... options) { + return readAllBytes(BlobId.of(bucket, blob), options); + } + + @Override + public byte[] readAllBytes(BlobId blob, BlobSourceOption... options) { + UnbufferedReadableByteChannelSession session = unbufferedReadSession(blob, options); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (UnbufferedReadableByteChannel r = session.open(); + WritableByteChannel w = Channels.newChannel(baos)) { + ByteStreams.copy(r, w); + } catch (ApiException | IOException e) { + throw StorageException.coalesce(e); + } + return baos.toByteArray(); + } + + @Override + public StorageBatch batch() { + return throwHttpJsonOnly("batch()"); + } + + @Override + public GrpcBlobReadChannel reader(String bucket, String blob, BlobSourceOption... options) { + return reader(BlobId.of(bucket, blob), options); + } + + @Override + public GrpcBlobReadChannel reader(BlobId blob, BlobSourceOption... 
options) { + Opts opts = Opts.unwrap(options).resolveFrom(blob).prepend(defaultOpts); + ReadObjectRequest request = getReadObjectRequest(blob, opts); + GrpcCallContext grpcCallContext = opts.grpcMetadataMapper().apply(Retrying.newCallContext()); + + return new GrpcBlobReadChannel( + readObjectCallable(grpcCallContext), + retrier, + retryAlgorithmManager.getFor(request), + request, + !opts.autoGzipDecompression()); + } + + @Override + public void downloadTo(BlobId blob, Path path, BlobSourceOption... options) { + + UnbufferedReadableByteChannelSession session = unbufferedReadSession(blob, options); + + try (UnbufferedReadableByteChannel r = session.open(); + WritableByteChannel w = Files.newByteChannel(path, WRITE_OPS)) { + ByteStreams.copy(r, w); + } catch (ApiException | IOException e) { + throw StorageException.coalesce(e); + } + } + + @Override + public void downloadTo(BlobId blob, OutputStream outputStream, BlobSourceOption... options) { + + UnbufferedReadableByteChannelSession session = unbufferedReadSession(blob, options); + + try (UnbufferedReadableByteChannel r = session.open(); + WritableByteChannel w = Channels.newChannel(outputStream)) { + ByteStreams.copy(r, w); + } catch (ApiException | IOException e) { + throw StorageException.coalesce(e); + } + } + + @Override + public GrpcBlobWriteChannel writer(BlobInfo blobInfo, BlobWriteOption... options) { + Opts opts = Opts.unwrap(options).resolveFrom(blobInfo).prepend(defaultOpts); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + WriteObjectRequest req = getWriteObjectRequest(blobInfo, opts); + Hasher hasher = opts.getHasher(); + // in JSON, the starting of the resumable session happens before the invocation of write can + // happen. Emulate the same thing here. + // 1. create the future + ApiFuture startResumableWrite = startResumableWrite(grpcCallContext, req, opts); + // 2. 
await the result of the future + ResumableWrite resumableWrite = ApiFutureUtils.await(startResumableWrite); + // 3. wrap the result in another future container before constructing the BlobWriteChannel + ApiFuture wrapped = ApiFutures.immediateFuture(resumableWrite); + return new GrpcBlobWriteChannel( + storageClient.writeObjectCallable().withDefaultCallContext(grpcCallContext), + retrier.withAlg(retryAlgorithmManager.idempotent()), + () -> wrapped, + hasher); + } + + @Override + public BlobInfo internalDirectUpload( + BlobInfo blobInfo, Opts opts, ByteBuffer buf) { + requireNonNull(blobInfo, "blobInfo must be non null"); + requireNonNull(buf, "content must be non null"); + Opts optsWithDefaults = opts.prepend(defaultOpts); + GrpcCallContext grpcCallContext = + optsWithDefaults.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + WriteObjectRequest req = getWriteObjectRequest(blobInfo, optsWithDefaults); + Hasher hasher = opts.getHasher(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + RewindableContent content = RewindableContent.of(buf); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> { + content.rewindTo(0); + UnbufferedWritableByteChannelSession session = + ResumableMedia.gapic() + .write() + .byteChannel(storageClient.writeObjectCallable().withDefaultCallContext(merge)) + .setByteStringStrategy(ByteStringStrategy.noCopy()) + .setHasher(hasher) + .direct() + .unbuffered() + .setRequest(req) + .build(); + + try (UnbufferedWritableByteChannel c = session.open()) { + content.writeTo(c); + } + return session.getResult(); + }, + this::getBlob); + } + + @Override + public WriteChannel writer(URL signedURL) { + return throwHttpJsonOnly(fmtMethodName("writer", URL.class)); + } + + @Override + public URL signUrl(BlobInfo blobInfo, long duration, TimeUnit unit, SignUrlOption... 
options) { + return throwHttpJsonOnly( + fmtMethodName("signUrl", BlobInfo.class, long.class, TimeUnit.class, SignUrlOption.class)); + } + + @Override + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostFieldsV4 fields, + PostConditionsV4 conditions, + PostPolicyV4Option... options) { + return throwHttpJsonOnly( + fmtMethodName( + "generateSignedPostPolicyV4", + BlobInfo.class, + long.class, + TimeUnit.class, + PostFieldsV4.class, + PostConditionsV4.class, + PostPolicyV4Option.class)); + } + + @Override + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostFieldsV4 fields, + PostPolicyV4Option... options) { + return throwHttpJsonOnly( + fmtMethodName( + "generateSignedPostPolicyV4", + BlobInfo.class, + long.class, + TimeUnit.class, + PostFieldsV4.class, + PostPolicyV4Option.class)); + } + + @Override + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostConditionsV4 conditions, + PostPolicyV4Option... options) { + return throwHttpJsonOnly( + fmtMethodName( + "generateSignedPostPolicyV4", + BlobInfo.class, + long.class, + TimeUnit.class, + PostConditionsV4.class, + PostPolicyV4Option.class)); + } + + @Override + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, long duration, TimeUnit unit, PostPolicyV4Option... options) { + return throwHttpJsonOnly( + fmtMethodName( + "generateSignedPostPolicyV4", + BlobInfo.class, + long.class, + TimeUnit.class, + PostPolicyV4Option.class)); + } + + @Override + public List get(BlobId... blobIds) { + return throwHttpJsonOnly(fmtMethodName("get", BlobId[].class)); + } + + @Override + public List get(Iterable blobIds) { + return throwHttpJsonOnly(fmtMethodName("get", Iterable.class)); + } + + @Override + public List update(BlobInfo... 
blobInfos) { + return throwHttpJsonOnly(fmtMethodName("update", BlobInfo[].class)); + } + + @Override + public List update(Iterable blobInfos) { + return throwHttpJsonOnly(fmtMethodName("update", Iterable.class)); + } + + @Override + public List delete(BlobId... blobIds) { + return throwHttpJsonOnly(fmtMethodName("delete", BlobId[].class)); + } + + @Override + public List delete(Iterable blobIds) { + return throwHttpJsonOnly(fmtMethodName("delete", Iterable.class)); + } + + @Override + public Acl getAcl(String bucket, Entity entity, BucketSourceOption... options) { + try { + Opts opts = Opts.unwrap(options).prepend(defaultOpts); + com.google.storage.v2.Bucket resp = getBucketWithAcls(bucket, opts); + + Predicate entityPredicate = + bucketAclEntityOrAltEq(codecs.entity().encode(entity)); + + Optional first = + resp.getAclList().stream().filter(entityPredicate).findFirst(); + + // HttpStorageRpc defaults to null if Not Found + return first.map(codecs.bucketAcl()::decode).orElse(null); + } catch (NotFoundException e) { + return null; + } catch (StorageException se) { + if (se.getCode() == 404) { + return null; + } else { + throw se; + } + } + } + + @Override + public Acl getAcl(String bucket, Entity entity) { + return getAcl(bucket, entity, EMPTY_BUCKET_SOURCE_OPTIONS); + } + + @Override + public boolean deleteAcl(String bucket, Entity entity, BucketSourceOption... 
options) { + try { + Opts opts = Opts.unwrap(options).prepend(defaultOpts); + com.google.storage.v2.Bucket resp = getBucketWithAcls(bucket, opts); + String encode = codecs.entity().encode(entity); + + Predicate entityPredicate = bucketAclEntityOrAltEq(encode); + + List currentAcls = resp.getAclList(); + ImmutableList newAcls = + currentAcls.stream() + .filter(entityPredicate.negate()) + .collect(ImmutableList.toImmutableList()); + if (newAcls.equals(currentAcls)) { + // we didn't actually filter anything out, no need to send an RPC, simply return false + return false; + } + long metageneration = resp.getMetageneration(); + + UpdateBucketRequest req = createUpdateBucketAclRequest(bucket, newAcls, metageneration); + + com.google.storage.v2.Bucket updateResult = updateBucket(req); + // read the response to ensure there is no longer an acl for the specified entity + Optional first = + updateResult.getAclList().stream().filter(entityPredicate).findFirst(); + return !first.isPresent(); + } catch (NotFoundException e) { + // HttpStorageRpc returns false if the bucket doesn't exist :( + return false; + } catch (StorageException se) { + if (se.getCode() == 404) { + return false; + } else { + throw se; + } + } + } + + @Override + public boolean deleteAcl(String bucket, Entity entity) { + return deleteAcl(bucket, entity, EMPTY_BUCKET_SOURCE_OPTIONS); + } + + @Override + public Acl createAcl(String bucket, Acl acl, BucketSourceOption... options) { + return updateAcl(bucket, acl, options); + } + + @Override + public Acl createAcl(String bucket, Acl acl) { + return createAcl(bucket, acl, EMPTY_BUCKET_SOURCE_OPTIONS); + } + + @Override + public Acl updateAcl(String bucket, Acl acl, BucketSourceOption... 
options) { + try { + Opts opts = Opts.unwrap(options).prepend(defaultOpts); + com.google.storage.v2.Bucket resp = getBucketWithAcls(bucket, opts); + BucketAccessControl encode = codecs.bucketAcl().encode(acl); + String entity = encode.getEntity(); + + Predicate entityPredicate = bucketAclEntityOrAltEq(entity); + + ImmutableList newDefaultAcls = + Streams.concat( + resp.getAclList().stream().filter(entityPredicate.negate()), Stream.of(encode)) + .collect(ImmutableList.toImmutableList()); + + UpdateBucketRequest req = + createUpdateBucketAclRequest(bucket, newDefaultAcls, resp.getMetageneration()); + + com.google.storage.v2.Bucket updateResult = updateBucket(req); + + Optional first = + updateResult.getAclList().stream() + .filter(entityPredicate) + .findFirst() + .map(codecs.bucketAcl()::decode); + + return first.orElseThrow( + () -> new StorageException(0, "Acl update call success, but not in response")); + } catch (NotFoundException e) { + throw StorageException.coalesce(e); + } + } + + @Override + public Acl updateAcl(String bucket, Acl acl) { + return updateAcl(bucket, acl, EMPTY_BUCKET_SOURCE_OPTIONS); + } + + @Override + public List listAcls(String bucket, BucketSourceOption... 
options) { + try { + Opts opts = Opts.unwrap(options).prepend(defaultOpts); + com.google.storage.v2.Bucket resp = getBucketWithAcls(bucket, opts); + return resp.getAclList().stream() + .map(codecs.bucketAcl()::decode) + .collect(ImmutableList.toImmutableList()); + } catch (NotFoundException e) { + throw StorageException.coalesce(e); + } + } + + @Override + public List listAcls(String bucket) { + return listAcls(bucket, EMPTY_BUCKET_SOURCE_OPTIONS); + } + + @Override + public Acl getDefaultAcl(String bucket, Entity entity) { + try { + com.google.storage.v2.Bucket resp = getBucketWithDefaultAcls(bucket); + + Predicate entityPredicate = + objectAclEntityOrAltEq(codecs.entity().encode(entity)); + + Optional first = + resp.getDefaultObjectAclList().stream().filter(entityPredicate).findFirst(); + + // HttpStorageRpc defaults to null if Not Found + return first.map(codecs.objectAcl()::decode).orElse(null); + } catch (NotFoundException e) { + return null; + } catch (StorageException se) { + if (se.getCode() == 404) { + return null; + } else { + throw se; + } + } + } + + @Override + public boolean deleteDefaultAcl(String bucket, Entity entity) { + try { + com.google.storage.v2.Bucket resp = getBucketWithDefaultAcls(bucket); + String encode = codecs.entity().encode(entity); + + Predicate entityPredicate = objectAclEntityOrAltEq(encode); + + List currentDefaultAcls = resp.getDefaultObjectAclList(); + ImmutableList newDefaultAcls = + currentDefaultAcls.stream() + .filter(entityPredicate.negate()) + .collect(ImmutableList.toImmutableList()); + if (newDefaultAcls.equals(currentDefaultAcls)) { + // we didn't actually filter anything out, no need to send an RPC, simply return false + return false; + } + long metageneration = resp.getMetageneration(); + + UpdateBucketRequest req = + createUpdateDefaultAclRequest(bucket, newDefaultAcls, metageneration); + + com.google.storage.v2.Bucket updateResult = updateBucket(req); + // read the response to ensure there is no longer an acl for 
the specified entity + Optional first = + updateResult.getDefaultObjectAclList().stream().filter(entityPredicate).findFirst(); + return !first.isPresent(); + } catch (NotFoundException e) { + // HttpStorageRpc returns false if the bucket doesn't exist :( + return false; + } catch (StorageException se) { + if (se.getCode() == 404) { + return false; + } else { + throw se; + } + } + } + + @Override + public Acl createDefaultAcl(String bucket, Acl acl) { + return updateDefaultAcl(bucket, acl); + } + + @Override + public Acl updateDefaultAcl(String bucket, Acl acl) { + try { + com.google.storage.v2.Bucket resp = getBucketWithDefaultAcls(bucket); + ObjectAccessControl encode = codecs.objectAcl().encode(acl); + String entity = encode.getEntity(); + + Predicate entityPredicate = objectAclEntityOrAltEq(entity); + + ImmutableList newDefaultAcls = + Streams.concat( + resp.getDefaultObjectAclList().stream().filter(entityPredicate.negate()), + Stream.of(encode)) + .collect(ImmutableList.toImmutableList()); + + UpdateBucketRequest req = + createUpdateDefaultAclRequest(bucket, newDefaultAcls, resp.getMetageneration()); + + com.google.storage.v2.Bucket updateResult = updateBucket(req); + + Optional first = + updateResult.getDefaultObjectAclList().stream() + .filter(entityPredicate) + .findFirst() + .map(codecs.objectAcl()::decode); + + return first.orElseThrow( + () -> new StorageException(0, "Acl update call success, but not in response")); + } catch (NotFoundException e) { + throw StorageException.coalesce(e); + } + } + + @Override + public List listDefaultAcls(String bucket) { + try { + com.google.storage.v2.Bucket resp = getBucketWithDefaultAcls(bucket); + return resp.getDefaultObjectAclList().stream() + .map(codecs.objectAcl()::decode) + .collect(ImmutableList.toImmutableList()); + } catch (NotFoundException e) { + throw StorageException.coalesce(e); + } + } + + @Override + public Acl getAcl(BlobId blob, Entity entity) { + try { + Object req = codecs.blobId().encode(blob); + 
Object resp = getObjectWithAcls(req); + + Predicate entityPredicate = + objectAclEntityOrAltEq(codecs.entity().encode(entity)); + + Optional first = + resp.getAclList().stream().filter(entityPredicate).findFirst(); + + // HttpStorageRpc defaults to null if Not Found + return first.map(codecs.objectAcl()::decode).orElse(null); + } catch (NotFoundException e) { + return null; + } catch (StorageException se) { + if (se.getCode() == 404) { + return null; + } else { + throw se; + } + } + } + + @Override + public boolean deleteAcl(BlobId blob, Entity entity) { + try { + Object obj = codecs.blobId().encode(blob); + Object resp = getObjectWithAcls(obj); + String encode = codecs.entity().encode(entity); + + Predicate entityPredicate = objectAclEntityOrAltEq(encode); + + List currentDefaultAcls = resp.getAclList(); + ImmutableList newDefaultAcls = + currentDefaultAcls.stream() + .filter(entityPredicate.negate()) + .collect(ImmutableList.toImmutableList()); + if (newDefaultAcls.equals(currentDefaultAcls)) { + // we didn't actually filter anything out, no need to send an RPC, simply return false + return false; + } + long metageneration = resp.getMetageneration(); + + UpdateObjectRequest req = createUpdateObjectAclRequest(obj, newDefaultAcls, metageneration); + + Object updateResult = updateObject(req); + // read the response to ensure there is no longer an acl for the specified entity + Optional first = + updateResult.getAclList().stream().filter(entityPredicate).findFirst(); + return !first.isPresent(); + } catch (NotFoundException e) { + // HttpStorageRpc returns false if the bucket doesn't exist :( + return false; + } catch (StorageException se) { + if (se.getCode() == 404) { + return false; + } else { + throw se; + } + } + } + + @Override + public Acl createAcl(BlobId blob, Acl acl) { + return updateAcl(blob, acl); + } + + @Override + public Acl updateAcl(BlobId blob, Acl acl) { + try { + Object obj = codecs.blobId().encode(blob); + Object resp = getObjectWithAcls(obj); + 
ObjectAccessControl encode = codecs.objectAcl().encode(acl); + String entity = encode.getEntity(); + + Predicate entityPredicate = objectAclEntityOrAltEq(entity); + + ImmutableList newDefaultAcls = + Streams.concat( + resp.getAclList().stream().filter(entityPredicate.negate()), Stream.of(encode)) + .collect(ImmutableList.toImmutableList()); + + UpdateObjectRequest req = + createUpdateObjectAclRequest(obj, newDefaultAcls, resp.getMetageneration()); + + Object updateResult = updateObject(req); + + Optional first = + updateResult.getAclList().stream() + .filter(entityPredicate) + .findFirst() + .map(codecs.objectAcl()::decode); + + return first.orElseThrow( + () -> new StorageException(0, "Acl update call success, but not in response")); + } catch (NotFoundException e) { + throw StorageException.coalesce(e); + } + } + + @Override + public List listAcls(BlobId blob) { + try { + Object req = codecs.blobId().encode(blob); + Object resp = getObjectWithAcls(req); + return resp.getAclList().stream() + .map(codecs.objectAcl()::decode) + .collect(ImmutableList.toImmutableList()); + } catch (NotFoundException e) { + throw StorageException.coalesce(e); + } + } + + @Override + public HmacKey createHmacKey(ServiceAccount serviceAccount, CreateHmacKeyOption... options) { + return CrossTransportUtils.throwHttpJsonOnly(Storage.class, "createHmacKey"); + } + + @Override + public Page listHmacKeys(ListHmacKeysOption... options) { + return CrossTransportUtils.throwHttpJsonOnly(Storage.class, "listHmacKey"); + } + + @Override + public HmacKeyMetadata getHmacKey(String accessId, GetHmacKeyOption... options) { + return CrossTransportUtils.throwHttpJsonOnly(Storage.class, "getHmacKey"); + } + + @Override + public void deleteHmacKey(HmacKeyMetadata hmacKeyMetadata, DeleteHmacKeyOption... 
options) { + CrossTransportUtils.throwHttpJsonOnly(Storage.class, "deleteHmacKey"); + } + + @Override + public HmacKeyMetadata updateHmacKeyState( + HmacKeyMetadata hmacKeyMetadata, HmacKeyState state, UpdateHmacKeyOption... options) { + return CrossTransportUtils.throwHttpJsonOnly(Storage.class, "updateHmacKeyState"); + } + + @Override + public Policy getIamPolicy(String bucket, BucketSourceOption... options) { + Opts opts = Opts.unwrap(options).prepend(defaultOpts); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + GetIamPolicyRequest.Builder builder = + GetIamPolicyRequest.newBuilder().setResource(bucketNameCodec.encode(bucket)); + GetIamPolicyRequest req = opts.getIamPolicyRequest().apply(builder).build(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.getIamPolicyCallable().call(req, merge), + codecs.policyCodec()); + } + + @Override + public Policy setIamPolicy(String bucket, Policy policy, BucketSourceOption... options) { + Opts opts = Opts.unwrap(options).prepend(defaultOpts); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + SetIamPolicyRequest req = + SetIamPolicyRequest.newBuilder() + .setResource(bucketNameCodec.encode(bucket)) + .setPolicy(codecs.policyCodec().encode(policy)) + .build(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.setIamPolicyCallable().call(req, merge), + codecs.policyCodec()); + } + + @Override + public List testIamPermissions( + String bucket, List permissions, BucketSourceOption... 
options) { + Opts opts = Opts.unwrap(options).prepend(defaultOpts); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + TestIamPermissionsRequest req = + TestIamPermissionsRequest.newBuilder() + .setResource(bucketNameCodec.encode(bucket)) + .addAllPermissions(permissions) + .build(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.testIamPermissionsCallable().call(req, merge), + resp -> { + Set heldPermissions = ImmutableSet.copyOf(resp.getPermissionsList()); + return permissions.stream() + .map(heldPermissions::contains) + .collect(ImmutableList.toImmutableList()); + }); + } + + @Override + public ServiceAccount getServiceAccount(String projectId) { + return CrossTransportUtils.throwHttpJsonOnly(Storage.class, "getServiceAccount"); + } + + @Override + public Notification createNotification(String bucket, NotificationInfo notificationInfo) { + return throwHttpJsonOnly( + fmtMethodName("createNotification", String.class, NotificationInfo.class)); + } + + @Override + public Notification getNotification(String bucket, String notificationId) { + return throwHttpJsonOnly(fmtMethodName("getNotification", String.class, String.class)); + } + + @Override + public List listNotifications(String bucket) { + return throwHttpJsonOnly(fmtMethodName("listNotifications", String.class)); + } + + @Override + public boolean deleteNotification(String bucket, String notificationId) { + return throwHttpJsonOnly(fmtMethodName("deleteNotification", String.class, String.class)); + } + + @BetaApi + @Override + public BlobWriteSession blobWriteSession(BlobInfo info, BlobWriteOption... 
options) { + Opts opts = Opts.unwrap(options).resolveFrom(info); + WritableByteChannelSession writableByteChannelSession = + writerFactory.writeSession(this, info, opts); + return BlobWriteSessions.of(writableByteChannelSession); + } + + @BetaApi + @Override + public BlobAppendableUpload blobAppendableUpload( + BlobInfo blobInfo, BlobAppendableUploadConfig uploadConfig, BlobWriteOption... options) { + Opts opts = Opts.unwrap(options).resolveFrom(blobInfo); + return uploadConfig.create(this, blobInfo, opts); + } + + @Override + public Blob moveBlob(MoveBlobRequest request) { + Object srcObj = codecs.blobId().encode(request.getSource()); + Object dstObj = codecs.blobId().encode(request.getTarget()); + Opts srcOpts = + Opts.unwrap(request.getSourceOptions()).resolveFrom(request.getSource()).projectAsSource(); + Opts dstOpts = + Opts.unwrap(request.getTargetOptions()).resolveFrom(request.getTarget()); + MoveObjectRequest.Builder b = + MoveObjectRequest.newBuilder() + .setBucket(srcObj.getBucket()) + .setSourceObject(srcObj.getName()) + .setDestinationObject(dstObj.getName()); + + srcOpts.moveObjectsRequest().apply(b); + dstOpts.moveObjectsRequest().apply(b); + + MoveObjectRequest req = b.build(); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.moveObjectCallable().call(req), + syntaxDecoders.blob); + } + + @Override + public ApiFuture blobReadSession(BlobId id, BlobSourceOption... 
options) { + Opts opts = Opts.unwrap(options); + Object object = codecs.blobId().encode(id); + + BidiReadObjectSpec.Builder spec = + BidiReadObjectSpec.newBuilder().setBucket(object.getBucket()).setObject(object.getName()); + + long generation = object.getGeneration(); + if (generation > 0) { + spec.setGeneration(generation); + } + BidiReadObjectRequest.Builder b = BidiReadObjectRequest.newBuilder(); + b.setReadObjectSpec(spec); + opts.bidiReadObjectRequest().apply(b); + BidiReadObjectRequest req = b.build(); + + GrpcCallContext context = opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + ApiFuture session = storageDataClient.readSession(req, context); + + return BlobReadSessionAdapter.wrap(session); + } + + @Override + public GrpcStorageOptions getOptions() { + return (GrpcStorageOptions) super.getOptions(); + } + + boolean isClosed() { + return storageClient.isShutdown(); + } + + private Blob getBlob(ApiFuture result) { + try { + WriteObjectResponse response = ApiExceptions.callAndTranslateApiException(result); + return syntaxDecoders.blob.decode(response.getResource()); + } catch (Exception e) { + throw StorageException.coalesce(e); + } + } + + public AppendableUploadState getAppendableState( + BlobInfo info, Opts opts, long maxPendingBytes) { + boolean takeOver = info.getGeneration() != null; + BidiWriteObjectRequest req = + takeOver + ? 
getBidiWriteObjectRequestForTakeover(info, opts) + : getBidiWriteObjectRequest(info, opts, /* appendable= */ true); + AppendableUploadState state; + if (takeOver) { + state = + BidiUploadState.appendableTakeover( + req, + Retrying::newCallContext, + maxPendingBytes, + SettableApiFuture.create(), + /* initialCrc32c= */ null); + } else { + state = + BidiUploadState.appendableNew( + req, + Retrying::newCallContext, + maxPendingBytes, + SettableApiFuture.create(), + opts.getHasher().initialValue()); + } + return state; + } + + /** Bind some decoders for our "Syntax" classes to this instance of GrpcStorageImpl */ + private final class SyntaxDecoders { + + final Decoder blob = + o -> codecs.blobInfo().decode(o).asBlob(GrpcStorageImpl.this); + final Decoder bucket = + b -> codecs.bucketInfo().decode(b).asBucket(GrpcStorageImpl.this); + } + + /** + * Today {@link com.google.cloud.storage.spi.v1.HttpStorageRpc#list(String, Map)} creates + * synthetic objects to represent {@code prefixes} ("directories") returned as part of a list + * objects response. Specifically, a StorageObject with an `isDirectory` attribute added. + * + *

This approach is not sound, and presents an otherwise ephemeral piece of metadata as an + * actual piece of data. (A {@code prefix} is not actually an object, and therefore can't be + * queried for other object metadata.) + * + *

In an effort to preserve compatibility with the current public API, this class attempts to + * encapsulate the process of producing these Synthetic Directory Objects and lifting them into + * the Page. + * + *

This behavior should NOT be carried forward to any possible new API for the storage client. + */ + private final class ListObjectsWithSyntheticDirectoriesPage implements Page { + + private final GrpcCallContext ctx; + private final ListObjectsRequest req; + private final ListObjectsResponse resp; + + private ListObjectsWithSyntheticDirectoriesPage( + GrpcCallContext ctx, ListObjectsRequest req, ListObjectsResponse resp) { + this.ctx = ctx; + this.req = req; + this.resp = resp; + } + + @Override + public boolean hasNextPage() { + return !resp.getNextPageToken().isEmpty(); + } + + @Override + public String getNextPageToken() { + return resp.getNextPageToken(); + } + + @Override + public Page getNextPage() { + ListObjectsRequest nextPageReq = + req.toBuilder().setPageToken(resp.getNextPageToken()).build(); + try { + GrpcCallContext merge = Utils.merge(ctx, Retrying.newCallContext()); + ListObjectsResponse nextPageResp = + retrier.run( + retryAlgorithmManager.getFor(nextPageReq), + () -> storageClient.listObjectsCallable().call(nextPageReq, merge), + Decoder.identity()); + return new ListObjectsWithSyntheticDirectoriesPage(ctx, nextPageReq, nextPageResp); + } catch (Exception e) { + throw StorageException.coalesce(e); + } + } + + @Override + public Iterable iterateAll() { + // drop to our interface type to help type inference below with the stream. + Page curr = this; + Predicate> exhausted = p -> p != null && p.hasNextPage(); + // Create a stream which will attempt to call getNextPage repeatedly until we meet our + // condition of exhaustion. 
By doing this we are able to rely on the retry logic in + // getNextPage + return () -> + streamIterate(curr, exhausted, Page::getNextPage) + .filter(Objects::nonNull) + .flatMap(p -> StreamSupport.stream(p.getValues().spliterator(), false)) + .iterator(); + } + + @Override + public Iterable getValues() { + return () -> { + String bucketName = bucketNameCodec.decode(req.getParent()); + return Streams.concat( + resp.getObjectsList().stream().map(syntaxDecoders.blob::decode), + resp.getPrefixesList().stream() + .map( + prefix -> + BlobInfo.newBuilder(bucketName, prefix) + .setSize(0L) + .setIsDirectory(true) + .build()) + .map(info -> info.asBlob(GrpcStorageImpl.this))) + .iterator(); + }; + } + } + + private final class ListBucketsWithPartialSuccessPage implements Page { + + private final GrpcCallContext ctx; + private final ListBucketsRequest req; + private final com.google.storage.v2.ListBucketsResponse resp; + private final Opts opts; + + private ListBucketsWithPartialSuccessPage( + GrpcCallContext ctx, + ListBucketsRequest req, + com.google.storage.v2.ListBucketsResponse resp, + Opts opts) { + this.ctx = ctx; + this.req = req; + this.resp = resp; + this.opts = opts; + } + + @Override + public boolean hasNextPage() { + return !resp.getNextPageToken().isEmpty(); + } + + @Override + public String getNextPageToken() { + return resp.getNextPageToken(); + } + + @Override + public Page getNextPage() { + if (!hasNextPage()) { + return null; + } + ListBucketsRequest nextPageReq = + req.toBuilder().setPageToken(resp.getNextPageToken()).build(); + try { + com.google.storage.v2.ListBucketsResponse nextPageResp = listBuckets(ctx, nextPageReq); + return new ListBucketsWithPartialSuccessPage(ctx, nextPageReq, nextPageResp, opts); + } catch (Exception e) { + throw StorageException.coalesce(e); + } + } + + @Override + public Iterable getValues() { + Decoder bucketDecoder = + syntaxDecoders.bucket.andThen(opts.clearBucketFields()); + Stream reachable = 
resp.getBucketsList().stream().map(bucketDecoder::decode); + Stream unreachable = + resp.getUnreachableList().stream() + .map( + name -> { + String decoded = bucketNameCodec.decode(name); + return BucketInfo.newBuilder(decoded) + .setIsUnreachable(true) + .build() + .asBucket(GrpcStorageImpl.this); + }); + return Streams.concat(reachable, unreachable).collect(ImmutableList.toImmutableList()); + } + + @Override + public Iterable iterateAll() { + Page curr = this; + return () -> + streamIterate(curr, p -> p != null && p.hasNextPage(), Page::getNextPage) + .filter(Objects::nonNull) + .flatMap(p -> StreamSupport.stream(p.getValues().spliterator(), false)) + .iterator(); + } + } + + static final class TransformingPageDecorator< + RequestT, + ResponseT, + ResourceT, + PageT extends AbstractPage, + ModelT> + implements Page { + + private final PageT page; + private final Decoder translator; + private final Retrier retrier; + private final ResultRetryAlgorithm resultRetryAlgorithm; + + TransformingPageDecorator( + PageT page, + Decoder translator, + Retrier retrier, + ResultRetryAlgorithm resultRetryAlgorithm) { + this.page = page; + this.translator = translator; + this.retrier = retrier; + this.resultRetryAlgorithm = resultRetryAlgorithm; + } + + @Override + public boolean hasNextPage() { + return page.hasNextPage(); + } + + @Override + public String getNextPageToken() { + return page.getNextPageToken(); + } + + @Override + public Page getNextPage() { + return new TransformingPageDecorator<>( + page.getNextPage(), translator, retrier, resultRetryAlgorithm); + } + + @SuppressWarnings({"Convert2MethodRef"}) + @Override + public Iterable iterateAll() { + // iterateAll on AbstractPage isn't very friendly to decoration, as getNextPage isn't actually + // ever called. This means we aren't able to apply our retry wrapping there. + // Instead, what we do is create a stream which will attempt to call getNextPage repeatedly + // until we meet some condition of exhaustion. 
At that point we can apply our retry logic. + return () -> + streamIterate( + page, + p -> p != null && p.hasNextPage(), + prev -> { + // TODO: retry token header + // explicitly define this callable rather than using the method reference to + // prevent a javac 1.8 exception + // https://bugs.java.com/bugdatabase/view_bug.do?bug_id=8056984 + Callable c = () -> prev.getNextPage(); + return retrier.run(resultRetryAlgorithm, c, Decoder.identity()); + }) + .filter(Objects::nonNull) + .flatMap(p -> StreamSupport.stream(p.getValues().spliterator(), false)) + .map(translator::decode) + .iterator(); + } + + @Override + public Iterable getValues() { + return () -> + StreamSupport.stream(page.getValues().spliterator(), false) + .map(translator::decode) + .iterator(); + } + } + + private static Stream streamIterate( + T seed, Predicate shouldComputeNext, UnaryOperator computeNext) { + requireNonNull(seed, "seed must be non null"); + requireNonNull(shouldComputeNext, "shouldComputeNext must be non null"); + requireNonNull(computeNext, "computeNext must be non null"); + Spliterator spliterator = + new AbstractSpliterator(Long.MAX_VALUE, 0) { + T prev; + boolean started = false; + boolean done = false; + + @Override + public boolean tryAdvance(Consumer action) { + // if we haven't started, emit our seed and return + if (!started) { + started = true; + action.accept(seed); + prev = seed; + return true; + } + // if we've previously finished quickly return + if (done) { + return false; + } + // test whether we should try and compute the next value + if (shouldComputeNext.test(prev)) { + // compute the next value and figure out if we can use it + T next = computeNext.apply(prev); + if (next != null) { + action.accept(next); + prev = next; + return true; + } + } + + // fallthrough, if we haven't taken an action by now consider the stream done and + // return + done = true; + return false; + } + }; + return StreamSupport.stream(spliterator, false); + } + + ReadObjectRequest 
getReadObjectRequest(BlobId blob, Opts opts) { + Object object = codecs.blobId().encode(blob); + + ReadObjectRequest.Builder builder = + ReadObjectRequest.newBuilder().setBucket(object.getBucket()).setObject(object.getName()); + + long generation = object.getGeneration(); + if (generation > 0) { + builder.setGeneration(generation); + } + return opts.readObjectRequest().apply(builder).build(); + } + + WriteObjectRequest getWriteObjectRequest(BlobInfo info, Opts opts) { + Object object = codecs.blobInfo().encode(info); + Object.Builder objectBuilder = + object.toBuilder() + // required if the data is changing + .clearChecksums() + // trimmed to shave payload size + .clearGeneration() + .clearMetageneration() + .clearSize() + .clearCreateTime() + .clearUpdateTime(); + WriteObjectSpec.Builder specBuilder = WriteObjectSpec.newBuilder().setResource(objectBuilder); + + WriteObjectRequest.Builder requestBuilder = + WriteObjectRequest.newBuilder().setWriteObjectSpec(specBuilder); + + return opts.writeObjectRequest().apply(requestBuilder).build(); + } + + BidiWriteObjectRequest getBidiWriteObjectRequest( + BlobInfo info, Opts opts, boolean appendable) { + Object object = codecs.blobInfo().encode(info); + Object.Builder objectBuilder = + object.toBuilder() + // clear out the checksums, if a crc32cMatch is specified it'll come back via opts + .clearChecksums() + .clearGeneration() + .clearMetageneration() + .clearSize() + .clearCreateTime() + .clearUpdateTime(); + WriteObjectSpec.Builder specBuilder = WriteObjectSpec.newBuilder().setResource(objectBuilder); + if (appendable) { + specBuilder.setAppendable(true); + } + + BidiWriteObjectRequest.Builder requestBuilder = + BidiWriteObjectRequest.newBuilder().setWriteObjectSpec(specBuilder); + + return opts.bidiWriteObjectRequest().apply(requestBuilder).build(); + } + + BidiWriteObjectRequest getBidiWriteObjectRequestForTakeover( + BlobInfo info, Opts opts) { + Object object = codecs.blobInfo().encode(info); + 
AppendObjectSpec.Builder specBuilder = + AppendObjectSpec.newBuilder() + .setObject(object.getName()) + .setBucket(object.getBucket()) + .setGeneration(object.getGeneration()); + + BidiWriteObjectRequest.Builder requestBuilder = + BidiWriteObjectRequest.newBuilder().setAppendObjectSpec(specBuilder.build()); + + return opts.bidiWriteObjectRequest().apply(requestBuilder).build(); + } + + private UnbufferedReadableByteChannelSession unbufferedReadSession( + BlobId blob, BlobSourceOption[] options) { + + Opts opts = Opts.unwrap(options).resolveFrom(blob).prepend(defaultOpts); + ReadObjectRequest readObjectRequest = getReadObjectRequest(blob, opts); + GrpcCallContext grpcCallContext = opts.grpcMetadataMapper().apply(Retrying.newCallContext()); + return ResumableMedia.gapic() + .read() + .byteChannel( + readObjectCallable(grpcCallContext), + retrier, + retryAlgorithmManager.getFor(readObjectRequest)) + .setAutoGzipDecompression(!opts.autoGzipDecompression()) + .unbuffered() + .setReadObjectRequest(readObjectRequest) + .build(); + } + + @VisibleForTesting + ApiFuture startResumableWrite( + GrpcCallContext grpcCallContext, WriteObjectRequest req, Opts opts) { + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return ResumableMedia.gapic() + .write() + .resumableWrite( + storageClient.startResumableWriteCallable().withDefaultCallContext(merge), + req, + opts, + retrier.withAlg(retryAlgorithmManager.getFor(req))); + } + + ApiFuture startResumableWrite( + GrpcCallContext grpcCallContext, BidiWriteObjectRequest req, Opts opts) { + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return ResumableMedia.gapic() + .write() + .bidiResumableWrite( + storageClient.startResumableWriteCallable().withDefaultCallContext(merge), + req, + opts, + retrier.withAlg(retryAlgorithmManager.getFor(req))); + } + + private SourceObject sourceObjectEncode(SourceBlob from) { + SourceObject.Builder to = SourceObject.newBuilder(); + 
to.setName(from.getName()); + ifNonNull(from.getGeneration(), to::setGeneration); + return to.build(); + } + + private com.google.storage.v2.ListBucketsResponse listBuckets( + GrpcCallContext grpcCallContext, ListBucketsRequest request) { + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(request), + () -> storageClient.listBucketsCallable().call(request, merge), + Decoder.identity()); + } + + private com.google.storage.v2.Bucket getBucketWithDefaultAcls(String bucketName) { + Fields fields = + UnifiedOpts.fields( + ImmutableSet.of( + BucketField.ACL, // workaround for b/261771961 + BucketField.DEFAULT_OBJECT_ACL, + BucketField.METAGENERATION)); + GrpcCallContext grpcCallContext = GrpcCallContext.createDefault(); + GetBucketRequest req = + fields + .getBucket() + .apply(GetBucketRequest.newBuilder()) + .setName(bucketNameCodec.encode(bucketName)) + .build(); + + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.getBucketCallable().call(req, merge), + Decoder.identity()); + } + + private com.google.storage.v2.Bucket getBucketWithAcls( + String bucketName, Opts opts) { + Fields fields = + UnifiedOpts.fields(ImmutableSet.of(BucketField.ACL, BucketField.METAGENERATION)); + GrpcCallContext grpcCallContext = GrpcCallContext.createDefault(); + Mapper mapper = opts.getBucketsRequest().andThen(fields.getBucket()); + GetBucketRequest req = + mapper + .apply(GetBucketRequest.newBuilder()) + .setName(bucketNameCodec.encode(bucketName)) + .build(); + + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.getBucketCallable().call(req, merge), + Decoder.identity()); + } + + private com.google.storage.v2.Bucket updateBucket(UpdateBucketRequest req) { + GrpcCallContext 
grpcCallContext = GrpcCallContext.createDefault(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.updateBucketCallable().call(req, merge), + Decoder.identity()); + } + + private static UpdateBucketRequest createUpdateDefaultAclRequest( + String bucket, ImmutableList newDefaultAcls, long metageneration) { + com.google.storage.v2.Bucket update = + com.google.storage.v2.Bucket.newBuilder() + .setName(bucketNameCodec.encode(bucket)) + .addAllDefaultObjectAcl(newDefaultAcls) + .build(); + Opts opts = + Opts.from( + UnifiedOpts.fields(ImmutableSet.of(BucketField.DEFAULT_OBJECT_ACL)), + UnifiedOpts.metagenerationMatch(metageneration)); + return opts.updateBucketsRequest() + .apply(UpdateBucketRequest.newBuilder()) + .setBucket(update) + .build(); + } + + private static UpdateBucketRequest createUpdateBucketAclRequest( + String bucket, ImmutableList newDefaultAcls, long metageneration) { + com.google.storage.v2.Bucket update = + com.google.storage.v2.Bucket.newBuilder() + .setName(bucketNameCodec.encode(bucket)) + .addAllAcl(newDefaultAcls) + .build(); + Opts opts = + Opts.from( + UnifiedOpts.fields(ImmutableSet.of(BucketField.ACL)), + UnifiedOpts.metagenerationMatch(metageneration)); + return opts.updateBucketsRequest() + .apply(UpdateBucketRequest.newBuilder()) + .setBucket(update) + .build(); + } + + private Object getObjectWithAcls(Object obj) { + Fields fields = + UnifiedOpts.fields(ImmutableSet.of(BucketField.ACL, BucketField.METAGENERATION)); + GrpcCallContext grpcCallContext = GrpcCallContext.createDefault(); + GetObjectRequest req = + fields + .getObject() + .apply(GetObjectRequest.newBuilder()) + .setBucket(obj.getBucket()) + .setObject(obj.getName()) + .build(); + + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> 
storageClient.getObjectCallable().call(req, merge), + Decoder.identity()); + } + + private static UpdateObjectRequest createUpdateObjectAclRequest( + Object obj, ImmutableList newAcls, long metageneration) { + Object update = + Object.newBuilder() + .setBucket(obj.getBucket()) + .setName(obj.getName()) + .addAllAcl(newAcls) + .build(); + Opts opts = + Opts.from( + UnifiedOpts.fields(ImmutableSet.of(BlobField.ACL)), + UnifiedOpts.metagenerationMatch(metageneration)); + return opts.updateObjectsRequest() + .apply(UpdateObjectRequest.newBuilder()) + .setObject(update) + .build(); + } + + private Object updateObject(UpdateObjectRequest req) { + GrpcCallContext grpcCallContext = GrpcCallContext.createDefault(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.updateObjectCallable().call(req, merge), + Decoder.identity()); + } + + @NonNull + @Override + public BlobInfo internalObjectGet(BlobId blobId, Opts opts) { + Opts finalOpts = opts.prepend(defaultOpts).prepend(ALL_BLOB_FIELDS); + GrpcCallContext grpcCallContext = + finalOpts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + GetObjectRequest.Builder builder = + GetObjectRequest.newBuilder() + .setBucket(bucketNameCodec.encode(blobId.getBucket())) + .setObject(blobId.getName()); + ifNonNull(blobId.getGeneration(), builder::setGeneration); + GetObjectRequest req = finalOpts.getObjectsRequest().apply(builder).build(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + //noinspection DataFlowIssue + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> storageClient.getObjectCallable().call(req, merge), + resp -> { + BlobInfo tmp = codecs.blobInfo().decode(resp); + return finalOpts.clearBlobFields().decode(tmp); + }); + } + + @Nullable + private Blob internalBlobGet(BlobId blob, Opts unwrap) { + Opts opts = unwrap.resolveFrom(blob); + try { + return 
internalObjectGet(blob, opts).asBlob(this); + } catch (StorageException e) { + if (e.getCause() instanceof NotFoundException) { + return null; + } else { + throw e; + } + } catch (NotFoundException nfe) { + return null; + } + } + + @Nullable + private Bucket internalBucketGet(String bucket, Opts unwrap) { + Opts opts = unwrap.prepend(defaultOpts).prepend(ALL_BUCKET_FIELDS); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + GetBucketRequest.Builder builder = + GetBucketRequest.newBuilder().setName(bucketNameCodec.encode(bucket)); + GetBucketRequest req = opts.getBucketsRequest().apply(builder).build(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return retrier.run( + retryAlgorithmManager.getFor(req), + () -> { + try { + return storageClient.getBucketCallable().call(req, merge); + } catch (NotFoundException e) { + return null; + } + }, + syntaxDecoders.bucket.andThen(opts.clearBucketFields())); + } + + private ZeroCopyServerStreamingCallable readObjectCallable( + GrpcCallContext grpcCallContext) { + return new ZeroCopyServerStreamingCallable<>( + storageClient.readObjectCallable().withDefaultCallContext(grpcCallContext), + responseContentLifecycleManager); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageOptions.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageOptions.java new file mode 100644 index 000000000000..1a6726b9c01b --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageOptions.java @@ -0,0 +1,1413 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.api.gax.util.TimeConversionUtils.toJavaTimeDuration; +import static com.google.api.gax.util.TimeConversionUtils.toThreetenDuration; +import static com.google.common.base.Preconditions.checkArgument; +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiClock; +import com.google.api.core.BetaApi; +import com.google.api.core.InternalApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.BasicResultRetryAlgorithm; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.AbortedException; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.HeaderProvider; +import com.google.api.gax.rpc.NoHeaderProvider; +import com.google.api.gax.rpc.OutOfRangeException; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.RequestParamsExtractor; +import com.google.api.gax.rpc.ServerStreamingCallable; +import 
com.google.api.gax.rpc.internal.QuotaProjectIdHidingCredentials; +import com.google.api.gax.tracing.ApiTracerFactory; +import com.google.api.pathtemplate.PathTemplate; +import com.google.auth.Credentials; +import com.google.cloud.NoCredentials; +import com.google.cloud.ServiceFactory; +import com.google.cloud.ServiceOptions; +import com.google.cloud.ServiceRpc; +import com.google.cloud.TransportOptions; +import com.google.cloud.Tuple; +import com.google.cloud.grpc.GrpcTransportOptions; +import com.google.cloud.spi.ServiceRpcFactory; +import com.google.cloud.storage.GrpcUtils.ZeroCopyBidiStreamingCallable; +import com.google.cloud.storage.Hasher.UncheckedChecksumMismatchException; +import com.google.cloud.storage.OpenTelemetryBootstrappingUtils.ChannelConfigurator; +import com.google.cloud.storage.RetryContext.RetryContextProvider; +import com.google.cloud.storage.Retrying.DefaultRetrier; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.cloud.storage.UnifiedOpts.UserProject; +import com.google.cloud.storage.spi.StorageRpcFactory; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.protobuf.ByteString; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Message; +import com.google.protobuf.MessageLite; +import com.google.protobuf.Parser; +import com.google.protobuf.UnsafeByteOperations; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.BidiReadObjectResponse; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; 
+import com.google.storage.v2.StorageClient; +import com.google.storage.v2.StorageSettings; +import com.google.storage.v2.stub.GrpcStorageCallableFactory; +import com.google.storage.v2.stub.GrpcStorageStub; +import com.google.storage.v2.stub.StorageStub; +import com.google.storage.v2.stub.StorageStubSettings; +import io.grpc.ClientInterceptor; +import io.grpc.Detachable; +import io.grpc.HasByteBuffer; +import io.grpc.KnownLength; +import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.protobuf.ProtoUtils; +import io.opentelemetry.api.OpenTelemetry; +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInputStream; +import java.io.Serializable; +import java.net.URI; +import java.nio.ByteBuffer; +import java.time.Clock; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ScheduledExecutorService; +import java.util.logging.Logger; +import org.checkerframework.checker.nullness.qual.NonNull; + +/** + * @since 2.14.0 + */ +@TransportCompatibility(Transport.GRPC) +public final class GrpcStorageOptions extends StorageOptions + implements Retrying.RetryingDependencies { + + private static final long serialVersionUID = -4499446543857945349L; + private static final String GCS_SCOPE = "https://www.googleapis.com/auth/devstorage.full_control"; + private static final Set SCOPES = ImmutableSet.of(GCS_SCOPE); + private static final String DEFAULT_HOST = "https://storage.googleapis.com"; + // If true, disable the bound-token-by-default feature for DirectPath. 
+ private static final boolean DIRECT_PATH_BOUND_TOKEN_DISABLED = + Boolean.parseBoolean( + System.getProperty("com.google.cloud.storage.grpc.bound_token", "false")); + + private final GrpcRetryAlgorithmManager retryAlgorithmManager; + private final java.time.Duration terminationAwaitDuration; + private final boolean attemptDirectPath; + private final boolean enableGrpcClientMetrics; + + private final boolean grpcClientMetricsManuallyEnabled; + private final GrpcInterceptorProvider grpcInterceptorProvider; + private final BlobWriteSessionConfig blobWriteSessionConfig; + private transient OpenTelemetry openTelemetry; + + private GrpcStorageOptions(Builder builder, GrpcStorageDefaults serviceDefaults) { + super(builder, serviceDefaults); + this.retryAlgorithmManager = + new GrpcRetryAlgorithmManager( + MoreObjects.firstNonNull( + builder.storageRetryStrategy, serviceDefaults.getStorageRetryStrategy())); + this.terminationAwaitDuration = + MoreObjects.firstNonNull( + builder.terminationAwaitDuration, + serviceDefaults.getTerminationAwaitDurationJavaTime()); + this.attemptDirectPath = builder.attemptDirectPath; + this.enableGrpcClientMetrics = builder.enableGrpcClientMetrics; + this.grpcClientMetricsManuallyEnabled = builder.grpcMetricsManuallyEnabled; + this.grpcInterceptorProvider = builder.grpcInterceptorProvider; + this.blobWriteSessionConfig = builder.blobWriteSessionConfig; + this.openTelemetry = builder.openTelemetry; + } + + @Override + protected Set getScopes() { + return SCOPES; + } + + @InternalApi + GrpcRetryAlgorithmManager getRetryAlgorithmManager() { + return retryAlgorithmManager; + } + + @InternalApi + java.time.Duration getTerminationAwaitDuration() { + return terminationAwaitDuration; + } + + @InternalApi + StorageSettings getStorageSettings() throws IOException { + return resolveSettingsAndOpts().x(); + } + + @InternalApi + GrpcInterceptorProvider getGrpcInterceptorProvider() { + return grpcInterceptorProvider; + } + + private void 
readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + this.openTelemetry = HttpStorageOptions.getDefaultInstance().getOpenTelemetry(); + } + + /** + * We have to perform several introspections and detections to cross-wire/support several features + * that are either gapic primitives, ServiceOption primitives or GCS semantic requirements. + * + *

Requester Pays, {@code quota_project_id} and {@code userProject}

+ * + * When using the JSON API, operations destined for requester pays buckets can identify the project + * for billing and quota attribution by specifying either {@code userProject} query parameter or + * {@code x-goog-user-project} HTTP Header. + * + *

If the credentials being used contain the property {@code quota_project_id} this value will + * automatically be set to the {@code x-goog-user-project} header for both JSON and GAPIC. In the + * case of JSON this isn't an issue, as any {@code userProject} query parameter takes precedence. + * However, in gRPC/GAPIC there isn't a {@code userProject} query parameter, instead we are adding + * {@code x-goog-user-project} to the request context as metadata. If the credentials set the + * request metadata and we set the request metadata it results in two different entries in the + * request. This creates ambiguity for GCS which then rejects the request. + * + *

To account for this and to provide a similar level of precedence we are introspecting the + * credentials and service options to save any {@code quota_project_id} into an {@link + * UserProject} which is then used by {@link GrpcStorageImpl} to resolve individual request + * metadata. + * + *

The precedence we provide is as follows

+ * + *
    + *
  1. Any "userProject" Option provided to an individual method + *
  2. Any Non-empty value for {@link #getQuotaProjectId()} + *
  3. Any {@code x-goog-user-project} provided by {@link #credentials} + *
+ */ + private Tuple> resolveSettingsAndOpts() throws IOException { + String endpoint = getHost(); + URI uri = URI.create(endpoint); + String scheme = uri.getScheme(); + int port = uri.getPort(); + // Gax routes the endpoint into a method which can't handle schemes, + // unless for Direct Google Access try and strip here if we can + switch (scheme) { + case "http": + endpoint = String.format(Locale.US, "%s:%s", uri.getHost(), port > 0 ? port : 80); + break; + case "https": + endpoint = String.format(Locale.US, "%s:%s", uri.getHost(), port > 0 ? port : 443); + break; + } + + Opts defaultOpts = Opts.empty(); + CredentialsProvider credentialsProvider; + Preconditions.checkState(credentials != null, "Unable to resolve credentials"); + if (credentials instanceof NoCredentials) { + credentialsProvider = NoCredentialsProvider.create(); + } else { + boolean foundQuotaProject = false; + if (credentials.hasRequestMetadata()) { + try { + Map> requestMetadata = credentials.getRequestMetadata(uri); + for (Entry> e : requestMetadata.entrySet()) { + String key = e.getKey(); + if ("x-goog-user-project".equals(Utils.headerNameToLowerCase(key.trim()))) { + List value = e.getValue(); + if (!value.isEmpty()) { + foundQuotaProject = true; + defaultOpts = Opts.from(UnifiedOpts.userProject(value.get(0))); + break; + } + } + } + } catch (IllegalStateException e) { + // This happens when an instance of OAuth2Credentials attempts to refresh its + // access token during our attempt at getting request metadata. 
+ // This is most easily reproduced by OAuth2Credentials.create(null); + // see com.google.auth.oauth2.OAuth2Credentials.refreshAccessToken + if (!e.getMessage().startsWith("OAuth2Credentials")) { + throw e; + } + } + } + if (foundQuotaProject) { + // fix for https://github.com/googleapis/java-storage/issues/1736 + credentialsProvider = + FixedCredentialsProvider.create(new QuotaProjectIdHidingCredentials(credentials)); + } else { + credentialsProvider = FixedCredentialsProvider.create(credentials); + } + } + + boolean isTm = + Arrays.stream(Thread.currentThread().getStackTrace()) + .anyMatch( + ste -> ste.getClassName().startsWith("com.google.cloud.storage.transfermanager")); + + HeaderProvider internalHeaderProvider = + StorageSettings.defaultApiClientHeaderProviderBuilder() + .setClientLibToken(ServiceOptions.getGoogApiClientLibName(), getLibraryVersion()) + .build(); + if (isTm) { + internalHeaderProvider = + XGoogApiClientHeaderProvider.of( + internalHeaderProvider, ImmutableList.of("gccl-gcs-cmd/tm")); + } + + StorageSettings.Builder builder = + new GapicStorageSettingsBuilder(StorageSettings.newBuilder().build()) + .setInternalHeaderProvider(internalHeaderProvider) + .setEndpoint(endpoint) + .setCredentialsProvider(credentialsProvider) + .setClock(getClock()); + + if (this.getUniverseDomain() != null) { + builder.setUniverseDomain(this.getUniverseDomain()); + } + + // this MUST come after credentials, service options set value has higher priority than creds + String quotaProjectId = this.getQuotaProjectId(); + if (quotaProjectId != null && !quotaProjectId.isEmpty()) { + defaultOpts = Opts.from(UnifiedOpts.userProject(quotaProjectId)); + } + + builder.setHeaderProvider(this.getMergedHeaderProvider(new NoHeaderProvider())); + + InstantiatingGrpcChannelProvider.Builder channelProviderBuilder = + InstantiatingGrpcChannelProvider.newBuilder() + .setEndpoint(endpoint) + .setAllowNonDefaultServiceAccount(true) + .setAttemptDirectPath(attemptDirectPath); + + if 
(!DIRECT_PATH_BOUND_TOKEN_DISABLED) { + channelProviderBuilder.setAllowHardBoundTokenTypes( + Collections.singletonList(InstantiatingGrpcChannelProvider.HardBoundTokenTypes.ALTS)); + } + + if (!NoopGrpcInterceptorProvider.INSTANCE.equals(grpcInterceptorProvider)) { + channelProviderBuilder.setInterceptorProvider(grpcInterceptorProvider); + } + + if (attemptDirectPath) { + channelProviderBuilder.setAttemptDirectPathXds(); + } + + if (scheme.equals("http")) { + channelProviderBuilder.setChannelConfigurator(ManagedChannelBuilder::usePlaintext); + } + + if (enableGrpcClientMetrics) { + ChannelConfigurator channelConfigurator = + OpenTelemetryBootstrappingUtils.enableGrpcMetrics( + ChannelConfigurator.lift(channelProviderBuilder.getChannelConfigurator()), + endpoint, + this.getProjectId(), + this.getUniverseDomain(), + !grpcClientMetricsManuallyEnabled); + channelProviderBuilder.setChannelConfigurator(channelConfigurator); + } + + builder.setTransportChannelProvider(channelProviderBuilder.build()); + RetrySettings baseRetrySettings = getRetrySettings(); + RetrySettings readRetrySettings = + baseRetrySettings.toBuilder() + // when performing a read via ReadObject, the ServerStream will have a default relative + // deadline set of `requestStartTime() + totalTimeout`, meaning if the specified + // RetrySettings have a totalTimeout of 10 seconds -- which should be plenty for + // metadata RPCs -- the entire ReadObject stream would need to complete within 10 + // seconds. + // To allow read streams to have longer lifespans, crank up their timeouts, instead rely + // on idleTimeout below. 
+ .setLogicalTimeout(java.time.Duration.ofDays(28)) + .build(); + java.time.Duration totalTimeout = baseRetrySettings.getTotalTimeoutDuration(); + + // retries for unary methods are generally handled at a different level, except + // StartResumableWrite + builder.applyToAllUnaryMethods( + input -> { + input.setSimpleTimeoutNoRetriesDuration(totalTimeout); + return null; + }); + + // configure the settings for StartResumableWrite + Duration startResumableTimeoutDuration; + // the default for initialRpcTimeout is the same as totalTimeout. This is not good, because it + // will prevent our retries from even happening. + // If the default values is used, set our per-rpc timeout to 20 seconds to allow our retries + // a chance. + if (baseRetrySettings + .getInitialRpcTimeoutDuration() + .equals(getDefaultRetrySettings().getInitialRpcTimeoutDuration())) { + startResumableTimeoutDuration = Duration.ofSeconds(20); + } else { + startResumableTimeoutDuration = baseRetrySettings.getInitialRpcTimeoutDuration(); + } + builder + .startResumableWriteSettings() + // set this lower, to allow our retries a chance instead of it being totalTimeout + .setSimpleTimeoutNoRetriesDuration(startResumableTimeoutDuration); + // for ReadObject disable retries and move the total timeout to the idle timeout + builder + .readObjectSettings() + .setRetrySettings(readRetrySettings) + // disable gapic retries because we're handling it ourselves + .setRetryableCodes(Collections.emptySet()) + // for reads, the stream can be held open for a long time in order to read all bytes, + // this is totally valid. instead we want to monitor if the stream is doing work and if not + // timeout. + .setIdleTimeoutDuration(totalTimeout); + return Tuple.of(builder.build(), defaultOpts); + } + + /** + * @since 2.47.0 This new api is in preview and is subject to breaking changes. 
+ */ + @BetaApi + @Override + public OpenTelemetry getOpenTelemetry() { + return openTelemetry; + } + + /** + * @since 2.14.0 + */ + @Override + public GrpcStorageOptions.Builder toBuilder() { + return new GrpcStorageOptions.Builder(this); + } + + @Override + public int hashCode() { + return Objects.hash( + retryAlgorithmManager, + terminationAwaitDuration, + attemptDirectPath, + enableGrpcClientMetrics, + grpcInterceptorProvider, + blobWriteSessionConfig, + openTelemetry, + baseHashCode()); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof GrpcStorageOptions)) { + return false; + } + GrpcStorageOptions that = (GrpcStorageOptions) o; + return attemptDirectPath == that.attemptDirectPath + && enableGrpcClientMetrics == that.enableGrpcClientMetrics + && Objects.equals(retryAlgorithmManager, that.retryAlgorithmManager) + && Objects.equals(terminationAwaitDuration, that.terminationAwaitDuration) + && Objects.equals(grpcInterceptorProvider, that.grpcInterceptorProvider) + && Objects.equals(blobWriteSessionConfig, that.blobWriteSessionConfig) + && Objects.equals(openTelemetry, that.openTelemetry) + && this.baseEquals(that); + } + + /** + * @since 2.14.0 + */ + public static GrpcStorageOptions.Builder newBuilder() { + return new GrpcStorageOptions.Builder().setHost(DEFAULT_HOST); + } + + /** + * @since 2.14.0 + */ + public static GrpcStorageOptions getDefaultInstance() { + return newBuilder().build(); + } + + /** + * @since 2.14.0 + */ + public static GrpcStorageOptions.GrpcStorageDefaults defaults() { + return GrpcStorageOptions.GrpcStorageDefaults.INSTANCE; + } + + // since our new GrpcStorageImpl can "close" we need to help ServiceOptions know whether it can + // use it's cached instance. 
+ @Override + protected boolean shouldRefreshService(Storage cachedService) { + if (cachedService instanceof GrpcStorageImpl) { + GrpcStorageImpl service = (GrpcStorageImpl) cachedService; + return service.isClosed(); + } + return super.shouldRefreshService(cachedService); + } + + /** + * @since 2.14.0 + */ + public static final class Builder extends StorageOptions.Builder { + + private StorageRetryStrategy storageRetryStrategy; + private java.time.Duration terminationAwaitDuration; + private boolean attemptDirectPath = GrpcStorageDefaults.INSTANCE.isAttemptDirectPath(); + private boolean enableGrpcClientMetrics = + GrpcStorageDefaults.INSTANCE.isEnableGrpcClientMetrics(); + private GrpcInterceptorProvider grpcInterceptorProvider = + GrpcStorageDefaults.INSTANCE.grpcInterceptorProvider(); + private BlobWriteSessionConfig blobWriteSessionConfig = + GrpcStorageDefaults.INSTANCE.getDefaultStorageWriterConfig(); + private OpenTelemetry openTelemetry = GrpcStorageDefaults.INSTANCE.getDefaultOpenTelemetry(); + + private boolean grpcMetricsManuallyEnabled = false; + + Builder() {} + + Builder(StorageOptions options) { + super(options); + GrpcStorageOptions gso = (GrpcStorageOptions) options; + this.storageRetryStrategy = gso.getRetryAlgorithmManager().retryStrategy; + this.terminationAwaitDuration = gso.getTerminationAwaitDuration(); + this.attemptDirectPath = gso.attemptDirectPath; + this.enableGrpcClientMetrics = gso.enableGrpcClientMetrics; + this.grpcInterceptorProvider = gso.grpcInterceptorProvider; + this.blobWriteSessionConfig = gso.blobWriteSessionConfig; + this.openTelemetry = gso.openTelemetry; + } + + /** + * This method is obsolete. Use {@link #setTerminationAwaitJavaTimeDuration(java.time.Duration)} + * instead. 
+ */ + @ObsoleteApi("Use setTerminationAwaitJavaTimeDuration(java.time.Duration) instead") + public Builder setTerminationAwaitDuration(org.threeten.bp.Duration terminationAwaitDuration) { + return setTerminationAwaitJavaTimeDuration(toJavaTimeDuration(terminationAwaitDuration)); + } + + /** + * Set the maximum duration in which to await termination of any outstanding requests when + * calling {@link Storage#close()} + * + * @param terminationAwaitDuration a non-null Duration to use + * @return the builder + * @since 2.14.0 + */ + public Builder setTerminationAwaitJavaTimeDuration( + java.time.Duration terminationAwaitDuration) { + this.terminationAwaitDuration = + requireNonNull(terminationAwaitDuration, "terminationAwaitDuration must be non null"); + return this; + } + + /** + * Option which signifies the client should attempt to connect to gcs via Direct Google Access. + * + *

NOTEThere is no need to specify a new endpoint via {@link #setHost(String)} as the + * underlying code will translate the normal {@code https://storage.googleapis.com:443} into the + * proper Direct Google Access URI for you. + * + * @since 2.14.0 + */ + public GrpcStorageOptions.Builder setAttemptDirectPath(boolean attemptDirectPath) { + this.attemptDirectPath = attemptDirectPath; + return this; + } + + /** + * Option for whether this client should emit internal gRPC client internal metrics to Cloud + * Monitoring. To disable metric reporting, set this to false. True by default. Emitting metrics + * is free and requires minimal CPU and memory. + * + * @since 2.41.0 + */ + public GrpcStorageOptions.Builder setEnableGrpcClientMetrics(boolean enableGrpcClientMetrics) { + this.enableGrpcClientMetrics = enableGrpcClientMetrics; + if (enableGrpcClientMetrics) { + grpcMetricsManuallyEnabled = true; + } + return this; + } + + /** + * @since 2.14.0 + */ + @Override + public GrpcStorageOptions.Builder setTransportOptions(TransportOptions transportOptions) { + if (!(transportOptions instanceof GrpcTransportOptions)) { + throw new IllegalArgumentException("Only gRPC transport is allowed."); + } + super.setTransportOptions(transportOptions); + return this; + } + + /** + * Override the default retry handling behavior with an alternate strategy. 
+ * + * @param storageRetryStrategy a non-null storageRetryStrategy to use + * @return the builder + * @see StorageRetryStrategy#getDefaultStorageRetryStrategy() + * @since 2.14.0 + */ + public GrpcStorageOptions.Builder setStorageRetryStrategy( + StorageRetryStrategy storageRetryStrategy) { + this.storageRetryStrategy = + requireNonNull(storageRetryStrategy, "storageRetryStrategy must be non null"); + return this; + } + + @Override + protected GrpcStorageOptions.Builder self() { + return this; + } + + /** + * @since 2.14.0 + */ + @Override + public GrpcStorageOptions.Builder setServiceFactory( + ServiceFactory serviceFactory) { + super.setServiceFactory(serviceFactory); + return this; + } + + /** + * @since 2.14.0 + */ + @Override + public GrpcStorageOptions.Builder setClock(ApiClock clock) { + super.setClock(clock); + return this; + } + + /** + * @since 2.14.0 + */ + @Override + public GrpcStorageOptions.Builder setProjectId(String projectId) { + super.setProjectId(projectId); + return this; + } + + /** + * @since 2.14.0 + */ + @Override + public GrpcStorageOptions.Builder setHost(String host) { + super.setHost(host); + return this; + } + + /** + * @since 2.14.0 + */ + @Override + public GrpcStorageOptions.Builder setCredentials(Credentials credentials) { + super.setCredentials(credentials); + return this; + } + + /** + * @since 2.14.0 + */ + @Override + public GrpcStorageOptions.Builder setRetrySettings(RetrySettings retrySettings) { + super.setRetrySettings(retrySettings); + return this; + } + + /** + * @since 2.14.0 + */ + @Override + public GrpcStorageOptions.Builder setServiceRpcFactory( + ServiceRpcFactory serviceRpcFactory) { + throw new UnsupportedOperationException( + "GrpcStorageOptions does not support setting a custom instance of ServiceRpcFactory"); + } + + /** + * @since 2.14.0 + */ + @Override + public GrpcStorageOptions.Builder setHeaderProvider(HeaderProvider headerProvider) { + super.setHeaderProvider(headerProvider); + return this; + } + + /** 
+ * @since 2.14.0 + */ + @Override + public GrpcStorageOptions.Builder setClientLibToken(String clientLibToken) { + super.setClientLibToken(clientLibToken); + return this; + } + + /** + * @since 2.14.0 + */ + @Override + public GrpcStorageOptions.Builder setQuotaProjectId(String quotaProjectId) { + super.setQuotaProjectId(quotaProjectId); + return this; + } + + /** + * @since 2.22.3 + */ + public GrpcStorageOptions.Builder setGrpcInterceptorProvider( + @NonNull GrpcInterceptorProvider grpcInterceptorProvider) { + requireNonNull(grpcInterceptorProvider, "grpcInterceptorProvider must be non null"); + this.grpcInterceptorProvider = grpcInterceptorProvider; + return this; + } + + /** + * @see BlobWriteSessionConfig + * @see BlobWriteSessionConfigs + * @see Storage#blobWriteSession(BlobInfo, BlobWriteOption...) + * @see GrpcStorageDefaults#getDefaultStorageWriterConfig() + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public GrpcStorageOptions.Builder setBlobWriteSessionConfig( + @NonNull BlobWriteSessionConfig blobWriteSessionConfig) { + requireNonNull(blobWriteSessionConfig, "blobWriteSessionConfig must be non null"); + checkArgument( + blobWriteSessionConfig instanceof BlobWriteSessionConfig.GrpcCompatible, + "The provided instance of BlobWriteSessionConfig is not compatible with gRPC transport."); + this.blobWriteSessionConfig = blobWriteSessionConfig; + return this; + } + + @BetaApi + @Override + public GrpcStorageOptions.Builder setUniverseDomain(String universeDomain) { + super.setUniverseDomain(universeDomain); + return this; + } + + @BetaApi + @Override + public GrpcStorageOptions.Builder setApiTracerFactory(ApiTracerFactory apiTracerFactory) { + super.setApiTracerFactory(apiTracerFactory); + return this; + } + + /** + * Enable OpenTelemetry Tracing and provide an instance for the client to use. 
+ * + * @param openTelemetry User defined instance of OpenTelemetry to be used by the library + * @since 2.47.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public GrpcStorageOptions.Builder setOpenTelemetry(OpenTelemetry openTelemetry) { + requireNonNull(openTelemetry, "openTelemetry must be non null"); + this.openTelemetry = openTelemetry; + return this; + } + + /** + * @since 2.14.0 + */ + @Override + public GrpcStorageOptions build() { + GrpcStorageOptions options = new GrpcStorageOptions(this, defaults()); + // TODO: Remove when https://github.com/googleapis/sdk-platform-java/issues/2911 is resolved + if (options.getUniverseDomain() != null) { + this.setHost("https://storage." + options.getUniverseDomain()); + return new GrpcStorageOptions(this, defaults()); + } + return options; + } + } + + /** + * @since 2.14.0 + */ + public static final class GrpcStorageDefaults extends StorageDefaults { + static final GrpcStorageDefaults INSTANCE = new GrpcStorageOptions.GrpcStorageDefaults(); + static final StorageFactory STORAGE_FACTORY = new GrpcStorageFactory(); + static final StorageRpcFactory STORAGE_RPC_FACTORY = new GrpcStorageRpcFactory(); + static final GrpcInterceptorProvider INTERCEPTOR_PROVIDER = + NoopGrpcInterceptorProvider.INSTANCE; + + private GrpcStorageDefaults() {} + + /** + * @since 2.14.0 + */ + @Override + public StorageFactory getDefaultServiceFactory() { + return STORAGE_FACTORY; + } + + /** + * @since 2.14.0 + */ + @Override + public StorageRpcFactory getDefaultRpcFactory() { + return STORAGE_RPC_FACTORY; + } + + /** + * @since 2.14.0 + */ + @Override + public GrpcTransportOptions getDefaultTransportOptions() { + return GrpcTransportOptions.newBuilder().build(); + } + + /** + * @since 2.14.0 + */ + public StorageRetryStrategy getStorageRetryStrategy() { + return StorageRetryStrategy.getDefaultStorageRetryStrategy(); + } + + /** This method is obsolete. Use {@link #getTerminationAwaitDurationJavaTime()} instead. 
*/ + @ObsoleteApi("Use getTerminationAwaitDurationJavaTime() instead") + public org.threeten.bp.Duration getTerminationAwaitDuration() { + return toThreetenDuration(getTerminationAwaitDurationJavaTime()); + } + + /** + * @since 2.14.0 + */ + public java.time.Duration getTerminationAwaitDurationJavaTime() { + return java.time.Duration.ofMinutes(1); + } + + /** + * @since 2.14.0 + */ + public boolean isAttemptDirectPath() { + return true; + } + + /** + * @since 2.41.0 + */ + public boolean isEnableGrpcClientMetrics() { + return true; + } + + /** + * @since 2.22.3 + */ + public GrpcInterceptorProvider grpcInterceptorProvider() { + return INTERCEPTOR_PROVIDER; + } + + /** + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + */ + public BlobWriteSessionConfig getDefaultStorageWriterConfig() { + return BlobWriteSessionConfigs.getDefault(); + } + + /** + * @since 2.47.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public OpenTelemetry getDefaultOpenTelemetry() { + return OpenTelemetry.noop(); + } + } + + /** + * Internal implementation detail, only public to allow for {@link java.io.Serializable} + * compatibility in {@link com.google.cloud.ServiceOptions}. + * + *

To access an instance of this class instead use {@link + * GrpcStorageOptions.GrpcStorageDefaults#getDefaultServiceFactory() + * GrpcStorageOptions.defaults().getDefaultServiceFactory()}. + * + * @see GrpcStorageOptions#defaults() + * @see GrpcStorageOptions.GrpcStorageDefaults#getDefaultServiceFactory() + * @since 2.14.0 + */ + @InternalApi + public static class GrpcStorageFactory implements StorageFactory { + private static final Logger LOGGER = Logger.getLogger(GrpcStorageFactory.class.getName()); + + /** + * Internal implementation detail, only public to allow for {@link java.io.Serializable} + * compatibility in {@link com.google.cloud.ServiceOptions}. + * + *

To access an instance of this class instead use {@link + * GrpcStorageOptions.GrpcStorageDefaults#getDefaultServiceFactory() + * GrpcStorageOptions.defaults().getDefaultServiceFactory()}. + * + * @see GrpcStorageOptions#defaults() + * @see GrpcStorageOptions.GrpcStorageDefaults#getDefaultServiceFactory() + * @deprecated instead use {@link + * GrpcStorageOptions.GrpcStorageDefaults#getDefaultServiceFactory() + * GrpcStorageOptions.defaults().getDefaultServiceFactory()} + * @since 2.14.0 + */ + // this class needs to be public due to ServiceOptions forName'ing it in it's readObject method + @InternalApi + @Deprecated + @SuppressWarnings("DeprecatedIsStillUsed") + public GrpcStorageFactory() {} + + @Override + public Storage create(StorageOptions options) { + if (options instanceof GrpcStorageOptions) { + GrpcStorageOptions grpcStorageOptions = (GrpcStorageOptions) options; + try { + Tuple> t = grpcStorageOptions.resolveSettingsAndOpts(); + StorageSettings storageSettings = t.x(); + Opts defaultOpts = t.y(); + + ScheduledExecutorService executor = + storageSettings.getBackgroundExecutorProvider().getExecutor(); + RetryContextProvider retryContextProvider = + RetryContext.providerFrom( + executor, + grpcStorageOptions, + new ReadObjectRangeResultRetryAlgorithmDecorator( + grpcStorageOptions.getRetryAlgorithmManager().idempotent())); + + OpenTelemetry otel = options.getOpenTelemetry(); + DefaultRetrier retrier = + new DefaultRetrier( + OtelStorageDecorator.retryContextDecorator(otel), grpcStorageOptions); + if (ZeroCopyReadinessChecker.isReady()) { + LOGGER.config("zero-copy protobuf deserialization available, using it"); + StorageStubSettings baseSettings = + (StorageStubSettings) storageSettings.getStubSettings(); + InternalStorageStubSettings.Builder internalStorageStubSettingsBuilder = + new InternalStorageStubSettings.Builder(baseSettings); + InternalStorageSettings.Builder settingsBuilder = + new InternalStorageSettings.Builder(internalStorageStubSettingsBuilder); 
+ InternalStorageSettings internalStorageSettingsBuilder = + new InternalStorageSettings(settingsBuilder); + InternalStorageClient client = + new InternalStorageClient(internalStorageSettingsBuilder); + InternalZeroCopyGrpcStorageStub stub = client.getStub(); + StorageDataClient dataClient = + StorageDataClient.create( + executor, + grpcStorageOptions.terminationAwaitDuration, + new ZeroCopyBidiStreamingCallable<>( + stub.bidiReadObjectCallable(), stub.bidiReadObjectResponseMarshaller), + retryContextProvider, + IOAutoCloseable.noOp()); + GrpcStorageImpl grpcStorage = + new GrpcStorageImpl( + grpcStorageOptions, + client, + dataClient, + stub.readObjectResponseMarshaller, + grpcStorageOptions.blobWriteSessionConfig.createFactory(Clock.systemUTC()), + retrier, + defaultOpts); + return OtelStorageDecorator.decorate(grpcStorage, otel, Transport.GRPC); + } else { + LOGGER.config( + "zero-copy protobuf deserialization unavailable, proceeding with default"); + StorageClient client = StorageClient.create(storageSettings); + StorageDataClient dataClient = + StorageDataClient.create( + executor, + grpcStorageOptions.terminationAwaitDuration, + new ZeroCopyBidiStreamingCallable<>( + client.bidiReadObjectCallable(), + ResponseContentLifecycleManager.noopBidiReadObjectResponse()), + retryContextProvider, + IOAutoCloseable.noOp()); + GrpcStorageImpl grpcStorage = + new GrpcStorageImpl( + grpcStorageOptions, + client, + dataClient, + ResponseContentLifecycleManager.noop(), + grpcStorageOptions.blobWriteSessionConfig.createFactory(Clock.systemUTC()), + retrier, + defaultOpts); + return OtelStorageDecorator.decorate(grpcStorage, otel, Transport.GRPC); + } + } catch (IOException e) { + throw new IllegalStateException( + "Unable to instantiate gRPC com.google.cloud.storage.Storage client.", e); + } + } else { + throw new IllegalArgumentException("Only GrpcStorageOptions supported"); + } + } + } + + /** + * Internal implementation detail, only public to allow for {@link 
java.io.Serializable} + * compatibility in {@link com.google.cloud.ServiceOptions}. + * + *

To access an instance of this class instead use {@link + * GrpcStorageOptions.GrpcStorageDefaults#getDefaultRpcFactory() + * GrpcStorageOptions.defaults().getDefaultRpcFactory()}. + * + * @see GrpcStorageOptions#defaults() + * @see GrpcStorageOptions.GrpcStorageDefaults#getDefaultRpcFactory() + * @since 2.14.0 + */ + @InternalApi + @Deprecated + public static class GrpcStorageRpcFactory implements StorageRpcFactory { + + /** + * Internal implementation detail, only public to allow for {@link java.io.Serializable} + * compatibility in {@link com.google.cloud.ServiceOptions}. + * + *

To access an instance of this class instead use {@link + * GrpcStorageOptions.GrpcStorageDefaults#getDefaultRpcFactory() + * GrpcStorageOptions.defaults().getDefaultRpcFactory()}. + * + * @see GrpcStorageOptions#defaults() + * @see GrpcStorageOptions.GrpcStorageDefaults#getDefaultRpcFactory() + * @deprecated instead use {@link GrpcStorageOptions.GrpcStorageDefaults#getDefaultRpcFactory() + * GrpcStorageOptions.defaults().getDefaultRpcFactory()} + * @since 2.14.0 + */ + // this class needs to be public due to ServiceOptions forName'ing it in it's readObject method + @InternalApi + @Deprecated + @SuppressWarnings("DeprecatedIsStillUsed") + public GrpcStorageRpcFactory() {} + + @Override + public ServiceRpc create(StorageOptions options) { + throw new IllegalStateException("No supported for grpc"); + } + } + + // setInternalHeaderProvider is protected so we need to open its scope in order to set it + // we are adding an entry for gccl which is set via this provider + private static final class GapicStorageSettingsBuilder extends StorageSettings.Builder { + private GapicStorageSettingsBuilder(StorageSettings settings) { + super(settings); + } + + @Override + protected StorageSettings.Builder setInternalHeaderProvider( + HeaderProvider internalHeaderProvider) { + return super.setInternalHeaderProvider(internalHeaderProvider); + } + } + + private static final class NoopGrpcInterceptorProvider + implements GrpcInterceptorProvider, Serializable { + private static long serialVersionUID = -8523033236999805349L; + private static final NoopGrpcInterceptorProvider INSTANCE = new NoopGrpcInterceptorProvider(); + + @Override + public List getInterceptors() { + return ImmutableList.of(); + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + } + + private static final class InternalStorageClient extends StorageClient { + + private InternalStorageClient(StorageSettings settings) throws IOException { + 
super(settings); + } + + @Override + public void shutdownNow() { + try { + // GrpcStorageStub#close() is final and we can't override it + // instead hook in here to close out the zero-copy marshaller + //noinspection EmptyTryBlock + try (ZeroCopyResponseMarshaller ignore1 = + getStub().readObjectResponseMarshaller; + ZeroCopyResponseMarshaller ignore2 = + getStub().bidiReadObjectResponseMarshaller) { + // use try-with to do the close dance for us + } + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + super.shutdownNow(); + } + } + + @Override + public InternalZeroCopyGrpcStorageStub getStub() { + return (InternalZeroCopyGrpcStorageStub) super.getStub(); + } + } + + private static final class InternalStorageSettings extends StorageSettings { + + private InternalStorageSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + private static final class Builder extends StorageSettings.Builder { + private Builder(StorageStubSettings.Builder stubSettings) { + super(stubSettings); + } + + @Override + public InternalStorageSettings build() throws IOException { + return new InternalStorageSettings(this); + } + } + } + + private static final class InternalStorageStubSettings extends StorageStubSettings { + + private InternalStorageStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + @Override + public StorageStub createStub() throws IOException { + if (!getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + ClientContext clientContext = ClientContext.create(this); + GrpcStorageCallableFactory grpcStorageCallableFactory = new GrpcStorageCallableFactory(); + InternalZeroCopyGrpcStorageStub stub = + new InternalZeroCopyGrpcStorageStub(this, clientContext, 
grpcStorageCallableFactory); + return stub; + } + + private static final class Builder extends StorageStubSettings.Builder { + + private Builder(StorageStubSettings settings) { + super(settings); + } + + @Override + public InternalStorageStubSettings build() throws IOException { + return new InternalStorageStubSettings(this); + } + } + } + + // DanglingJavadocs are for breadcrumbs to source of copied generated code + @SuppressWarnings("DanglingJavadoc") + private static final class InternalZeroCopyGrpcStorageStub extends GrpcStorageStub + implements AutoCloseable { + + private static final RequestParamsExtractor + EMPTY_REQUEST_PARAMS_EXTRACTOR = request -> ImmutableMap.of(); + + /** + * @see GrpcStorageStub#READ_OBJECT_0_PATH_TEMPLATE + */ + private static final PathTemplate READ_OBJECT_0_PATH_TEMPLATE = + PathTemplate.create("{bucket=**}"); + + private final ZeroCopyResponseMarshaller readObjectResponseMarshaller; + private final ZeroCopyResponseMarshaller + bidiReadObjectResponseMarshaller; + + private final ServerStreamingCallable readObjectCallable; + private final BidiStreamingCallable + bidiReadObjectCallable; + + private InternalZeroCopyGrpcStorageStub( + StorageStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + super(settings, clientContext, callableFactory); + + this.readObjectResponseMarshaller = + new ZeroCopyResponseMarshaller<>(ReadObjectResponse.getDefaultInstance()); + + this.bidiReadObjectResponseMarshaller = + new ZeroCopyResponseMarshaller<>(BidiReadObjectResponse.getDefaultInstance()); + + /** + * @see GrpcStorageStub#readObjectMethodDescriptor + */ + MethodDescriptor readObjectMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName("google.storage.v2.Storage/ReadObject") + .setRequestMarshaller(ProtoUtils.marshaller(ReadObjectRequest.getDefaultInstance())) + 
.setResponseMarshaller(readObjectResponseMarshaller) + .build(); + /** + * @see GrpcStorageStub#bidiReadObjectMethodDescriptor + */ + MethodDescriptor + bidiReadObjectMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName("google.storage.v2.Storage/BidiReadObject") + .setRequestMarshaller( + ProtoUtils.marshaller(BidiReadObjectRequest.getDefaultInstance())) + .setResponseMarshaller(bidiReadObjectResponseMarshaller) + .build(); + + GrpcCallSettings readObjectTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(readObjectMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + // todo: this is fragile to proto annotation changes + builder.add(request.getBucket(), "bucket", READ_OBJECT_0_PATH_TEMPLATE); + return builder.build(); + }) + .build(); + + GrpcCallSettings + bidiReadObjectTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(bidiReadObjectMethodDescriptor) + .setParamsExtractor(EMPTY_REQUEST_PARAMS_EXTRACTOR) + .build(); + + this.readObjectCallable = + callableFactory.createServerStreamingCallable( + readObjectTransportSettings, settings.readObjectSettings(), clientContext); + this.bidiReadObjectCallable = + callableFactory.createBidiStreamingCallable( + bidiReadObjectTransportSettings, settings.bidiReadObjectSettings(), clientContext); + } + + @Override + public ServerStreamingCallable readObjectCallable() { + return readObjectCallable; + } + + @Override + public BidiStreamingCallable + bidiReadObjectCallable() { + return bidiReadObjectCallable; + } + } + + @VisibleForTesting + static class ZeroCopyResponseMarshaller + implements MethodDescriptor.PrototypeMarshaller, + ResponseContentLifecycleManager, + Closeable { + private final Map unclosedStreams; + private final Parser parser; + private final MethodDescriptor.PrototypeMarshaller baseMarshaller; + + 
ZeroCopyResponseMarshaller(Response defaultInstance) { + parser = (Parser) defaultInstance.getParserForType(); + baseMarshaller = + (MethodDescriptor.PrototypeMarshaller) ProtoUtils.marshaller(defaultInstance); + unclosedStreams = Collections.synchronizedMap(new IdentityHashMap<>()); + } + + @Override + public Class getMessageClass() { + return baseMarshaller.getMessageClass(); + } + + @Override + public Response getMessagePrototype() { + return baseMarshaller.getMessagePrototype(); + } + + @Override + public InputStream stream(Response value) { + return baseMarshaller.stream(value); + } + + @Override + public Response parse(InputStream stream) { + CodedInputStream cis = null; + try { + if (stream instanceof KnownLength + && stream instanceof Detachable + && stream instanceof HasByteBuffer + && ((HasByteBuffer) stream).byteBufferSupported()) { + int size = stream.available(); + // Stream is now detached here and should be closed later. + stream = ((Detachable) stream).detach(); + // This mark call is to keep buffer while traversing buffers using skip. 
+ stream.mark(size); + List byteStrings = new ArrayList<>(); + while (stream.available() != 0) { + ByteBuffer buffer = ((HasByteBuffer) stream).getByteBuffer(); + byteStrings.add(UnsafeByteOperations.unsafeWrap(buffer)); + stream.skip(buffer.remaining()); + } + stream.reset(); + cis = ByteString.copyFrom(byteStrings).newCodedInput(); + cis.enableAliasing(true); + cis.setSizeLimit(Integer.MAX_VALUE); + } + } catch (IOException e) { + throw createStatusRuntimeException(e); + } + if (cis != null) { + // fast path (no memory copy) + Response message; + try { + message = parseFrom(cis); + } catch (InvalidProtocolBufferException ipbe) { + throw createStatusRuntimeException(ipbe); + } + unclosedStreams.put(message, stream); + return message; + } else { + // slow path + return baseMarshaller.parse(stream); + } + } + + private StatusRuntimeException createStatusRuntimeException(IOException e) { + String description = ""; + Response messagePrototype = baseMarshaller.getMessagePrototype(); + if (messagePrototype != null) { + description = "for " + messagePrototype.getClass().getSimpleName(); + } + return Status.INTERNAL + .withDescription("Error parsing input stream" + description) + .withCause(e) + .asRuntimeException(); + } + + private Response parseFrom(CodedInputStream stream) throws InvalidProtocolBufferException { + Response message = parser.parseFrom(stream); + try { + stream.checkLastTagWas(0); + return message; + } catch (InvalidProtocolBufferException e) { + e.setUnfinishedMessage(message); + throw e; + } + } + + @Override + public ResponseContentLifecycleHandle get(Response response) { + return ResponseContentLifecycleHandle.create( + response, + () -> { + InputStream stream = unclosedStreams.remove(response); + if (stream != null) { + stream.close(); + } + }); + } + + @Override + public void close() throws IOException { + GrpcUtils.closeAll(unclosedStreams.values()); + } + } + + static final class ZeroCopyReadinessChecker { + private static final boolean 
isZeroCopyReady; + + static { + // Check whether io.grpc.Detachable exists? + boolean detachableClassExists = false; + try { + // Try to load Detachable interface in the package where KnownLength is in. + // This can be done directly by looking up io.grpc.Detachable but rather + // done indirectly to handle the case where gRPC is being shaded in a + // different package. + String knownLengthClassName = KnownLength.class.getName(); + String detachableClassName = + knownLengthClassName.substring(0, knownLengthClassName.lastIndexOf('.') + 1) + + "Detachable"; + Class detachableClass = Class.forName(detachableClassName); + detachableClassExists = (detachableClass != null); + } catch (ClassNotFoundException ex) { + // leaves detachableClassExists false + } + // Check whether com.google.protobuf.UnsafeByteOperations exists? + boolean unsafeByteOperationsClassExists = false; + try { + // Same above + String messageLiteClassName = MessageLite.class.getName(); + String unsafeByteOperationsClassName = + messageLiteClassName.substring(0, messageLiteClassName.lastIndexOf('.') + 1) + + "UnsafeByteOperations"; + Class unsafeByteOperationsClass = Class.forName(unsafeByteOperationsClassName); + unsafeByteOperationsClassExists = (unsafeByteOperationsClass != null); + } catch (ClassNotFoundException ex) { + // leaves unsafeByteOperationsClassExists false + } + isZeroCopyReady = detachableClassExists && unsafeByteOperationsClassExists; + } + + public static boolean isReady() { + return isZeroCopyReady; + } + } + + private static class ReadObjectRangeResultRetryAlgorithmDecorator + extends BasicResultRetryAlgorithm { + + private final ResultRetryAlgorithm delegate; + + private ReadObjectRangeResultRetryAlgorithmDecorator(ResultRetryAlgorithm delegate) { + this.delegate = delegate; + } + + @Override + public boolean shouldRetry(Throwable t, Object previousResponse) { + // this is only retryable with read object range, not other requests + return t instanceof 
UncheckedChecksumMismatchException + || (t instanceof OutOfRangeException && ((OutOfRangeException) t).isRetryable()) + || (t instanceof AbortedException && ((AbortedException) t).isRetryable()) + || delegate.shouldRetry(StorageException.coalesce(t), null); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcToHttpStatusCodeTranslation.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcToHttpStatusCodeTranslation.java new file mode 100644 index 000000000000..feda6e70f9f9 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcToHttpStatusCodeTranslation.java @@ -0,0 +1,156 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.StatusCode; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import io.grpc.Status.Code; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * A set of compatibility tools to map between grpc and HTTP status codes while trying to keep our + * retry behavior in consideration. 
+ */ +final class GrpcToHttpStatusCodeTranslation { + + /** + * Mappings between gRPC status codes and their corresponding HTTP status codes, which match our + * existing definitions and retry bucketing. + * + *
+   * <ul>
+   *   <li>{@code UNAVAILABLE} covers connection reset
+   *       <pre>
+   *   io.grpc.netty.shaded.io.netty.channel.unix.Errors$NativeIoException: readAddress(..) failed: Connection reset by peer
+   *       </pre>
+   * </ul>
+ */ + private static final ImmutableList STATUS_CODE_MAPPINGS = + ImmutableList.of( + StatusCodeMapping.of(200, Code.OK), + StatusCodeMapping.of(400, Code.DATA_LOSS), + StatusCodeMapping.of(400, Code.INVALID_ARGUMENT), + StatusCodeMapping.of(400, Code.OUT_OF_RANGE), + StatusCodeMapping.of(401, Code.UNAUTHENTICATED), + StatusCodeMapping.of(403, Code.PERMISSION_DENIED), + StatusCodeMapping.of(404, Code.NOT_FOUND), + StatusCodeMapping.of(409, Code.ALREADY_EXISTS), + StatusCodeMapping.of(412, Code.FAILED_PRECONDITION), + StatusCodeMapping.of(429, Code.RESOURCE_EXHAUSTED), + StatusCodeMapping.of(500, Code.INTERNAL), + StatusCodeMapping.of(501, Code.UNIMPLEMENTED), + StatusCodeMapping.of(503, Code.UNAVAILABLE), + StatusCodeMapping.of(504, Code.DEADLINE_EXCEEDED), + StatusCodeMapping.of(409, Code.ABORTED), + StatusCodeMapping.of(0, Code.CANCELLED), + StatusCodeMapping.of(0, Code.UNKNOWN)); + + /** Index our {@link StatusCodeMapping} for constant time lookup by {@link Code} */ + private static final Map GRPC_CODE_INDEX = + STATUS_CODE_MAPPINGS.stream() + .collect( + ImmutableMap.toImmutableMap(StatusCodeMapping::getGrpcCode, Function.identity())); + + /** + * For use in {@link #resultRetryAlgorithmToCodes(ResultRetryAlgorithm)}. Resolve all codes and + * construct corresponding ApiExceptions. + * + *

Constructing the exceptions will walk the stack for each one. In order to avoid the stack + * walking overhead for every Code for every invocation, construct the set of exceptions only once + * and keep in this value. + */ + private static final Set CODE_API_EXCEPTIONS = + STATUS_CODE_MAPPINGS.stream() + .map(StatusCodeMapping::getGrpcStatusCode) + .map(c -> ApiExceptionFactory.createException(null, c, false)) + .map(StorageException::asStorageException) + .collect(Collectors.toSet()); + + /** + * When translating from gRPC Status Codes to the HTTP codes all of our middle ware expects, we + * must take care to translate in accordance with the expected retry semantics already outlined + * and validated for the JSON implementation. This is why we do not simply use {@link + * GrpcStatusCode#of(Code)}{@link GrpcStatusCode#getCode() .getCode}{@link + * StatusCode.Code#getHttpStatusCode() .getHttpStatusCode()} as it sometimes returns conflicting + * HTTP codes for our retry handling. + */ + static int grpcCodeToHttpStatusCode(Code code) { + StatusCodeMapping found = GRPC_CODE_INDEX.get(code); + // theoretically it's possible for gRPC to add a new code we haven't mapped here, if this + // happens fall through to our default of 0 + if (found != null) { + return found.getHttpStatus(); + } else { + return 0; + } + } + + /** + * When using the retry features of the Gapic client, we are only allowed to provide a {@link + * Set}{@code <}{@link StatusCode.Code}{@code >}. Given {@link StatusCode.Code} is an enum, we can + * resolve the set of values from a given {@link ResultRetryAlgorithm} by evaluating each one as + * an {@link ApiException}. 
+ */ + static Set resultRetryAlgorithmToCodes(ResultRetryAlgorithm alg) { + return CODE_API_EXCEPTIONS.stream() + .filter(e -> alg.shouldRetry(e, null)) + .map(e -> e.apiExceptionCause.getStatusCode().getCode()) + .collect(Collectors.toSet()); + } + + /** + * Simple tuple class to bind together our corresponding http status code and {@link Code} while + * providing easy access to the correct {@link GrpcStatusCode} where necessary. + */ + private static final class StatusCodeMapping { + + private final int httpStatus; + + private final Code grpcCode; + private final GrpcStatusCode grpcStatusCode; + + private StatusCodeMapping(int httpStatus, Code grpcCode, GrpcStatusCode grpcStatusCode) { + this.httpStatus = httpStatus; + this.grpcCode = grpcCode; + this.grpcStatusCode = grpcStatusCode; + } + + public int getHttpStatus() { + return httpStatus; + } + + public Code getGrpcCode() { + return grpcCode; + } + + public GrpcStatusCode getGrpcStatusCode() { + return grpcStatusCode; + } + + static StatusCodeMapping of(int httpStatus, Code grpcCode) { + return new StatusCodeMapping(httpStatus, grpcCode, GrpcStatusCode.of(grpcCode)); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcUtils.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcUtils.java new file mode 100644 index 000000000000..cbf5d3172a24 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcUtils.java @@ -0,0 +1,352 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.ApiStreamObserver; +import com.google.api.gax.rpc.BidiStream; +import com.google.api.gax.rpc.BidiStreamObserver; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientStream; +import com.google.api.gax.rpc.ClientStreamReadyObserver; +import com.google.api.gax.rpc.ErrorDetails; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.ServerStream; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StateCheckingResponseObserver; +import com.google.api.gax.rpc.StreamController; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.Message; +import com.google.rpc.Status; +import com.google.storage.v2.BidiReadObjectError; +import com.google.storage.v2.BidiReadObjectRedirectedError; +import com.google.storage.v2.BidiWriteObjectRedirectedError; +import io.grpc.StatusRuntimeException; +import java.io.Closeable; +import java.io.IOException; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import org.checkerframework.checker.nullness.qual.Nullable; + 
+final class GrpcUtils { + + private GrpcUtils() {} + + static GrpcCallContext contextWithBucketName(String bucketName, GrpcCallContext baseContext) { + if (bucketName != null && !bucketName.isEmpty()) { + return baseContext.withExtraHeaders( + ImmutableMap.of( + "x-goog-request-params", + ImmutableList.of(String.format(Locale.US, "bucket=%s", bucketName)))); + } + return baseContext; + } + + /** + * In the event closing the streams results in multiple streams throwing IOExceptions, collect + * them all as suppressed exceptions on the first occurrence. + */ + static void closeAll(Collection closeables) throws IOException { + if (closeables.isEmpty()) { + return; + } + IOException ioException = + closeables.stream() + .filter(Objects::nonNull) + .map( + stream -> { + try { + stream.close(); + return null; + } catch (IOException e) { + return e; + } + }) + .filter(Objects::nonNull) + .reduce( + null, + (l, r) -> { + if (l != null) { + l.addSuppressed(r); + return l; + } else { + return r; + } + }, + (l, r) -> l); + + if (ioException != null) { + throw ioException; + } + } + + /** + * Returns the first occurrence of a {@link BidiReadObjectRedirectedError} if the throwable is an + * {@link ApiException} with {@link ErrorDetails} that contain an entry that is a {@link + * BidiReadObjectRedirectedError} (evaluated from index 0 to length). {@code null} otherwise. + */ + @Nullable + static BidiReadObjectRedirectedError getBidiReadObjectRedirectedError(Throwable t) { + return findFirstPackedAny(t, BidiReadObjectRedirectedError.class); + } + + /** + * Returns the first occurrence of a {@link BidiWriteObjectRedirectedError} if the throwable is an + * {@link ApiException} with {@link ErrorDetails} that contain an entry that is a {@link + * BidiWriteObjectRedirectedError} (evaluated from index 0 to length). {@code null} otherwise. 
+ */ + @Nullable + static BidiWriteObjectRedirectedError getBidiWriteObjectRedirectedError(Throwable t) { + return findFirstPackedAny(t, BidiWriteObjectRedirectedError.class); + } + + /** + * Returns the first occurrence of a {@link BidiReadObjectError} if the throwable is an {@link + * ApiException} with {@link ErrorDetails} that contain an entry that is a {@link + * BidiReadObjectError} (evaluated from index 0 to length). {@code null} otherwise. + */ + @Nullable + static BidiReadObjectError getBidiReadObjectError(Throwable t) { + return findFirstPackedAny(t, BidiReadObjectError.class); + } + + static ApiException statusToApiException(Status status) { + io.grpc.Status grpcStatus = io.grpc.Status.fromCodeValue(status.getCode()); + if (!status.getMessage().isEmpty()) { + grpcStatus = grpcStatus.withDescription(status.getMessage()); + } + StatusRuntimeException cause = grpcStatus.asRuntimeException(); + return ApiExceptionFactory.createException( + cause, GrpcStatusCode.of(grpcStatus.getCode()), false); + } + + @Nullable + private static M findFirstPackedAny(Throwable t, Class clazz) { + if (t instanceof ApiException) { + ApiException apiException = (ApiException) t; + ErrorDetails errorDetails = apiException.getErrorDetails(); + if (errorDetails != null) { + return errorDetails.getMessage(clazz); + } + } + return null; + } + + static StateCheckingResponseObserver decorateAsStateChecking( + ResponseObserver delegate) { + return new DecoratingStateCheckingResponseObserver<>(delegate); + } + + private static final class DecoratingStateCheckingResponseObserver + extends StateCheckingResponseObserver { + private final ResponseObserver delegate; + + private DecoratingStateCheckingResponseObserver(ResponseObserver delegate) { + this.delegate = delegate; + } + + @Override + protected void onStartImpl(StreamController controller) { + delegate.onStart(controller); + } + + @Override + protected void onResponseImpl(Response response) { + delegate.onResponse(response); + } + + 
@Override + protected void onErrorImpl(Throwable t) { + delegate.onError(t); + } + + @Override + protected void onCompleteImpl() { + delegate.onComplete(); + } + } + + @SuppressWarnings("deprecation") + static final class ZeroCopyBidiStreamingCallable + extends BidiStreamingCallable { + private final BidiStreamingCallable delegate; + private final ResponseContentLifecycleManager responseContentLifecycleManager; + + ZeroCopyBidiStreamingCallable( + BidiStreamingCallable delegate, + ResponseContentLifecycleManager responseContentLifecycleManager) { + this.delegate = delegate; + this.responseContentLifecycleManager = responseContentLifecycleManager; + } + + @Override + public ClientStream internalCall( + ResponseObserver responseObserver, + ClientStreamReadyObserver onReady, + ApiCallContext context) { + return delegate.internalCall(responseObserver, onReady, context); + } + + @Override + public void call(BidiStreamObserver bidiObserver) { + delegate.call(bidiObserver); + } + + @Override + public void call(BidiStreamObserver bidiObserver, ApiCallContext context) { + delegate.call(bidiObserver, context); + } + + @Override + public BidiStream call() { + return delegate.call(); + } + + @Override + public BidiStream call(ApiCallContext context) { + return delegate.call(context); + } + + @Override + public ClientStream splitCall(ResponseObserver responseObserver) { + return delegate.splitCall(responseObserver); + } + + @Override + public ClientStream splitCall( + ResponseObserver responseObserver, ApiCallContext context) { + return delegate.splitCall(responseObserver, context); + } + + @Override + @Deprecated + public ApiStreamObserver bidiStreamingCall( + ApiStreamObserver responseObserver, ApiCallContext context) { + return delegate.bidiStreamingCall(responseObserver, context); + } + + @Override + @Deprecated + public ApiStreamObserver bidiStreamingCall( + ApiStreamObserver responseObserver) { + return delegate.bidiStreamingCall(responseObserver); + } + + @Override + 
public ZeroCopyBidiStreamingCallable withDefaultCallContext( + ApiCallContext defaultCallContext) { + return new ZeroCopyBidiStreamingCallable<>( + delegate.withDefaultCallContext(defaultCallContext), responseContentLifecycleManager); + } + + ResponseContentLifecycleManager getResponseContentLifecycleManager() { + return responseContentLifecycleManager; + } + } + + @SuppressWarnings("deprecation") + static final class ZeroCopyServerStreamingCallable + extends ServerStreamingCallable { + private final ServerStreamingCallable delegate; + private final ResponseContentLifecycleManager responseContentLifecycleManager; + + ZeroCopyServerStreamingCallable( + ServerStreamingCallable delegate, + ResponseContentLifecycleManager responseContentLifecycleManager) { + this.delegate = delegate; + this.responseContentLifecycleManager = responseContentLifecycleManager; + } + + @Override + public ServerStream call(RequestT request) { + return delegate.call(request); + } + + @Override + public ServerStream call(RequestT request, ApiCallContext context) { + return delegate.call(request, context); + } + + @Override + public void call( + RequestT request, ResponseObserver responseObserver, ApiCallContext context) { + delegate.call(request, responseObserver, context); + } + + @Override + public void call(RequestT request, ResponseObserver responseObserver) { + delegate.call(request, responseObserver); + } + + @Override + public UnaryCallable first() { + return delegate.first(); + } + + @Override + public UnaryCallable> all() { + return delegate.all(); + } + + @Override + @Deprecated + public void serverStreamingCall( + RequestT request, ApiStreamObserver responseObserver, ApiCallContext context) { + delegate.serverStreamingCall(request, responseObserver, context); + } + + @Override + @Deprecated + public void serverStreamingCall( + RequestT request, ApiStreamObserver responseObserver) { + delegate.serverStreamingCall(request, responseObserver); + } + + @Override + @Deprecated + public 
Iterator blockingServerStreamingCall( + RequestT request, ApiCallContext context) { + return delegate.blockingServerStreamingCall(request, context); + } + + @Override + @Deprecated + public Iterator blockingServerStreamingCall(RequestT request) { + return delegate.blockingServerStreamingCall(request); + } + + @Override + public ZeroCopyServerStreamingCallable withDefaultCallContext( + ApiCallContext defaultCallContext) { + return new ZeroCopyServerStreamingCallable<>( + delegate.withDefaultCallContext(defaultCallContext), responseContentLifecycleManager); + } + + ResponseContentLifecycleManager getResponseContentLifecycleManager() { + return responseContentLifecycleManager; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GzipReadableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GzipReadableByteChannel.java new file mode 100644 index 000000000000..0215821a57c4 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/GzipReadableByteChannel.java @@ -0,0 +1,176 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import java.io.ByteArrayInputStream; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.SequenceInputStream; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.ScatteringByteChannel; +import java.util.concurrent.ExecutionException; +import java.util.zip.GZIPInputStream; + +final class GzipReadableByteChannel implements UnbufferedReadableByteChannel { + private final UnbufferedReadableByteChannel source; + private final ApiFuture contentEncoding; + + private boolean retEOF = false; + private ScatteringByteChannel delegate; + private ByteBuffer leftovers; + + GzipReadableByteChannel(UnbufferedReadableByteChannel source, ApiFuture contentEncoding) { + this.source = source; + this.contentEncoding = contentEncoding; + } + + @Override + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + if (retEOF) { + return -1; + } + long bytesRead = 0; + // if our delegate is null, that means this is the first read attempt + if (delegate == null) { + // try to determine if the underlying data coming out of `source` is gzip + byte[] firstByte = new byte[1]; + ByteBuffer wrap = ByteBuffer.wrap(firstByte); + // Step 1: initiate a read of the first byte of the object + // this will have minimal overhead as the messages coming from gcs are inherently windowed + // if the object size is between 2 and 2MiB the remaining bytes will be held in the channel + // for later read. 
+ source.read(wrap); + try { + // Step 2: wait for the object metadata, this is populated in the first message from GCS + String contentEncoding = this.contentEncoding.get(); + // if the Content-Encoding is gzip, Step 3: wire gzip decompression into the byte path + // this will have a copy impact as we are no longer controlling all the buffers + if ("gzip".equals(contentEncoding) || "x-gzip".equals(contentEncoding)) { + // to wire gzip decompression into the byte path: + // Create an input stream of the firstByte bytes we already read + ByteArrayInputStream firstByteAgain = new ByteArrayInputStream(firstByte); + // Create an InputStream facade of source + InputStream sourceInputStream = Channels.newInputStream(source); + // create a new InputStream with the firstByte bytes prepended to source + SequenceInputStream first4AndSource = + new SequenceInputStream(firstByteAgain, sourceInputStream); + // add gzip decompression + GZIPInputStream decompress = + new GZIPInputStream(new OptimisticAvailabilityInputStream(first4AndSource)); + // create a channel from our GZIPInputStream + ReadableByteChannel decompressedChannel = Channels.newChannel(decompress); + // turn our ReadableByteChannel into a ScatteringByteChannel + delegate = StorageByteChannels.readable().asScatteringByteChannel(decompressedChannel); + } else { + // if content encoding isn't gzip, copy the bytes we read into the dsts and set delegate + // to source + wrap.flip(); + bytesRead += Buffers.copy(wrap, dsts, offset, length); + if (wrap.hasRemaining()) { + leftovers = wrap; + } + delegate = source; + } + } catch (InterruptedException | ExecutionException e) { + throw new IOException(e); + } + } else if (leftovers != null && leftovers.hasRemaining()) { + bytesRead += Buffers.copy(leftovers, dsts, offset, length); + if (!leftovers.hasRemaining()) { + leftovers = null; + } + } + + // Because we're pre-reading a byte of the object in order to determine if we need to + // plumb in gzip decompress, there is the 
possibility we will reach EOF while probing. + // In order to maintain correctness of EOF propagation, determine if we will need to signal EOF + // upon the next read. + long read = delegate.read(dsts, offset, length); + if (read == -1 && bytesRead == 0) { + return -1; + } else if (read == -1) { + retEOF = true; + } else { + bytesRead += read; + } + + return bytesRead; + } + + @Override + public boolean isOpen() { + return !retEOF && source.isOpen(); + } + + @Override + public void close() throws IOException { + // leverage try-with-resource to handle the dance closing these two resources + try (AutoCloseable ignored1 = source; + AutoCloseable ignored2 = delegate) { + delegate = null; + } catch (Exception e) { + throw new IOException("Error while attempting to close channel.", e); + } + } + + /** + * There is an edge-case in the JDK's {@link GZIPInputStream} where it will prematurely terminate + * reading from the underlying InputStream. + * + *

This class decorates an InputStream to be optimistic about the number of available bytes + * when reading data to encourage GzipInputStream to consume the entire stream. + * + *

For a more in-depth write up see google-http-java-client/pull/1608 + * + *

NOTE This class is a copy of the private class from {@code + * com.google.api.client.http.GzipSupport}. This class should not be made public, as it is not + * general purpose and so is reproduced here. + */ + private static final class OptimisticAvailabilityInputStream extends FilterInputStream { + private int lastRead = 0; + + OptimisticAvailabilityInputStream(InputStream delegate) { + super(delegate); + } + + @Override + public int available() throws IOException { + return lastRead > -1 ? Integer.MAX_VALUE : 0; + } + + @Override + public int read() throws IOException { + return lastRead = super.read(); + } + + @Override + public int read(byte[] b) throws IOException { + return lastRead = super.read(b); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return lastRead = super.read(b, off, len); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Hasher.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Hasher.java new file mode 100644 index 000000000000..c1b506de2f7e --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Hasher.java @@ -0,0 +1,260 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.DataLossException; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.common.hash.Hashing; +import com.google.protobuf.ByteString; +import io.grpc.Status.Code; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Locale; +import java.util.function.Supplier; +import java.util.logging.Logger; +import javax.annotation.ParametersAreNonnullByDefault; +import javax.annotation.concurrent.Immutable; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +@SuppressWarnings("ClassEscapesDefinedScope") +@ParametersAreNonnullByDefault +interface Hasher { + final class DefaultInstanceHolder { + private static final Logger LOGGER = Logger.getLogger(Hasher.class.getName()); + private static final String PROPERTY_NAME = "com.google.cloud.storage.Hasher.default"; + private static final String PROPERTY_VALUE = System.getProperty(PROPERTY_NAME, "enabled"); + static final Hasher DEFAULT_HASHER; + + static { + LOGGER.fine(String.format(Locale.US, "-D%s=%s", PROPERTY_NAME, PROPERTY_VALUE)); + if ("disabled".equalsIgnoreCase(PROPERTY_VALUE)) { + DEFAULT_HASHER = noop(); + } else { + try { + Class.forName("java.util.zip.CRC32C"); + } catch (ClassNotFoundException e) { + LOGGER.fine("Fast CRC32C implementation (Java 9+) is not available."); + } + DEFAULT_HASHER = enabled(); + } + } + } + + @Nullable + default Crc32cLengthKnown hash(Supplier b) { + return hash(b.get()); + } + + @Nullable Crc32cLengthKnown hash(ByteBuffer b); + + @Nullable Crc32cLengthKnown hash(ByteString byteString); + + void validate(Crc32cValue expected, Supplier b) throws ChecksumMismatchException; + + void validate(Crc32cValue expected, ByteString byteString) throws ChecksumMismatchException; + + void validateUnchecked(Crc32cValue expected, ByteString 
byteString) + throws UncheckedChecksumMismatchException; + + @Nullable > C nullSafeConcat( + @Nullable C r1, @Nullable Crc32cLengthKnown r2); + + /** + * The initial value to use for this hasher. + * + *

Not ideal, really we should always start with {@link Crc32cValue#zero()} but this saves us + * from having to plumb the initial value along with the actual hasher to the constructor of the + * WriteCtx when hashing is disabled because of user provided crc32c/md5 preconditions. + */ + @Nullable Crc32cLengthKnown initialValue(); + + static NoOpHasher noop() { + return NoOpHasher.INSTANCE; + } + + static GuavaHasher enabled() { + return GuavaHasher.INSTANCE; + } + + static Hasher defaultHasher() { + return DefaultInstanceHolder.DEFAULT_HASHER; + } + + @Immutable + class NoOpHasher implements Hasher { + private static final NoOpHasher INSTANCE = new NoOpHasher(); + + private NoOpHasher() {} + + @Override + public Crc32cLengthKnown hash(ByteBuffer b) { + return null; + } + + @Override + public @Nullable Crc32cLengthKnown hash(ByteString byteString) { + return null; + } + + @Override + public void validate(Crc32cValue expected, Supplier b) {} + + @Override + public void validate(Crc32cValue expected, ByteString b) {} + + @Override + public void validateUnchecked(Crc32cValue expected, ByteString byteString) {} + + @Override + public > @Nullable C nullSafeConcat( + @Nullable C r1, @Nullable Crc32cLengthKnown r2) { + return null; + } + + @Override + public @Nullable Crc32cLengthKnown initialValue() { + return null; + } + } + + @Immutable + class GuavaHasher implements Hasher { + private static final GuavaHasher INSTANCE = new GuavaHasher(); + + private GuavaHasher() {} + + @Override + public @NonNull Crc32cLengthKnown hash(Supplier b) { + return hash(b.get()); + } + + @Override + public @NonNull Crc32cLengthKnown hash(ByteBuffer b) { + int remaining = b.remaining(); + return Crc32cValue.of(Hashing.crc32c().hashBytes(b).asInt(), remaining); + } + + @SuppressWarnings({"UnstableApiUsage"}) + @Override + public @NonNull Crc32cLengthKnown hash(ByteString byteString) { + List buffers = byteString.asReadOnlyByteBufferList(); + com.google.common.hash.Hasher crc32c = 
Hashing.crc32c().newHasher(); + for (ByteBuffer b : buffers) { + crc32c.putBytes(b); + } + return Crc32cValue.of(crc32c.hash().asInt(), byteString.size()); + } + + @Override + public void validate(Crc32cValue expected, ByteString byteString) + throws ChecksumMismatchException { + Crc32cLengthKnown actual = hash(byteString); + if (!actual.eqValue(expected)) { + throw new ChecksumMismatchException(expected, actual); + } + } + + @Override + public void validate(Crc32cValue expected, Supplier b) + throws ChecksumMismatchException { + @NonNull Crc32cLengthKnown actual = hash(b); + if (!actual.eqValue(expected)) { + throw new ChecksumMismatchException(expected, actual); + } + } + + @Override + public void validateUnchecked(Crc32cValue expected, ByteString byteString) + throws UncheckedChecksumMismatchException { + Crc32cLengthKnown actual = hash(byteString); + if (!actual.eqValue(expected)) { + throw new UncheckedChecksumMismatchException(expected, actual); + } + } + + @SuppressWarnings("unchecked") + @Override + public > @Nullable C nullSafeConcat( + @Nullable C r1, @Nullable Crc32cLengthKnown r2) { + if (r1 == null) { + return null; + } else if (r2 == null) { + return r1; + } else { + return (C) r1.concat(r2); + } + } + + @Override + public @NonNull Crc32cLengthKnown initialValue() { + return Crc32cValue.zero(); + } + } + + final class ChecksumMismatchException extends IOException { + private final Crc32cValue expected; + private final Crc32cLengthKnown actual; + + private ChecksumMismatchException(Crc32cValue expected, Crc32cLengthKnown actual) { + super( + String.format( + Locale.US, + "Mismatch checksum value. 
Expected %s actual %s", + expected.debugString(), + actual.debugString())); + this.expected = expected; + this.actual = actual; + } + + Crc32cValue getExpected() { + return expected; + } + + Crc32cValue getActual() { + return actual; + } + } + + final class UncheckedChecksumMismatchException extends DataLossException { + private static final GrpcStatusCode STATUS_CODE = GrpcStatusCode.of(Code.DATA_LOSS); + private final Crc32cValue expected; + private final Crc32cLengthKnown actual; + + private UncheckedChecksumMismatchException(Crc32cValue expected, Crc32cLengthKnown actual) { + super( + String.format( + "Mismatch checksum value. Expected %s actual %s", + expected.debugString(), actual.debugString()), + /* cause= */ null, + STATUS_CODE, + /* retryable= */ false); + this.expected = expected; + this.actual = actual; + } + + Crc32cValue getExpected() { + return expected; + } + + Crc32cLengthKnown getActual() { + return actual; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HmacKey.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HmacKey.java new file mode 100644 index 000000000000..fe5b27b1fe80 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HmacKey.java @@ -0,0 +1,324 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.BackwardCompatibilityUtils.millisOffsetDateTimeCodec; + +import java.io.Serializable; +import java.time.OffsetDateTime; +import java.util.Objects; + +/** HMAC key for a service account. */ +public class HmacKey implements Serializable { + + private static final long serialVersionUID = 3033393659217005187L; + private final String secretKey; + private final HmacKeyMetadata metadata; + + private HmacKey(Builder builder) { + this.secretKey = builder.secretKey; + this.metadata = builder.metadata; + } + + public static Builder newBuilder(String secretKey) { + return new Builder(secretKey); + } + + /** Builder for {@code HmacKey} objects. * */ + public static class Builder { + private String secretKey; + private HmacKeyMetadata metadata; + + private Builder(String secretKey) { + this.secretKey = secretKey; + } + + public Builder setSecretKey(String secretKey) { + this.secretKey = secretKey; + return this; + } + + public Builder setMetadata(HmacKeyMetadata metadata) { + this.metadata = metadata; + return this; + } + + /** Creates an {@code HmacKey} object from this builder. * */ + public HmacKey build() { + return new HmacKey(this); + } + } + + /** Returns the secret key associated with this HMAC key. * */ + public String getSecretKey() { + return secretKey; + } + + /** Returns the metadata associated with this HMAC key. 
* */ + public HmacKeyMetadata getMetadata() { + return metadata; + } + + @Override + public int hashCode() { + return Objects.hash(secretKey, metadata); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof HmacKey)) { + return false; + } + HmacKey hmacKey = (HmacKey) o; + return Objects.equals(secretKey, hmacKey.secretKey) + && Objects.equals(metadata, hmacKey.metadata); + } + + public enum HmacKeyState { + ACTIVE("ACTIVE"), + INACTIVE("INACTIVE"), + DELETED("DELETED"); + + private final String state; + + HmacKeyState(String state) { + this.state = state; + } + } + + /** + * The metadata for a service account HMAC key. This class holds all data associated with an HMAC + * key other than the secret key. + */ + public static class HmacKeyMetadata implements Serializable { + + private static final long serialVersionUID = 9130344756739042314L; + private final String accessId; + private final String etag; + private final String id; + private final String projectId; + private final ServiceAccount serviceAccount; + private final HmacKeyState state; + private final OffsetDateTime createTime; + private final OffsetDateTime updateTime; + + private HmacKeyMetadata(Builder builder) { + this.accessId = builder.accessId; + this.etag = builder.etag; + this.id = builder.id; + this.projectId = builder.projectId; + this.serviceAccount = builder.serviceAccount; + this.state = builder.state; + this.createTime = builder.createTime; + this.updateTime = builder.updateTime; + } + + public static Builder newBuilder(ServiceAccount serviceAccount) { + return new Builder(serviceAccount); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static HmacKeyMetadata of( + ServiceAccount serviceAccount, String accessId, String projectId) { + return newBuilder(serviceAccount).setAccessId(accessId).setProjectId(projectId).build(); + } + + @Override + public int hashCode() { + return Objects.hash(accessId, 
projectId); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + final HmacKeyMetadata other = (HmacKeyMetadata) obj; + return Objects.equals(this.accessId, other.accessId) + && Objects.equals(this.etag, other.etag) + && Objects.equals(this.id, other.id) + && Objects.equals(this.projectId, other.projectId) + && Objects.equals(this.serviceAccount, other.serviceAccount) + && Objects.equals(this.state, other.state) + && Objects.equals(this.createTime, other.createTime) + && Objects.equals(this.updateTime, other.updateTime); + } + + /** + * Returns the access id for this HMAC key. This is the id needed to get or delete the key. * + */ + public String getAccessId() { + return accessId; + } + + /** + * Returns HTTP 1.1 Entity tag for this HMAC key. + * + * @see Entity Tags + */ + public String getEtag() { + return etag; + } + + /** Returns the resource name of this HMAC key. * */ + public String getId() { + return id; + } + + /** Returns the project id associated with this HMAC key. * */ + public String getProjectId() { + return projectId; + } + + /** Returns the service account associated with this HMAC key. * */ + public ServiceAccount getServiceAccount() { + return serviceAccount; + } + + /** Returns the current state of this HMAC key. * */ + public HmacKeyState getState() { + return state; + } + + /** + * Returns the creation time of this HMAC key. + * + * @deprecated Use {@link #getCreateTimeOffsetDateTime()} + */ + @Deprecated + public Long getCreateTime() { + return millisOffsetDateTimeCodec.decode(createTime); + } + + /** Returns the creation time of this HMAC key. * */ + public OffsetDateTime getCreateTimeOffsetDateTime() { + return createTime; + } + + /** + * Returns the last updated time of this HMAC key. 
+ * + * @deprecated Use {@link #getUpdateTimeOffsetDateTime()} + */ + @Deprecated + public Long getUpdateTime() { + return millisOffsetDateTimeCodec.decode(updateTime); + } + + /** Returns the last updated time of this HMAC key. * */ + public OffsetDateTime getUpdateTimeOffsetDateTime() { + return updateTime; + } + + /** Builder for {@code HmacKeyMetadata} objects. * */ + public static class Builder { + private String accessId; + private String etag; + private String id; + private String projectId; + private ServiceAccount serviceAccount; + private HmacKeyState state; + private OffsetDateTime createTime; + private OffsetDateTime updateTime; + + private Builder(ServiceAccount serviceAccount) { + this.serviceAccount = serviceAccount; + } + + private Builder(HmacKeyMetadata metadata) { + this.accessId = metadata.accessId; + this.etag = metadata.etag; + this.id = metadata.id; + this.projectId = metadata.projectId; + this.serviceAccount = metadata.serviceAccount; + this.state = metadata.state; + this.createTime = metadata.createTime; + this.updateTime = metadata.updateTime; + } + + public Builder setAccessId(String accessId) { + this.accessId = accessId; + return this; + } + + public Builder setEtag(String etag) { + this.etag = etag; + return this; + } + + public Builder setId(String id) { + this.id = id; + return this; + } + + public Builder setServiceAccount(ServiceAccount serviceAccount) { + this.serviceAccount = serviceAccount; + return this; + } + + public Builder setState(HmacKeyState state) { + this.state = state; + return this; + } + + /** + * @deprecated Use {@link #setCreateTimeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + public Builder setCreateTime(long createTime) { + return setCreateTimeOffsetDateTime(millisOffsetDateTimeCodec.encode(createTime)); + } + + public Builder setCreateTimeOffsetDateTime(OffsetDateTime createTime) { + this.createTime = createTime; + return this; + } + + public Builder setProjectId(String projectId) { + this.projectId = 
projectId; + return this; + } + + /** Creates an {@code HmacKeyMetadata} object from this builder. * */ + public HmacKeyMetadata build() { + return new HmacKeyMetadata(this); + } + + /** + * @deprecated Use {@link #setUpdateTimeOffsetDateTime(OffsetDateTime)} + */ + @Deprecated + public Builder setUpdateTime(long updateTime) { + return setUpdateTimeOffsetDateTime(millisOffsetDateTimeCodec.encode(updateTime)); + } + + public Builder setUpdateTimeOffsetDateTime(OffsetDateTime updateTime) { + this.updateTime = updateTime; + return this; + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpClientContext.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpClientContext.java new file mode 100644 index 000000000000..35c93c02f599 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpClientContext.java @@ -0,0 +1,85 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpRequestFactory; +import com.google.api.client.json.JsonObjectParser; +import com.google.api.client.util.ObjectParser; +import com.google.cloud.storage.spi.v1.StorageRpc; +import io.opencensus.trace.Span; +import io.opencensus.trace.Tracer; +import io.opencensus.trace.Tracing; +import java.util.List; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class HttpClientContext { + + private final HttpRequestFactory requestFactory; + private final ObjectParser objectParser; + private final Tracer tracer; + + private HttpClientContext( + HttpRequestFactory requestFactory, ObjectParser objectParser, Tracer tracer) { + this.requestFactory = requestFactory; + this.objectParser = objectParser; + this.tracer = tracer; + } + + @SuppressWarnings({"unchecked", "SameParameterValue"}) + static @Nullable String firstHeaderValue( + @NonNull HttpHeaders headers, @NonNull String headerName) { + Object v = headers.get(headerName); + // HttpHeaders doesn't type its get method, so we have to jump through hoops here + if (v instanceof List) { + List list = (List) v; + return list.get(0); + } else { + return null; + } + } + + public HttpRequestFactory getRequestFactory() { + return requestFactory; + } + + public ObjectParser getObjectParser() { + return objectParser; + } + + public Tracer getTracer() { + return tracer; + } + + public Span startSpan(String name) { + // record events is hardcoded to true in HttpStorageRpc, preserve it here + return tracer.spanBuilder(name).setRecordEvents(true).startSpan(); + } + + static HttpClientContext from(StorageRpc storageRpc) { + return new HttpClientContext( + storageRpc.getStorage().getRequestFactory(), + storageRpc.getStorage().getObjectParser(), + Tracing.getTracer()); + } + + public static HttpClientContext of( + HttpRequestFactory 
requestFactory, JsonObjectParser jsonObjectParser) { + return new HttpClientContext(requestFactory, jsonObjectParser, Tracing.getTracer()); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpContentRange.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpContentRange.java new file mode 100644 index 000000000000..43c68c02b9bd --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpContentRange.java @@ -0,0 +1,274 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.common.base.MoreObjects; +import java.util.Locale; +import java.util.Objects; +import java.util.function.UnaryOperator; + +abstract class HttpContentRange { + + private final boolean finalizing; + + private HttpContentRange(boolean finalizing) { + this.finalizing = finalizing; + } + + public abstract String getHeaderValue(); + + public abstract boolean endOffsetEquals(long e); + + public boolean isFinalizing() { + return finalizing; + } + + static Total of(ByteRangeSpec spec, long size) { + checkArgument(size >= 0, "size must be >= 0"); + checkArgument(size >= spec.endOffsetInclusive(), "size must be >= end"); + return new Total(spec, size); + } + + static Incomplete of(ByteRangeSpec spec) { + return new Incomplete(spec); + } + + static Size of(long size) { + checkArgument(size >= 0, "size must be >= 0"); + return new Size(size); + } + + static Query query() { + return Query.INSTANCE; + } + + static HttpContentRange parse(String string) { + if ("bytes */*".equals(string)) { + return HttpContentRange.query(); + } else if (string.startsWith("bytes */")) { + return HttpContentRange.of(Long.parseLong(string.substring(8))); + } else { + int idxDash = string.indexOf('-'); + int idxSlash = string.indexOf('/'); + + String beginS = string.substring(6, idxDash); + String endS = string.substring(idxDash + 1, idxSlash); + long begin = Long.parseLong(beginS); + long end = Long.parseLong(endS); + if (string.endsWith("/*")) { + return HttpContentRange.of(ByteRangeSpec.explicitClosed(begin, end)); + } else { + String sizeS = string.substring(idxSlash + 1); + long size = Long.parseLong(sizeS); + return HttpContentRange.of(ByteRangeSpec.explicitClosed(begin, end), size); + } + } + } + + static final class Incomplete extends HttpContentRange implements HasRange { + + private final ByteRangeSpec spec; + + private Incomplete(ByteRangeSpec spec) { + 
super(false); + this.spec = spec; + } + + @Override + public String getHeaderValue() { + return String.format( + Locale.US, "bytes %d-%d/*", spec.beginOffset(), spec.endOffsetInclusive()); + } + + @Override + public boolean endOffsetEquals(long e) { + return e == spec.endOffset(); + } + + @Override + public ByteRangeSpec range() { + return spec; + } + + @Override + public Incomplete map(UnaryOperator f) { + return new Incomplete(f.apply(spec)); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Incomplete)) { + return false; + } + Incomplete that = (Incomplete) o; + return Objects.equals(spec, that.spec); + } + + @Override + public int hashCode() { + return Objects.hash(spec); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("spec", spec).toString(); + } + } + + static final class Total extends HttpContentRange implements HasRange, HasSize { + + private final ByteRangeSpec spec; + private final long size; + + private Total(ByteRangeSpec spec, long size) { + super(true); + this.spec = spec; + this.size = size; + } + + @Override + public String getHeaderValue() { + return String.format( + Locale.US, "bytes %d-%d/%d", spec.beginOffset(), spec.endOffsetInclusive(), size); + } + + @Override + public boolean endOffsetEquals(long e) { + return e == spec.endOffset(); + } + + @Override + public long getSize() { + return size; + } + + @Override + public ByteRangeSpec range() { + return spec; + } + + @Override + public Total map(UnaryOperator f) { + return new Total(f.apply(spec), size); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Total)) { + return false; + } + Total total = (Total) o; + return size == total.size && Objects.equals(spec, total.spec); + } + + @Override + public int hashCode() { + return Objects.hash(spec, size); + } + + @Override + public String toString() { + return 
MoreObjects.toStringHelper(this).add("spec", spec).add("size", size).toString(); + } + } + + static final class Size extends HttpContentRange implements HasSize { + + private final long size; + + private Size(long size) { + super(true); + this.size = size; + } + + @Override + public String getHeaderValue() { + return String.format(Locale.US, "bytes */%d", size); + } + + @Override + public boolean endOffsetEquals(long e) { + return e == Math.max(0, size - 1); + } + + @Override + public long getSize() { + return size; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Size)) { + return false; + } + Size size1 = (Size) o; + return size == size1.size; + } + + @Override + public int hashCode() { + return Objects.hash(size); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("size", size).toString(); + } + } + + static final class Query extends HttpContentRange { + + private static final Query INSTANCE = new Query(); + + private Query() { + super(false); + } + + @Override + public boolean endOffsetEquals(long e) { + return false; + } + + @Override + public String getHeaderValue() { + return "bytes */*"; + } + } + + interface HasRange { + + ByteRangeSpec range(); + + T map(UnaryOperator f); + } + + interface HasSize { + + long getSize(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpCopyWriter.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpCopyWriter.java new file mode 100644 index 000000000000..6d8a24fc8eda --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpCopyWriter.java @@ -0,0 +1,295 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.RestorableState; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Retrying.DefaultRetrier; +import com.google.cloud.storage.Retrying.HttpRetrier; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.cloud.storage.Retrying.RetryingDependencies; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.cloud.storage.spi.v1.StorageRpc.RewriteRequest; +import com.google.cloud.storage.spi.v1.StorageRpc.RewriteResponse; +import com.google.common.base.MoreObjects; +import java.io.Serializable; +import java.util.Map; +import java.util.Objects; +import java.util.function.UnaryOperator; + +public class HttpCopyWriter extends CopyWriter { + + private final HttpStorageOptions serviceOptions; + private final StorageRpc storageRpc; + private RewriteResponse rewriteResponse; + private final Retrier retrier; + + HttpCopyWriter( + HttpStorageOptions serviceOptions, RewriteResponse rewriteResponse, Retrier retrier) { + this.serviceOptions = serviceOptions; + this.rewriteResponse = rewriteResponse; + this.storageRpc = serviceOptions.getStorageRpcV1(); + this.retrier = retrier; + } + + @Override + public Blob getResult() { + while (!isDone()) { + copyChunk(); + } + BlobInfo info = Conversions.json().blobInfo().decode(rewriteResponse.result); + return info.asBlob(serviceOptions.getService()); + } + + @Override + public long getBlobSize() { + return rewriteResponse.blobSize; + } + + @Override + public boolean isDone() { + return 
rewriteResponse.isDone; + } + + @Override + public long getTotalBytesCopied() { + return rewriteResponse.totalBytesRewritten; + } + + @Override + public void copyChunk() { + if (!isDone()) { + RewriteRequest rewriteRequest = rewriteResponse.rewriteRequest; + this.rewriteResponse = + retrier.run( + serviceOptions.getRetryAlgorithmManager().getForObjectsRewrite(rewriteRequest), + () -> storageRpc.continueRewrite(rewriteResponse), + Decoder.identity()); + } + } + + @Override + public RestorableState capture() { + return StateImpl.newBuilder( + serviceOptions, + Conversions.json().blobId().decode(rewriteResponse.rewriteRequest.source), + rewriteResponse.rewriteRequest.sourceOptions, + rewriteResponse.rewriteRequest.overrideInfo, + Conversions.json().blobInfo().decode(rewriteResponse.rewriteRequest.target), + rewriteResponse.rewriteRequest.targetOptions) + .setResult( + rewriteResponse.result != null + ? Conversions.json().blobInfo().decode(rewriteResponse.result) + : null) + .setBlobSize(getBlobSize()) + .setIsDone(isDone()) + .setMegabytesCopiedPerChunk(rewriteResponse.rewriteRequest.megabytesRewrittenPerCall) + .setRewriteToken(rewriteResponse.rewriteToken) + .setTotalBytesRewritten(getTotalBytesCopied()) + .build(); + } + + static class StateImpl implements RestorableState, Serializable { + + private static final long serialVersionUID = 1843004265650868946L; + + private final HttpStorageOptions serviceOptions; + private final BlobId source; + private final Map sourceOptions; + private final boolean overrideInfo; + private final BlobInfo target; + private final Map targetOptions; + private final BlobInfo result; + private final long blobSize; + private final boolean isDone; + private final String rewriteToken; + private final long totalBytesCopied; + private final Long megabytesCopiedPerChunk; + + StateImpl(Builder builder) { + this.serviceOptions = builder.serviceOptions; + this.source = builder.source; + this.sourceOptions = builder.sourceOptions; + 
this.overrideInfo = builder.overrideInfo; + this.target = builder.target; + this.targetOptions = builder.targetOptions; + this.result = builder.result; + this.blobSize = builder.blobSize; + this.isDone = builder.isDone; + this.rewriteToken = builder.rewriteToken; + this.totalBytesCopied = builder.totalBytesCopied; + this.megabytesCopiedPerChunk = builder.megabytesCopiedPerChunk; + } + + static class Builder { + + private final HttpStorageOptions serviceOptions; + private final BlobId source; + private final Map sourceOptions; + private final boolean overrideInfo; + private final BlobInfo target; + private final Map targetOptions; + private BlobInfo result; + private long blobSize; + private boolean isDone; + private String rewriteToken; + private long totalBytesCopied; + private Long megabytesCopiedPerChunk; + + private Builder( + HttpStorageOptions options, + BlobId source, + Map sourceOptions, + boolean overrideInfo, + BlobInfo target, + Map targetOptions) { + this.serviceOptions = options; + this.source = source; + this.sourceOptions = sourceOptions; + this.overrideInfo = overrideInfo; + this.target = target; + this.targetOptions = targetOptions; + } + + Builder setResult(BlobInfo result) { + this.result = result; + return this; + } + + Builder setBlobSize(long blobSize) { + this.blobSize = blobSize; + return this; + } + + Builder setIsDone(boolean isDone) { + this.isDone = isDone; + return this; + } + + Builder setRewriteToken(String rewriteToken) { + this.rewriteToken = rewriteToken; + return this; + } + + Builder setTotalBytesRewritten(long totalBytesRewritten) { + this.totalBytesCopied = totalBytesRewritten; + return this; + } + + Builder setMegabytesCopiedPerChunk(Long megabytesCopiedPerChunk) { + this.megabytesCopiedPerChunk = megabytesCopiedPerChunk; + return this; + } + + RestorableState build() { + return new StateImpl(this); + } + } + + static Builder newBuilder( + HttpStorageOptions options, + BlobId source, + Map sourceOptions, + boolean 
overrideInfo, + BlobInfo target, + Map targetOptions) { + return new Builder(options, source, sourceOptions, overrideInfo, target, targetOptions); + } + + @Override + public CopyWriter restore() { + RewriteRequest rewriteRequest = + new RewriteRequest( + Conversions.json().blobId().encode(source), + sourceOptions, + overrideInfo, + Conversions.json().blobInfo().encode(target), + targetOptions, + megabytesCopiedPerChunk); + RewriteResponse rewriteResponse = + new RewriteResponse( + rewriteRequest, + result != null ? Conversions.json().blobInfo().encode(result) : null, + blobSize, + isDone, + rewriteToken, + totalBytesCopied); + HttpRetrier httpRetrier = + new HttpRetrier( + new DefaultRetrier( + UnaryOperator.identity(), + RetryingDependencies.simple( + serviceOptions.getClock(), serviceOptions.getRetrySettings()))); + return new HttpCopyWriter(serviceOptions, rewriteResponse, httpRetrier); + } + + @Override + public int hashCode() { + return Objects.hash( + serviceOptions, + source, + sourceOptions, + overrideInfo, + target, + targetOptions, + result, + blobSize, + isDone, + megabytesCopiedPerChunk, + rewriteToken, + totalBytesCopied); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (!(obj instanceof StateImpl)) { + return false; + } + final StateImpl other = (StateImpl) obj; + return Objects.equals(this.serviceOptions, other.serviceOptions) + && Objects.equals(this.source, other.source) + && Objects.equals(this.sourceOptions, other.sourceOptions) + && Objects.equals(this.overrideInfo, other.overrideInfo) + && Objects.equals(this.target, other.target) + && Objects.equals(this.targetOptions, other.targetOptions) + && Objects.equals(this.result, other.result) + && Objects.equals(this.rewriteToken, other.rewriteToken) + && Objects.equals(this.megabytesCopiedPerChunk, other.megabytesCopiedPerChunk) + && this.blobSize == other.blobSize + && this.isDone == other.isDone + && this.totalBytesCopied == 
other.totalBytesCopied; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("source", source) + .add("overrideInfo", overrideInfo) + .add("target", target) + .add("result", result) + .add("blobSize", blobSize) + .add("isDone", isDone) + .add("rewriteToken", rewriteToken) + .add("totalBytesCopied", totalBytesCopied) + .add("megabytesCopiedPerChunk", megabytesCopiedPerChunk) + .toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpDownloadSessionBuilder.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpDownloadSessionBuilder.java new file mode 100644 index 000000000000..2b81fac694da --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpDownloadSessionBuilder.java @@ -0,0 +1,172 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.ApiaryUnbufferedReadableByteChannel.ApiaryReadRequest; +import com.google.cloud.storage.BlobReadChannelV2.BlobReadChannelContext; +import com.google.cloud.storage.BufferedReadableByteChannelSession.BufferedReadableByteChannel; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import com.google.common.util.concurrent.MoreExecutors; +import java.nio.ByteBuffer; +import java.util.function.BiFunction; +import javax.annotation.concurrent.Immutable; + +@Immutable +final class HttpDownloadSessionBuilder { + private static final HttpDownloadSessionBuilder INSTANCE = new HttpDownloadSessionBuilder(); + + private static final int DEFAULT_BUFFER_CAPACITY = ByteSizeConstants._2MiB; + + private HttpDownloadSessionBuilder() {} + + static HttpDownloadSessionBuilder create() { + return INSTANCE; + } + + public ReadableByteChannelSessionBuilder byteChannel( + BlobReadChannelContext blobReadChannelContext) { + // TODO: refactor BlobReadChannelContext to push retry to a lower individual config + // similar to GapicWritableByteChannelSessionBuilder.ResumableUploadBuilder.withRetryConfig + return new ReadableByteChannelSessionBuilder(blobReadChannelContext); + } + + public static final class ReadableByteChannelSessionBuilder { + + private final BlobReadChannelContext blobReadChannelContext; + private boolean autoGzipDecompression; + + // private Hasher hasher; // TODO: wire in Hasher + + private ReadableByteChannelSessionBuilder(BlobReadChannelContext blobReadChannelContext) { + this.blobReadChannelContext = blobReadChannelContext; + this.autoGzipDecompression = false; + } + + public ReadableByteChannelSessionBuilder setAutoGzipDecompression( + boolean 
autoGzipDecompression) { + this.autoGzipDecompression = autoGzipDecompression; + return this; + } + + public BufferedReadableByteChannelSessionBuilder buffered() { + return buffered(BufferHandle.allocate(DEFAULT_BUFFER_CAPACITY)); + } + + public BufferedReadableByteChannelSessionBuilder buffered(BufferHandle bufferHandle) { + return new BufferedReadableByteChannelSessionBuilder(bufferHandle, bindFunction()); + } + + public BufferedReadableByteChannelSessionBuilder buffered(ByteBuffer buffer) { + return buffered(BufferHandle.handleOf(buffer)); + } + + public UnbufferedReadableByteChannelSessionBuilder unbuffered() { + return new UnbufferedReadableByteChannelSessionBuilder(bindFunction()); + } + + private BiFunction< + ApiaryReadRequest, SettableApiFuture, UnbufferedReadableByteChannel> + bindFunction() { + // for any non-final value, create a reference to the value at this point in time + boolean autoGzipDecompression = this.autoGzipDecompression; + return (request, resultFuture) -> { + if (autoGzipDecompression) { + return new GzipReadableByteChannel( + new ApiaryUnbufferedReadableByteChannel( + request, + blobReadChannelContext.getApiaryClient(), + resultFuture, + blobReadChannelContext.getRetrier(), + blobReadChannelContext.getRetryAlgorithmManager().idempotent()), + ApiFutures.transform( + resultFuture, StorageObject::getContentEncoding, MoreExecutors.directExecutor())); + } else { + return new ApiaryUnbufferedReadableByteChannel( + request, + blobReadChannelContext.getApiaryClient(), + resultFuture, + blobReadChannelContext.getRetrier(), + blobReadChannelContext.getRetryAlgorithmManager().idempotent()); + } + }; + } + + public static final class BufferedReadableByteChannelSessionBuilder { + + private final BiFunction< + ApiaryReadRequest, SettableApiFuture, BufferedReadableByteChannel> + f; + private ApiaryReadRequest request; + + private BufferedReadableByteChannelSessionBuilder( + BufferHandle buffer, + BiFunction< + ApiaryReadRequest, + SettableApiFuture, + 
UnbufferedReadableByteChannel> + f) { + this.f = f.andThen(c -> new DefaultBufferedReadableByteChannel(buffer, c)); + } + + public BufferedReadableByteChannelSessionBuilder setApiaryReadRequest( + ApiaryReadRequest request) { + this.request = requireNonNull(request, "request must be non null"); + return this; + } + + public BufferedReadableByteChannelSession build() { + return new ChannelSession.BufferedReadSession<>( + ApiFutures.immediateFuture(request), + f.andThen(StorageByteChannels.readable()::createSynchronized)); + } + } + + public static final class UnbufferedReadableByteChannelSessionBuilder { + + private final BiFunction< + ApiaryReadRequest, SettableApiFuture, UnbufferedReadableByteChannel> + f; + private ApiaryReadRequest request; + + private UnbufferedReadableByteChannelSessionBuilder( + BiFunction< + ApiaryReadRequest, + SettableApiFuture, + UnbufferedReadableByteChannel> + f) { + this.f = f; + } + + public UnbufferedReadableByteChannelSessionBuilder setApiaryReadRequest( + ApiaryReadRequest request) { + this.request = requireNonNull(request, "request must be non null"); + return this; + } + + public UnbufferedReadableByteChannelSession build() { + return new ChannelSession.UnbufferedReadSession<>( + ApiFutures.immediateFuture(request), + f.andThen(StorageByteChannels.readable()::createSynchronized)); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpMethod.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpMethod.java new file mode 100644 index 000000000000..30217d47384b --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpMethod.java @@ -0,0 +1,66 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFunction; +import com.google.cloud.StringEnumType; +import com.google.cloud.StringEnumValue; + +/** Http method supported by Storage service. */ +public final class HttpMethod extends StringEnumValue { + private static final long serialVersionUID = -5787845034130236201L; + + private HttpMethod(String constant) { + super(constant); + } + + private static final ApiFunction CONSTRUCTOR = + new ApiFunction() { + @Override + public HttpMethod apply(String constant) { + return new HttpMethod(constant); + } + }; + + private static final StringEnumType type = + new StringEnumType(HttpMethod.class, CONSTRUCTOR); + + public static final HttpMethod GET = type.createAndRegister("GET"); + public static final HttpMethod HEAD = type.createAndRegister("HEAD"); + public static final HttpMethod PUT = type.createAndRegister("PUT"); + public static final HttpMethod POST = type.createAndRegister("POST"); + public static final HttpMethod DELETE = type.createAndRegister("DELETE"); + public static final HttpMethod OPTIONS = type.createAndRegister("OPTIONS"); + + /** + * Get the HttpMethod for the given String constant, and throw an exception if the constant is not + * recognized. + */ + public static HttpMethod valueOfStrict(String constant) { + return type.valueOfStrict(constant); + } + + /** Get the HttpMethod for the given String constant, and allow unrecognized values. 
*/ + public static HttpMethod valueOf(String constant) { + return type.valueOf(constant); + } + + /** Return the known values for HttpMethod. */ + public static HttpMethod[] values() { + return type.values(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpRetryAlgorithmManager.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpRetryAlgorithmManager.java new file mode 100644 index 000000000000..3db42e3ac363 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpRetryAlgorithmManager.java @@ -0,0 +1,314 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.services.storage.model.Bucket; +import com.google.api.services.storage.model.BucketAccessControl; +import com.google.api.services.storage.model.HmacKeyMetadata; +import com.google.api.services.storage.model.Notification; +import com.google.api.services.storage.model.ObjectAccessControl; +import com.google.api.services.storage.model.Policy; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.cloud.storage.spi.v1.StorageRpc.RewriteRequest; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableMap; +import java.io.Serializable; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +final class HttpRetryAlgorithmManager implements Serializable { + + private static final long serialVersionUID = -3301856948991518651L; + final StorageRetryStrategy retryStrategy; + + HttpRetryAlgorithmManager(StorageRetryStrategy retryStrategy) { + this.retryStrategy = retryStrategy; + } + + /** + * Some operations are inherently idempotent after they're started (Resumable uploads, rewrites) + * provide access to the idempotent {@link ResultRetryAlgorithm} for those uses. 
+ */ + ResultRetryAlgorithm idempotent() { + return retryStrategy.getIdempotentHandler(); + } + + ResultRetryAlgorithm nonIdempotent() { + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketAclCreate( + BucketAccessControl pb, Map optionsMap) { + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketAclDelete( + String pb, Map optionsMap) { + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketAclGet( + String pb, Map optionsMap) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketAclUpdate( + BucketAccessControl pb, Map optionsMap) { + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketAclList( + String pb, Map optionsMap) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketsCreate( + Bucket pb, Map optionsMap) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketsDelete( + Bucket pb, Map optionsMap) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketsGet(Bucket pb, Map optionsMap) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketsUpdate( + Bucket pb, Map optionsMap) { + // TODO: Include etag when it is supported by the library + return optionsMap.containsKey(StorageRpc.Option.IF_METAGENERATION_MATCH) + ? 
retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketsList(Map optionsMap) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketsLockRetentionPolicy( + Bucket pb, Map optionsMap) { + // Always idempotent because IfMetagenerationMatch is required + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketsGetIamPolicy( + String bucket, Map optionsMap) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketsSetIamPolicy( + String bucket, Policy pb, Map optionsMap) { + return pb.getEtag() != null + ? retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForBucketsTestIamPermissions( + String bucket, List permissions, Map optionsMap) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForDefaultObjectAclCreate(ObjectAccessControl pb) { + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForDefaultObjectAclDelete(String pb) { + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForDefaultObjectAclGet(String pb) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForDefaultObjectAclUpdate(ObjectAccessControl pb) { + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForDefaultObjectAclList(String pb) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForHmacKeyCreate( + String pb, Map optionsMap) { + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForHmacKeyDelete( + HmacKeyMetadata pb, Map optionsMap) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForHmacKeyGet( + String accessId, Map optionsMap) { + return 
retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForHmacKeyUpdate( + HmacKeyMetadata pb, Map optionsMap) { + // TODO: Include etag when it is supported by the library + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForHmacKeyList(Map optionsMap) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectAclCreate(ObjectAccessControl aclPb) { + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectAclDelete( + String bucket, String name, Long generation, String pb) { + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectAclList(String bucket, String name, Long generation) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectAclGet( + String bucket, String name, Long generation, String pb) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectAclUpdate(ObjectAccessControl aclPb) { + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectsCreate( + StorageObject pb, Map optionsMap) { + if (pb.getGeneration() != null && pb.getGeneration() == 0) { + return retryStrategy.getIdempotentHandler(); + } + return optionsMap.containsKey(StorageRpc.Option.IF_GENERATION_MATCH) + ? retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectsDelete( + StorageObject pb, Map optionsMap) { + return optionsMap.containsKey(StorageRpc.Option.IF_GENERATION_MATCH) + || (pb.getGeneration() != null && pb.getGeneration() > 0) + ? 
retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectsGet( + StorageObject pb, Map optionsMap) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectsRestore( + StorageObject pb, Map optionsMap) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectsUpdate( + StorageObject pb, Map optionsMap) { + return optionsMap.containsKey(StorageRpc.Option.IF_METAGENERATION_MATCH) + ? retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectsList( + String bucket, Map optionsMap) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectsRewrite(RewriteRequest pb) { + return pb.targetOptions.containsKey(StorageRpc.Option.IF_GENERATION_MATCH) + ? retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectsMove( + ImmutableMap sourceOptions, + ImmutableMap targetOptions) { + return targetOptions.containsKey(StorageRpc.Option.IF_GENERATION_MATCH) + ? retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForObjectsCompose( + List sources, StorageObject target, Map optionsMap) { + return optionsMap.containsKey(StorageRpc.Option.IF_GENERATION_MATCH) + ? retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForResumableUploadSessionCreate( + Map optionsMap) { + return optionsMap.containsKey(StorageRpc.Option.IF_GENERATION_MATCH) + ? 
retryStrategy.getIdempotentHandler() + : retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForResumableUploadSessionWrite( + Map optionsMap) { + // writing to a resumable upload session is always idempotent once it's active + // even if the start of the session wasn't idempotent our incremental writes can be. + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForServiceAccountGet(String pb) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForNotificationCreate(String bucket, Notification pb) { + return retryStrategy.getNonidempotentHandler(); + } + + public ResultRetryAlgorithm getForNotificationGet(String bucket, String notificationId) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForNotificationList(String bucket) { + return retryStrategy.getIdempotentHandler(); + } + + public ResultRetryAlgorithm getForNotificationDelete(String bucket, String notificationId) { + return retryStrategy.getIdempotentHandler(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof HttpRetryAlgorithmManager)) { + return false; + } + HttpRetryAlgorithmManager that = (HttpRetryAlgorithmManager) o; + return Objects.equals(retryStrategy, that.retryStrategy); + } + + @Override + public int hashCode() { + return Objects.hash(retryStrategy); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("retryStrategy", retryStrategy).toString(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpStorageOptions.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpStorageOptions.java new file mode 100644 index 000000000000..dac8a010cdfa --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpStorageOptions.java @@ -0,0 +1,483 @@ +/* + * Copyright 
2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkArgument; +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiClock; +import com.google.api.core.BetaApi; +import com.google.api.core.InternalApi; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.HeaderProvider; +import com.google.api.gax.tracing.ApiTracerFactory; +import com.google.auth.Credentials; +import com.google.cloud.ServiceFactory; +import com.google.cloud.ServiceRpc; +import com.google.cloud.TransportOptions; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.spi.ServiceRpcFactory; +import com.google.cloud.storage.BlobWriteSessionConfig.WriterFactory; +import com.google.cloud.storage.Retrying.HttpRetrier; +import com.google.cloud.storage.Retrying.RetryingDependencies; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.spi.StorageRpcFactory; +import com.google.cloud.storage.spi.v1.HttpStorageRpc; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableSet; +import io.opentelemetry.api.OpenTelemetry; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.Serializable; +import 
java.time.Clock; +import java.util.Objects; +import java.util.Set; +import org.checkerframework.checker.nullness.qual.NonNull; + +/** + * @since 2.14.0 + */ +@TransportCompatibility(Transport.HTTP) +// non-final because of mocking frameworks +public class HttpStorageOptions extends StorageOptions { + + private static final long serialVersionUID = -5302637952911052045L; + private static final String API_SHORT_NAME = "Storage"; + private static final String GCS_SCOPE = "https://www.googleapis.com/auth/devstorage.full_control"; + private static final Set SCOPES = ImmutableSet.of(GCS_SCOPE); + private final HttpRetryAlgorithmManager retryAlgorithmManager; + private transient RetryDependenciesAdapter retryDepsAdapter; + private final BlobWriteSessionConfig blobWriteSessionConfig; + + private transient OpenTelemetry openTelemetry; + + private HttpStorageOptions(Builder builder, StorageDefaults serviceDefaults) { + super(builder, serviceDefaults); + this.retryAlgorithmManager = + new HttpRetryAlgorithmManager( + MoreObjects.firstNonNull( + builder.storageRetryStrategy, defaults().getStorageRetryStrategy())); + retryDepsAdapter = new RetryDependenciesAdapter(); + blobWriteSessionConfig = builder.blobWriteSessionConfig; + openTelemetry = builder.openTelemetry; + } + + @Override + protected Set getScopes() { + return SCOPES; + } + + @InternalApi + HttpRetryAlgorithmManager getRetryAlgorithmManager() { + return retryAlgorithmManager; + } + + @InternalApi + StorageRpc getStorageRpcV1() { + return (StorageRpc) getRpc(); + } + + /** + * @since 2.47.0 This new api is in preview and is subject to breaking changes. 
+ */ + @BetaApi + @Override + public OpenTelemetry getOpenTelemetry() { + return openTelemetry; + } + + @Override + public HttpStorageOptions.Builder toBuilder() { + return new HttpStorageOptions.Builder(this); + } + + @Override + public int hashCode() { + return Objects.hash( + retryAlgorithmManager, blobWriteSessionConfig, openTelemetry, baseHashCode()); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof HttpStorageOptions)) { + return false; + } + HttpStorageOptions that = (HttpStorageOptions) o; + return Objects.equals(retryAlgorithmManager, that.retryAlgorithmManager) + && Objects.equals(blobWriteSessionConfig, that.blobWriteSessionConfig) + && Objects.equals(openTelemetry, that.openTelemetry) + && this.baseEquals(that); + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + this.retryDepsAdapter = new RetryDependenciesAdapter(); + this.openTelemetry = HttpStorageOptions.getDefaultInstance().getOpenTelemetry(); + } + + public static HttpStorageOptions.Builder newBuilder() { + return new HttpStorageOptions.Builder(); + } + + public static HttpStorageOptions getDefaultInstance() { + return newBuilder().build(); + } + + public static HttpStorageDefaults defaults() { + return HttpStorageDefaults.INSTANCE; + } + + @InternalApi + RetryingDependencies asRetryDependencies() { + return retryDepsAdapter; + } + + public static class Builder extends StorageOptions.Builder { + + private StorageRetryStrategy storageRetryStrategy; + private BlobWriteSessionConfig blobWriteSessionConfig = + HttpStorageDefaults.INSTANCE.getDefaultStorageWriterConfig(); + private OpenTelemetry openTelemetry = HttpStorageDefaults.INSTANCE.getDefaultOpenTelemetry(); + + Builder() {} + + Builder(StorageOptions options) { + super(options); + HttpStorageOptions hso = (HttpStorageOptions) options; + this.storageRetryStrategy = hso.retryAlgorithmManager.retryStrategy; + 
this.blobWriteSessionConfig = hso.blobWriteSessionConfig; + this.openTelemetry = hso.getOpenTelemetry(); + } + + @Override + public HttpStorageOptions.Builder setTransportOptions(TransportOptions transportOptions) { + if (!(transportOptions instanceof HttpTransportOptions)) { + throw new IllegalArgumentException( + "Only http transport is allowed for " + API_SHORT_NAME + "."); + } + super.setTransportOptions(transportOptions); + return this; + } + + /** + * Override the default retry handling behavior with an alternate strategy. + * + * @param storageRetryStrategy a non-null storageRetryStrategy to use + * @return the builder + * @see StorageRetryStrategy#getDefaultStorageRetryStrategy() + */ + public HttpStorageOptions.Builder setStorageRetryStrategy( + StorageRetryStrategy storageRetryStrategy) { + this.storageRetryStrategy = + requireNonNull(storageRetryStrategy, "storageRetryStrategy must be non null"); + return this; + } + + @Override + protected HttpStorageOptions.Builder self() { + return this; + } + + @Override + public HttpStorageOptions.Builder setServiceFactory( + ServiceFactory serviceFactory) { + super.setServiceFactory(serviceFactory); + return this; + } + + @Override + public HttpStorageOptions.Builder setClock(ApiClock clock) { + super.setClock(clock); + return this; + } + + @Override + public HttpStorageOptions.Builder setProjectId(String projectId) { + super.setProjectId(projectId); + return this; + } + + @Override + public HttpStorageOptions.Builder setHost(String host) { + super.setHost(host); + return this; + } + + @Override + public HttpStorageOptions.Builder setCredentials(Credentials credentials) { + super.setCredentials(credentials); + return this; + } + + @Override + public HttpStorageOptions.Builder setRetrySettings(RetrySettings retrySettings) { + super.setRetrySettings(retrySettings); + return this; + } + + @Override + public HttpStorageOptions.Builder setServiceRpcFactory( + ServiceRpcFactory serviceRpcFactory) { + 
super.setServiceRpcFactory(serviceRpcFactory); + return this; + } + + @Override + public HttpStorageOptions.Builder setHeaderProvider(HeaderProvider headerProvider) { + super.setHeaderProvider(headerProvider); + return this; + } + + @Override + public HttpStorageOptions.Builder setClientLibToken(String clientLibToken) { + super.setClientLibToken(clientLibToken); + return this; + } + + @Override + public HttpStorageOptions.Builder setQuotaProjectId(String quotaProjectId) { + super.setQuotaProjectId(quotaProjectId); + return this; + } + + /** + * @see BlobWriteSessionConfig + * @see BlobWriteSessionConfigs + * @see Storage#blobWriteSession(BlobInfo, BlobWriteOption...) + * @see HttpStorageDefaults#getDefaultStorageWriterConfig() + * @since 2.29.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public HttpStorageOptions.Builder setBlobWriteSessionConfig( + @NonNull BlobWriteSessionConfig blobWriteSessionConfig) { + requireNonNull(blobWriteSessionConfig, "blobWriteSessionConfig must be non null"); + checkArgument( + blobWriteSessionConfig instanceof BlobWriteSessionConfig.HttpCompatible, + "The provided instance of BlobWriteSessionConfig is not compatible with this HTTP" + + " transport."); + this.blobWriteSessionConfig = blobWriteSessionConfig; + return this; + } + + @Override + public HttpStorageOptions.Builder setUniverseDomain(String universeDomain) { + super.setUniverseDomain(universeDomain); + return this; + } + + @Override + public HttpStorageOptions.Builder setApiTracerFactory(ApiTracerFactory apiTracerFactory) { + super.setApiTracerFactory(apiTracerFactory); + return this; + } + + @Override + public HttpStorageOptions build() { + HttpStorageOptions options = new HttpStorageOptions(this, defaults()); + + // todo: In the future, this step will be done automatically, and the getResolvedApiaryHost + // helper method will + // be removed. When that happens, delete the following block. 
+ // https://github.com/googleapis/google-api-java-client-services/issues/19286 + if (options.getHost() != null) { // user did not manually set a host + this.setHost(options.getResolvedApiaryHost("storage")); + return new HttpStorageOptions(this, defaults()); + } + return options; + } + + /** + * Enable OpenTelemetry Tracing and provide an instance for the client to use. + * + * @param openTelemetry User defined instance of OpenTelemetry to be used by the library + * @since 2.47.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public HttpStorageOptions.Builder setOpenTelemetry(OpenTelemetry openTelemetry) { + requireNonNull(openTelemetry, "openTelemetry must be non null"); + this.openTelemetry = openTelemetry; + return this; + } + } + + public static final class HttpStorageDefaults extends StorageDefaults { + static final HttpStorageDefaults INSTANCE = new HttpStorageDefaults(); + static final StorageFactory STORAGE_FACTORY = new HttpStorageFactory(); + static final StorageRpcFactory STORAGE_RPC_FACTORY = new HttpStorageRpcFactory(); + + private HttpStorageDefaults() {} + + @Override + public StorageFactory getDefaultServiceFactory() { + return STORAGE_FACTORY; + } + + @Override + public StorageRpcFactory getDefaultRpcFactory() { + return STORAGE_RPC_FACTORY; + } + + @Override + public HttpTransportOptions getDefaultTransportOptions() { + return HttpTransportOptions.newBuilder().build(); + } + + public StorageRetryStrategy getStorageRetryStrategy() { + return StorageRetryStrategy.getDefaultStorageRetryStrategy(); + } + + /** + * @since 2.29.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public BlobWriteSessionConfig getDefaultStorageWriterConfig() { + return BlobWriteSessionConfigs.getDefault(); + } + + /** + * @since 2.47.0 This new api is in preview and is subject to breaking changes. 
+ */ + @BetaApi + public OpenTelemetry getDefaultOpenTelemetry() { + return OpenTelemetry.noop(); + } + } + + /** + * Internal implementation detail, only public to allow for {@link java.io.Serializable}. + * + *

To access an instance of this class instead use {@link + * HttpStorageDefaults#getDefaultServiceFactory() + * HttpStorageOptions.defaults().getDefaultServiceFactory()}. + * + * @see HttpStorageOptions#defaults() + * @see HttpStorageDefaults#getDefaultServiceFactory() + */ + @InternalApi + public static class HttpStorageFactory implements StorageFactory, Serializable { + private static final long serialVersionUID = 1063208433681579145L; + + /** + * Internal implementation detail, only public to allow for {@link java.io.Serializable}. + * + *

To access an instance of this class instead use {@link + * HttpStorageDefaults#getDefaultServiceFactory() + * HttpStorageOptions.defaults().getDefaultServiceFactory()}. + * + * @see HttpStorageOptions#defaults() + * @see HttpStorageDefaults#getDefaultServiceFactory() + * @deprecated instead use {@link HttpStorageDefaults#getDefaultServiceFactory() + * HttpStorageOptions.defaults().getDefaultServiceFactory()} + */ + // this class needs to be public due to ServiceOptions forName'ing it in it's readObject method + @InternalApi + @Deprecated + @SuppressWarnings("DeprecatedIsStillUsed") + public HttpStorageFactory() {} + + @Override + public Storage create(StorageOptions options) { + if (options instanceof HttpStorageOptions) { + HttpStorageOptions httpStorageOptions = (HttpStorageOptions) options; + Clock clock = Clock.systemUTC(); + try { + OpenTelemetry otel = httpStorageOptions.getOpenTelemetry(); + BlobWriteSessionConfig blobWriteSessionConfig = httpStorageOptions.blobWriteSessionConfig; + if (blobWriteSessionConfig == null) { + blobWriteSessionConfig = HttpStorageOptions.defaults().getDefaultStorageWriterConfig(); + } + WriterFactory factory = blobWriteSessionConfig.createFactory(clock); + StorageImpl storage = + new StorageImpl( + httpStorageOptions, factory, new HttpRetrier(options.createRetrier())); + return OtelStorageDecorator.decorate(storage, otel, Transport.HTTP); + } catch (IOException e) { + throw new IllegalStateException( + "Unable to instantiate HTTP com.google.cloud.storage.Storage client.", e); + } + } else { + throw new IllegalArgumentException("Only HttpStorageOptions supported"); + } + } + } + + /** + * Internal implementation detail, only public to allow for {@link java.io.Serializable}. + * + *

To access an instance of this class instead use {@link + * HttpStorageDefaults#getDefaultRpcFactory() + * HttpStorageOptions.defaults().getDefaultRpcFactory()}. + * + * @see HttpStorageOptions#defaults() + * @see HttpStorageDefaults#getDefaultRpcFactory() + */ + @InternalApi + public static class HttpStorageRpcFactory implements StorageRpcFactory, Serializable { + private static final long serialVersionUID = -5896805045709989797L; + + /** + * Internal implementation detail, only public to allow for {@link java.io.Serializable}. + * + *

To access an instance of this class instead use {@link + * HttpStorageDefaults#getDefaultRpcFactory() + * HttpStorageOptions.defaults().getDefaultRpcFactory()}. + * + * @see HttpStorageOptions#defaults() + * @see HttpStorageDefaults#getDefaultRpcFactory() + * @deprecated instead use {@link HttpStorageDefaults#getDefaultRpcFactory() + * HttpStorageOptions.defaults().getDefaultRpcFactory()} + */ + // this class needs to be public due to ServiceOptions forName'ing it in it's readObject method + @InternalApi + @Deprecated + @SuppressWarnings("DeprecatedIsStillUsed") + public HttpStorageRpcFactory() {} + + @Override + public ServiceRpc create(StorageOptions options) { + if (options instanceof HttpStorageOptions) { + HttpStorageOptions httpStorageOptions = (HttpStorageOptions) options; + return new HttpStorageRpc(httpStorageOptions); + } else { + throw new IllegalArgumentException("Only HttpStorageOptions supported"); + } + } + } + + /** + * We don't yet want to make HttpStorageOptions itself implement {@link RetryingDependencies} but + * we do need use it in a couple places, for those we create this adapter. 
+ */ + private final class RetryDependenciesAdapter implements RetryingDependencies { + private RetryDependenciesAdapter() {} + + @Override + public RetrySettings getRetrySettings() { + return HttpStorageOptions.this.getRetrySettings(); + } + + @Override + public ApiClock getClock() { + return HttpStorageOptions.this.getClock(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpUploadSessionBuilder.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpUploadSessionBuilder.java new file mode 100644 index 000000000000..26ecf18c81f2 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpUploadSessionBuilder.java @@ -0,0 +1,34 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import org.checkerframework.checker.nullness.qual.NonNull; + +final class HttpUploadSessionBuilder { + private static final HttpUploadSessionBuilder INSTANCE = new HttpUploadSessionBuilder(); + + private HttpUploadSessionBuilder() {} + + static HttpUploadSessionBuilder create() { + return INSTANCE; + } + + @NonNull HttpWritableByteChannelSessionBuilder byteChannel( + @NonNull HttpClientContext httpClientContext) { + return new HttpWritableByteChannelSessionBuilder(httpClientContext); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpWritableByteChannelSessionBuilder.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpWritableByteChannelSessionBuilder.java new file mode 100644 index 000000000000..ddb999e9b7cf --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpWritableByteChannelSessionBuilder.java @@ -0,0 +1,180 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.ChannelSession.BufferedWriteSession; +import com.google.cloud.storage.ChannelSession.UnbufferedWriteSession; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import java.nio.ByteBuffer; +import java.util.function.BiFunction; +import java.util.function.LongConsumer; +import org.checkerframework.checker.nullness.qual.NonNull; + +final class HttpWritableByteChannelSessionBuilder { + + private static final int DEFAULT_BUFFER_CAPACITY = ByteSizeConstants._16MiB; + @NonNull private final HttpClientContext httpClientContext; + + HttpWritableByteChannelSessionBuilder(@NonNull HttpClientContext httpClientContext) { + this.httpClientContext = + requireNonNull(httpClientContext, "httpClientContext must be non null"); + } + + /** + * The build {@link WritableByteChannelSession} will perform a "Resumable" upload. + * + *

A "Resumable" upload will sync the transmitted data with GCS upon each individual flush and + * when the channel is closed. + * + *

If an error is returned the individual flush can be transparently retried. + */ + ResumableUploadBuilder resumable() { + return new ResumableUploadBuilder(httpClientContext); + } + + static final class ResumableUploadBuilder { + + @NonNull private final HttpClientContext httpClientContext; + private RetrierWithAlg retrier; + private LongConsumer committedBytesCallback; + private Hasher hasher; + + ResumableUploadBuilder(@NonNull HttpClientContext httpClientContext) { + this.httpClientContext = httpClientContext; + this.retrier = RetrierWithAlg.attemptOnce(); + this.committedBytesCallback = l -> {}; + this.hasher = Hasher.defaultHasher(); + } + + ResumableUploadBuilder setCommittedBytesCallback(@NonNull LongConsumer committedBytesCallback) { + this.committedBytesCallback = + requireNonNull(committedBytesCallback, "committedBytesCallback must be non null"); + return this; + } + + ResumableUploadBuilder withRetryConfig(@NonNull RetrierWithAlg retrier) { + this.retrier = requireNonNull(retrier, "retrier must be non null"); + return this; + } + + ResumableUploadBuilder setHasher(@NonNull Hasher hasher) { + this.hasher = requireNonNull(hasher, "hasher must be non null"); + return this; + } + + /** + * Do not apply any intermediate buffering. Any call to {@link + * java.nio.channels.WritableByteChannel#write(ByteBuffer)} will be segmented as is and sent to + * GCS. + * + *

Note: this is considered an advanced API, and should not be used in circumstances in which + * control of {@link ByteBuffer}s sent to {@code write} is not self-contained. + */ + UnbufferedResumableUploadBuilder unbuffered() { + return new UnbufferedResumableUploadBuilder(); + } + + /** Buffer up to {@link #DEFAULT_BUFFER_CAPACITY} worth of bytes before attempting to flush */ + BufferedResumableUploadBuilder buffered() { + return buffered(BufferHandle.allocate(DEFAULT_BUFFER_CAPACITY)); + } + + /** + * Buffer using {@code byteBuffer} worth of space before attempting to flush. + * + *

The provided {@link ByteBuffer} should be aligned with GCSs block size of 256 + * KiB. + */ + BufferedResumableUploadBuilder buffered(ByteBuffer byteBuffer) { + return buffered(BufferHandle.handleOf(byteBuffer)); + } + + BufferedResumableUploadBuilder buffered(BufferHandle bufferHandle) { + return new BufferedResumableUploadBuilder(bufferHandle); + } + + private BiFunction< + JsonResumableWrite, SettableApiFuture, UnbufferedWritableByteChannel> + bindFunction() { + // it is theoretically possible that the setter methods for the following variables could + // be called again between when this method is invoked and the resulting function is invoked. + // To ensure we are using the specified values at the point in time they are bound to the + // function read them into local variables which will be closed over rather than the class + // fields. + RetrierWithAlg boundRetrier = retrier; + return (start, resultFuture) -> + new ApiaryUnbufferedWritableByteChannel( + httpClientContext, boundRetrier, start, resultFuture, committedBytesCallback); + } + + final class UnbufferedResumableUploadBuilder { + + private ApiFuture start; + + /** + * Set the Future which will contain the ResumableWrite information necessary to open the + * Write stream. + */ + UnbufferedResumableUploadBuilder setStartAsync(ApiFuture start) { + this.start = requireNonNull(start, "start must be non null"); + return this; + } + + UnbufferedWritableByteChannelSession build() { + return new UnbufferedWriteSession<>( + requireNonNull(start, "start must be non null"), + bindFunction().andThen(StorageByteChannels.writable()::createSynchronized)); + } + } + + final class BufferedResumableUploadBuilder { + + private final BufferHandle bufferHandle; + + private ApiFuture start; + + BufferedResumableUploadBuilder(BufferHandle bufferHandle) { + this.bufferHandle = bufferHandle; + } + + /** + * Set the Future which will contain the ResumableWrite information necessary to open the + * Write stream. 
+ */ + BufferedResumableUploadBuilder setStartAsync(ApiFuture start) { + this.start = requireNonNull(start, "start must be non null"); + return this; + } + + BufferedWritableByteChannelSession build() { + return new BufferedWriteSession<>( + requireNonNull(start, "start must be non null"), + bindFunction() + .andThen(c -> new DefaultBufferedWritableByteChannel(bufferHandle, c)) + .andThen(StorageByteChannels.writable()::createSynchronized)); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/IOAutoCloseable.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/IOAutoCloseable.java new file mode 100644 index 000000000000..2769bb482e16 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/IOAutoCloseable.java @@ -0,0 +1,83 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.InternalApi; +import com.google.api.core.InternalExtensionOnly; +import com.google.common.base.MoreObjects; +import java.io.Closeable; +import java.io.IOException; + +/** + * Specialized sub-interface to AutoClosable narrowing the exception from {@link #close} to be an + * {@link IOException}. Also implements {@link Closeable} for ease of cross usage. 
+ */ +@FunctionalInterface +@InternalApi +@InternalExtensionOnly +interface IOAutoCloseable extends AutoCloseable, Closeable { + + @Override + void close() throws IOException; + + @InternalApi + default IOAutoCloseable andThen(IOAutoCloseable then) { + if (NoOpIOAutoCloseable.INSTANCE.equals(this)) { + return then; + } else if (NoOpIOAutoCloseable.INSTANCE.equals(then)) { + return this; + } else { + return new AndThenIOAutoClosable(this, then); + } + } + + @InternalApi + static IOAutoCloseable noOp() { + return NoOpIOAutoCloseable.INSTANCE; + } + + final class NoOpIOAutoCloseable implements IOAutoCloseable { + private static final NoOpIOAutoCloseable INSTANCE = new NoOpIOAutoCloseable(); + + private NoOpIOAutoCloseable() {} + + @Override + public void close() throws IOException {} + } + + final class AndThenIOAutoClosable implements IOAutoCloseable { + private final IOAutoCloseable first; + private final IOAutoCloseable second; + + private AndThenIOAutoClosable(IOAutoCloseable first, IOAutoCloseable second) { + this.first = first; + this.second = second; + } + + @Override + public void close() throws IOException { + //noinspection EmptyTryBlock + try (IOAutoCloseable ignore2 = second; + IOAutoCloseable ignore1 = first) {} + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("first", first).add("second", second).toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/JournalingBlobWriteSessionConfig.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/JournalingBlobWriteSessionConfig.java new file mode 100644 index 000000000000..d8db80a656a0 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/JournalingBlobWriteSessionConfig.java @@ -0,0 +1,281 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with 
the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.BetaApi; +import com.google.api.core.InternalApi; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.ApiExceptions; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.RecoveryFileManager.RecoveryVolumeSinkFactory; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.ThroughputSink.Record; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.storage.v2.ServiceConstants.Values; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.nio.channels.WritableByteChannel; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Objects; +import java.util.stream.Collector; +import 
javax.annotation.concurrent.Immutable; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; + +/** + * There are scenarios in which disk space is more plentiful than memory space. This new {@link + * BlobWriteSessionConfig} allows augmenting an instance of storage to produce {@link + * BlobWriteSession}s which will buffer to disk rather than holding things in memory. + * + *

If we have disk available we can checkpoint the contents of an object to disk before + * transmitting to GCS. The checkpointed data on disk allows arbitrary rewind in the case of failure + * but allows the upload to happen as soon as the checkpoint ack is complete. + * + *

Due to the details of how Resumable Upload Sessions are implemented in the GCS gRPC API this + * is possible. However, this approach will not work with the HTTP transports Resumable Upload + * Session spec. + * + * @see Storage#blobWriteSession(BlobInfo, BlobWriteOption...) + * @see GrpcStorageOptions.Builder#setBlobWriteSessionConfig(BlobWriteSessionConfig) + */ +@Immutable +@BetaApi +@TransportCompatibility(Transport.GRPC) +public final class JournalingBlobWriteSessionConfig extends BlobWriteSessionConfig + implements BlobWriteSessionConfig.GrpcCompatible { + private static final long serialVersionUID = 9059242302276891867L; + + /** + * non-final because of {@link java.io.Serializable}, however this field is effectively final as + * it is immutable and there is not reference mutator method. + */ + @MonotonicNonNull private transient ImmutableList paths; + + private final boolean includeLoggingSink; + + /** Used for {@link java.io.Serializable} */ + @MonotonicNonNull private volatile ArrayList absolutePaths; + + @InternalApi + JournalingBlobWriteSessionConfig(ImmutableList paths, boolean includeLoggingSink) { + this.paths = paths; + this.includeLoggingSink = includeLoggingSink; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof JournalingBlobWriteSessionConfig)) { + return false; + } + JournalingBlobWriteSessionConfig that = (JournalingBlobWriteSessionConfig) o; + return includeLoggingSink == that.includeLoggingSink + && Objects.equals(paths, that.paths) + && Objects.equals(absolutePaths, that.absolutePaths); + } + + @Override + public int hashCode() { + return Objects.hash(paths, includeLoggingSink, absolutePaths); + } + + @VisibleForTesting + @InternalApi + JournalingBlobWriteSessionConfig withIncludeLoggingSink() { + return new JournalingBlobWriteSessionConfig(paths, true); + } + + @InternalApi + @Override + WriterFactory createFactory(Clock clock) throws IOException { + Duration window = 
Duration.ofMinutes(10); + RecoveryFileManager recoveryFileManager = + RecoveryFileManager.of(paths, getRecoverVolumeSinkFactory(clock, window)); + ThroughputSink gcs = ThroughputSink.windowed(ThroughputMovingWindow.of(window), clock); + gcs = includeLoggingSink ? ThroughputSink.tee(ThroughputSink.logged("gcs", clock), gcs) : gcs; + return new Factory(recoveryFileManager, clock, gcs); + } + + private RecoveryVolumeSinkFactory getRecoverVolumeSinkFactory(Clock clock, Duration window) { + return path -> { + ThroughputSink windowed = ThroughputSink.windowed(ThroughputMovingWindow.of(window), clock); + if (includeLoggingSink) { + return ThroughputSink.tee( + ThroughputSink.logged(path.toAbsolutePath().toString(), clock), windowed); + } else { + return windowed; + } + }; + } + + private void writeObject(ObjectOutputStream out) throws IOException { + if (absolutePaths == null) { + synchronized (this) { + if (absolutePaths == null) { + absolutePaths = + paths.stream() + .map(Path::toAbsolutePath) + .map(Path::toString) + .collect( + Collector.of( + ArrayList::new, + ArrayList::add, + (left, right) -> { + left.addAll(right); + return left; + })); + } + } + } + out.defaultWriteObject(); + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + this.paths = absolutePaths.stream().map(Paths::get).collect(ImmutableList.toImmutableList()); + } + + private static final class Factory implements WriterFactory { + + private final RecoveryFileManager recoveryFileManager; + private final Clock clock; + private final ThroughputSink gcs; + + private Factory(RecoveryFileManager recoveryFileManager, Clock clock, ThroughputSink gcs) { + this.recoveryFileManager = recoveryFileManager; + this.clock = clock; + this.gcs = gcs; + } + + @InternalApi + @Override + public WritableByteChannelSession writeSession( + StorageInternal storage, BlobInfo info, Opts opts) { + if (storage instanceof GrpcStorageImpl) { + GrpcStorageImpl 
grpcStorage = (GrpcStorageImpl) storage; + RecoveryFile recoveryFile = recoveryFileManager.newRecoveryFile(info); + GrpcCallContext grpcCallContext = + opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + ApiFuture f = + grpcStorage.startResumableWrite( + grpcCallContext, grpcStorage.getWriteObjectRequest(info, opts), opts); + ApiFuture> start = + ApiFutures.transform( + f, s -> WriteCtx.of(s, opts.getHasher()), MoreExecutors.directExecutor()); + + ClientStreamingCallable write = + grpcStorage.storageClient.writeObjectCallable().withDefaultCallContext(grpcCallContext); + BufferedWritableByteChannelSession session = + ResumableMedia.gapic() + .write() + .byteChannel(write) + .setHasher(opts.getHasher()) + .setByteStringStrategy(ByteStringStrategy.copy()) + .journaling() + .withRetryConfig( + grpcStorage.retrier, + grpcStorage.retryAlgorithmManager.idempotent(), + grpcStorage.storageClient.queryWriteStatusCallable()) + .withBuffer(BufferHandle.allocate(Values.MAX_WRITE_CHUNK_BYTES_VALUE)) + .withRecoveryBuffer(BufferHandle.allocate(Values.MAX_WRITE_CHUNK_BYTES_VALUE)) + .withRecoveryFile(recoveryFile) + .setStartAsync(start) + .build(); + + return new JournalingUpload<>(session, start); + } else { + return CrossTransportUtils.throwGrpcOnly(BlobWriteSessionConfigs.class, "journaling"); + } + } + + private final class JournalingUpload + implements WritableByteChannelSession { + + private final WritableByteChannelSession session; + private final ApiFuture> start; + private final Decoder decoder; + + public JournalingUpload( + WritableByteChannelSession session, + ApiFuture> start) { + this.session = session; + this.start = start; + this.decoder = Conversions.grpc().blobInfo().compose(WriteObjectResponse::getResource); + } + + @Override + public ApiFuture openAsync() { + // register a callback on the result future to record our throughput estimate + Instant begin = clock.instant(); + ApiFutures.addCallback( + session.getResult(), + new ApiFutureCallback() 
{ + @Override + public void onFailure(Throwable t) { + Instant end = clock.instant(); + // start MUST have completed in order for result to resolve, use the utility method + // to take care of the checked exception handling + WriteCtx writeCtx = + ApiExceptions.callAndTranslateApiException(start); + long totalSentBytes = writeCtx.getTotalSentBytes().get(); + gcs.recordThroughput(Record.of(totalSentBytes, begin, end, true)); + } + + @Override + public void onSuccess(WriteObjectResponse result) { + Instant end = clock.instant(); + long totalSentBytes = -1; + if (result.hasResource()) { + totalSentBytes = result.getResource().getSize(); + } else if (result.hasPersistedSize()) { + totalSentBytes = result.getPersistedSize(); + } + if (totalSentBytes > -1) { + gcs.recordThroughput(Record.of(totalSentBytes, begin, end, false)); + } + } + }, + MoreExecutors.directExecutor()); + return session.openAsync(); + } + + @Override + public ApiFuture getResult() { + return ApiFutures.transform( + session.getResult(), decoder::decode, MoreExecutors.directExecutor()); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonConversions.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonConversions.java new file mode 100644 index 000000000000..48a85589772b --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonConversions.java @@ -0,0 +1,1338 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.Storage.BucketField.IP_FILTER; +import static com.google.cloud.storage.Storage.BucketField.SOFT_DELETE_POLICY; +import static com.google.cloud.storage.Utils.bucketNameCodec; +import static com.google.cloud.storage.Utils.dateTimeCodec; +import static com.google.cloud.storage.Utils.durationSecondsCodec; +import static com.google.cloud.storage.Utils.ifNonNull; +import static com.google.cloud.storage.Utils.lift; +import static com.google.cloud.storage.Utils.nullableDateTimeCodec; +import static com.google.common.base.MoreObjects.firstNonNull; + +import com.google.api.client.util.Data; +import com.google.api.client.util.DateTime; +import com.google.api.core.InternalApi; +import com.google.api.services.storage.model.Bucket; +import com.google.api.services.storage.model.Bucket.Billing; +import com.google.api.services.storage.model.Bucket.Encryption; +import com.google.api.services.storage.model.Bucket.IamConfiguration.UniformBucketLevelAccess; +import com.google.api.services.storage.model.Bucket.IpFilter.VpcNetworkSources; +import com.google.api.services.storage.model.Bucket.Lifecycle; +import com.google.api.services.storage.model.Bucket.Lifecycle.Rule; +import com.google.api.services.storage.model.Bucket.Lifecycle.Rule.Action; +import com.google.api.services.storage.model.Bucket.Lifecycle.Rule.Condition; +import com.google.api.services.storage.model.Bucket.RetentionPolicy; +import com.google.api.services.storage.model.Bucket.Versioning; +import com.google.api.services.storage.model.Bucket.Website; +import com.google.api.services.storage.model.BucketAccessControl; +import com.google.api.services.storage.model.ObjectAccessControl; +import com.google.api.services.storage.model.ObjectCustomContextPayload; +import com.google.api.services.storage.model.Policy.Bindings; +import 
com.google.api.services.storage.model.StorageObject; +import com.google.api.services.storage.model.StorageObject.Contexts; +import com.google.api.services.storage.model.StorageObject.Owner; +import com.google.cloud.Binding; +import com.google.cloud.Policy; +import com.google.cloud.storage.Acl.Domain; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.Acl.Group; +import com.google.cloud.storage.Acl.Project; +import com.google.cloud.storage.Acl.RawEntity; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.BlobInfo.CustomerEncryption; +import com.google.cloud.storage.BlobInfo.Retention; +import com.google.cloud.storage.BucketInfo.Autoclass; +import com.google.cloud.storage.BucketInfo.CustomPlacementConfig; +import com.google.cloud.storage.BucketInfo.CustomerManagedEncryptionEnforcementConfig; +import com.google.cloud.storage.BucketInfo.CustomerSuppliedEncryptionEnforcementConfig; +import com.google.cloud.storage.BucketInfo.EncryptionEnforcementRestrictionMode; +import com.google.cloud.storage.BucketInfo.GoogleManagedEncryptionEnforcementConfig; +import com.google.cloud.storage.BucketInfo.IamConfiguration; +import com.google.cloud.storage.BucketInfo.IpFilter; +import com.google.cloud.storage.BucketInfo.LifecycleRule; +import com.google.cloud.storage.BucketInfo.LifecycleRule.AbortIncompleteMPUAction; +import com.google.cloud.storage.BucketInfo.LifecycleRule.DeleteLifecycleAction; +import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleAction; +import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleCondition; +import com.google.cloud.storage.BucketInfo.LifecycleRule.SetStorageClassLifecycleAction; +import com.google.cloud.storage.BucketInfo.Logging; +import com.google.cloud.storage.BucketInfo.ObjectRetention; +import com.google.cloud.storage.BucketInfo.PublicAccessPrevention; +import com.google.cloud.storage.BucketInfo.SoftDeletePolicy; +import 
com.google.cloud.storage.Conversions.Codec; +import com.google.cloud.storage.Cors.Origin; +import com.google.cloud.storage.HmacKey.HmacKeyMetadata; +import com.google.cloud.storage.HmacKey.HmacKeyState; +import com.google.cloud.storage.NotificationInfo.EventType; +import com.google.cloud.storage.NotificationInfo.PayloadFormat; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Maps; +import java.math.BigInteger; +import java.time.OffsetDateTime; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.checkerframework.checker.nullness.qual.Nullable; + +@InternalApi +final class JsonConversions { + static final JsonConversions INSTANCE = new JsonConversions(); + + private final Codec entityCodec = + Codec.of(this::entityEncode, this::entityDecode); + private final Codec objectAclCodec = + Codec.of(this::objectAclEncode, this::objectAclDecode); + private final Codec bucketAclCodec = + Codec.of(this::bucketAclEncode, this::bucketAclDecode); + private final Codec + hmacKeyMetadataCodec = Codec.of(this::hmacKeyMetadataEncode, this::hmacKeyMetadataDecode); + private final Codec hmacKeyCodec = + Codec.of(this::hmacKeyEncode, this::hmacKeyDecode); + private final Codec + serviceAccountCodec = Codec.of(this::serviceAccountEncode, this::serviceAccountDecode); + private final Codec corsCodec = Codec.of(this::corsEncode, this::corsDecode); + private final Codec loggingCodec = + Codec.of(this::loggingEncode, this::loggingDecode); + private final Codec iamConfigurationCodec = + Codec.of(this::iamConfigEncode, this::iamConfigDecode); + private final Codec autoclassCodec = + Codec.of(this::autoclassEncode, this::autoclassDecode); + + private final Codec objectRetentionCodec = + 
Codec.of(this::objectRetentionEncode, this::objectRetentionDecode); + + private final Codec softDeletePolicyCodec = + Codec.of(this::softDeletePolicyEncode, this::softDeletePolicyDecode); + private final Codec lifecycleRuleCodec = + Codec.of(this::lifecycleRuleEncode, this::lifecycleRuleDecode); + private final Codec lifecycleConditionCodec = + Codec.of(this::ruleConditionEncode, this::ruleConditionDecode); + + private final Codec bucketInfoCodec = + Codec.of(this::bucketInfoEncode, this::bucketInfoDecode); + private final Codec + customerEncryptionCodec = + Codec.of(this::customerEncryptionEncode, this::customerEncryptionDecode); + + private final Codec retentionCodec = + Codec.of(this::retentionEncode, this::retentionDecode); + private final Codec blobIdCodec = + Codec.of(this::blobIdEncode, this::blobIdDecode); + private final Codec blobInfoCodec = + Codec.of(this::blobInfoEncode, this::blobInfoDecode); + + private final Codec + hierarchicalNamespaceCodec = + Codec.of(this::hierarchicalNamespaceEncode, this::hierarchicalNamespaceDecode); + + private final Codec + notificationInfoCodec = Codec.of(this::notificationEncode, this::notificationDecode); + private final Codec + customPlacementConfigCodec = + Codec.of(this::customPlacementConfigEncode, this::customPlacementConfigDecode); + private final Codec policyCodec = + Codec.of(this::policyEncode, this::policyDecode); + private final Codec bindingCodec = + Codec.of(this::bindingEncode, this::bindingDecode); + private final Codec + iamConditionCodec = Codec.of(this::conditionEncode, this::conditionDecode); + + private final Codec ipFilterCodec = + Codec.of(this::ipFilterEncode, this::ipFilterDecode); + private final Codec + publicNetworkSourceCodec = + Codec.of(this::publicNetworkSourceEncode, this::publicNetworkSourceDecode); + private final Codec + vpcNetworkSourceCodec = Codec.of(this::vpcNetworkSourceEncode, this::vpcNetworkSourceDecode); + + private final Codec + encryptionEnforcementRestrictionModeCodec = + 
Codec.of( + EncryptionEnforcementRestrictionMode::toString, + EncryptionEnforcementRestrictionMode::valueOf); + private final Codec< + GoogleManagedEncryptionEnforcementConfig, + Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig> + googleManagedEncryptionEnforcementConfigCodec = + Codec.of( + from -> { + Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig to = + new Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig(); + ifNonNull( + from.getRestrictionMode(), + encryptionEnforcementRestrictionModeCodec::encode, + to::setRestrictionMode); + ifNonNull(from.getEffectiveTime(), dateTimeCodec::encode, to::setEffectiveTime); + return to; + }, + from -> { + @Nullable EncryptionEnforcementRestrictionMode mode = null; + if (from.getRestrictionMode() != null) { + mode = + encryptionEnforcementRestrictionModeCodec.decode(from.getRestrictionMode()); + } + if (from.getEffectiveTime() != null) { + return GoogleManagedEncryptionEnforcementConfig.of( + mode, dateTimeCodec.decode(from.getEffectiveTime())); + } + return GoogleManagedEncryptionEnforcementConfig.of(mode); + }); + private final Codec< + CustomerManagedEncryptionEnforcementConfig, + Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig> + customerManagedEncryptionEnforcementConfigCodec = + Codec.of( + from -> { + Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig to = + new Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig(); + ifNonNull( + from.getRestrictionMode(), + encryptionEnforcementRestrictionModeCodec::encode, + to::setRestrictionMode); + ifNonNull(from.getEffectiveTime(), dateTimeCodec::encode, to::setEffectiveTime); + return to; + }, + from -> { + @Nullable EncryptionEnforcementRestrictionMode mode = null; + if (from.getRestrictionMode() != null) { + mode = + encryptionEnforcementRestrictionModeCodec.decode(from.getRestrictionMode()); + } + if (from.getEffectiveTime() != null) { + return CustomerManagedEncryptionEnforcementConfig.of( + mode, 
dateTimeCodec.decode(from.getEffectiveTime())); + } + return CustomerManagedEncryptionEnforcementConfig.of(mode); + }); + private final Codec< + CustomerSuppliedEncryptionEnforcementConfig, + Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig> + customerSuppliedEncryptionEnforcementConfigCodec = + Codec.of( + from -> { + Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig to = + new Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig(); + ifNonNull( + from.getRestrictionMode(), + encryptionEnforcementRestrictionModeCodec::encode, + to::setRestrictionMode); + ifNonNull(from.getEffectiveTime(), dateTimeCodec::encode, to::setEffectiveTime); + return to; + }, + from -> { + @Nullable EncryptionEnforcementRestrictionMode mode = null; + if (from.getRestrictionMode() != null) { + mode = + encryptionEnforcementRestrictionModeCodec.decode(from.getRestrictionMode()); + } + if (from.getEffectiveTime() != null) { + return CustomerSuppliedEncryptionEnforcementConfig.of( + mode, dateTimeCodec.decode(from.getEffectiveTime())); + } + return CustomerSuppliedEncryptionEnforcementConfig.of(mode); + }); + private final Codec objectContextsCodec = + Codec.of(this::objectContextsEncode, this::objectContextsDecode); + + private final Codec + objectCustomContextPayloadCodec = + Codec.of(this::objectCustomContextPayloadEncode, this::objectCustomContextPayloadDecode) + .nullable(); + + private JsonConversions() {} + + Codec entity() { + return entityCodec; + } + + Codec objectAcl() { + return objectAclCodec; + } + + Codec bucketAcl() { + return bucketAclCodec; + } + + Codec hmacKeyMetadata() { + return hmacKeyMetadataCodec; + } + + Codec hmacKey() { + return hmacKeyCodec; + } + + Codec serviceAccount() { + return serviceAccountCodec; + } + + Codec cors() { + return corsCodec; + } + + Codec logging() { + return loggingCodec; + } + + Codec iamConfiguration() { + return iamConfigurationCodec; + } + + Codec lifecycleRule() { + return lifecycleRuleCodec; + } + + 
Codec bucketInfo() { + return bucketInfoCodec; + } + + Codec customerEncryption() { + return customerEncryptionCodec; + } + + Codec blobId() { + return blobIdCodec; + } + + Codec blobInfo() { + return blobInfoCodec; + } + + Codec notificationInfo() { + return notificationInfoCodec; + } + + Codec lifecycleCondition() { + return lifecycleConditionCodec; + } + + Codec customPlacementConfig() { + return customPlacementConfigCodec; + } + + Codec policyCodec() { + return policyCodec; + } + + private StorageObject blobInfoEncode(BlobInfo from) { + StorageObject to = blobIdEncode(from.getBlobId()); + ifNonNull(from.getAcl(), toListOf(objectAcl()::encode), to::setAcl); + ifNonNull(from.getDeleteTimeOffsetDateTime(), dateTimeCodec::encode, to::setTimeDeleted); + ifNonNull(from.getUpdateTimeOffsetDateTime(), dateTimeCodec::encode, to::setUpdated); + ifNonNull(from.getCreateTimeOffsetDateTime(), dateTimeCodec::encode, to::setTimeCreated); + ifNonNull(from.getCustomTimeOffsetDateTime(), dateTimeCodec::encode, to::setCustomTime); + ifNonNull(from.getSize(), BigInteger::valueOf, to::setSize); + ifNonNull( + from.getOwner(), + lift(this::entityEncode).andThen(o -> new Owner().setEntity(o)), + to::setOwner); + ifNonNull(from.getStorageClass(), StorageClass::toString, to::setStorageClass); + ifNonNull( + from.getTimeStorageClassUpdatedOffsetDateTime(), + dateTimeCodec::encode, + to::setTimeStorageClassUpdated); + ifNonNull( + from.getCustomerEncryption(), this::customerEncryptionEncode, to::setCustomerEncryption); + ifNonNull( + from.getRetentionExpirationTimeOffsetDateTime(), + dateTimeCodec::encode, + to::setRetentionExpirationTime); + + ifNonNull(from.getSoftDeleteTime(), dateTimeCodec::encode, to::setSoftDeleteTime); + ifNonNull(from.getHardDeleteTime(), dateTimeCodec::encode, to::setHardDeleteTime); + + // todo: clean this up once retention is enabled in grpc + // This is a workaround so that explicitly null retention objects are only included when the + // user set an existing 
policy to null, to avoid sending any retention objects to the test + // bench. + // We should clean this up once the test bench can handle the retention field. + // See also the comment in StorageImpl.update(BlobInfo blobInfo, BlobTargetOption... options) + // todo: b/308194853 + if (from.getModifiedFields().contains(Storage.BlobField.RETENTION) + && from.getRetention() == null) { + to.setRetention(Data.nullOf(StorageObject.Retention.class)); + } + ifNonNull(from.getRetention(), this::retentionEncode, to::setRetention); + to.setKmsKeyName(from.getKmsKeyName()); + to.setEventBasedHold(from.getEventBasedHold()); + to.setTemporaryHold(from.getTemporaryHold()); + // Do not use, #getMetadata(), it can not return null, which is important to our logic here + Map pbMetadata = from.metadata; + if (pbMetadata != null && !Data.isNull(pbMetadata)) { + pbMetadata = Maps.newHashMapWithExpectedSize(from.getMetadata().size()); + for (Map.Entry entry : from.getMetadata().entrySet()) { + pbMetadata.put(entry.getKey(), firstNonNull(entry.getValue(), Data.nullOf(String.class))); + } + } + to.setMetadata(pbMetadata); + to.setCacheControl(from.getCacheControl()); + to.setContentEncoding(from.getContentEncoding()); + to.setCrc32c(from.getCrc32c()); + to.setContentType(from.getContentType()); + to.setMd5Hash(from.getMd5()); + to.setMediaLink(from.getMediaLink()); + to.setMetageneration(from.getMetageneration()); + to.setContentDisposition(from.getContentDisposition()); + to.setComponentCount(from.getComponentCount()); + to.setContentLanguage(from.getContentLanguage()); + to.setEtag(from.getEtag()); + to.setId(from.getGeneratedId()); + to.setSelfLink(from.getSelfLink()); + ifNonNull(from.getContexts(), objectContextsCodec::encode, to::setContexts); + return to; + } + + private BlobInfo blobInfoDecode(StorageObject from) { + BlobInfo.Builder to = BlobInfo.newBuilder(blobIdDecode(from)); + ifNonNull(from.getCacheControl(), to::setCacheControl); + ifNonNull(from.getContentEncoding(), 
to::setContentEncoding); + ifNonNull(from.getCrc32c(), to::setCrc32c); + ifNonNull(from.getContentType(), to::setContentType); + ifNonNull(from.getMd5Hash(), to::setMd5); + ifNonNull(from.getMediaLink(), to::setMediaLink); + ifNonNull(from.getMetageneration(), to::setMetageneration); + ifNonNull(from.getContentDisposition(), to::setContentDisposition); + ifNonNull(from.getComponentCount(), to::setComponentCount); + ifNonNull(from.getContentLanguage(), to::setContentLanguage); + ifNonNull(from.getEtag(), to::setEtag); + ifNonNull(from.getId(), to::setGeneratedId); + ifNonNull(from.getSelfLink(), to::setSelfLink); + ifNonNull(from.getMetadata(), to::setMetadata); + ifNonNull(from.getTimeDeleted(), dateTimeCodec::decode, to::setDeleteTimeOffsetDateTime); + ifNonNull(from.getUpdated(), dateTimeCodec::decode, to::setUpdateTimeOffsetDateTime); + ifNonNull(from.getTimeCreated(), dateTimeCodec::decode, to::setCreateTimeOffsetDateTime); + ifNonNull(from.getCustomTime(), dateTimeCodec::decode, to::setCustomTimeOffsetDateTime); + ifNonNull(from.getSize(), BigInteger::longValue, to::setSize); + ifNonNull(from.getOwner(), lift(Owner::getEntity).andThen(this::entityDecode), to::setOwner); + ifNonNull(from.getAcl(), toListOf(objectAcl()::decode), to::setAcl); + if (from.containsKey("isDirectory")) { + to.setIsDirectory(Boolean.TRUE); + } + ifNonNull( + from.getCustomerEncryption(), this::customerEncryptionDecode, to::setCustomerEncryption); + ifNonNull(from.getStorageClass(), StorageClass::valueOf, to::setStorageClass); + ifNonNull( + from.getTimeStorageClassUpdated(), + dateTimeCodec::decode, + to::setTimeStorageClassUpdatedOffsetDateTime); + ifNonNull(from.getKmsKeyName(), to::setKmsKeyName); + ifNonNull(from.getEventBasedHold(), to::setEventBasedHold); + ifNonNull(from.getTemporaryHold(), to::setTemporaryHold); + ifNonNull( + from.getRetentionExpirationTime(), + dateTimeCodec::decode, + to::setRetentionExpirationTimeOffsetDateTime); + ifNonNull(from.getRetention(), 
this::retentionDecode, to::setRetention); + ifNonNull(from.getSoftDeleteTime(), dateTimeCodec::decode, to::setSoftDeleteTime); + ifNonNull(from.getHardDeleteTime(), dateTimeCodec::decode, to::setHardDeleteTime); + ifNonNull(from.getContexts(), objectContextsCodec::decode, to::setContexts); + return to.build(); + } + + private StorageObject blobIdEncode(BlobId from) { + StorageObject to = new StorageObject(); + to.setBucket(from.getBucket()); + to.setName(from.getName()); + to.setGeneration(from.getGeneration()); + return to; + } + + private BlobId blobIdDecode(StorageObject from) { + return BlobId.of(from.getBucket(), from.getName(), from.getGeneration()); + } + + private StorageObject.CustomerEncryption customerEncryptionEncode(CustomerEncryption from) { + return new StorageObject.CustomerEncryption() + .setEncryptionAlgorithm(from.getEncryptionAlgorithm()) + .setKeySha256(from.getKeySha256()); + } + + private CustomerEncryption customerEncryptionDecode(StorageObject.CustomerEncryption from) { + return new CustomerEncryption(from.getEncryptionAlgorithm(), from.getKeySha256()); + } + + private StorageObject.Retention retentionEncode(Retention from) { + StorageObject.Retention to = new StorageObject.Retention(); + ifNonNull(from.getMode(), Retention.Mode::toString, to::setMode); + ifNonNull(from.getRetainUntilTime(), dateTimeCodec::encode, to::setRetainUntilTime); + return to; + } + + private Retention retentionDecode(StorageObject.Retention from) { + Retention.Builder to = Retention.newBuilder(); + ifNonNull(from.getMode(), Retention.Mode::valueOf, to::setMode); + ifNonNull(from.getRetainUntilTime(), dateTimeCodec::decode, to::setRetainUntilTime); + return to.build(); + } + + private Bucket.SoftDeletePolicy softDeletePolicyEncode(SoftDeletePolicy from) { + Bucket.SoftDeletePolicy to = new Bucket.SoftDeletePolicy(); + ifNonNull( + from.getRetentionDuration(), durationSecondsCodec::encode, to::setRetentionDurationSeconds); + return to; + } + + private 
SoftDeletePolicy softDeletePolicyDecode(Bucket.SoftDeletePolicy from) { + SoftDeletePolicy.Builder to = SoftDeletePolicy.newBuilder(); + ifNonNull( + from.getRetentionDurationSeconds(), durationSecondsCodec::decode, to::setRetentionDuration); + ifNonNull(from.getEffectiveTime(), dateTimeCodec::decode, to::setEffectiveTime); + return to.build(); + } + + private Bucket bucketInfoEncode(BucketInfo from) { + Bucket to = new Bucket(); + ifNonNull(from.getProject(), to::setProjectNumber); + ifNonNull(from.getAcl(), toListOf(bucketAcl()::encode), to::setAcl); + ifNonNull(from.getCors(), toListOf(cors()::encode), to::setCors); + ifNonNull(from.getCreateTimeOffsetDateTime(), dateTimeCodec::encode, to::setTimeCreated); + ifNonNull(from.getDefaultAcl(), toListOf(objectAcl()::encode), to::setDefaultObjectAcl); + ifNonNull(from.getLocation(), to::setLocation); + ifNonNull(from.getLocationType(), to::setLocationType); + ifNonNull(from.getMetageneration(), to::setMetageneration); + ifNonNull( + from.getOwner(), + lift(this::entityEncode).andThen(o -> new Bucket.Owner().setEntity(o)), + to::setOwner); + ifNonNull(from.getRpo(), Rpo::toString, to::setRpo); + ifNonNull(from.getStorageClass(), StorageClass::toString, to::setStorageClass); + ifNonNull(from.getUpdateTimeOffsetDateTime(), dateTimeCodec::encode, to::setUpdated); + ifNonNull(from.versioningEnabled(), b -> new Versioning().setEnabled(b), to::setVersioning); + to.setEtag(from.getEtag()); + to.setId(from.getGeneratedId()); + to.setName(from.getName()); + to.setSelfLink(from.getSelfLink()); + + ifNonNull(from.requesterPays(), b -> new Bucket.Billing().setRequesterPays(b), to::setBilling); + if (from.getIndexPage() != null || from.getNotFoundPage() != null) { + Website website = new Website(); + website.setMainPageSuffix(from.getIndexPage()); + website.setNotFoundPage(from.getNotFoundPage()); + to.setWebsite(website); + } + + // Do not use, #getLifecycleRules, it can not return null, which is important to our logic here + List 
lifecycleRules = from.lifecycleRules; + if (lifecycleRules != null) { + Lifecycle lifecycle = new Lifecycle(); + if (lifecycleRules.isEmpty()) { + lifecycle.setRule(Collections.emptyList()); + } else { + List rules = new ArrayList<>(); + ifNonNull(lifecycleRules, r -> r.stream().map(lifecycleRule()::encode).forEach(rules::add)); + if (!rules.isEmpty()) { + lifecycle.setRule(ImmutableList.copyOf(rules)); + } + } + + to.setLifecycle(lifecycle); + } + + ifNonNull(from.getDefaultEventBasedHold(), to::setDefaultEventBasedHold); + if (Stream.of( + from.getDefaultKmsKeyName(), + from.getGoogleManagedEncryptionEnforcementConfig(), + from.getCustomerManagedEncryptionEnforcementConfig(), + from.getCustomerSuppliedEncryptionEnforcementConfig()) + .anyMatch(Objects::nonNull)) { + Bucket.Encryption encryptionBuilder = new Encryption(); + ifNonNull(from.getDefaultKmsKeyName(), encryptionBuilder::setDefaultKmsKeyName); + ifNonNull( + from.getGoogleManagedEncryptionEnforcementConfig(), + googleManagedEncryptionEnforcementConfigCodec::encode, + encryptionBuilder::setGoogleManagedEncryptionEnforcementConfig); + ifNonNull( + from.getCustomerManagedEncryptionEnforcementConfig(), + customerManagedEncryptionEnforcementConfigCodec::encode, + encryptionBuilder::setCustomerManagedEncryptionEnforcementConfig); + ifNonNull( + from.getCustomerSuppliedEncryptionEnforcementConfig(), + customerSuppliedEncryptionEnforcementConfigCodec::encode, + encryptionBuilder::setCustomerSuppliedEncryptionEnforcementConfig); + to.setEncryption(encryptionBuilder); + } + Map pbLabels = from.getLabels(); + if (pbLabels != null && !Data.isNull(pbLabels)) { + pbLabels = Maps.newHashMapWithExpectedSize(from.getLabels().size()); + for (Map.Entry entry : from.getLabels().entrySet()) { + pbLabels.put(entry.getKey(), firstNonNull(entry.getValue(), Data.nullOf(String.class))); + } + } + to.setLabels(pbLabels); + maybeEncodeRetentionPolicy(from, to); + ifNonNull(from.getIamConfiguration(), this::iamConfigEncode, 
to::setIamConfiguration); + ifNonNull(from.getAutoclass(), this::autoclassEncode, to::setAutoclass); + ifNonNull(from.getLogging(), this::loggingEncode, to::setLogging); + ifNonNull( + from.getCustomPlacementConfig(), + this::customPlacementConfigEncode, + to::setCustomPlacementConfig); + ifNonNull(from.getObjectRetention(), this::objectRetentionEncode, to::setObjectRetention); + ifNonNull(from.getSoftDeletePolicy(), this::softDeletePolicyEncode, to::setSoftDeletePolicy); + if (from.getSoftDeletePolicy() == null + && from.getModifiedFields().contains(SOFT_DELETE_POLICY)) { + to.setSoftDeletePolicy(Data.nullOf(Bucket.SoftDeletePolicy.class)); + } + ifNonNull( + from.getHierarchicalNamespace(), + this::hierarchicalNamespaceEncode, + to::setHierarchicalNamespace); + ifNonNull(from.getIpFilter(), ipFilterCodec::encode, to::setIpFilter); + if (from.getModifiedFields().contains(IP_FILTER) && from.getIpFilter() == null) { + to.setIpFilter(Data.nullOf(Bucket.IpFilter.class)); + } + return to; + } + + @SuppressWarnings("deprecation") + private BucketInfo bucketInfoDecode(com.google.api.services.storage.model.Bucket from) { + BucketInfo.Builder to = new BucketInfo.BuilderImpl(bucketNameCodec.decode(from.getName())); + ifNonNull(from.getProjectNumber(), to::setProject); + ifNonNull(from.getAcl(), toListOf(bucketAcl()::decode), to::setAcl); + ifNonNull(from.getCors(), toListOf(cors()::decode), to::setCors); + ifNonNull(from.getDefaultObjectAcl(), toListOf(objectAcl()::decode), to::setDefaultAcl); + ifNonNull(from.getEtag(), to::setEtag); + ifNonNull(from.getId(), to::setGeneratedId); + ifNonNull(from.getLocation(), to::setLocation); + ifNonNull(from.getLocationType(), to::setLocationType); + ifNonNull(from.getMetageneration(), to::setMetageneration); + ifNonNull( + from.getOwner(), lift(Bucket.Owner::getEntity).andThen(this::entityDecode), to::setOwner); + ifNonNull(from.getRpo(), Rpo::valueOf, to::setRpo); + ifNonNull(from.getSelfLink(), to::setSelfLink); + 
ifNonNull(from.getStorageClass(), StorageClass::valueOf, to::setStorageClass); + ifNonNull(from.getTimeCreated(), dateTimeCodec::decode, to::setCreateTimeOffsetDateTime); + ifNonNull(from.getUpdated(), dateTimeCodec::decode, to::setUpdateTimeOffsetDateTime); + ifNonNull(from.getVersioning(), Versioning::getEnabled, to::setVersioningEnabled); + ifNonNull(from.getWebsite(), Website::getMainPageSuffix, to::setIndexPage); + ifNonNull(from.getWebsite(), Website::getNotFoundPage, to::setNotFoundPage); + ifNonNull( + from.getLifecycle(), + lift(Lifecycle::getRule).andThen(toListOf(lifecycleRule()::decode)), + to::setLifecycleRules); + ifNonNull(from.getDefaultEventBasedHold(), to::setDefaultEventBasedHold); + ifNonNull(from.getLabels(), JsonConversions::replaceDataNullValuesWithNull, to::setLabels); + ifNonNull(from.getBilling(), Billing::getRequesterPays, to::setRequesterPays); + Encryption encryption = from.getEncryption(); + if (encryption != null) { + String defaultKmsKeyName = encryption.getDefaultKmsKeyName(); + if (defaultKmsKeyName != null && !encryption.getDefaultKmsKeyName().isEmpty()) { + to.setDefaultKmsKeyName(defaultKmsKeyName); + } + if (encryption.getGoogleManagedEncryptionEnforcementConfig() != null) { + to.setGoogleManagedEncryptionEnforcementConfig( + googleManagedEncryptionEnforcementConfigCodec.decode( + encryption.getGoogleManagedEncryptionEnforcementConfig())); + } + if (encryption.getCustomerManagedEncryptionEnforcementConfig() != null) { + to.setCustomerManagedEncryptionEnforcementConfig( + customerManagedEncryptionEnforcementConfigCodec.decode( + encryption.getCustomerManagedEncryptionEnforcementConfig())); + } + if (encryption.getCustomerSuppliedEncryptionEnforcementConfig() != null) { + to.setCustomerSuppliedEncryptionEnforcementConfig( + customerSuppliedEncryptionEnforcementConfigCodec.decode( + encryption.getCustomerSuppliedEncryptionEnforcementConfig())); + } + } + + maybeDecodeRetentionPolicy(from, to); + 
ifNonNull(from.getIamConfiguration(), this::iamConfigDecode, to::setIamConfiguration); + ifNonNull(from.getAutoclass(), this::autoclassDecode, to::setAutoclass); + ifNonNull(from.getLogging(), this::loggingDecode, to::setLogging); + ifNonNull( + from.getCustomPlacementConfig(), + this::customPlacementConfigDecode, + to::setCustomPlacementConfig); + ifNonNull( + from.getHierarchicalNamespace(), + this::hierarchicalNamespaceDecode, + to::setHierarchicalNamespace); + ifNonNull(from.getObjectRetention(), this::objectRetentionDecode, to::setObjectRetention); + ifNonNull(from.getSoftDeletePolicy(), this::softDeletePolicyDecode, to::setSoftDeletePolicy); + ifNonNull(from.getIpFilter(), ipFilterCodec::decode, to::setIpFilter); + if (from.containsKey("isUnreachable")) { + to.setIsUnreachable(Boolean.TRUE); + } + return to.build(); + } + + private Bucket.IamConfiguration iamConfigEncode(IamConfiguration from) { + Bucket.IamConfiguration to = new Bucket.IamConfiguration(); + to.setUniformBucketLevelAccess(ublaEncode(from)); + ifNonNull( + from.getPublicAccessPrevention(), + PublicAccessPrevention::getValue, + to::setPublicAccessPrevention); + return to; + } + + private IamConfiguration iamConfigDecode(Bucket.IamConfiguration from) { + Bucket.IamConfiguration.UniformBucketLevelAccess ubla = from.getUniformBucketLevelAccess(); + + IamConfiguration.Builder to = + IamConfiguration.newBuilder().setIsUniformBucketLevelAccessEnabled(ubla.getEnabled()); + ifNonNull( + ubla.getLockedTime(), + dateTimeCodec::decode, + to::setUniformBucketLevelAccessLockedTimeOffsetDateTime); + ifNonNull( + from.getPublicAccessPrevention(), + PublicAccessPrevention::parse, + to::setPublicAccessPrevention); + return to.build(); + } + + private Bucket.Autoclass autoclassEncode(Autoclass from) { + Bucket.Autoclass to = new Bucket.Autoclass(); + ifNonNull(from.getEnabled(), to::setEnabled); + ifNonNull(from.getToggleTime(), dateTimeCodec::encode, to::setToggleTime); + 
ifNonNull(from.getTerminalStorageClass(), StorageClass::toString, to::setTerminalStorageClass); + ifNonNull( + from.getTerminalStorageClassUpdateTime(), + dateTimeCodec::encode, + to::setTerminalStorageClassUpdateTime); + return to; + } + + private Autoclass autoclassDecode(Bucket.Autoclass from) { + Autoclass.Builder to = Autoclass.newBuilder(); + to.setEnabled(from.getEnabled()); + ifNonNull(from.getToggleTime(), dateTimeCodec::decode, to::setToggleTime); + ifNonNull(from.getTerminalStorageClass(), StorageClass::valueOf, to::setTerminalStorageClass); + ifNonNull( + from.getTerminalStorageClassUpdateTime(), + dateTimeCodec::decode, + to::setTerminalStorageClassUpdateTime); + return to.build(); + } + + private Bucket.ObjectRetention objectRetentionEncode(ObjectRetention from) { + Bucket.ObjectRetention to = new Bucket.ObjectRetention(); + ifNonNull(from.getMode(), ObjectRetention.Mode::toString, to::setMode); + return to; + } + + private ObjectRetention objectRetentionDecode(Bucket.ObjectRetention from) { + ObjectRetention.Builder to = ObjectRetention.newBuilder(); + ifNonNull(from.getMode(), ObjectRetention.Mode::valueOf, to::setMode); + return to.build(); + } + + private UniformBucketLevelAccess ublaEncode(IamConfiguration from) { + UniformBucketLevelAccess to = new UniformBucketLevelAccess(); + to.setEnabled(from.isUniformBucketLevelAccessEnabled()); + ifNonNull( + from.getUniformBucketLevelAccessLockedTimeOffsetDateTime(), + dateTimeCodec::encode, + to::setLockedTime); + return to; + } + + private Rule lifecycleRuleEncode(LifecycleRule from) { + Rule to = new Rule(); + to.setAction(ruleActionEncode(from.getAction())); + to.setCondition(ruleConditionEncode(from.getCondition())); + return to; + } + + private Condition ruleConditionEncode(LifecycleCondition from) { + Function truncatingDateFunction = + lift(dateTimeCodec::encode) + // truncate the date time to the date, and strip any tz drift + .andThen(dt -> new DateTime(true, dt.getValue(), 0)); + Condition to = 
+ new Condition() + .setAge(from.getAge()) + .setIsLive(from.getIsLive()) + .setNumNewerVersions(from.getNumberOfNewerVersions()) + .setDaysSinceNoncurrentTime(from.getDaysSinceNoncurrentTime()) + .setDaysSinceCustomTime(from.getDaysSinceCustomTime()); + ifNonNull(from.getCreatedBeforeOffsetDateTime(), truncatingDateFunction, to::setCreatedBefore); + ifNonNull( + from.getNoncurrentTimeBeforeOffsetDateTime(), + truncatingDateFunction, + to::setNoncurrentTimeBefore); + ifNonNull( + from.getCustomTimeBeforeOffsetDateTime(), truncatingDateFunction, to::setCustomTimeBefore); + ifNonNull( + from.getMatchesStorageClass(), toListOf(Object::toString), to::setMatchesStorageClass); + ifNonNull(from.getMatchesPrefix(), to::setMatchesPrefix); + ifNonNull(from.getMatchesSuffix(), to::setMatchesSuffix); + return to; + } + + private LifecycleCondition ruleConditionDecode(Condition condition) { + if (condition == null) { + return LifecycleCondition.newBuilder().build(); + } + + LifecycleCondition.Builder conditionBuilder = + LifecycleCondition.newBuilder() + .setAge(condition.getAge()) + .setCreatedBeforeOffsetDateTime( + nullableDateTimeCodec.decode(condition.getCreatedBefore())) + .setIsLive(condition.getIsLive()) + .setNumberOfNewerVersions(condition.getNumNewerVersions()) + .setDaysSinceNoncurrentTime(condition.getDaysSinceNoncurrentTime()) + .setNoncurrentTimeBeforeOffsetDateTime( + nullableDateTimeCodec.decode(condition.getNoncurrentTimeBefore())) + .setCustomTimeBeforeOffsetDateTime( + nullableDateTimeCodec.decode(condition.getCustomTimeBefore())) + .setDaysSinceCustomTime(condition.getDaysSinceCustomTime()); + ifNonNull( + condition.getMatchesStorageClass(), + toListOf(StorageClass::valueOf), + conditionBuilder::setMatchesStorageClass); + ifNonNull(condition.getMatchesPrefix(), conditionBuilder::setMatchesPrefix); + ifNonNull(condition.getMatchesSuffix(), conditionBuilder::setMatchesSuffix); + + return conditionBuilder.build(); + } + + private Action 
ruleActionEncode(LifecycleAction from) { + Action to = new Action().setType(from.getActionType()); + if (from.getActionType().equals(SetStorageClassLifecycleAction.TYPE)) { + to.setStorageClass(((SetStorageClassLifecycleAction) from).getStorageClass().toString()); + } + return to; + } + + private LifecycleRule lifecycleRuleDecode(Rule from) { + LifecycleAction lifecycleAction; + + Rule.Action action = from.getAction(); + + switch (action.getType()) { + case DeleteLifecycleAction.TYPE: + lifecycleAction = LifecycleAction.newDeleteAction(); + break; + case SetStorageClassLifecycleAction.TYPE: + lifecycleAction = + LifecycleAction.newSetStorageClassAction( + StorageClass.valueOf(action.getStorageClass())); + break; + case AbortIncompleteMPUAction.TYPE: + lifecycleAction = LifecycleAction.newAbortIncompleteMPUploadAction(); + break; + default: + BucketInfo.log.warning( + "The lifecycle action " + + action.getType() + + " is not supported by this version of the library. " + + "Attempting to update with this rule may cause errors. 
Please " + + "update to the latest version of google-cloud-storage."); + lifecycleAction = LifecycleAction.newLifecycleAction("Unknown action"); + } + + LifecycleCondition lifecycleCondition = ruleConditionDecode(from.getCondition()); + return new LifecycleRule(lifecycleAction, lifecycleCondition); + } + + private Bucket.Logging loggingEncode(Logging from) { + Bucket.Logging to; + if (from.getLogBucket() != null || from.getLogObjectPrefix() != null) { + to = new Bucket.Logging(); + ifNonNull(from.getLogBucket(), to::setLogBucket); + ifNonNull(from.getLogObjectPrefix(), to::setLogObjectPrefix); + } else { + to = Data.nullOf(Bucket.Logging.class); + } + return to; + } + + private Logging loggingDecode(Bucket.Logging from) { + return Logging.newBuilder() + .setLogBucket(from.getLogBucket()) + .setLogObjectPrefix(from.getLogObjectPrefix()) + .build(); + } + + private Bucket.Cors corsEncode(Cors from) { + Bucket.Cors to = new Bucket.Cors(); + to.setMaxAgeSeconds(from.getMaxAgeSeconds()); + to.setResponseHeader(from.getResponseHeaders()); + ifNonNull(from.getMethods(), toListOf(Object::toString), to::setMethod); + ifNonNull(from.getOrigins(), toListOf(Object::toString), to::setOrigin); + return to; + } + + private Cors corsDecode(Bucket.Cors from) { + Cors.Builder to = Cors.newBuilder().setMaxAgeSeconds(from.getMaxAgeSeconds()); + ifNonNull( + from.getMethod(), + m -> + m.stream() + .map(String::toUpperCase) + .map(HttpMethod::valueOf) + .collect(ImmutableList.toImmutableList()), + to::setMethods); + ifNonNull(from.getOrigin(), toListOf(Origin::of), to::setOrigins); + to.setResponseHeaders(from.getResponseHeader()); + return to.build(); + } + + private com.google.api.services.storage.model.ServiceAccount serviceAccountEncode( + ServiceAccount from) { + return new com.google.api.services.storage.model.ServiceAccount() + .setEmailAddress(from.getEmail()); + } + + private ServiceAccount serviceAccountDecode( + com.google.api.services.storage.model.ServiceAccount from) { + 
return ServiceAccount.of(from.getEmailAddress()); + } + + private com.google.api.services.storage.model.HmacKey hmacKeyEncode(HmacKey from) { + com.google.api.services.storage.model.HmacKey to = + new com.google.api.services.storage.model.HmacKey(); + to.setSecret(from.getSecretKey()); + ifNonNull(from.getMetadata(), this::hmacKeyMetadataEncode, to::setMetadata); + return to; + } + + private HmacKey hmacKeyDecode(com.google.api.services.storage.model.HmacKey from) { + return HmacKey.newBuilder(from.getSecret()) + .setMetadata(hmacKeyMetadataDecode(from.getMetadata())) + .build(); + } + + private com.google.api.services.storage.model.HmacKeyMetadata hmacKeyMetadataEncode( + HmacKeyMetadata from) { + com.google.api.services.storage.model.HmacKeyMetadata to = + new com.google.api.services.storage.model.HmacKeyMetadata(); + to.setAccessId(from.getAccessId()); + to.setEtag(from.getEtag()); + to.setId(from.getId()); + to.setProjectId(from.getProjectId()); + ifNonNull(from.getServiceAccount(), ServiceAccount::getEmail, to::setServiceAccountEmail); + ifNonNull(from.getState(), Enum::name, to::setState); + ifNonNull(from.getCreateTimeOffsetDateTime(), dateTimeCodec::encode, to::setTimeCreated); + ifNonNull(from.getUpdateTimeOffsetDateTime(), dateTimeCodec::encode, to::setUpdated); + return to; + } + + private HmacKeyMetadata hmacKeyMetadataDecode( + com.google.api.services.storage.model.HmacKeyMetadata from) { + return HmacKeyMetadata.newBuilder(ServiceAccount.of(from.getServiceAccountEmail())) + .setAccessId(from.getAccessId()) + .setCreateTimeOffsetDateTime(dateTimeCodec.decode(from.getTimeCreated())) + .setEtag(from.getEtag()) + .setId(from.getId()) + .setProjectId(from.getProjectId()) + .setState(HmacKeyState.valueOf(from.getState())) + .setUpdateTimeOffsetDateTime(dateTimeCodec.decode(from.getUpdated())) + .build(); + } + + private String entityEncode(Entity from) { + if (from instanceof RawEntity) { + return from.getValue(); + } else if (from instanceof User) { + 
switch (from.getValue()) { + case User.ALL_AUTHENTICATED_USERS: + return User.ALL_AUTHENTICATED_USERS; + case User.ALL_USERS: + return User.ALL_USERS; + default: + break; + } + } + + // intentionally not an else so that if the default is hit above it will fall through to here + return from.getType().name().toLowerCase() + "-" + from.getValue(); + } + + private Entity entityDecode(String from) { + if (from.startsWith("user-")) { + return new User(from.substring(5)); + } + if (from.equals(User.ALL_USERS)) { + return User.ofAllUsers(); + } + if (from.equals(User.ALL_AUTHENTICATED_USERS)) { + return User.ofAllAuthenticatedUsers(); + } + if (from.startsWith("group-")) { + return new Group(from.substring(6)); + } + if (from.startsWith("domain-")) { + return new Domain(from.substring(7)); + } + if (from.startsWith("project-")) { + int idx = from.indexOf('-', 8); + String team = from.substring(8, idx); + String projectId = from.substring(idx + 1); + return new Project(Project.ProjectRole.valueOf(team.toUpperCase()), projectId); + } + return new RawEntity(from); + } + + private Acl objectAclDecode(ObjectAccessControl from) { + Role role = Role.valueOf(from.getRole()); + Entity entity = entityDecode(from.getEntity()); + return Acl.newBuilder(entity, role).setEtag(from.getEtag()).setId(from.getId()).build(); + } + + private Acl bucketAclDecode(BucketAccessControl from) { + Role role = Role.valueOf(from.getRole()); + Entity entity = entityDecode(from.getEntity()); + return Acl.newBuilder(entity, role).setEtag(from.getEtag()).setId(from.getId()).build(); + } + + private BucketAccessControl bucketAclEncode(Acl from) { + return new BucketAccessControl() + .setEntity(from.getEntity().toString()) + .setRole(from.getRole().toString()) + .setId(from.getId()) + .setEtag(from.getEtag()); + } + + private ObjectAccessControl objectAclEncode(Acl from) { + return new ObjectAccessControl() + .setEntity(entityEncode(from.getEntity())) + .setRole(from.getRole().name()) + .setId(from.getId()) 
+ .setEtag(from.getEtag()); + } + + private com.google.api.services.storage.model.Notification notificationEncode( + NotificationInfo from) { + com.google.api.services.storage.model.Notification to = + new com.google.api.services.storage.model.Notification(); + + to.setEtag(from.getEtag()); + to.setSelfLink(from.getSelfLink()); + to.setTopic(from.getTopic()); + ifNonNull(from.getNotificationId(), to::setId); + ifNonNull(from.getCustomAttributes(), to::setCustomAttributes); + ifNonNull(from.getObjectNamePrefix(), to::setObjectNamePrefix); + + List eventTypes = from.getEventTypes(); + if (eventTypes != null && eventTypes.size() > 0) { + List eventTypesPb = new ArrayList<>(); + for (EventType eventType : eventTypes) { + eventTypesPb.add(eventType.toString()); + } + to.setEventTypes(eventTypesPb); + } + + PayloadFormat payloadFormat = from.getPayloadFormat(); + if (payloadFormat != null) { + to.setPayloadFormat(payloadFormat.toString()); + } else { + to.setPayloadFormat(PayloadFormat.NONE.toString()); + } + return to; + } + + private Bucket.HierarchicalNamespace hierarchicalNamespaceEncode( + BucketInfo.HierarchicalNamespace from) { + Bucket.HierarchicalNamespace to = new Bucket.HierarchicalNamespace(); + ifNonNull(from.getEnabled(), to::setEnabled); + return to; + } + + private BucketInfo.HierarchicalNamespace hierarchicalNamespaceDecode( + Bucket.HierarchicalNamespace from) { + BucketInfo.HierarchicalNamespace.Builder to = BucketInfo.HierarchicalNamespace.newBuilder(); + to.setEnabled(from.getEnabled()); + return to.build(); + } + + private NotificationInfo notificationDecode( + com.google.api.services.storage.model.Notification from) { + NotificationInfo.Builder builder = new NotificationInfo.BuilderImpl(from.getTopic()); + ifNonNull(from.getId(), builder::setNotificationId); + ifNonNull(from.getEtag(), builder::setEtag); + ifNonNull(from.getCustomAttributes(), builder::setCustomAttributes); + ifNonNull(from.getSelfLink(), builder::setSelfLink); + 
ifNonNull(from.getObjectNamePrefix(), builder::setObjectNamePrefix); + ifNonNull(from.getPayloadFormat(), PayloadFormat::valueOf, builder::setPayloadFormat); + + if (from.getEventTypes() != null) { + List eventTypesPb = from.getEventTypes(); + EventType[] eventTypes = new EventType[eventTypesPb.size()]; + for (int index = 0; index < eventTypesPb.size(); index++) { + eventTypes[index] = EventType.valueOf(eventTypesPb.get(index)); + } + builder.setEventTypes(eventTypes); + } + return builder.build(); + } + + private com.google.api.services.storage.model.Policy policyEncode(Policy from) { + com.google.api.services.storage.model.Policy to = + new com.google.api.services.storage.model.Policy(); + ifNonNull(from.getEtag(), to::setEtag); + ifNonNull(from.getVersion(), to::setVersion); + if (from.getBindingsList() != null) { + ImmutableList bindings = + from.getBindingsList().stream() + .map(bindingCodec::encode) + .collect(ImmutableList.toImmutableList()); + to.setBindings(bindings); + } + return to; + } + + private Policy policyDecode(com.google.api.services.storage.model.Policy from) { + Policy.Builder to = Policy.newBuilder(); + String etag = from.getEtag(); + if (etag != null && !etag.isEmpty()) { + to.setEtag(etag); + } + to.setVersion(from.getVersion()); + List bindings = from.getBindings(); + if (bindings != null && !bindings.isEmpty()) { + to.setBindings( + bindings.stream().map(bindingCodec::decode).collect(ImmutableList.toImmutableList())); + } + return to.build(); + } + + private com.google.api.services.storage.model.Policy.Bindings bindingEncode(Binding from) { + com.google.api.services.storage.model.Policy.Bindings to = + new com.google.api.services.storage.model.Policy.Bindings(); + ifNonNull(from.getRole(), to::setRole); + ifNonNull(from.getMembers(), to::setMembers); + ifNonNull(from.getCondition(), iamConditionCodec::encode, to::setCondition); + return to; + } + + private Binding bindingDecode(com.google.api.services.storage.model.Policy.Bindings from) { 
+ Binding.Builder to = Binding.newBuilder(); + ifNonNull(from.getRole(), to::setRole); + ifNonNull(from.getMembers(), to::setMembers); + ifNonNull(from.getCondition(), iamConditionCodec::decode, to::setCondition); + return to.build(); + } + + private com.google.api.services.storage.model.Expr conditionEncode( + com.google.cloud.Condition from) { + com.google.api.services.storage.model.Expr to = + new com.google.api.services.storage.model.Expr(); + ifNonNull(from.getExpression(), to::setExpression); + ifNonNull(from.getTitle(), to::setTitle); + ifNonNull(from.getDescription(), to::setDescription); + // apiary doesn't have a "location" field like grpc does + return to; + } + + private com.google.cloud.Condition conditionDecode( + com.google.api.services.storage.model.Expr from) { + com.google.cloud.Condition.Builder to = com.google.cloud.Condition.newBuilder(); + ifNonNull(from.getExpression(), to::setExpression); + ifNonNull(from.getTitle(), to::setTitle); + ifNonNull(from.getDescription(), to::setDescription); + return to.build(); + } + + private Bucket.CustomPlacementConfig customPlacementConfigEncode(CustomPlacementConfig from) { + Bucket.CustomPlacementConfig to = null; + if (from.getDataLocations() != null) { + to = new Bucket.CustomPlacementConfig(); + to.setDataLocations(from.getDataLocations()); + } + return to; + } + + private CustomPlacementConfig customPlacementConfigDecode(Bucket.CustomPlacementConfig from) { + return CustomPlacementConfig.newBuilder().setDataLocations(from.getDataLocations()).build(); + } + + private Bucket.IpFilter ipFilterEncode(IpFilter from) { + Bucket.IpFilter to = new Bucket.IpFilter(); + ifNonNull(from.getMode(), to::setMode); + ifNonNull( + from.getPublicNetworkSource(), + publicNetworkSourceCodec::encode, + to::setPublicNetworkSource); + ifNonNull( + from.getVpcNetworkSources(), + toListOf(vpcNetworkSourceCodec::encode), + to::setVpcNetworkSources); + ifNonNull(from.getAllowCrossOrgVpcs(), to::setAllowCrossOrgVpcs); + 
ifNonNull(from.getAllowAllServiceAgentAccess(), to::setAllowAllServiceAgentAccess); + return to; + } + + private IpFilter ipFilterDecode(Bucket.IpFilter from) { + IpFilter.Builder to = IpFilter.newBuilder(); + ifNonNull(from.getMode(), to::setMode); + ifNonNull( + from.getPublicNetworkSource(), + publicNetworkSourceCodec::decode, + to::setPublicNetworkSource); + ifNonNull( + from.getVpcNetworkSources(), + toListOf(vpcNetworkSourceCodec::decode), + to::setVpcNetworkSources); + ifNonNull(from.getAllowCrossOrgVpcs(), to::setAllowCrossOrgVpcs); + ifNonNull(from.getAllowAllServiceAgentAccess(), to::setAllowAllServiceAgentAccess); + return to.build(); + } + + private Bucket.IpFilter.PublicNetworkSource publicNetworkSourceEncode( + IpFilter.PublicNetworkSource from) { + Bucket.IpFilter.PublicNetworkSource to = new Bucket.IpFilter.PublicNetworkSource(); + ifNonNull(from.getAllowedIpCidrRanges(), to::setAllowedIpCidrRanges); + return to; + } + + private IpFilter.PublicNetworkSource publicNetworkSourceDecode( + Bucket.IpFilter.PublicNetworkSource from) { + return IpFilter.PublicNetworkSource.of(from.getAllowedIpCidrRanges()); + } + + private Bucket.IpFilter.VpcNetworkSources vpcNetworkSourceEncode(IpFilter.VpcNetworkSource from) { + VpcNetworkSources to = new VpcNetworkSources(); + ifNonNull(from.getNetwork(), to::setNetwork); + ifNonNull(from.getAllowedIpCidrRanges(), to::setAllowedIpCidrRanges); + return to; + } + + private IpFilter.VpcNetworkSource vpcNetworkSourceDecode(Bucket.IpFilter.VpcNetworkSources from) { + IpFilter.VpcNetworkSource.Builder to = IpFilter.VpcNetworkSource.newBuilder(); + ifNonNull(from.getNetwork(), to::setNetwork); + ifNonNull(from.getAllowedIpCidrRanges(), to::setAllowedIpCidrRanges); + return to.build(); + } + + private static void maybeEncodeRetentionPolicy(BucketInfo from, Bucket to) { + if (from.getRetentionPeriodDuration() != null + || from.retentionPolicyIsLocked() != null + || from.getRetentionEffectiveTimeOffsetDateTime() != null) { + 
RetentionPolicy retentionPolicy = new RetentionPolicy(); + ifNonNull( + from.getRetentionPeriodDuration(), + durationSecondsCodec::encode, + retentionPolicy::setRetentionPeriod); + ifNonNull(from.retentionPolicyIsLocked(), retentionPolicy::setIsLocked); + ifNonNull( + from.getRetentionEffectiveTimeOffsetDateTime(), + dateTimeCodec::encode, + retentionPolicy::setEffectiveTime); + to.setRetentionPolicy(retentionPolicy); + } else { + to.setRetentionPolicy(Data.nullOf(Bucket.RetentionPolicy.class)); + } + } + + private static void maybeDecodeRetentionPolicy(Bucket from, BucketInfo.Builder to) { + RetentionPolicy retentionPolicy = from.getRetentionPolicy(); + if (retentionPolicy != null && retentionPolicy.getEffectiveTime() != null) { + to.setRetentionEffectiveTimeOffsetDateTime( + dateTimeCodec.decode(retentionPolicy.getEffectiveTime())); + } + if (retentionPolicy != null) { + ifNonNull(retentionPolicy.getIsLocked(), to::setRetentionPolicyIsLocked); + ifNonNull( + retentionPolicy.getRetentionPeriod(), + durationSecondsCodec::decode, + to::setRetentionPeriodDuration); + } + } + + private Contexts objectContextsEncode(BlobInfo.ObjectContexts from) { + if (from == null) { + return null; + } + Contexts to = new Contexts(); + ifNonNull( + from.getCustom(), + m -> new HashMap<>(Maps.transformValues(m, objectCustomContextPayloadCodec::encode)), + to::setCustom); + return to; + } + + private BlobInfo.ObjectContexts objectContextsDecode(Contexts from) { + if (from == null) { + return null; + } + BlobInfo.ObjectContexts.Builder to = BlobInfo.ObjectContexts.newBuilder(); + ifNonNull( + from.getCustom(), + m -> new HashMap<>(Maps.transformValues(m, objectCustomContextPayloadCodec::decode)), + to::setCustom); + return to.build(); + } + + private ObjectCustomContextPayload objectCustomContextPayloadEncode( + BlobInfo.ObjectCustomContextPayload from) { + ObjectCustomContextPayload to = new ObjectCustomContextPayload(); + ifNonNull(from.getValue(), to::setValue); + 
ifNonNull(from.getCreateTime(), Utils.dateTimeCodec::encode, to::setCreateTime); + ifNonNull(from.getUpdateTime(), Utils.dateTimeCodec::encode, to::setUpdateTime); + return to; + } + + private BlobInfo.ObjectCustomContextPayload objectCustomContextPayloadDecode( + ObjectCustomContextPayload from) { + BlobInfo.ObjectCustomContextPayload.Builder to = + BlobInfo.ObjectCustomContextPayload.newBuilder(); + ifNonNull(from.getValue(), to::setValue); + ifNonNull(from.getCreateTime(), Utils.dateTimeCodec::decode, to::setCreateTime); + ifNonNull(from.getUpdateTime(), Utils.dateTimeCodec::decode, to::setUpdateTime); + return to.build(); + } + + private static Map replaceDataNullValuesWithNull(Map labels) { + boolean anyDataNull = labels.values().stream().anyMatch(Data::isNull); + if (anyDataNull) { + // If we received any Data null values, replace them with null before setting. + // Explicitly use a HashMap as it allows null values. + Map tmp = Maps.newHashMapWithExpectedSize(labels.size()); + for (Entry e : labels.entrySet()) { + String k = e.getKey(); + String v = e.getValue(); + tmp.put(k, Data.isNull(v) ? null : v); + } + return Collections.unmodifiableMap(tmp); + } else { + return labels; + } + } + + /** + * Several properties are translating lists of one type to another. This convenience method allows + * specifying a mapping function and composing as part of an {@code #isNonNull} definition. + * + *

Apiary specific utility method to convert from one list to another for specific Function + */ + private static Function, List> toListOf(Function f) { + // various data level methods in the apiary model are hostile to ImmutableList, as it does not + // provide a public default no args constructor. Instead, apiary uses ArrayList for all internal + // representations of JSON Arrays. + return l -> { + if (l == null) { + return ImmutableList.of(); + } + return l.stream().filter(Objects::nonNull).map(f).collect(Collectors.toList()); + }; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonResumableSession.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonResumableSession.java new file mode 100644 index 000000000000..7003bd5d6c57 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonResumableSession.java @@ -0,0 +1,176 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.HttpContentRange.HasRange; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.spi.v1.HttpRpcContext; +import com.google.cloud.storage.spi.v1.HttpStorageRpc; +import io.opencensus.trace.EndSpanOptions; +import java.nio.ByteBuffer; +import java.nio.channels.GatheringByteChannel; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicBoolean; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class JsonResumableSession { + + static final String SPAN_NAME_WRITE = + String.format(Locale.US, "Sent.%s.write", HttpStorageRpc.class.getName()); + static final EndSpanOptions END_SPAN_OPTIONS = + EndSpanOptions.builder().setSampleToLocalSpanStore(true).build(); + + private final HttpClientContext context; + private final RetrierWithAlg retrier; + private final JsonResumableWrite resumableWrite; + + JsonResumableSession( + HttpClientContext context, RetrierWithAlg retrier, JsonResumableWrite resumableWrite) { + this.context = context; + this.retrier = retrier; + this.resumableWrite = resumableWrite; + } + + /** + * Not automatically retried. Usually called from within another retrying context. We don't yet + * have the concept of nested retry handling. 
+ */ + ResumableOperationResult<@Nullable StorageObject> query() { + return new JsonResumableSessionQueryTask(context, resumableWrite).call(); + } + + ResumableOperationResult<@Nullable StorageObject> put( + RewindableContent content, HttpContentRange contentRange) { + Crc32cValue crc32cSoFar = resumableWrite.getCumulativeCrc32c(); + @Nullable Crc32cValue nextCumulativeCrc32c = + resumableWrite.getHasher().nullSafeConcat(crc32cSoFar, content.getCrc32c()); + @Nullable Crc32cValue finalChecksum = + contentRange.isFinalizing() ? nextCumulativeCrc32c : null; + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + context, resumableWrite, content, contentRange, finalChecksum); + HttpRpcContext httpRpcContext = HttpRpcContext.getInstance(); + try { + httpRpcContext.newInvocationId(); + AtomicBoolean dirty = new AtomicBoolean(false); + ResumableOperationResult<@Nullable StorageObject> result = + retrier.run( + () -> { + if (dirty.getAndSet(true)) { + ResumableOperationResult<@Nullable StorageObject> query = query(); + long persistedSize = query.getPersistedSize(); + if (contentRange.endOffsetEquals(persistedSize) || query.getObject() != null) { + return query; + } else { + task.rewindTo(persistedSize); + } + } + return task.call(); + }, + Decoder.identity()); + + if (nextCumulativeCrc32c != null) { + long persistedSize = result.getPersistedSize(); + if (contentRange.endOffsetEquals(persistedSize) || result.getObject() != null) { + resumableWrite.setCumulativeCrc32c(nextCumulativeCrc32c); + } else if (contentRange instanceof HasRange) { + ByteRangeSpec range = ((HasRange) contentRange).range(); + content.rewindTo(0); + long serverConsumedBytes = persistedSize - range.beginOffset(); + try (HashingGatheringByteChannel hashingChannel = + new HashingGatheringByteChannel(serverConsumedBytes)) { + StorageException.wrapIOException(() -> content.writeTo(hashingChannel)); + resumableWrite.setCumulativeCrc32c( + 
resumableWrite.getHasher().nullSafeConcat(crc32cSoFar, hashingChannel.cumulative)); + } + } else { + throw new StorageException( + 0, + String.format( + Locale.US, + "Result persistedSize (%d) did not match expected end of contentRange (%s) and" + + " contentRange does not have range to allow automatic recovery", + persistedSize, + contentRange)); + } + } + return result; + } finally { + httpRpcContext.clearInvocationId(); + } + } + + private static final class HashingGatheringByteChannel implements GatheringByteChannel { + private final long maxBytesToConsume; + + private Crc32cLengthKnown cumulative; + + private HashingGatheringByteChannel(long maxBytesToConsume) { + this.maxBytesToConsume = maxBytesToConsume; + this.cumulative = Crc32cValue.zero(); + } + + @Override + public int write(ByteBuffer src) { + return Math.toIntExact(write(new ByteBuffer[] {src}, 0, 1)); + } + + @Override + public long write(ByteBuffer[] srcs) { + return write(srcs, 0, srcs.length); + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) { + Crc32cLengthKnown cum = Crc32cValue.zero(); + // Per the GatheringByteChannel contract, consume srcs[offset]..srcs[offset + length - 1]. + for (int i = offset; i < offset + length; i++) { + long toConsume = maxBytesToConsume - cum.getLength(); + if (toConsume <= 0) { + if (cum.getLength() == 0) { + return -1; + } else { + break; + } + } + + ByteBuffer buf = srcs[i]; + if (buf.remaining() <= toConsume) { + cum = cum.concat(Hasher.enabled().hash(buf)); + } else { + ByteBuffer slice = buf.slice(); + int limit = Math.toIntExact(toConsume); + slice.limit(limit); + cum = cum.concat(Hasher.enabled().hash(slice)); + buf.position(buf.position() + limit); + } + } + cumulative = cumulative.concat(cum); + return cum.getLength(); + } + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() {} + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonResumableSessionPutTask.java 
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import com.google.api.client.http.GenericUrl;
import com.google.api.client.http.HttpHeaders;
import com.google.api.client.http.HttpRequest;
import com.google.api.client.http.HttpResponse;
import com.google.api.client.http.HttpResponseException;
import com.google.api.services.storage.model.StorageObject;
import com.google.cloud.storage.HttpContentRange.HasRange;
import com.google.cloud.storage.StorageException.IOExceptionCallable;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import io.opencensus.common.Scope;
import io.opencensus.trace.Span;
import io.opencensus.trace.Status;
import java.io.IOException;
import java.math.BigInteger;
import java.util.Locale;
import java.util.Map.Entry;
import java.util.concurrent.Callable;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * A single PUT attempt against an already-started JSON resumable upload session.
 *
 * <p>One instance represents one slice of content and one {@code Content-Range}. {@link #call()}
 * sends the bytes and classifies the response via {@code UploadFailureScenario}; after a partial
 * acknowledgement, {@link #rewindTo(long)} repositions the content so the same instance can be
 * retried from the service-acknowledged offset.
 */
final class JsonResumableSessionPutTask
    implements Callable<ResumableOperationResult<@Nullable StorageObject>> {

  private final HttpClientContext context;
  private final JsonResumableWrite jsonResumableWrite;
  // The bytes to send; rewindable so a retry can resume from a partially-acked offset.
  private final RewindableContent content;
  // The Content-Range the task was constructed with; never mutated, used as the rewind baseline.
  private final HttpContentRange originalContentRange;
  // Client-side running crc32c of all bytes sent so far; null when checksumming is disabled.
  private final @Nullable Crc32cValue<?> cumulativeCrc32c;

  // The effective Content-Range for the next attempt; narrowed by rewindTo(...).
  private HttpContentRange contentRange;

  @VisibleForTesting
  JsonResumableSessionPutTask(
      HttpClientContext httpClientContext,
      JsonResumableWrite jsonResumableWrite,
      RewindableContent content,
      HttpContentRange originalContentRange) {
    this(httpClientContext, jsonResumableWrite, content, originalContentRange, null);
  }

  @VisibleForTesting
  JsonResumableSessionPutTask(
      HttpClientContext httpClientContext,
      JsonResumableWrite jsonResumableWrite,
      RewindableContent content,
      HttpContentRange originalContentRange,
      @Nullable Crc32cValue<?> cumulativeCrc32c) {
    this.context = httpClientContext;
    this.jsonResumableWrite = jsonResumableWrite;
    this.content = content;
    this.originalContentRange = originalContentRange;
    // contentRange starts equal to the original and is only narrowed by rewindTo(...).
    this.contentRange = originalContentRange;
    this.cumulativeCrc32c = cumulativeCrc32c;
  }

  /**
   * Reposition this task so its next attempt resends starting at the absolute object offset
   * {@code offset} (the offset the service acknowledged having persisted).
   *
   * @throws IllegalArgumentException if {@code offset} is outside the original range
   */
  public void rewindTo(long offset) {
    if (originalContentRange instanceof HasRange) {
      HasRange hasRange = (HasRange) originalContentRange;
      ByteRangeSpec range = hasRange.range();
      long originalBegin = range.beginOffset();
      // Translate the absolute object offset into an offset relative to this task's content.
      long contentOffset = offset - originalBegin;
      Preconditions.checkArgument(
          0 <= contentOffset && contentOffset < range.length(),
          "Rewind offset is out of bounds. (%s <= %s < %s)",
          originalBegin,
          offset,
          range.endOffset());
      content.rewindTo(contentOffset);
    } else {
      // No explicit byte range on the original request; restart the content from its beginning.
      content.rewindTo(0);
    }

    // Narrow the Content-Range header to begin at the new offset for the retry.
    if (contentRange instanceof HttpContentRange.HasRange) {
      HttpContentRange.HasRange range = (HttpContentRange.HasRange) contentRange;
      contentRange = range.map(s -> s.withNewBeginOffset(offset));
    }
  }

  /**
   * Perform one PUT of {@link #content} with the current {@link #contentRange}.
   *
   * <p>Returns an incremental result when the service acknowledges a non-final range, or a
   * complete result (possibly with the finalized {@link StorageObject}) when finalizing succeeds.
   * All other outcomes are surfaced as {@link StorageException}s classified by
   * {@code UploadFailureScenario}.
   *
   * @throws IOException if the request itself fails at the transport layer
   */
  public ResumableOperationResult<@Nullable StorageObject> call() throws IOException {
    Span span = context.startSpan(JsonResumableSession.SPAN_NAME_WRITE);
    Scope scope = context.getTracer().withSpan(span);

    boolean success = false;
    boolean finalizing = originalContentRange.isFinalizing();

    String uploadId = jsonResumableWrite.getUploadId();
    HttpRequest req =
        context
            .getRequestFactory()
            .buildPutRequest(new GenericUrl(uploadId), content)
            .setParser(context.getObjectParser());
    // Status codes are classified manually below; do not let the client throw on non-2xx.
    req.setThrowExceptionOnExecuteError(false);
    HttpHeaders headers = req.getHeaders();
    headers.setContentRange(contentRange.getHeaderValue());
    for (Entry<String, String> e : jsonResumableWrite.getExtraHeaders().entrySet()) {
      headers.set(e.getKey(), e.getValue());
    }
    if (cumulativeCrc32c != null) {
      // Ask the service to validate the cumulative crc32c of everything uploaded so far.
      headers.set("x-goog-hash", "crc32c=" + Utils.crc32cCodec.encode(cumulativeCrc32c.getValue()));
    }

    HttpResponse response = null;
    try {
      response = req.execute();

      int code = response.getStatusCode();

      if (!finalizing && UploadFailureScenario.isContinue(code)) {
        // Non-final chunk, service says "continue": compare its acked Range to what we sent.
        long effectiveEnd = ((HttpContentRange.HasRange) contentRange).range().endOffset();
        @Nullable String range = response.getHeaders().getRange();
        ByteRangeSpec ackRange = ByteRangeSpec.parse(range);
        if (ackRange.endOffset() == effectiveEnd) {
          // Everything we sent was persisted.
          success = true;
          return ResumableOperationResult.incremental(ackRange.endOffset());
        } else if (ackRange.endOffset() < effectiveEnd) {
          // Partial ack: rewind so the next attempt resumes from the acked offset.
          rewindTo(ackRange.endOffset());
          success = true;
          return ResumableOperationResult.incremental(ackRange.endOffset());
        } else {
          // Service claims more bytes than we sent -- unrecoverable inconsistency.
          StorageException se =
              UploadFailureScenario.SCENARIO_7.toStorageException(uploadId, response);
          span.setStatus(Status.UNKNOWN.withDescription(se.getMessage()));
          throw se;
        }
      } else if (finalizing && UploadFailureScenario.isOk(code)) {
        @Nullable StorageObject storageObject;
        BigInteger actualSize = BigInteger.ZERO;

        Long contentLength = response.getHeaders().getContentLength();
        String contentType = response.getHeaders().getContentType();
        String storedContentLength =
            HttpClientContext.firstHeaderValue(
                response.getHeaders(), "x-goog-stored-content-length");
        boolean isJson = contentType != null && contentType.startsWith("application/json");
        if (isJson) {
          storageObject = response.parseAs(StorageObject.class);
          if (storageObject != null) {
            BigInteger size = storageObject.getSize();
            if (size != null) {
              actualSize = size;
            }
          }
        } else if ((contentLength == null || contentLength == 0) && storedContentLength != null) {
          // when a signed url is used, the finalize response is empty
          response.ignore();
          actualSize = new BigInteger(storedContentLength, 10);
          success = true;
          storageObject = null;
        } else {
          // OK status but no usable body or size header -- treat as unknown failure.
          response.ignore();
          StorageException se =
              UploadFailureScenario.SCENARIO_0_1.toStorageException(
                  uploadId, response, null, () -> null);
          span.setStatus(Status.UNKNOWN.withDescription(se.getMessage()));
          throw se;
        }
        // Cross-check the finalized size against the size we declared in Content-Range.
        BigInteger expectedSize =
            BigInteger.valueOf(((HttpContentRange.HasSize) contentRange).getSize());
        int compare = expectedSize.compareTo(actualSize);
        if (compare == 0) {
          success = true;
          return ResumableOperationResult.complete(storageObject, actualSize.longValue());
        } else if (compare > 0) {
          // Service finalized fewer bytes than we sent.
          StorageException se =
              UploadFailureScenario.SCENARIO_4_1.toStorageException(
                  uploadId, response, null, toString(storageObject));
          span.setStatus(Status.UNKNOWN.withDescription(se.getMessage()));
          throw se;
        } else {
          // Service finalized more bytes than we sent.
          StorageException se =
              UploadFailureScenario.SCENARIO_4_2.toStorageException(
                  uploadId, response, null, toString(storageObject));
          span.setStatus(Status.UNKNOWN.withDescription(se.getMessage()));
          throw se;
        }
      } else if (!finalizing && UploadFailureScenario.isOk(code)) {
        // OK on a non-final chunk means the session was finalized prematurely.
        StorageException se =
            UploadFailureScenario.SCENARIO_1.toStorageException(uploadId, response);
        span.setStatus(Status.UNKNOWN.withDescription(se.getMessage()));
        throw se;
      } else if (finalizing && UploadFailureScenario.isContinue(code)) {
        // in order to finalize the content range must have a size, cast down to read it
        HttpContentRange.HasSize size = (HttpContentRange.HasSize) contentRange;

        ByteRangeSpec range = ByteRangeSpec.parse(response.getHeaders().getRange());
        if (range.endOffsetInclusive() < size.getSize()) {
          // Service has fewer bytes than the declared final size.
          StorageException se =
              UploadFailureScenario.SCENARIO_3.toStorageException(uploadId, response);
          span.setStatus(Status.UNKNOWN.withDescription(se.getMessage()));
          throw se;
        } else {
          StorageException se =
              UploadFailureScenario.SCENARIO_2.toStorageException(uploadId, response);
          span.setStatus(Status.UNKNOWN.withDescription(se.getMessage()));
          throw se;
        }
      } else {
        HttpResponseException cause = new HttpResponseException(response);
        String contentType = response.getHeaders().getContentType();
        Long contentLength = response.getHeaders().getContentLength();
        // If the content-range header value has run ahead of the backend, it will respond with
        // a 503 with plain text content
        // Attempt to detect this very loosely as to minimize impact of modified error message
        // This is accurate circa 2023-06
        if ((!UploadFailureScenario.isOk(code) && !UploadFailureScenario.isContinue(code))
            && contentType != null
            && contentType.startsWith("text/plain")
            && contentLength != null
            && contentLength > 0) {
          String errorMessage = cause.getContent().toLowerCase(Locale.US);
          if (errorMessage.contains("content-range")
              && !errorMessage.contains("earlier")) { // TODO: exclude "earlier request"
            StorageException se =
                UploadFailureScenario.SCENARIO_5.toStorageException(
                    uploadId, response, cause, cause::getContent);
            span.setStatus(Status.UNKNOWN.withDescription(se.getMessage()));
            throw se;
          }
        }
        StorageException se = UploadFailureScenario.toStorageException(response, cause, uploadId);
        span.setStatus(Status.UNKNOWN.withDescription(se.getMessage()));
        throw se;
      }
    } catch (StorageException | IllegalArgumentException e) {
      // IllegalArgumentException can happen if there is no json in the body and we try to parse it
      // Our retry algorithms have special case for this, so in an effort to keep compatibility
      // with those existing behaviors, explicitly rethrow an IllegalArgumentException that may have
      // happened
      span.setStatus(Status.UNKNOWN.withDescription(e.getMessage()));
      throw e;
    } catch (Exception e) {
      StorageException se =
          UploadFailureScenario.SCENARIO_0.toStorageException(uploadId, response, e);
      span.setStatus(Status.UNKNOWN.withDescription(se.getMessage()));
      throw se;
    } finally {
      // For successful incremental PUTs the body is unread; release the connection.
      if (success && !finalizing && response != null) {
        response.ignore();
      }
      scope.close();
      span.end(JsonResumableSession.END_SPAN_OPTIONS);
    }
  }

  /** Lazily stringify a possibly-null object for error-detail callables. */
  static IOExceptionCallable<@Nullable String> toString(@Nullable Object o) {
    return () -> o != null ? o.toString() : null;
  }
}
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.cloud.storage.HttpClientContext.firstHeaderValue;

import com.google.api.client.http.EmptyContent;
import com.google.api.client.http.GenericUrl;
import com.google.api.client.http.HttpHeaders;
import com.google.api.client.http.HttpRequest;
import com.google.api.client.http.HttpResponse;
import com.google.api.client.http.HttpResponseException;
import com.google.api.services.storage.model.StorageObject;
import java.io.IOException;
import java.math.BigInteger;
import java.util.Locale;
import java.util.Map.Entry;
import java.util.concurrent.Callable;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Queries the current persisted state of a JSON resumable upload session.
 *
 * <p>Issues an empty PUT with a {@code Content-Range: bytes *&#47;*}-style query header. An OK
 * response means the session is already finalized (complete result); a 308 with a {@code Range}
 * header yields the acknowledged end offset (incremental result); a 308 without a {@code Range}
 * header means no bytes have been persisted yet.
 */
final class JsonResumableSessionQueryTask
    implements Callable<ResumableOperationResult<@Nullable StorageObject>> {

  private final HttpClientContext context;
  private final JsonResumableWrite jsonResumableWrite;

  JsonResumableSessionQueryTask(HttpClientContext context, JsonResumableWrite jsonResumableWrite) {
    this.context = context;
    this.jsonResumableWrite = jsonResumableWrite;
  }

  /**
   * Perform the status query.
   *
   * @return a complete result if the session is finalized, otherwise the incremental offset GCS
   *     has acknowledged (0 when no {@code Range} header is returned)
   * @throws StorageException for any response that cannot be classified as complete/incremental
   */
  public ResumableOperationResult<@Nullable StorageObject> call() {
    HttpResponse response = null;
    String uploadId = jsonResumableWrite.getUploadId();
    try {
      HttpRequest req =
          context
              .getRequestFactory()
              .buildPutRequest(new GenericUrl(uploadId), new EmptyContent())
              .setParser(context.getObjectParser());
      // Status codes are classified manually below; do not let the client throw on non-2xx.
      req.setThrowExceptionOnExecuteError(false);
      HttpHeaders headers = req.getHeaders();
      headers.setContentRange(HttpContentRange.query().getHeaderValue());
      for (Entry<String, String> e : jsonResumableWrite.getExtraHeaders().entrySet()) {
        headers.set(e.getKey(), e.getValue());
      }

      response = req.execute();

      int code = response.getStatusCode();
      if (UploadFailureScenario.isOk(code)) {
        // OK on a query means the session was already finalized; recover object and size.
        @Nullable StorageObject storageObject;
        @Nullable BigInteger actualSize;

        Long contentLength = response.getHeaders().getContentLength();
        String contentType = response.getHeaders().getContentType();
        String storedContentLength =
            firstHeaderValue(response.getHeaders(), "x-goog-stored-content-length");
        boolean isJson = contentType != null && contentType.startsWith("application/json");
        if (isJson) {
          storageObject = response.parseAs(StorageObject.class);
          actualSize = storageObject != null ? storageObject.getSize() : null;
        } else if ((contentLength == null || contentLength == 0) && storedContentLength != null) {
          // when a signed url is used, the finalize response is empty
          response.ignore();
          actualSize = new BigInteger(storedContentLength, 10);
          storageObject = null;
        } else {
          // OK status but no usable body or size header -- treat as unknown failure.
          response.ignore();
          throw UploadFailureScenario.SCENARIO_0_1.toStorageException(
              uploadId, response, null, () -> null);
        }
        if (actualSize != null) {
          if (storageObject != null) {
            return ResumableOperationResult.complete(storageObject, actualSize.longValue());
          } else {
            // Size known but no object metadata (signed-url case); report as incremental.
            return ResumableOperationResult.incremental(actualSize.longValue());
          }
        } else {
          throw UploadFailureScenario.SCENARIO_0.toStorageException(
              uploadId,
              response,
              null,
              () -> storageObject != null ? storageObject.toString() : null);
        }
      } else if (UploadFailureScenario.isContinue(code)) {
        String range1 = response.getHeaders().getRange();
        if (range1 != null) {
          ByteRangeSpec range = ByteRangeSpec.parse(range1);
          long endOffset = range.endOffset();
          return ResumableOperationResult.incremental(endOffset);
        } else {
          // According to
          // https://cloud.google.com/storage/docs/performing-resumable-uploads#status-check a 308
          // response that does not contain a Range header should be interpreted as GCS having
          // received no data.
          return ResumableOperationResult.incremental(0);
        }
      } else {
        HttpResponseException cause = new HttpResponseException(response);
        String contentType = response.getHeaders().getContentType();
        Long contentLength = response.getHeaders().getContentLength();
        // If the content-range header value has run ahead of the backend, it will respond with
        // a 503 with plain text content
        // Attempt to detect this very loosely as to minimize impact of modified error message
        // This is accurate circa 2023-06
        if ((!UploadFailureScenario.isOk(code) && !UploadFailureScenario.isContinue(code))
            && contentType != null
            && contentType.startsWith("text/plain")
            && contentLength != null
            && contentLength > 0) {
          String errorMessage = cause.getContent().toLowerCase(Locale.US);
          if (errorMessage.contains("content-range")) {
            throw UploadFailureScenario.SCENARIO_5.toStorageException(
                uploadId, response, cause, cause::getContent);
          }
        }
        throw UploadFailureScenario.toStorageException(response, cause, uploadId);
      }
    } catch (StorageException se) {
      throw se;
    } catch (Exception e) {
      throw UploadFailureScenario.SCENARIO_0.toStorageException(uploadId, response, e);
    } finally {
      // Always release the connection; failures while draining are deliberately swallowed.
      if (response != null) {
        try {
          response.ignore();
        } catch (IOException ignore) {
        }
      }
    }
  }
}
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.common.base.Preconditions.checkArgument;

import com.google.api.services.storage.model.StorageObject;
import com.google.cloud.storage.spi.v1.StorageRpc;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableMap;
import com.google.gson.Gson;
import com.google.gson.stream.JsonReader;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.io.StringReader;
import java.util.Map;
import java.util.Objects;
import org.checkerframework.checker.lock.qual.GuardedBy;
import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Serializable state of a JSON-API resumable upload session: the upload id, the begin offset, the
 * request {@link StorageObject} (or a signed url), and optional cumulative crc32c state.
 *
 * <p>{@link StorageObject} is not {@code java.io.Serializable}; it is carried across Java
 * serialization as JSON via the {@code objectJson} field (see {@link #writeObject}/{@link
 * #readObject}). The crc32c value, when present, is carried the same way base64-encoded.
 */
final class JsonResumableWrite implements Serializable {
  private static final long serialVersionUID = 7934407897802252292L;
  private static final Gson gson = new Gson();

  // Transient: reconstituted from objectJson in readObject.
  @MonotonicNonNull private transient StorageObject object;
  @MonotonicNonNull private transient Hasher hasher;
  // Running crc32c of bytes uploaded so far; null when checksum validation is disabled.
  @MonotonicNonNull private transient Crc32cValue<?> cumulativeCrc32c;
  @MonotonicNonNull private final Map<StorageRpc.Option, ?> options;

  @MonotonicNonNull private final String signedUrl;

  @NonNull private final String uploadId;
  private final long beginOffset;

  // Lazily computed JSON form of `object`; also doubles as the "initialized" flag for the
  // double-checked locking in getObjectJson().
  private volatile String objectJson;

  // Fix: previously annotated @GuardedBy("objectJson"), but the lock actually held when this
  // field is written (getObjectJson) is `this`.
  @GuardedBy("this")
  private String base64CumulativeCrc32c;

  private JsonResumableWrite(
      StorageObject object,
      @MonotonicNonNull Hasher hasher,
      @MonotonicNonNull Crc32cValue<?> cumulativeCrc32c,
      Map<StorageRpc.Option, ?> options,
      String signedUrl,
      @NonNull String uploadId,
      long beginOffset) {
    this.object = object;
    this.hasher = hasher;
    this.cumulativeCrc32c = cumulativeCrc32c;
    this.options = options;
    this.signedUrl = signedUrl;
    this.uploadId = uploadId;
    this.beginOffset = beginOffset;
  }

  /** Extra headers requested via {@link StorageRpc.Option#EXTRA_HEADERS}, or an empty map. */
  ImmutableMap<String, String> getExtraHeaders() {
    if (options != null) {
      Object tmp = options.get(StorageRpc.Option.EXTRA_HEADERS);
      if (tmp != null) {
        //noinspection unchecked
        return (ImmutableMap<String, String>) tmp;
      }
    }
    return ImmutableMap.of();
  }

  public @NonNull String getUploadId() {
    return uploadId;
  }

  public long getBeginOffset() {
    return beginOffset;
  }

  /**
   * Copy of this state with an advanced begin offset.
   *
   * @throws IllegalArgumentException if {@code newBeginOffset} would move the offset backwards
   */
  public JsonResumableWrite withBeginOffset(long newBeginOffset) {
    checkArgument(
        newBeginOffset >= beginOffset,
        "New beginOffset must be >= existing beginOffset (%s >= %s)",
        newBeginOffset,
        beginOffset);
    return new JsonResumableWrite(
        object, hasher, cumulativeCrc32c, options, signedUrl, uploadId, newBeginOffset);
  }

  public @MonotonicNonNull Hasher getHasher() {
    return hasher;
  }

  public @MonotonicNonNull Crc32cValue<?> getCumulativeCrc32c() {
    return cumulativeCrc32c;
  }

  // NOTE(review): mutates a field that participates in equals/hashCode; callers must not use
  // this instance as a hash key across calls.
  public void setCumulativeCrc32c(Crc32cValue<?> cumulativeCrc32c) {
    this.cumulativeCrc32c = cumulativeCrc32c;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof JsonResumableWrite)) {
      return false;
    }
    JsonResumableWrite that = (JsonResumableWrite) o;
    return beginOffset == that.beginOffset
        && Objects.equals(object, that.object)
        && Objects.equals(hasher, that.hasher)
        // Fix: previously called cumulativeCrc32c.eqValue(...) unconditionally, throwing NPE for
        // instances created without checksumming (e.g. of(req, options, uploadId, beginOffset)).
        && crc32cEquals(cumulativeCrc32c, that.cumulativeCrc32c)
        && Objects.equals(options, that.options)
        && Objects.equals(signedUrl, that.signedUrl)
        && Objects.equals(uploadId, that.uploadId);
  }

  /** Null-safe comparison of two possibly-absent crc32c values. */
  private static boolean crc32cEquals(
      @Nullable Crc32cValue<?> a, @Nullable Crc32cValue<?> b) {
    if (a == null || b == null) {
      return a == b;
    }
    return a.eqValue(b);
  }

  @Override
  public int hashCode() {
    // Fix: guard the getValue() dereference; cumulativeCrc32c is legitimately null when checksum
    // validation is disabled, and hashCode() previously threw NPE in that case.
    return Objects.hash(
        object,
        hasher,
        cumulativeCrc32c == null ? null : cumulativeCrc32c.getValue(),
        options,
        signedUrl,
        uploadId,
        beginOffset);
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this)
        .add("object", object)
        .add("hasher", hasher)
        .add("cumulativeCrc32c", cumulativeCrc32c)
        .add("options", options)
        .add("signedUrl", signedUrl)
        .add("uploadId", uploadId)
        .add("beginOffset", beginOffset)
        .toString();
  }

  /**
   * Lazily compute (once) the JSON form of {@link #object} and the base64 form of the crc32c.
   * Thread-safe via double-checked locking on {@code this}.
   */
  private String getObjectJson() {
    if (objectJson == null) {
      synchronized (this) {
        if (objectJson == null) {
          // Set the crc32c side-channel before objectJson: objectJson is the publication flag
          // for the unsynchronized outer check.
          // Fix: encode only when a crc32c is present; previously this NPE'd during
          // serialization of any instance created without checksumming.
          base64CumulativeCrc32c =
              cumulativeCrc32c == null
                  ? null
                  : Utils.crc32cCodec.encode(cumulativeCrc32c.getValue());
          objectJson = gson.toJson(object);
        }
      }
    }
    return objectJson;
  }

  // Ensure objectJson/base64CumulativeCrc32c are populated before default serialization.
  private void writeObject(ObjectOutputStream out) throws IOException {
    String ignore = getObjectJson();
    out.defaultWriteObject();
  }

  // Reconstitute the transient StorageObject (and crc32c/hasher state, when present) from the
  // serialized JSON fields.
  private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    in.defaultReadObject();
    JsonReader jsonReader = gson.newJsonReader(new StringReader(this.objectJson));
    this.object = gson.fromJson(jsonReader, StorageObject.class);
    if (base64CumulativeCrc32c != null) {
      Integer decode = Utils.crc32cCodec.decode(base64CumulativeCrc32c);
      if (decode == 0) {
        this.cumulativeCrc32c = Crc32cValue.zero();
      } else {
        this.cumulativeCrc32c = Crc32cValue.of(decode);
      }
      this.hasher = Hasher.enabled();
    }
  }

  /** Factory for a session without client-side checksumming. */
  static JsonResumableWrite of(
      StorageObject req, Map<StorageRpc.Option, ?> options, String uploadId, long beginOffset) {
    return of(req, options, uploadId, beginOffset, Hasher.noop(), null);
  }

  /** Factory for a session with explicit hasher/crc32c state. */
  static JsonResumableWrite of(
      StorageObject req,
      Map<StorageRpc.Option, ?> options,
      String uploadId,
      long beginOffset,
      Hasher hasher,
      Crc32cValue<?> initialValue) {
    return new JsonResumableWrite(req, hasher, initialValue, options, null, uploadId, beginOffset);
  }

  /**
   * Factory for a signed-url session. Checksumming is only enabled when starting from offset 0 --
   * resuming mid-stream leaves no way to know the crc32c of the already-persisted bytes.
   */
  static JsonResumableWrite of(String signedUrl, String uploadId, long beginOffset) {
    Hasher hasher = Hasher.noop();
    if (beginOffset == 0) {
      hasher = Hasher.defaultHasher();
    }
    return new JsonResumableWrite(
        null, hasher, hasher.initialValue(), null, signedUrl, uploadId, beginOffset);
  }
}
/*
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import com.google.api.client.json.GenericJson;
import com.google.api.client.json.JsonObjectParser;
import com.google.api.client.json.gson.GsonFactory;
import com.google.cloud.storage.UnifiedOpts.NamedField;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableSet;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonNull;
import com.google.gson.JsonObject;
import com.google.gson.JsonPrimitive;
import java.io.IOException;
import java.io.StringReader;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.checkerframework.checker.nullness.qual.NonNull;

/**
 * JSON field-selection utilities: project an apiary model object down to a chosen set of field
 * paths, preserving explicit nulls (which signal deletion in patch semantics).
 */
final class JsonUtils {

  private static final Gson gson =
      new GsonBuilder()
          // ensure null values are not stripped, they are important to us
          .serializeNulls()
          .setPrettyPrinting()
          .create();
  @VisibleForTesting static final JsonObjectParser jop = new JsonObjectParser(new GsonFactory());
  // Matches a path segment with an array index suffix, e.g. "acl[3]" -> ("acl", 3).
  private static final Pattern array_part = Pattern.compile("(.*)\\[(\\d+)]");

  private JsonUtils() {}

  /**
   * Given a GenericJson src, and a list of {@code fieldsForOutput} create a new GenericJson where
   * every field specified in {@code fieldsForOutput} is present. If a field exists in {@code src}
   * with a specified name, that value will be used. If the field does not exist in {@code src} it
   * will be set to {@code null}.
   */
  static <T extends GenericJson> T getOutputJsonWithSelectedFields(
      T src, Set<NamedField> fieldsForOutput) {
    Set<String> fieldPaths =
        fieldsForOutput.stream()
            .map(NamedField::getApiaryName)
            .collect(ImmutableSet.toImmutableSet());
    try {
      // The datamodel of the apiary json representation doesn't have a common parent for all
      // field types, rather than writing a significant amount of code to handle all of these types
      // leverage Gson.
      // 1. serialize the object to it's json string
      // 2. load that back with gson
      // 3. use gson's datamodel which is more sane to allow named field traversal and cross
      //    selection
      // 4. output the json string of the resulting gson object
      // 5. deserialize the json string to the apiary model class.
      String string = jop.getJsonFactory().toPrettyString(src);
      JsonObject jsonObject = gson.fromJson(string, JsonObject.class);
      JsonObject ret = getOutputJson(jsonObject, fieldPaths);
      String json = gson.toJson(ret);
      Class<? extends GenericJson> aClass = src.getClass();
      //noinspection unchecked
      Class<T> clazz = (Class<T>) aClass;
      return jop.parseAndClose(new StringReader(json), clazz);
    } catch (IOException e) {
      // StringReader does not throw an IOException
      throw StorageException.coalesce(e);
    }
  }

  /**
   * Given the provided {@code inputJson} flatten it to a Map&lt;String, String&gt; where keys are
   * the field path, and values are the string representation of the value. Then, create a
   * Map&lt;String, String&gt; by defining an entry for each value from {@code fieldsInOutput} with
   * a null value. Then, diff the two maps retaining those entries that present in both, and adding
   * entries that only exist in the right. Then, turn that diffed map back into a tree.
   */
  @VisibleForTesting
  static @NonNull JsonObject getOutputJson(JsonObject inputJson, Set<String> fieldsInOutput) {

    Map<String, String> l = flatten(inputJson);

    // use hashmap so we can have null values
    HashMap<String, String> flat = new HashMap<>();
    for (String fieldToRetain : fieldsInOutput) {
      boolean keyFound = false;
      // Check for exact match or prefix match in the flattened source map (l)
      for (Map.Entry<String, String> sourceEntry : l.entrySet()) {
        String sourceKey = sourceEntry.getKey();
        if (sourceKey.equals(fieldToRetain) || sourceKey.startsWith(fieldToRetain + ".")) {
          flat.put(sourceKey, sourceEntry.getValue());
          keyFound = true;
        }
      }
      // If the field to retain wasn't found in the source, it means we need to add it
      // to the output with a null value, signaling a deletion.
      if (!keyFound) {
        flat.put(fieldToRetain, null);
      }
    }
    return treeify(flat);
  }

  /**
   * Given a {@link JsonObject} produce a map where keys represent the full field path using json
   * traversal notation ({@code a.b.c.d}) and the value is the string representations of that leaf
   * value.
   *
   * <p>Inverse of {@link #treeify(Map)}
   *
   * @see #treeify
   */
  @VisibleForTesting
  static Map<String, String> flatten(JsonObject o) {
    // use hashmap so we can have null values
    HashMap<String, String> ret = new HashMap<>();
    for (Entry<String, JsonElement> e : o.asMap().entrySet()) {
      ret.putAll(flatten(e.getKey(), e.getValue()));
    }
    return ret;
  }

  /**
   * Given a map where keys represent json field paths and values represent values, produce a
   * {@link JsonObject} with the tree structure matching those paths and values.
   *
   * <p>Inverse of {@link #flatten(JsonObject)}
   *
   * @see #flatten(JsonObject)
   */
  @VisibleForTesting
  static JsonObject treeify(Map<String, String> m) {
    JsonObject o = new JsonObject();
    for (Entry<String, String> e : m.entrySet()) {
      String key = e.getKey();
      String[] splits = key.split("\\.");
      String leaf = splits[splits.length - 1];

      // Walk/create the tree one path segment at a time.
      JsonElement curr = o;
      // currIdx tracks the array index of the segment that produced the current JsonArray, -1
      // when the current position is not inside an array.
      int currIdx = -1;
      for (int i = 0, splitsEnd = splits.length, leafIdx = splitsEnd - 1; i < splitsEnd; i++) {
        final String name;
        final int idx;
        {
          // Split "name[idx]" into (name, idx); idx == -1 for plain object segments.
          String split = splits[i];
          Matcher matcher = array_part.matcher(split);
          if (matcher.matches()) {
            name = matcher.group(1);
            String idxString = matcher.group(2);
            idx = Integer.parseInt(idxString);
          } else {
            idx = -1;
            name = split;
          }
        }

        if (curr.isJsonObject()) {
          if (i != leafIdx) {
            // Intermediate segment: descend, creating an object (or array for indexed
            // segments) if absent.
            curr =
                curr.getAsJsonObject()
                    .asMap()
                    .computeIfAbsent(
                        name,
                        s -> {
                          if (idx > -1) {
                            return new JsonArray();
                          }
                          return new JsonObject();
                        });
          } else if (idx > -1) {
            // Leaf segment that is itself an array element: make sure the array exists.
            curr = curr.getAsJsonObject().asMap().computeIfAbsent(name, s -> new JsonArray());
          }
          if (currIdx == -1) {
            currIdx = idx;
          } else {
            currIdx = -1;
          }
        }

        if (curr.isJsonArray()) {
          // Pad the array with nulls so index currIdx is addressable.
          JsonArray a = curr.getAsJsonArray();
          int size = a.size();
          int nullElementsToAdd = 0;
          if (size < currIdx) {
            nullElementsToAdd = currIdx - size;
          }

          for (int j = 0; j < nullElementsToAdd; j++) {
            a.add(JsonNull.INSTANCE);
          }
        }

        if (i == leafIdx) {
          // Write the leaf value (null kept as explicit JsonNull -- it signals deletion).
          String v = e.getValue();
          if (curr.isJsonObject()) {
            curr.getAsJsonObject().addProperty(leaf, v);
          } else if (curr.isJsonArray()) {
            JsonArray a = curr.getAsJsonArray();
            JsonElement toAdd;
            if (idx != currIdx) {
              // Leaf is a field of an object that lives inside the array.
              JsonObject tmp = new JsonObject();
              tmp.addProperty(leaf, v);
              toAdd = tmp;
            } else {
              toAdd = v == null ? JsonNull.INSTANCE : new JsonPrimitive(v);
            }

            if (a.size() == currIdx) {
              a.add(toAdd);
            } else {
              List<JsonElement> l = a.asList();
              l.add(currIdx, toAdd);
              // the add above will push all values after it down an index, we instead want to
              // replace it. Remove the next index so we have the same overall size of array.
              l.remove(currIdx + 1);
            }
          }
        }
      }
    }
    return o;
  }

  // Recursive helper: flatten a single element rooted at path prefix k.
  private static Map<String, String> flatten(String k, JsonElement e) {
    HashMap<String, String> ret = new HashMap<>();
    if (e.isJsonObject()) {
      JsonObject o = e.getAsJsonObject();
      for (Entry<String, JsonElement> oe : o.asMap().entrySet()) {
        String prefix = k + "." + oe.getKey();
        ret.putAll(flatten(prefix, oe.getValue()));
      }
    } else if (e.isJsonArray()) {
      List<JsonElement> asList = e.getAsJsonArray().asList();
      for (int i = 0, asListSize = asList.size(); i < asListSize; i++) {
        JsonElement ee = asList.get(i);
        ret.putAll(flatten(k + "[" + i + "]", ee));
      }
    } else if (e.isJsonNull()) {
      // Preserve explicit nulls; they are meaningful (deletion) downstream.
      ret.put(k, null);
    } else {
      ret.put(k, e.getAsString());
    }
    return ret;
  }
}
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import java.nio.channels.ReadableByteChannel; +import java.util.function.Supplier; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; +import org.checkerframework.checker.nullness.qual.NonNull; + +final class LazyReadChannel { + + private final Supplier> sessionSupplier; + + @MonotonicNonNull private volatile ReadableByteChannelSession session; + @MonotonicNonNull private volatile RBC channel; + + private boolean open = false; + + LazyReadChannel(Supplier> sessionSupplier) { + this.sessionSupplier = sessionSupplier; + } + + @NonNull RBC getChannel() { + if (channel != null) { + return channel; + } else { + synchronized (this) { + if (channel == null) { + open = true; + channel = getSession().open(); + } + return channel; + } + } + } + + @NonNull ReadableByteChannelSession getSession() { + if (session != null) { + return session; + } else { + synchronized (this) { + if (session == null) { + session = sessionSupplier.get(); + } + return session; + } + } + } + + boolean isOpen() { + return open && getChannel().isOpen(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/LazyWriteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/LazyWriteChannel.java new file mode 100644 index 000000000000..1f440ed936ce --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/LazyWriteChannel.java @@ -0,0 +1,67 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import java.util.function.Supplier; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; +import org.checkerframework.checker.nullness.qual.NonNull; + +final class LazyWriteChannel { + + private final Supplier> sessionSupplier; + + @MonotonicNonNull private volatile BufferedWritableByteChannelSession session; + @MonotonicNonNull private volatile BufferedWritableByteChannel channel; + + private boolean open = false; + + LazyWriteChannel(Supplier> sessionSupplier) { + this.sessionSupplier = sessionSupplier; + } + + @NonNull BufferedWritableByteChannel getChannel() { + if (channel != null) { + return channel; + } else { + synchronized (this) { + if (channel == null) { + open = true; + channel = getSession().open(); + } + return channel; + } + } + } + + @NonNull BufferedWritableByteChannelSession getSession() { + if (session != null) { + return session; + } else { + synchronized (this) { + if (session == null) { + session = sessionSupplier.get(); + } + return session; + } + } + } + + boolean isOpen() { + return open && getChannel().isOpen(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/LinearExponentialRangeSpecFunction.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/LinearExponentialRangeSpecFunction.java new file mode 100644 index 000000000000..62826d737d3e --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/LinearExponentialRangeSpecFunction.java @@ -0,0 +1,178 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.api.core.BetaApi; +import com.google.common.base.MoreObjects; +import com.google.common.math.DoubleMath; +import java.math.RoundingMode; +import java.util.Objects; +import java.util.OptionalLong; +import javax.annotation.concurrent.Immutable; + +/** + * Produce a new {@link RangeSpec} relative to the provided {@code offset} and {@code prev}. Scaling + * up the maxLength if a sequential match. + * + *

Instances of this class are immutable and thread safe. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ +@BetaApi +@Immutable +public final class LinearExponentialRangeSpecFunction extends RangeSpecFunction { + + static final LinearExponentialRangeSpecFunction INSTANCE = + new LinearExponentialRangeSpecFunction(ByteSizeConstants._2MiB, 4.0d); + private final long initialMaxLength; + private final double maxLengthScalar; + + private LinearExponentialRangeSpecFunction(long initialMaxLength, double maxLengthScalar) { + this.initialMaxLength = initialMaxLength; + this.maxLengthScalar = maxLengthScalar; + } + + /** + * Initial maxLength a {@link RangeSpec}s maxLength should be set to if no previous maxLength is + * specified, or if the provided offset is not a sequential match. + * + *

Default: {@code 2097152 (2 MiB)} + * + * @see #withInitialMaxLength(long) + * @see RangeSpec#maxLength() + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + public long getInitialMaxLength() { + return initialMaxLength; + } + + /** + * Return an instance with the {@code initialMaxLength} set to the specified value. + * + *

Default: {@code 2097152 (2 MiB)} + * + * @param initialMaxLength The number of bytes a {@link RangeSpec}s maxLength should be set to if + * no previous maxLength is specified, or if the provided offset is not a sequential match. + * Must be > {@code 0}. + * @see #getInitialMaxLength() + * @see RangeSpec#maxLength() + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + public LinearExponentialRangeSpecFunction withInitialMaxLength(long initialMaxLength) { + checkArgument(initialMaxLength > 0, "initialMaxLength > 0 (%s > 0)", initialMaxLength); + return new LinearExponentialRangeSpecFunction(initialMaxLength, maxLengthScalar); + } + + /** + * The scalar value used to scale the max length of a {@link RangeSpec} when the provided offset + * is a sequential match. + * + *

Default: {@code 4.0} + * + * @see #withMaxLengthScalar(double) + * @see RangeSpec#maxLength() + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + public double getMaxLengthScalar() { + return maxLengthScalar; + } + + /** + * Return an instance with the {@code maxLengthScalar} set to the specified value. + * + *

Default: {@code 4.0} + * + * @param maxLengthScalar The scalar to apply to the max length of a previous {@link RangeSpec} + * when the provided offset is a sequential match. Must be $gt;= {@code 1.0}. + * @see #getMaxLengthScalar() + * @see RangeSpec#maxLength() + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + public LinearExponentialRangeSpecFunction withMaxLengthScalar(double maxLengthScalar) { + checkArgument(maxLengthScalar >= 1.0, "maxLengthScalar >= 1.0 (%s >= 1.0)", maxLengthScalar); + return new LinearExponentialRangeSpecFunction(initialMaxLength, maxLengthScalar); + } + + /** + * Produce a new {@link RangeSpec} relative to the provided {@code offset} and {@code prev}. + * + *

If {@code prev} is null, a {@code RangeSpec} beginning at {@code offset} and maxLength set + * to {@link #getInitialMaxLength()}. If {@code offset == (prev.begin + prev.maxLength)} create a + * new {@code RangeSpec} beginning at {@code offset} and maxLength set to {@code prev.maxLength * + * maxLengthScalar} + */ + @Override + RangeSpec apply(long offset, RangeSpec prev) { + if (prev == null) { + return RangeSpec.of(offset, initialMaxLength); + } + + OptionalLong maybeMaxLength = prev.maxLength(); + long maxLength; + if (maybeMaxLength.isPresent()) { + maxLength = maybeMaxLength.getAsLong(); + + long expectedOffset = prev.begin() + maxLength; + if (offset != expectedOffset) { + return RangeSpec.of(offset, initialMaxLength); + } + + } else { + maxLength = Long.MAX_VALUE; + } + + long scaleReadSize = scaleMaxLength(maxLength, maxLengthScalar); + + return RangeSpec.of(offset, scaleReadSize); + } + + private static long scaleMaxLength(long lastReadSize, double rangeMaxLengthScalar) { + double scaled = lastReadSize * rangeMaxLengthScalar; + if (Double.isInfinite(scaled)) { + return Long.MAX_VALUE; + } + return DoubleMath.roundToLong(scaled, RoundingMode.HALF_EVEN); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof LinearExponentialRangeSpecFunction)) { + return false; + } + LinearExponentialRangeSpecFunction that = (LinearExponentialRangeSpecFunction) o; + return initialMaxLength == that.initialMaxLength + && Double.compare(maxLengthScalar, that.maxLengthScalar) == 0; + } + + @Override + public int hashCode() { + return Objects.hash(initialMaxLength, maxLengthScalar); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("initialMaxLength", initialMaxLength) + .add("rangeMaxLengthScalar", maxLengthScalar) + .toString(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Maths.java 
/** Small null-aware arithmetic helpers used when merging partially-populated values. */
final class Maths {

  private Maths() {}

  /**
   * Null aware subtraction over {@link Long}.
   *
   * <p>Returns {@code null} when {@code l} is null; returns {@code l} unchanged when only
   * {@code r} is null; otherwise returns {@code l - r}.
   */
  static Long sub(Long l, Long r) {
    if (l == null) {
      return null;
    }
    return r == null ? l : l - r;
  }

  /**
   * Null aware subtraction over {@link Integer}.
   *
   * <p>Returns {@code null} when {@code l} is null; returns {@code l} unchanged when only
   * {@code r} is null; otherwise returns {@code l - r}.
   */
  static Integer sub(Integer l, Integer r) {
    if (l == null) {
      return null;
    }
    return r == null ? l : l - r;
  }

  /**
   * Increment some {@code base} {@link Long} (which may be null) by {@code factor}.
   *
   * <p>A null {@code base} is treated as zero, so {@code factor} is returned as-is.
   */
  static long add(Long base, long factor) {
    return base == null ? factor : base + factor;
  }
}

Instances of this class are immutable and thread safe. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ +@BetaApi +@Immutable +public final class MaxLengthRangeSpecFunction extends RangeSpecFunction { + static final MaxLengthRangeSpecFunction INSTANCE = new MaxLengthRangeSpecFunction(0); + private final long maxLength; + + MaxLengthRangeSpecFunction(long maxLength) { + this.maxLength = maxLength; + } + + /** + * The maximum maxLength for any RangeSpec returned from {@link #apply(long, RangeSpec)} + * + *

Default: {@code 0} -- no max length + * + * @see #withMaxLength(long) + * @see RangeSpec#maxLength() + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + public long getMaxLength() { + return maxLength; + } + + /** + * Return an instance with the {@code maxLength} set to the specified value. + * + *

Default: {@code 0} -- no max length + * + * @param maxLength The number of bytes a {@link RangeSpec}s maxLength should be limited to. Must + * be > {@code 0}. + * @see #getMaxLength() + * @see RangeSpec#maxLength() + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + public MaxLengthRangeSpecFunction withMaxLength(long maxLength) { + checkArgument(maxLength >= 0, "maxLength >= 0 (%s >= 0)", maxLength); + return new MaxLengthRangeSpecFunction(maxLength); + } + + /** + * Produce a new {@link RangeSpec} relative to the provided {@code offset} and {@code prev}, where + * the RangeSpec will have a maxLength set to the lesser of {@code prev.maxLength} and {@code + * this.maxLength}. + */ + @Override + RangeSpec apply(long offset, @Nullable RangeSpec prev) { + if (prev == null || !prev.maxLength().isPresent()) { + return RangeSpec.of(offset, maxLength); + } + long limit = prev.maxLength().getAsLong(); + return RangeSpec.of(offset, Math.min(limit, maxLength)); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof MaxLengthRangeSpecFunction)) { + return false; + } + MaxLengthRangeSpecFunction that = (MaxLengthRangeSpecFunction) o; + return maxLength == that.maxLength; + } + + @Override + public int hashCode() { + return Objects.hashCode(maxLength); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("maxLength", maxLength).toString(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/MaxRedirectsExceededException.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/MaxRedirectsExceededException.java new file mode 100644 index 000000000000..571389251238 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/MaxRedirectsExceededException.java @@ -0,0 +1,26 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, 
/**
 * Thrown when a resumable operation has been redirected more times than the configured maximum.
 */
final class MaxRedirectsExceededException extends RuntimeException {

  MaxRedirectsExceededException(int maxRedirectAllowed, int actualRedirects) {
    // Locale.US keeps the diagnostic message byte-stable regardless of the JVM default locale
    // (%d digits are locale-sensitive), consistent with other formatted strings in this package.
    super(
        String.format(
            java.util.Locale.US,
            "max redirects exceeded (actual: %d, max: %d)",
            actualRedirects,
            maxRedirectAllowed));
  }
}
+ */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.Conversions.Codec; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableMap; +import java.util.Comparator; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * Utility construct which allows defining a field name along with a codec for use in creating + * typeful files in {@link BlobInfo#getMetadata()} or {@link BucketInfo#getLabels()} + * + * @param The type of the value + */ +@SuppressWarnings("SameParameterValue") +final class MetadataField { + + @SuppressWarnings("RedundantTypeArguments") + private static final Codec CODEC_LONG = + Codec.of(String::valueOf, Long::parseLong).nullable(); + + private static final Codec CODEC_STRING = + Codec.of(s -> s, s -> s).nullable(); + private final String key; + private final Codec codec; + + private MetadataField(String key, Codec codec) { + this.key = key; + this.codec = codec; + } + + void appendTo(@NonNull T t, ImmutableMap.Builder b) { + b.put(key, codec.encode(t)); + } + + @Nullable T readFrom(BlobInfo info) { + Map map = info.getMetadata(); + if (map != null) { + return readFrom(map); + } + return null; + } + + @VisibleForTesting + @Nullable T readFrom(Map m) { + return codec.decode(m.get(key)); + } + + static MetadataField forLong(String key) { + return of(key, CODEC_LONG); + } + + static MetadataField forString(String key) { + return of(key, CODEC_STRING); + } + + static MetadataField forPartRange(String key) { + return of(key, PartRange.CODEC); + } + + static final class PartRange { + private static final Codec CODEC = + Codec.of(PartRange::encode, PartRange::decode).nullable(); + static final Comparator COMP = + Comparator.comparingLong(PartRange::getBegin).thenComparingLong(PartRange::getEnd); + 
private final long begin; + private final long end; + + private PartRange(long begin, long end) { + this.begin = begin; + this.end = end; + } + + public long getBegin() { + return begin; + } + + public long getEnd() { + return end; + } + + String encode() { + return String.format(Locale.US, "%04d-%04d", begin, end); + } + + static PartRange decode(String s) { + int splitPoint = s.indexOf("-"); + long being = Long.parseLong(s.substring(0, splitPoint)); + long end = Long.parseLong(s.substring(splitPoint + 1)); + return of(being, end); + } + + static PartRange of(long begin) { + return of(begin, begin); + } + + static PartRange of(long begin, long end) { + return new PartRange(begin, end); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof PartRange)) { + return false; + } + PartRange partRange = (PartRange) o; + return begin == partRange.begin && end == partRange.end; + } + + @Override + public int hashCode() { + return Objects.hash(begin, end); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("begin", begin).add("end", end).toString(); + } + } + + private static MetadataField of(String key, Codec codec) { + return new MetadataField<>(key, codec); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannel.java new file mode 100644 index 000000000000..4bd4d9eaa5ac --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannel.java @@ -0,0 +1,173 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.common.base.Preconditions.checkState;

import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel;
import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;

/**
 * Buffering {@link java.nio.channels.WritableByteChannel} which attempts to maximize the amount of
 * bytes written to the underlying {@link UnbufferedWritableByteChannel} while minimizing
 * unnecessary copying of said bytes.
 *
 * <p>Our flushing strategy is "eager", meaning as soon as we have enough bytes greater than or
 * equal to the capacity of our buffer we will write all bytes to the underlying channel.
 *
 * <p>A few strategies are employed to meet the stated goals.
 *
 * <ol>
 *   <li>If we do not have any bytes in our buffer and {@code src} is the same size as our buffer,
 *       simply {@link UnbufferedWritableByteChannel#write(ByteBuffer) write(src)} to the
 *       underlying channel
 *   <li>If we do not have any bytes in our buffer and {@code src} is smaller than the size of our
 *       buffer, enqueue it in full
 *   <li>If we do have enqueued bytes and {@code src} is the size of our remaining buffer space
 *       {@link UnbufferedWritableByteChannel#write(ByteBuffer[]) write([buffer, src])} to the
 *       underlying channel
 *   <li>If we do have enqueued bytes and {@code src} is larger than the size of our remaining
 *       buffer space, take a slice of {@code src} the same size as the remaining space in our
 *       buffer and {@link UnbufferedWritableByteChannel#write(ByteBuffer[]) write([buffer, slice])}
 *       to the underlying channel before enqueuing any outstanding bytes which are smaller than
 *       our buffer.
 *   <li>If we do have enqueued bytes and {@code src} is smaller than our remaining buffer space,
 *       enqueue it in full
 * </ol>
 */
final class MinFlushBufferedWritableByteChannel implements BufferedWritableByteChannel {

  // Lazily-allocated buffer used to stage bytes until a full flush is possible.
  private final BufferHandle handle;

  private final UnbufferedWritableByteChannel channel;
  // When false, write() returns early instead of looping until all of src is consumed.
  private final boolean blocking;

  MinFlushBufferedWritableByteChannel(BufferHandle handle, UnbufferedWritableByteChannel channel) {
    this(handle, channel, true);
  }

  MinFlushBufferedWritableByteChannel(
      BufferHandle handle, UnbufferedWritableByteChannel channel, boolean blocking) {
    this.handle = handle;
    this.channel = channel;
    this.blocking = blocking;
  }

  @Override
  public int write(ByteBuffer src) throws IOException {
    if (!channel.isOpen()) {
      throw new ClosedChannelException();
    }
    // Number of bytes of src consumed by this call (enqueued or written downstream).
    int bytesConsumed = 0;

    while (Buffers.hasRemaining(src)) {
      int srcRemaining = Buffers.remaining(src);

      int bufferRemaining = handle.remaining();

      if (srcRemaining < bufferRemaining) {
        // srcRemaining is smaller than the remaining space in our buffer, enqueue it in full
        handle.get().put(src);
        bytesConsumed += srcRemaining;
        break;
      }

      // At this point src has at least enough bytes to fill the buffer, so attempt a flush of
      // [buffer (if non-empty), src] to the underlying channel in a single gathering write.
      int capacity = handle.capacity();
      int position = handle.position();
      // bytes currently staged in the buffer awaiting flush
      int bufferPending = capacity - bufferRemaining;
      // total bytes we would like the underlying channel to accept this iteration
      int totalPending = Math.addExact(srcRemaining, bufferPending);
      ByteBuffer[] srcs;
      boolean usingBuffer = false;
      if (enqueuedBytes()) {
        usingBuffer = true;
        ByteBuffer buffer = handle.get();
        // flip to read-mode so the staged bytes can be drained by the write
        Buffers.flip(buffer);
        srcs = new ByteBuffer[] {buffer, src};
      } else {
        srcs = new ByteBuffer[] {src};
      }
      long written = channel.write(srcs);
      checkState(written >= 0, "written >= 0 (%s > 0)", written);
      if (usingBuffer) {
        // Restore the buffer to write-mode, keeping whatever bytes were not consumed.
        if (written >= bufferPending) {
          // we wrote enough to consume the buffer
          Buffers.clear(handle.get());
        } else if (written > 0) {
          // we didn't write enough bytes to consume the whole buffer; compact keeps the
          // unconsumed tail and leaves the buffer ready for more puts
          Buffers.compact(handle.get());
        } else /*if (written == 0)*/ {
          // if none of the buffer was consumed, flip it back so we retain all bytes
          Buffers.position(handle.get(), position);
          Buffers.limit(handle.get(), capacity);
        }
      }

      // Only bytes beyond bufferPending came out of src; buffered bytes were counted by the
      // write() call that originally enqueued them.
      int srcConsumed = Math.max(0, Math.toIntExact(written) - bufferPending);
      bytesConsumed += srcConsumed;

      if (!blocking && written != totalPending) {
        // we're configured in non-blocking mode, and we weren't able to make any progress on our
        // call, break out to allow more bytes to be written to us or to allow underlying space
        // to clear.
        break;
      }
    }
    return bytesConsumed;
  }

  @Override
  public boolean isOpen() {
    return channel.isOpen();
  }

  /**
   * Flushes any staged bytes via {@code writeAndClose} and closes the underlying channel.
   *
   * <p>NOTE(review): if writeAndClose leaves bytes unconsumed they are compacted back into the
   * buffer rather than retried — presumably the session surfaces that elsewhere; confirm.
   */
  @Override
  public void close() throws IOException {
    if (enqueuedBytes()) {
      ByteBuffer buffer = handle.get();
      Buffers.flip(buffer);
      channel.writeAndClose(buffer);
      if (buffer.hasRemaining()) {
        buffer.compact();
      } else {
        Buffers.clear(buffer);
      }
    } else {
      channel.close();
    }
  }

  /** Repeatedly writes staged bytes downstream until the buffer is empty. */
  @Override
  public void flush() throws IOException {
    while (enqueuedBytes()) {
      ByteBuffer buffer = handle.get();
      Buffers.flip(buffer);
      channel.write(buffer);
      if (buffer.hasRemaining()) {
        // partial write: keep the unconsumed tail and loop to try again
        buffer.compact();
      } else {
        Buffers.clear(buffer);
      }
    }
  }

  /** True when at least one byte is staged in the buffer. */
  private boolean enqueuedBytes() {
    return handle.position() > 0;
  }
}
/*
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import com.google.api.core.InternalExtensionOnly;
import com.google.cloud.storage.TransportCompatibility.Transport;
import com.google.cloud.storage.multipartupload.model.AbortMultipartUploadRequest;
import com.google.cloud.storage.multipartupload.model.AbortMultipartUploadResponse;
import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadRequest;
import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadResponse;
import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadRequest;
import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadResponse;
import com.google.cloud.storage.multipartupload.model.ListMultipartUploadsRequest;
import com.google.cloud.storage.multipartupload.model.ListMultipartUploadsResponse;
import com.google.cloud.storage.multipartupload.model.ListPartsRequest;
import com.google.cloud.storage.multipartupload.model.ListPartsResponse;
import com.google.cloud.storage.multipartupload.model.UploadPartRequest;
import com.google.cloud.storage.multipartupload.model.UploadPartResponse;

/**
 * A client for interacting with Google Cloud Storage's Multipart Upload API.
 *
 * <p>This class is for internal use only and is not intended for public consumption. It provides a
 * low-level interface for creating and managing multipart uploads.
 *
 * @see <a href="https://cloud.google.com/storage/docs/multipart-uploads">Multipart Uploads</a>
 * @since 2.60.0
 */
@InternalExtensionOnly
public abstract class MultipartUploadClient {

  // Package-private: only in-package implementations/decorators may extend this type.
  MultipartUploadClient() {}

  /**
   * Creates a new multipart upload.
   *
   * @param request The request object containing the details for creating the multipart upload.
   * @return A {@link CreateMultipartUploadResponse} object containing the upload ID.
   * @since 2.60.0
   */
  public abstract CreateMultipartUploadResponse createMultipartUpload(
      CreateMultipartUploadRequest request);

  /**
   * Lists the parts that have been uploaded for a specific multipart upload.
   *
   * @param listPartsRequest The request object containing the details for listing the parts.
   * @return A {@link ListPartsResponse} object containing the list of parts.
   * @since 2.60.0
   */
  public abstract ListPartsResponse listParts(ListPartsRequest listPartsRequest);

  /**
   * Aborts a multipart upload.
   *
   * @param request The request object containing the details for aborting the multipart upload.
   * @return An {@link AbortMultipartUploadResponse} object.
   * @since 2.60.0
   */
  public abstract AbortMultipartUploadResponse abortMultipartUpload(
      AbortMultipartUploadRequest request);

  /**
   * Completes a multipart upload.
   *
   * @param request The request object containing the details for completing the multipart upload.
   * @return A {@link CompleteMultipartUploadResponse} object containing information about the
   *     completed upload.
   * @since 2.60.0
   */
  public abstract CompleteMultipartUploadResponse completeMultipartUpload(
      CompleteMultipartUploadRequest request);

  /**
   * Uploads a part in a multipart upload.
   *
   * @param request The request object containing the details for uploading the part.
   * @param requestBody The content of the part to upload.
   * @return An {@link UploadPartResponse} object containing the ETag of the uploaded part.
   * @since 2.60.0
   */
  public abstract UploadPartResponse uploadPart(UploadPartRequest request, RequestBody requestBody);

  /**
   * Lists all multipart uploads in a bucket.
   *
   * @param request The request object containing the details for listing the multipart uploads.
   * @return A {@link ListMultipartUploadsResponse} object containing the list of multipart uploads.
   * @since 2.61.0
   */
  public abstract ListMultipartUploadsResponse listMultipartUploads(
      ListMultipartUploadsRequest request);

  /**
   * Creates a new instance of {@link MultipartUploadClient}.
   *
   * <p>The returned client is an XML-API-backed implementation wrapped with an OpenTelemetry
   * decorator over HTTP transport.
   *
   * @param config The configuration for the client.
   * @return A new {@link MultipartUploadClient} instance.
   * @since 2.60.0
   */
  public static MultipartUploadClient create(MultipartUploadSettings config) {
    HttpStorageOptions options = config.getOptions();
    MultipartUploadClient client =
        new MultipartUploadClientImpl(
            options.createRetrier(),
            MultipartUploadHttpRequestManager.createFrom(options),
            options.getRetryAlgorithmManager());
    return OtelMultipartUploadClientDecorator.decorate(
        client, options.getOpenTelemetry(), Transport.HTTP);
  }
}
/*
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.storage; + +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.cloud.storage.multipartupload.model.AbortMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.AbortMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.ListMultipartUploadsRequest; +import com.google.cloud.storage.multipartupload.model.ListMultipartUploadsResponse; +import com.google.cloud.storage.multipartupload.model.ListPartsRequest; +import com.google.cloud.storage.multipartupload.model.ListPartsResponse; +import com.google.cloud.storage.multipartupload.model.UploadPartRequest; +import com.google.cloud.storage.multipartupload.model.UploadPartResponse; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * This class is an implementation of {@link MultipartUploadClient} that uses the Google Cloud + * Storage XML API to perform multipart uploads. 
+ */ +final class MultipartUploadClientImpl extends MultipartUploadClient { + + private final MultipartUploadHttpRequestManager httpRequestManager; + private final Retrier retrier; + private final HttpRetryAlgorithmManager retryAlgorithmManager; + + MultipartUploadClientImpl( + Retrier retrier, + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager, + HttpRetryAlgorithmManager retryAlgorithmManager) { + this.httpRequestManager = multipartUploadHttpRequestManager; + this.retrier = retrier; + this.retryAlgorithmManager = retryAlgorithmManager; + } + + @Override + public CreateMultipartUploadResponse createMultipartUpload(CreateMultipartUploadRequest request) { + return retrier.run( + retryAlgorithmManager.nonIdempotent(), + () -> httpRequestManager.sendCreateMultipartUploadRequest(request), + Decoder.identity()); + } + + @Override + public ListPartsResponse listParts(ListPartsRequest request) { + + return retrier.run( + retryAlgorithmManager.idempotent(), + () -> httpRequestManager.sendListPartsRequest(request), + Decoder.identity()); + } + + @Override + public AbortMultipartUploadResponse abortMultipartUpload(AbortMultipartUploadRequest request) { + + return retrier.run( + retryAlgorithmManager.idempotent(), + () -> httpRequestManager.sendAbortMultipartUploadRequest(request), + Decoder.identity()); + } + + @Override + public CompleteMultipartUploadResponse completeMultipartUpload( + CompleteMultipartUploadRequest request) { + return retrier.run( + retryAlgorithmManager.idempotent(), + () -> httpRequestManager.sendCompleteMultipartUploadRequest(request), + Decoder.identity()); + } + + @Override + public UploadPartResponse uploadPart(UploadPartRequest request, RequestBody requestBody) { + AtomicBoolean dirty = new AtomicBoolean(false); + return retrier.run( + retryAlgorithmManager.idempotent(), + () -> { + if (dirty.getAndSet(true)) { + requestBody.getContent().rewindTo(0); + } + return httpRequestManager.sendUploadPartRequest(request, 
requestBody.getContent()); + }, + Decoder.identity()); + } + + @Override + public ListMultipartUploadsResponse listMultipartUploads(ListMultipartUploadsRequest request) { + return retrier.run( + retryAlgorithmManager.idempotent(), + () -> httpRequestManager.sendListMultipartUploadsRequest(request), + Decoder.identity()); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/MultipartUploadHttpRequestManager.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/MultipartUploadHttpRequestManager.java new file mode 100644 index 000000000000..aba60f6bbac5 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/MultipartUploadHttpRequestManager.java @@ -0,0 +1,368 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.storage; + +import com.fasterxml.jackson.dataformat.xml.XmlMapper; +import com.google.api.client.http.ByteArrayContent; +import com.google.api.client.http.GenericUrl; +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpRequest; +import com.google.api.client.http.HttpRequestFactory; +import com.google.api.client.http.UriTemplate; +import com.google.api.client.util.ObjectParser; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.rpc.FixedHeaderProvider; +import com.google.api.gax.rpc.HeaderProvider; +import com.google.api.services.storage.Storage; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.multipartupload.model.AbortMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.AbortMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.ListMultipartUploadsRequest; +import com.google.cloud.storage.multipartupload.model.ListMultipartUploadsResponse; +import com.google.cloud.storage.multipartupload.model.ListPartsRequest; +import com.google.cloud.storage.multipartupload.model.ListPartsResponse; +import com.google.cloud.storage.multipartupload.model.UploadPartRequest; +import com.google.cloud.storage.multipartupload.model.UploadPartResponse; +import com.google.common.base.StandardSystemProperty; +import com.google.common.collect.ImmutableMap; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URLEncoder; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import 
java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class MultipartUploadHttpRequestManager { + + private final HttpRequestFactory requestFactory; + private final ObjectParser objectParser; + private final HeaderProvider headerProvider; + private final URI uri; + + MultipartUploadHttpRequestManager( + HttpRequestFactory requestFactory, + ObjectParser objectParser, + HeaderProvider headerProvider, + URI uri) { + this.requestFactory = requestFactory; + this.objectParser = objectParser; + this.headerProvider = headerProvider; + this.uri = uri; + } + + CreateMultipartUploadResponse sendCreateMultipartUploadRequest( + CreateMultipartUploadRequest request) throws IOException { + + String createUri = + UriTemplate.expand( + uri.toString(), + "{bucket}/{key}?uploads", + ImmutableMap.of("bucket", request.bucket(), "key", request.key()), + false); + + HttpRequest httpRequest = + requestFactory.buildPostRequest( + new GenericUrl(createUri), new ByteArrayContent(null, new byte[0])); + httpRequest.getHeaders().putAll(headerProvider.getHeaders()); + addHeadersForCreateMultipartUpload(request, httpRequest.getHeaders()); + httpRequest.setParser(objectParser); + httpRequest.setThrowExceptionOnExecuteError(true); + return httpRequest.execute().parseAs(CreateMultipartUploadResponse.class); + } + + ListPartsResponse sendListPartsRequest(ListPartsRequest request) throws IOException { + + ImmutableMap.Builder params = + ImmutableMap.builder() + .put("bucket", request.bucket()) + .put("key", request.key()) + .put("uploadId", request.uploadId()); + if (request.maxParts() != null) { + params.put("max-parts", request.maxParts()); + } + if (request.partNumberMarker() != null) { + params.put("part-number-marker", request.partNumberMarker()); + } + + String listUri = + UriTemplate.expand( + uri.toString(), + "{bucket}/{key}{?uploadId,max-parts,part-number-marker}", + params.build(), + false); + 
HttpRequest httpRequest = requestFactory.buildGetRequest(new GenericUrl(listUri)); + httpRequest.getHeaders().putAll(headerProvider.getHeaders()); + if (request.userProject() != null) { + httpRequest.getHeaders().put("x-goog-user-project", request.userProject()); + } + httpRequest.setParser(objectParser); + httpRequest.setThrowExceptionOnExecuteError(true); + return httpRequest.execute().parseAs(ListPartsResponse.class); + } + + ListMultipartUploadsResponse sendListMultipartUploadsRequest(ListMultipartUploadsRequest request) + throws IOException { + + ImmutableMap.Builder params = + ImmutableMap.builder().put("bucket", request.bucket()); + if (request.delimiter() != null) { + params.put("delimiter", request.delimiter()); + } + if (request.encodingType() != null) { + params.put("encoding-type", request.encodingType()); + } + if (request.keyMarker() != null) { + params.put("key-marker", request.keyMarker()); + } + if (request.maxUploads() != null) { + params.put("max-uploads", request.maxUploads()); + } + if (request.prefix() != null) { + params.put("prefix", request.prefix()); + } + if (request.uploadIdMarker() != null) { + params.put("upload-id-marker", request.uploadIdMarker()); + } + String listUri = + UriTemplate.expand( + uri.toString() + + "{bucket}?uploads{&delimiter,encoding-type,key-marker,max-uploads,prefix,upload-id-marker}", + params.build(), + false); + HttpRequest httpRequest = requestFactory.buildGetRequest(new GenericUrl(listUri)); + httpRequest.getHeaders().putAll(headerProvider.getHeaders()); + if (request.userProject() != null) { + httpRequest.getHeaders().put("x-goog-user-project", request.userProject()); + } + httpRequest.setParser(objectParser); + httpRequest.setThrowExceptionOnExecuteError(true); + return httpRequest.execute().parseAs(ListMultipartUploadsResponse.class); + } + + AbortMultipartUploadResponse sendAbortMultipartUploadRequest(AbortMultipartUploadRequest request) + throws IOException { + + String abortUri = + UriTemplate.expand( + 
uri.toString(), + "{bucket}/{key}{?uploadId}", + ImmutableMap.of( + "bucket", request.bucket(), "key", request.key(), "uploadId", request.uploadId()), + false); + + HttpRequest httpRequest = requestFactory.buildDeleteRequest(new GenericUrl(abortUri)); + httpRequest.getHeaders().putAll(headerProvider.getHeaders()); + if (request.userProject() != null) { + httpRequest.getHeaders().put("x-goog-user-project", request.userProject()); + } + httpRequest.setParser(objectParser); + httpRequest.setThrowExceptionOnExecuteError(true); + return httpRequest.execute().parseAs(AbortMultipartUploadResponse.class); + } + + CompleteMultipartUploadResponse sendCompleteMultipartUploadRequest( + CompleteMultipartUploadRequest request) throws IOException { + String completeUri = + UriTemplate.expand( + uri.toString() + "{bucket}/{key}{?uploadId}", + ImmutableMap.of( + "bucket", request.bucket(), "key", request.key(), "uploadId", request.uploadId()), + false); + byte[] bytes = new XmlMapper().writeValueAsBytes(request.multipartUpload()); + HttpRequest httpRequest = + requestFactory.buildPostRequest( + new GenericUrl(completeUri), new ByteArrayContent("application/xml", bytes)); + httpRequest.getHeaders().putAll(headerProvider.getHeaders()); + if (request.userProject() != null) { + httpRequest.getHeaders().put("x-goog-user-project", request.userProject()); + } + @Nullable Crc32cLengthKnown crc32cValue = Hasher.defaultHasher().hash(ByteBuffer.wrap(bytes)); + addChecksumHeader(crc32cValue, httpRequest.getHeaders()); + httpRequest.setParser(objectParser); + httpRequest.setThrowExceptionOnExecuteError(true); + return ChecksumResponseParser.parseCompleteResponse(httpRequest.execute()); + } + + UploadPartResponse sendUploadPartRequest( + UploadPartRequest request, RewindableContent rewindableContent) throws IOException { + String uploadUri = + UriTemplate.expand( + uri.toString() + "{bucket}/{key}{?partNumber,uploadId}", + ImmutableMap.of( + "bucket", + request.bucket(), + "key", + 
request.key(), + "partNumber", + request.partNumber(), + "uploadId", + request.uploadId()), + false); + HttpRequest httpRequest = + requestFactory.buildPutRequest(new GenericUrl(uploadUri), rewindableContent); + httpRequest.getHeaders().putAll(headerProvider.getHeaders()); + if (request.userProject() != null) { + httpRequest.getHeaders().put("x-goog-user-project", request.userProject()); + } + if (request.crc32c() != null) { + addChecksumHeader(request.crc32c(), httpRequest.getHeaders()); + } else { + addChecksumHeader(rewindableContent.getCrc32c(), httpRequest.getHeaders()); + } + httpRequest.setThrowExceptionOnExecuteError(true); + return ChecksumResponseParser.parseUploadResponse(httpRequest.execute()); + } + + @SuppressWarnings("DataFlowIssue") + static MultipartUploadHttpRequestManager createFrom(HttpStorageOptions options) { + Storage storage = options.getStorageRpcV1().getStorage(); + ImmutableMap.Builder stableHeaders = ImmutableMap.builder(); + // http-java-client will automatically add its value the user-agent + if (!options + .getMergedHeaderProvider(FixedHeaderProvider.create(ImmutableMap.of())) + .getHeaders() + .containsKey("User-Agent")) { + stableHeaders.put("User-Agent", "gcloud-java/" + options.getLibraryVersion()); + } + stableHeaders.put( + "x-goog-api-client", + String.format( + "gl-java/%s gccl/%s %s/%s", + GaxProperties.getJavaVersion(), + options.getLibraryVersion(), + formatName(StandardSystemProperty.OS_NAME.value()), + formatSemver(StandardSystemProperty.OS_VERSION.value()))); + return new MultipartUploadHttpRequestManager( + storage.getRequestFactory(), + new XmlObjectParser(new XmlMapper()), + options.getMergedHeaderProvider(FixedHeaderProvider.create(stableHeaders.build())), + URI.create(ensureTrailingSlash(options.getHost()))); + } + + private static String ensureTrailingSlash(String host) { + return host.endsWith("/") ? 
host : host + "/"; + } + + private void addChecksumHeader(@Nullable Crc32cLengthKnown crc32c, HttpHeaders headers) { + if (crc32c != null) { + addChecksumHeader(Utils.crc32cCodec.encode(crc32c.getValue()), headers); + } + } + + private void addChecksumHeader(@Nullable String crc32c, HttpHeaders headers) { + if (crc32c != null) { + headers.put("x-goog-hash", "crc32c=" + crc32c); + } + } + + private void addHeadersForCreateMultipartUpload( + CreateMultipartUploadRequest request, HttpHeaders headers) { + if (request.cannedAcl() != null) { + headers.put("x-goog-acl", request.cannedAcl().getXmlEntry()); + } + if (request.metadata() != null) { + for (Map.Entry entry : request.metadata().entrySet()) { + if (entry.getKey() != null || entry.getValue() != null) { + headers.put("x-goog-meta-" + urlEncode(entry.getKey()), urlEncode(entry.getValue())); + } + } + } + if (request.contentType() != null) { + headers.put("Content-Type", request.contentType()); + } + if (request.contentDisposition() != null) { + headers.put("Content-Disposition", request.contentDisposition()); + } + if (request.contentEncoding() != null) { + headers.put("Content-Encoding", request.contentEncoding()); + } + if (request.contentLanguage() != null) { + headers.put("Content-Language", request.contentLanguage()); + } + if (request.cacheControl() != null) { + headers.put("Cache-Control", request.cacheControl()); + } + if (request.storageClass() != null) { + headers.put("x-goog-storage-class", request.storageClass().toString()); + } + if (request.kmsKeyName() != null && !request.kmsKeyName().isEmpty()) { + headers.put("x-goog-encryption-kms-key-name", request.kmsKeyName()); + } + if (request.objectLockMode() != null) { + headers.put("x-goog-object-lock-mode", request.objectLockMode().toString()); + } + if (request.objectLockRetainUntilDate() != null) { + headers.put( + "x-goog-object-lock-retain-until-date", + Utils.offsetDateTimeRfc3339Codec.encode(request.objectLockRetainUntilDate())); + } + if 
(request.customTime() != null) { + headers.put( + "x-goog-custom-time", Utils.offsetDateTimeRfc3339Codec.encode(request.customTime())); + } + if (request.userProject() != null) { + headers.put("x-goog-user-project", request.userProject()); + } + } + + private static String urlEncode(String value) { + try { + return URLEncoder.encode(value, StandardCharsets.UTF_8.name()); + } catch (UnsupportedEncodingException e) { + throw new StorageException(0, "Unable to load UTF-8 charset for encoding", e); + } + } + + /** + * copied from + * com.google.api.client.googleapis.services.AbstractGoogleClientRequest.ApiClientVersion#formatName(java.lang.String) + */ + private static String formatName(String name) { + // Only lowercase letters, digits, and "-" are allowed + return name.toLowerCase().replaceAll("[^\\w-]", "-"); + } + + private static String formatSemver(String version) { + return formatSemver(version, version); + } + + /** + * copied from + * com.google.api.client.googleapis.services.AbstractGoogleClientRequest.ApiClientVersion#formatSemver(java.lang.String, + * java.lang.String) + */ + private static String formatSemver(String version, String defaultValue) { + if (version == null) { + return null; + } + + // Take only the semver version: x.y.z-a_b_c -> x.y.z + Matcher m = Pattern.compile("(\\d+\\.\\d+\\.\\d+).*").matcher(version); + if (m.find()) { + return m.group(1); + } else { + return defaultValue; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/MultipartUploadSettings.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/MultipartUploadSettings.java new file mode 100644 index 000000000000..c3ccd049e21e --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/MultipartUploadSettings.java @@ -0,0 +1,49 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.storage; + +/** + * Settings for configuring the {@link MultipartUploadClient}. + * + *

This class is for internal use only and is not intended for public consumption. + */ +public final class MultipartUploadSettings { + private final HttpStorageOptions options; + + private MultipartUploadSettings(HttpStorageOptions options) { + this.options = options; + } + + /** + * Returns the {@link HttpStorageOptions} configured for multipart uploads. + * + * @return The {@link HttpStorageOptions}. + */ + public HttpStorageOptions getOptions() { + return options; + } + + /** + * Creates a new {@code MultipartUploadSettings} instance with the specified {@link + * HttpStorageOptions}. + * + * @param options The {@link HttpStorageOptions} to use. + * @return A new {@code MultipartUploadSettings} instance. + */ + public static MultipartUploadSettings of(HttpStorageOptions options) { + return new MultipartUploadSettings(options); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Notification.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Notification.java new file mode 100644 index 000000000000..bb6d2747d4c4 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Notification.java @@ -0,0 +1,133 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkNotNull; + +import java.util.Map; +import java.util.Objects; + +/** + * The class representing Pub/Sub notifications for the Storage. See pubsub-notifications for + * details. + */ +public class Notification extends NotificationInfo { + private static final long serialVersionUID = 3150928330690874200L; + + private final StorageOptions options; + private transient Storage storage; + + /** Builder for {@code Notification}. */ + public static class Builder extends NotificationInfo.Builder { + private final Storage storage; + private final NotificationInfo.BuilderImpl infoBuilder; + + Builder(Notification notification) { + this.storage = notification.storage; + this.infoBuilder = new NotificationInfo.BuilderImpl(notification); + } + + @Override + Builder setNotificationId(String notificationId) { + infoBuilder.setNotificationId(notificationId); + return this; + } + + @Override + public Builder setSelfLink(String selfLink) { + infoBuilder.setSelfLink(selfLink); + return this; + } + + @Override + public Builder setTopic(String topic) { + infoBuilder.setTopic(topic); + return this; + } + + @Override + public Builder setPayloadFormat(PayloadFormat payloadFormat) { + infoBuilder.setPayloadFormat(payloadFormat); + return this; + } + + @Override + public Builder setObjectNamePrefix(String objectNamePrefix) { + infoBuilder.setObjectNamePrefix(objectNamePrefix); + return this; + } + + @Override + public Builder setEventTypes(EventType... 
eventTypes) { + infoBuilder.setEventTypes(eventTypes); + return this; + } + + @Override + public Builder setEtag(String etag) { + infoBuilder.setEtag(etag); + return this; + } + + @Override + public Builder setCustomAttributes(Map customAttributes) { + infoBuilder.setCustomAttributes(customAttributes); + return this; + } + + @Override + public Notification build() { + return new Notification(storage, infoBuilder); + } + } + + Notification(Storage storage, NotificationInfo.BuilderImpl infoBuilder) { + super(infoBuilder); + this.storage = checkNotNull(storage); + this.options = storage.getOptions(); + } + + /** Returns the notification's {@code Storage} object used to issue requests. */ + public Storage getStorage() { + return storage; + } + + @Override + public Builder toBuilder() { + return new Notification.Builder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Notification)) { + return false; + } + if (!super.equals(o)) { + return false; + } + Notification that = (Notification) o; + return Objects.equals(options, that.options) && Objects.equals(storage, that.storage); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), options, storage); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/NotificationInfo.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/NotificationInfo.java new file mode 100644 index 000000000000..cbf1f7e93141 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/NotificationInfo.java @@ -0,0 +1,304 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableMap; +import java.io.Serializable; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** The class representing Pub/Sub Notification metadata for the Storage. */ +public class NotificationInfo implements Serializable { + + private static final long serialVersionUID = -996243512290027661L; + private static final PathTemplate PATH_TEMPLATE = + PathTemplate.createWithoutUrlEncoding("projects/{project}/topics/{topic}"); + + public enum PayloadFormat { + JSON_API_V1, + NONE + } + + public enum EventType { + OBJECT_FINALIZE, + OBJECT_METADATA_UPDATE, + OBJECT_DELETE, + OBJECT_ARCHIVE + } + + private final String notificationId; + private final String topic; + private final List eventTypes; + private final Map customAttributes; + private final PayloadFormat payloadFormat; + private final String objectNamePrefix; + private final String etag; + private final String selfLink; + + /** Builder for {@code NotificationInfo}. 
*/ + public abstract static class Builder { + Builder() {} + + abstract Builder setNotificationId(String notificationId); + + public abstract Builder setSelfLink(String selfLink); + + public abstract Builder setTopic(String topic); + + public abstract Builder setPayloadFormat(PayloadFormat payloadFormat); + + public abstract Builder setObjectNamePrefix(String objectNamePrefix); + + public abstract Builder setEventTypes(EventType... eventTypes); + + public abstract Builder setEtag(String etag); + + public abstract Builder setCustomAttributes(Map customAttributes); + + /** Creates a {@code NotificationInfo} object. */ + public abstract NotificationInfo build(); + } + + /** Builder for {@code NotificationInfo}. */ + public static class BuilderImpl extends Builder { + + private String notificationId; + private String topic; + private List eventTypes; + private Map customAttributes; + private PayloadFormat payloadFormat; + private String objectNamePrefix; + private String etag; + private String selfLink; + + BuilderImpl(String topic) { + this.topic = topic; + } + + BuilderImpl(NotificationInfo notificationInfo) { + notificationId = notificationInfo.notificationId; + etag = notificationInfo.etag; + selfLink = notificationInfo.selfLink; + topic = notificationInfo.topic; + eventTypes = notificationInfo.eventTypes; + customAttributes = notificationInfo.customAttributes; + payloadFormat = notificationInfo.payloadFormat; + objectNamePrefix = notificationInfo.objectNamePrefix; + } + + @Override + Builder setNotificationId(String notificationId) { + this.notificationId = notificationId; + return this; + } + + @Override + public Builder setSelfLink(String selfLink) { + this.selfLink = selfLink; + return this; + } + + /** Sets a topic in the format of "projects/{project}/topics/{topic}". 
*/ + @Override + public Builder setTopic(String topic) { + this.topic = topic; + return this; + } + + @Override + public Builder setPayloadFormat(PayloadFormat payloadFormat) { + this.payloadFormat = payloadFormat; + return this; + } + + @Override + public Builder setObjectNamePrefix(String objectNamePrefix) { + this.objectNamePrefix = objectNamePrefix; + return this; + } + + @Override + public Builder setEventTypes(EventType... eventTypes) { + this.eventTypes = eventTypes != null ? Arrays.asList(eventTypes) : null; + return this; + } + + @Override + public Builder setEtag(String etag) { + this.etag = etag; + return this; + } + + @Override + public Builder setCustomAttributes(Map customAttributes) { + this.customAttributes = + customAttributes != null ? ImmutableMap.copyOf(customAttributes) : null; + return this; + } + + public NotificationInfo build() { + checkNotNull(topic); + checkTopicFormat(topic); + return new NotificationInfo(this); + } + } + + NotificationInfo(BuilderImpl builder) { + notificationId = builder.notificationId; + etag = builder.etag; + selfLink = builder.selfLink; + topic = builder.topic; + eventTypes = builder.eventTypes; + customAttributes = builder.customAttributes; + payloadFormat = builder.payloadFormat; + objectNamePrefix = builder.objectNamePrefix; + } + + /** Returns the service-generated id for the notification. */ + public String getNotificationId() { + return notificationId; + } + + /** Returns the topic in Pub/Sub that receives notifications. */ + public String getTopic() { + return topic; + } + + /** Returns the canonical URI of this topic as a string. */ + public String getSelfLink() { + return selfLink; + } + + /** Returns the desired content of the Payload. */ + public NotificationInfo.PayloadFormat getPayloadFormat() { + return payloadFormat; + } + + /** Returns the object name prefix for which this notification configuration applies. 
*/ + public String getObjectNamePrefix() { + return objectNamePrefix; + } + + /** + * Returns HTTP 1.1 Entity tag for the notification. See Entity Tags + */ + public String getEtag() { + return etag; + } + + /** + * Returns the events that trigger a notification to be sent. If empty, notifications are + * triggered by any event. See Event types to get + * list of available events. + */ + public List getEventTypes() { + return eventTypes; + } + + /** + * Returns the list of additional attributes to attach to each Cloud PubSub message published for + * this notification subscription. + */ + public Map getCustomAttributes() { + return customAttributes; + } + + @Override + public int hashCode() { + return Objects.hash( + notificationId, + topic, + eventTypes, + customAttributes, + payloadFormat, + objectNamePrefix, + etag, + selfLink); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof NotificationInfo)) { + return false; + } + NotificationInfo that = (NotificationInfo) o; + return Objects.equals(notificationId, that.notificationId) + && Objects.equals(topic, that.topic) + && Objects.equals(eventTypes, that.eventTypes) + && Objects.equals(customAttributes, that.customAttributes) + && payloadFormat == that.payloadFormat + && Objects.equals(objectNamePrefix, that.objectNamePrefix) + && Objects.equals(etag, that.etag) + && Objects.equals(selfLink, that.selfLink); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("topic", topic).toString(); + } + + /** + * Creates a {@code NotificationInfo} object for the provided topic. + * + *

Example of creating the NotificationInfo object: + * + *

{@code
+   * String topic = "projects/myProject/topics/myTopic"
+   * NotificationInfo notificationInfo = NotificationInfo.of(topic)
+   * }
+ * + * @param topic a string in the format "projects/{project}/topics/{topic}" + */ + public static NotificationInfo of(String topic) { + checkTopicFormat(topic); + return newBuilder(topic).build(); + } + + /** + * Creates a {@code NotificationInfo} object for the provided topic. + * + * @param topic a string in the format "projects/{project}/topics/{topic}" + */ + public static Builder newBuilder(String topic) { + checkTopicFormat(topic); + return new BuilderImpl(topic); + } + + /** Returns a builder for the current notification. */ + public Builder toBuilder() { + return new BuilderImpl(this); + } + + Notification asNotification(Storage storage) { + return new Notification(storage, new BuilderImpl(this)); + } + + private static void checkTopicFormat(String topic) { // todo: why does this exist? + PATH_TEMPLATE.validatedMatch(topic, "topic name must be in valid format"); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSession.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSession.java new file mode 100644 index 000000000000..655c64dda8d7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSession.java @@ -0,0 +1,30 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.InternalApi; +import com.google.api.core.InternalExtensionOnly; +import com.google.storage.v2.Object; + +@InternalApi +@InternalExtensionOnly +interface ObjectReadSession extends IOAutoCloseable { + + Object getResource(); + + Projection readAs(ReadProjectionConfig config); +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionImpl.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionImpl.java new file mode 100644 index 000000000000..2998f4d8ff33 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionImpl.java @@ -0,0 +1,188 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkState; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.cloud.storage.GrpcUtils.ZeroCopyBidiStreamingCallable; +import com.google.cloud.storage.ReadProjectionConfig.ProjectionType; +import com.google.cloud.storage.RetryContext.RetryContextProvider; +import com.google.common.annotations.VisibleForTesting; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.BidiReadObjectResponse; +import com.google.storage.v2.Object; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.ArrayList; +import java.util.IdentityHashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map.Entry; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.BiFunction; + +final class ObjectReadSessionImpl implements ObjectReadSession { + + private final ScheduledExecutorService executor; + private final ZeroCopyBidiStreamingCallable + callable; + private final ObjectReadSessionStream stream; + @VisibleForTesting final ObjectReadSessionState state; + private final Object resource; + private final RetryContextProvider retryContextProvider; + + private final ConcurrentIdentityMap children; + + private volatile boolean open; + + ObjectReadSessionImpl( + ScheduledExecutorService executor, + ZeroCopyBidiStreamingCallable callable, + ObjectReadSessionStream stream, + ObjectReadSessionState state, + RetryContextProvider retryContextProvider) { + this.executor = executor; + this.callable = callable; + this.stream = stream; + this.state = state; + this.resource = state.getMetadata(); + this.retryContextProvider = retryContextProvider; + this.children = new ConcurrentIdentityMap<>(); + this.open = true; + } + + @Override + 
public Object getResource() { + return resource; + } + + @Override + public Projection readAs(ReadProjectionConfig config) { + checkState(open, "Session already closed"); + switch (config.getType()) { + case STREAM_READ: + long readId = state.newReadId(); + ObjectReadSessionStreamRead read = + config.cast().newRead(readId, retryContextProvider.create()); + registerReadInState(readId, read); + return read.project(); + case SESSION_USER: + return config.project(this, IOAutoCloseable.noOp()); + default: + throw new IllegalStateException( + String.format( + Locale.US, + "Broken java enum %s value=%s", + ProjectionType.class.getName(), + config.getType().name())); + } + } + + @Override + public void close() throws IOException { + try { + if (!open) { + return; + } + open = false; + List> closing = + children.drainEntries((subStream, subStreamState) -> subStream.closeAsync()); + stream.close(); + ApiFutures.allAsList(closing).get(); + } catch (ExecutionException e) { + throw new IOException(e.getCause()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new InterruptedIOException(); + } + } + + private void registerReadInState(long readId, ObjectReadSessionStreamRead read) { + BidiReadObjectRequest request = + BidiReadObjectRequest.newBuilder().addReadRanges(read.makeReadRange()).build(); + if (state.canHandleNewRead(read)) { + state.putOutstandingRead(readId, read); + stream.send(request); + } else { + ObjectReadSessionState child = state.forkChild(); + ObjectReadSessionStream newStream = + ObjectReadSessionStream.create(executor, callable, child, retryContextProvider.create()); + children.put(newStream, child); + read.setOnCloseCallback( + () -> { + children.remove(newStream); + newStream.close(); + }); + child.putOutstandingRead(readId, read); + newStream.send(request); + } + } + + @VisibleForTesting + static final class ConcurrentIdentityMap { + private final ReentrantLock lock; + private final IdentityHashMap children; + + 
@VisibleForTesting + ConcurrentIdentityMap() { + lock = new ReentrantLock(); + children = new IdentityHashMap<>(); + } + + public void put(K key, V value) { + lock.lock(); + try { + children.put(key, value); + } finally { + lock.unlock(); + } + } + + public void remove(K key) { + lock.lock(); + try { + children.remove(key); + } finally { + lock.unlock(); + } + } + + public ArrayList drainEntries(BiFunction f) { + lock.lock(); + try { + Iterator> it = children.entrySet().iterator(); + ArrayList results = new ArrayList<>(children.size()); + while (it.hasNext()) { + Entry entry = it.next(); + K key = entry.getKey(); + V value = entry.getValue(); + it.remove(); + R r = f.apply(key, value); + results.add(r); + } + return results; + } finally { + lock.unlock(); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionSeekableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionSeekableByteChannel.java new file mode 100644 index 000000000000..d5679a296e5b --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionSeekableByteChannel.java @@ -0,0 +1,142 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkState; + +import com.google.common.base.Preconditions; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.SeekableByteChannel; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class ObjectReadSessionSeekableByteChannel implements SeekableByteChannel, IOAutoCloseable { + + private final ObjectReadSession session; + private final ReadAsSeekableChannel config; + private final long size; + private final ReadAsChannel channelConfig; + private final IOAutoCloseable closeAlongWithThis; + + private ReadableByteChannel rbc; + + private long position; + private boolean open = true; + + @Nullable private RangeSpec lastRangeSpec; + + ObjectReadSessionSeekableByteChannel( + ObjectReadSession session, ReadAsSeekableChannel config, IOAutoCloseable closeAlongWithThis) { + this.session = session; + this.config = config; + this.closeAlongWithThis = closeAlongWithThis; + this.size = session.getResource().getSize(); + this.position = 0; + this.channelConfig = + ReadProjectionConfigs.asChannel() + .withCrc32cValidationEnabled(config.getCrc32cValidationEnabled()); + } + + @Override + public int read(ByteBuffer dst) throws IOException { + if (!open) { + throw new ClosedChannelException(); + } + if (remaining() <= 0) { + return -1; + } + + int totalRead = 0; + if (rbc == null) { + RangeSpec apply = config.getRangeSpecFunction().apply(position, lastRangeSpec); + checkState( + apply.begin() == position, + "RangeSpec does not begin at provided position. 
expected = %s, actual = %s", + position, + apply.begin()); + rbc = session.readAs(channelConfig.withRangeSpec(apply)); + lastRangeSpec = apply; + } + + int read = rbc.read(dst); + if (read < 0) { + rbc.close(); + rbc = null; + } else { + totalRead += read; + position += read; + } + + return totalRead; + } + + private long remaining() { + return size - position; + } + + @Override + public long size() throws IOException { + return size; + } + + @Override + public long position() throws IOException { + return position; + } + + @Override + public SeekableByteChannel position(long newPosition) throws IOException { + Preconditions.checkArgument(newPosition >= 0, "newPosition >= 0 (%s >= 0)", newPosition); + if (position == newPosition) { + return this; + } + position = newPosition; + try (ReadableByteChannel ignore = rbc) { + rbc = null; + } + return this; + } + + @Override + public int write(ByteBuffer src) throws IOException { + throw new UnsupportedOperationException("write(ByteBuffer)"); + } + + @Override + public SeekableByteChannel truncate(long size) throws IOException { + throw new UnsupportedOperationException("truncate(long)"); + } + + @Override + public boolean isOpen() { + return open; + } + + @Override + public void close() throws IOException { + if (!open) { + return; + } + try (IOAutoCloseable ignore1 = closeAlongWithThis; + ReadableByteChannel ignore2 = rbc) { + open = false; + rbc = null; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionState.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionState.java new file mode 100644 index 000000000000..86656219a23a --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionState.java @@ -0,0 +1,269 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkState; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.cloud.storage.RetryContext.OnFailure; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.storage.v2.BidiReadHandle; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.BidiReadObjectSpec; +import com.google.storage.v2.Object; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.checkerframework.checker.lock.qual.GuardedBy; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class ObjectReadSessionState { + + private final GrpcCallContext baseContext; + private final BidiReadObjectRequest openRequest; + private final AtomicReference<@Nullable BidiReadHandle> bidiReadHandle; + private final 
AtomicReference<@Nullable String> routingToken; + private final AtomicReference<@MonotonicNonNull Object> metadata; + private final AtomicLong readIdSeq; + + @GuardedBy("this.lock") // https://errorprone.info/bugpattern/GuardedBy + private final Map> outstandingReads; + + private final ReentrantLock lock; + + ObjectReadSessionState( + @NonNull GrpcCallContext baseContext, @NonNull BidiReadObjectRequest openRequest) { + this( + baseContext, + openRequest, + new AtomicLong(1), + new AtomicReference<>(), + new AtomicReference<>(), + new AtomicReference<>()); + } + + private ObjectReadSessionState( + @NonNull GrpcCallContext baseContext, + @NonNull BidiReadObjectRequest openRequest, + AtomicLong readIdSeq, + AtomicReference<@Nullable BidiReadHandle> bidiReadHandle, + AtomicReference<@Nullable String> routingToken, + AtomicReference<@MonotonicNonNull Object> metadata) { + this.baseContext = baseContext; + this.openRequest = openRequest; + this.bidiReadHandle = bidiReadHandle; + this.routingToken = routingToken; + this.metadata = metadata; + this.readIdSeq = readIdSeq; + this.outstandingReads = new HashMap<>(); + this.lock = new ReentrantLock(); + } + + ObjectReadSessionState forkChild() { + return new ObjectReadSessionState( + baseContext, + openRequest, + readIdSeq, + new AtomicReference<>(bidiReadHandle.get()), + new AtomicReference<>(routingToken.get()), + new AtomicReference<>(metadata.get())); + } + + boolean canHandleNewRead(ObjectReadSessionStreamRead newRead) { + lock.lock(); + try { + // when the map is empty this will also return true, see #allMatch docs + return outstandingReads.values().stream().allMatch(r -> r.canShareStreamWith(newRead)); + } finally { + lock.unlock(); + } + } + + OpenArguments getOpenArguments() { + lock.lock(); + try { + BidiReadObjectRequest.Builder b = openRequest.toBuilder().clearReadRanges(); + + Object obj = metadata.get(); + BidiReadObjectSpec spec = openRequest.getReadObjectSpec(); + if (obj != null && obj.getGeneration() != 
spec.getGeneration()) { + b.getReadObjectSpecBuilder().setGeneration(obj.getGeneration()); + } + + String routingToken = this.routingToken.get(); + if (routingToken != null) { + b.getReadObjectSpecBuilder().setRoutingToken(routingToken); + } + + BidiReadHandle bidiReadHandle = this.bidiReadHandle.get(); + if (bidiReadHandle != null) { + b.getReadObjectSpecBuilder().setReadHandle(bidiReadHandle); + } + + outstandingReads.values().stream() + .filter(ObjectReadSessionStreamRead::readyToSend) + .map(ObjectReadSessionStreamRead::makeReadRange) + .forEach(b::addReadRanges); + + ImmutableMap> headers = + ImmutableMap.of( + "x-goog-request-params", + ImmutableList.of( + Stream.of( + "bucket=" + spec.getBucket(), + routingToken != null ? "routing_token=" + routingToken : null) + .filter(Objects::nonNull) + .collect(Collectors.joining("&")))); + return OpenArguments.of(baseContext.withExtraHeaders(headers), b.build()); + } finally { + lock.unlock(); + } + } + + void setBidiReadHandle(BidiReadHandle newValue) { + bidiReadHandle.set(newValue); + } + + Object getMetadata() { + return metadata.get(); + } + + void setMetadata(Object metadata) { + this.metadata.set(metadata); + } + + long newReadId() { + return readIdSeq.getAndIncrement(); + } + + @Nullable ObjectReadSessionStreamRead getOutstandingRead(long key) { + lock.lock(); + try { + return outstandingReads.get(key); + } finally { + lock.unlock(); + } + } + + void putOutstandingRead(long key, ObjectReadSessionStreamRead value) { + lock.lock(); + try { + outstandingReads.put(key, value); + } finally { + lock.unlock(); + } + } + + void removeOutstandingRead(long key) { + lock.lock(); + try { + outstandingReads.remove(key); + } finally { + lock.unlock(); + } + } + + OnFailure removeOutstandingReadOnFailure(long key, OnFailure onFail) { + return t -> { + removeOutstandingRead(key); + onFail.onFailure(t); + }; + } + + void setRoutingToken(String routingToken) { + this.routingToken.set(routingToken); + } + + 
ObjectReadSessionStreamRead assignNewReadId(long oldReadId) { + lock.lock(); + try { + ObjectReadSessionStreamRead remove = outstandingReads.remove(oldReadId); + checkState(remove != null, "unable to locate old read"); + long newReadId = newReadId(); + ObjectReadSessionStreamRead withNewReadId = remove.withNewReadId(newReadId); + outstandingReads.put(newReadId, withNewReadId); + return withNewReadId; + } finally { + lock.unlock(); + } + } + + ApiFuture failAll(Executor executor, Supplier terminalFailure) { + lock.lock(); + try { + Iterator>> iter = + outstandingReads.entrySet().iterator(); + ArrayList> futures = new ArrayList<>(); + while (iter.hasNext()) { + Entry> entry = iter.next(); + iter.remove(); + ObjectReadSessionStreamRead read = entry.getValue(); + read.preFail(); + ApiFuture f = + ApiFutures.transformAsync( + ApiFutures.immediateFuture("trigger"), + ignore -> read.fail(StorageException.coalesce(terminalFailure.get())), + executor); + futures.add(f); + } + // for our result here, we don't care if the individual futures fail or succeed, only that + // they resolve. Only collect successful results so we don't cause a failure to the caller + // that awaits this future. 
+ return ApiFutures.successfulAsList(futures); + } finally { + lock.unlock(); + } + } + + static final class OpenArguments { + private final GrpcCallContext ctx; + private final BidiReadObjectRequest req; + + private OpenArguments(GrpcCallContext ctx, BidiReadObjectRequest req) { + this.ctx = ctx; + this.req = req; + } + + public GrpcCallContext getCtx() { + return ctx; + } + + public BidiReadObjectRequest getReq() { + return req; + } + + public static OpenArguments of(GrpcCallContext ctx, BidiReadObjectRequest req) { + return new OpenArguments(ctx, req); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionStream.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionStream.java new file mode 100644 index 000000000000..6f02b16866a8 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionStream.java @@ -0,0 +1,558 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.ClientStream; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.StreamController; +import com.google.cloud.storage.GrpcUtils.ZeroCopyBidiStreamingCallable; +import com.google.cloud.storage.Hasher.UncheckedChecksumMismatchException; +import com.google.cloud.storage.ObjectReadSessionState.OpenArguments; +import com.google.cloud.storage.ResponseContentLifecycleHandle.ChildRef; +import com.google.cloud.storage.RetryContext.OnSuccess; +import com.google.cloud.storage.StorageDataClient.Borrowable; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import com.google.rpc.Status; +import com.google.storage.v2.BidiReadObjectError; +import com.google.storage.v2.BidiReadObjectRedirectedError; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.BidiReadObjectResponse; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.ObjectRangeData; +import com.google.storage.v2.ReadRange; +import com.google.storage.v2.ReadRangeError; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; +import org.checkerframework.checker.nullness.qual.Nullable; + 
+final class ObjectReadSessionStream + implements ClientStream, ApiFuture, IOAutoCloseable, Borrowable { + + private final SettableApiFuture objectReadSessionResolveFuture; + + private final ObjectReadSessionState state; + private final ScheduledExecutorService executor; + private final ZeroCopyBidiStreamingCallable + callable; + private final RetryContext streamRetryContext; + private final int maxRedirectsAllowed; + + private final AtomicInteger openLeases; + private volatile MonitoringResponseObserver monitoringResponseObserver; + private volatile ResponseObserver responseObserver; + private volatile ClientStream requestStream; + private volatile StreamController controller; + private final AtomicInteger redirectCounter; + + private ObjectReadSessionStream( + ObjectReadSessionState state, + ScheduledExecutorService executor, + ZeroCopyBidiStreamingCallable callable, + int maxRedirectsAllowed, + RetryContext backoff) { + this.state = state; + this.executor = executor; + this.callable = callable; + this.streamRetryContext = backoff; + this.objectReadSessionResolveFuture = SettableApiFuture.create(); + this.maxRedirectsAllowed = maxRedirectsAllowed; + this.openLeases = new AtomicInteger(1); + this.redirectCounter = new AtomicInteger(); + } + + // TODO: make this more elegant + private ClientStream getRequestStream(@Nullable GrpcCallContext context) { + if (requestStream != null) { + return requestStream; + } else { + synchronized (this) { + if (requestStream == null) { + monitoringResponseObserver = + new MonitoringResponseObserver(new BidiReadObjectResponseObserver()); + responseObserver = + GrpcUtils.decorateAsStateChecking( + new RedirectHandlingResponseObserver(monitoringResponseObserver)); + requestStream = callable.splitCall(responseObserver, context); + } + return requestStream; + } + } + } + + @Override + public void close() { + ApiFuture closeAsync = closeAsync(); + ApiFutureUtils.await(closeAsync); + } + + public ApiFuture closeAsync() { + if (!isOpen()) 
{ + return ApiFutures.immediateFuture(null); + } + int updatedLeaseCount = openLeases.decrementAndGet(); + if (updatedLeaseCount == 0) { + AsyncSessionClosedException cause = new AsyncSessionClosedException("Session already closed"); + ApiFuture f = failAll(() -> new StorageException(0, "Parent stream shutdown", cause)); + return ApiFutures.transformAsync(f, ignore -> ApiFutures.immediateFuture(null), executor); + } else { + return ApiFutures.immediateFuture(null); + } + } + + private void cleanUp() { + cancel(true); + if (requestStream != null) { + requestStream.closeSend(); + ApiFutureUtils.await(monitoringResponseObserver.closeSignal); + requestStream = null; + } + } + + @Override + public void send(BidiReadObjectRequest request) { + checkOpen(); + if (requestStream == null) { + OpenArguments openArguments = state.getOpenArguments(); + BidiReadObjectRequest merged = + openArguments.getReq().toBuilder().clearReadRanges().mergeFrom(request).build(); + getRequestStream(openArguments.getCtx()).send(merged); + } else { + getRequestStream(null).send(request); + } + } + + @Override + public void closeSendWithError(Throwable t) { + checkOpen(); + getRequestStream(null).closeSendWithError(t); + } + + @Override + public void closeSend() { + checkOpen(); + getRequestStream(null).closeSend(); + } + + @Override + public boolean isSendReady() { + checkOpen(); + return getRequestStream(null).isSendReady(); + } + + @Override + public void addListener(Runnable listener, Executor executor) { + objectReadSessionResolveFuture.addListener(listener, executor); + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return objectReadSessionResolveFuture.cancel(mayInterruptIfRunning); + } + + @Override + public Void get() throws InterruptedException, ExecutionException { + return objectReadSessionResolveFuture.get(); + } + + @Override + public Void get(long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + return 
objectReadSessionResolveFuture.get(timeout, unit); + } + + @Override + public boolean isCancelled() { + return objectReadSessionResolveFuture.isCancelled(); + } + + @Override + public boolean isDone() { + return objectReadSessionResolveFuture.isDone(); + } + + boolean isOpen() { + return openLeases.get() > 0; + } + + public void borrow() { + checkOpen(); + openLeases.incrementAndGet(); + } + + private void checkOpen() { + Preconditions.checkState(isOpen(), "Stream closed"); + } + + @VisibleForTesting + void restart() { + Preconditions.checkState( + requestStream == null, "attempting to restart stream when stream is already active"); + + OpenArguments openArguments = state.getOpenArguments(); + BidiReadObjectRequest req = openArguments.getReq(); + if (!req.getReadRangesList().isEmpty() || !objectReadSessionResolveFuture.isDone()) { + ClientStream requestStream1 = getRequestStream(openArguments.getCtx()); + requestStream1.send(req); + } + } + + private void failAll(Throwable terminalFailure) { + openLeases.set(0); + try { + objectReadSessionResolveFuture.setException(terminalFailure); + state.failAll(executor, () -> terminalFailure); + } finally { + cleanUp(); + } + } + + private ApiFuture failAll(Supplier terminalFailure) { + openLeases.set(0); + try { + objectReadSessionResolveFuture.setException(terminalFailure.get()); + return state.failAll(executor, terminalFailure); + } finally { + cleanUp(); + } + } + + private final class BidiReadObjectResponseObserver + implements ResponseObserver { + + private BidiReadObjectResponseObserver() {} + + @Override + public void onStart(StreamController controller) { + ObjectReadSessionStream.this.controller = controller; + controller.disableAutoInboundFlowControl(); + controller.request(1); + } + + @SuppressWarnings("rawtypes") + @Override + public void onResponse(BidiReadObjectResponse response) { + controller.request(1); + try (ResponseContentLifecycleHandle handle = + 
callable.getResponseContentLifecycleManager().get(response)) { + if (response.hasMetadata()) { + state.setMetadata(response.getMetadata()); + } + if (response.hasReadHandle()) { + state.setBidiReadHandle(response.getReadHandle()); + } + List rangeData = response.getObjectDataRangesList(); + if (rangeData.isEmpty()) { + return; + } + for (int i = 0; i < rangeData.size(); i++) { + ObjectRangeData d = rangeData.get(i); + ReadRange readRange = d.getReadRange(); + long id = readRange.getReadId(); + ObjectReadSessionStreamRead read = state.getOutstandingRead(id); + if (read == null || !read.acceptingBytes()) { + continue; + } + ChecksummedData checksummedData = d.getChecksummedData(); + ByteString content = checksummedData.getContent(); + int crc32C = checksummedData.getCrc32C(); + + try { + // On a Threadripper PRO 3945WX + // java11+ calculating the crc32c of a 2MiB segment is ~70us + // java8 the same calculation is ~1600us + // not something to worry about offloading to another thread at this time. 
+ read.hasher().validateUnchecked(Crc32cValue.of(crc32C), content); + } catch (UncheckedChecksumMismatchException e) { + read.recordError( + e, + restartReadFromCurrentOffset(id), + state.removeOutstandingReadOnFailure(id, read::fail)); + continue; + } + + final int idx = i; + long begin = readRange.getReadOffset(); + long position = read.readOffset(); + if (begin == position) { + ChildRef childRef; + childRef = + handle.borrow(r -> r.getObjectDataRanges(idx).getChecksummedData().getContent()); + read.accept(childRef); + } else if (begin < position) { + int skip = Math.toIntExact(position - begin); + ChildRef childRef = + handle.borrow( + r -> + r.getObjectDataRanges(idx) + .getChecksummedData() + .getContent() + .substring(skip)); + read.accept(childRef); + ApiException apiException = + ApiExceptionFactory.createException( + String.format("position = %d, readRange.read_offset = %d", position, begin), + null, + GrpcStatusCode.of(Code.OUT_OF_RANGE), + true); + read.recordError( + apiException, + restartReadFromCurrentOffset(id), + state.removeOutstandingReadOnFailure(id, read::fail)); + continue; + } else { + ApiException apiException = + ApiExceptionFactory.createException( + String.format("position = %d, readRange.read_offset = %d", position, begin), + null, + GrpcStatusCode.of(Code.OUT_OF_RANGE), + true); + read.recordError( + apiException, + restartReadFromCurrentOffset(id), + state.removeOutstandingReadOnFailure(id, read::fail)); + continue; + } + + if (d.getRangeEnd()) { + // invoke eof on exec, the resolving future could have a downstream callback + // that we don't want to block this grpc thread + executor.execute( + StorageException.liftToRunnable( + () -> { + read.eof(); + // don't remove the outstanding read until the future has been resolved + state.removeOutstandingRead(id); + })); + } + } + } catch (IOException e) { + // + // When using zero-copy, the returned InputStream is of type InputStream rather than its + // concrete subclass. 
The subclass is `io.grpc.internal.ReadableBuffers.BufferInputStream` + // which exclusively operates on a `io.grpc.internal.ReadableBuffer`. `ReadableBuffer`s + // close method does not throw. + // + // This is defined as an exhaustiveness compliance. {@code javac} dictates we handle an + // `IOException`, even though the underlying classes won't throw it. If the behavior in grpc + // at some point does throw, we catch it here and funnel it into the stream retry handling. + // + requestStream = null; + streamRetryContext.recordError( + e, ObjectReadSessionStream.this::restart, ObjectReadSessionStream.this::failAll); + } + } + + @Override + public void onError(Throwable t) { + requestStream = null; + BidiReadObjectError error = GrpcUtils.getBidiReadObjectError(t); + if (error == null) { + // if there isn't a BidiReadObjectError that may contain more narrow failures, propagate + // the failure as is to the stream. + streamRetryContext.recordError( + t, ObjectReadSessionStream.this::restart, ObjectReadSessionStream.this::failAll); + return; + } + + List rangeErrors = error.getReadRangeErrorsList(); + if (rangeErrors.isEmpty()) { + // if there aren't any specific read id's that contain errors, propagate the error as is to + // the stream. + streamRetryContext.recordError( + t, ObjectReadSessionStream.this::restart, ObjectReadSessionStream.this::failAll); + return; + } + for (ReadRangeError rangeError : rangeErrors) { + Status status = rangeError.getStatus(); + long id = rangeError.getReadId(); + ObjectReadSessionStreamRead read = state.getOutstandingRead(id); + if (read == null) { + continue; + } + // mark read as failed, but don't resolve its future now. Schedule the delivery of the + // failure in executor to ensure any downstream future doesn't block this IO thread. 
+ read.preFail(); + executor.execute( + StorageException.liftToRunnable( + () -> + state + .removeOutstandingReadOnFailure(id, read::fail) + .onFailure(GrpcUtils.statusToApiException(status)))); + } + // now that we've failed specific reads, raise a retryable ABORTED error to the stream to + // cause it to retry and pending remaining reads. + ApiException apiException = + ApiExceptionFactory.createException( + "Stream error, reclassifying as ABORTED for reads not specified in" + + " BidiReadObjectError", + t, + GrpcStatusCode.of(Code.ABORTED), + true); + streamRetryContext.recordError( + apiException, + ObjectReadSessionStream.this::restart, + ObjectReadSessionStream.this::failAll); + } + + private OnSuccess restartReadFromCurrentOffset(long id) { + return () -> { + //noinspection resource + ObjectReadSessionStreamRead readWithNewId = state.assignNewReadId(id); + BidiReadObjectRequest requestWithNewReadId = + BidiReadObjectRequest.newBuilder().addReadRanges(readWithNewId.makeReadRange()).build(); + ObjectReadSessionStream.this.send(requestWithNewReadId); + }; + } + + @Override + public void onComplete() {} + } + + private class MonitoringResponseObserver implements ResponseObserver { + private final ResponseObserver delegate; + private final SettableApiFuture openSignal; + private final SettableApiFuture closeSignal; + + private MonitoringResponseObserver(ResponseObserver delegate) { + this.delegate = delegate; + this.openSignal = SettableApiFuture.create(); + this.closeSignal = SettableApiFuture.create(); + } + + @Override + public void onStart(StreamController controller) { + delegate.onStart(controller); + } + + @Override + public void onResponse(BidiReadObjectResponse response) { + delegate.onResponse(response); + openSignal.set(null); + objectReadSessionResolveFuture.set(null); + } + + @Override + public void onError(Throwable t) { + delegate.onError(t); + openSignal.setException(t); + closeSignal.setException(t); + } + + @Override + public void onComplete() { 
+ delegate.onComplete(); + if (state.getMetadata() == null) { + StatusRuntimeException cause = + Code.UNAVAILABLE + .toStatus() + .withDescription("onComplete without prior onNext") + .asRuntimeException(); + ApiException apiException = + ApiExceptionFactory.createException(cause, GrpcStatusCode.of(Code.UNAVAILABLE), false); + StorageException storageException = + new StorageException(0, cause.getMessage(), apiException); + streamRetryContext.recordError( + storageException, + ObjectReadSessionStream.this::restart, + objectReadSessionResolveFuture::setException); + } + openSignal.set(null); + closeSignal.set(null); + } + } + + private final class RedirectHandlingResponseObserver + implements ResponseObserver { + private final ResponseObserver delegate; + + private RedirectHandlingResponseObserver(ResponseObserver delegate) { + this.delegate = delegate; + } + + @Override + public void onStart(StreamController controller) { + delegate.onStart(controller); + } + + @Override + public void onResponse(BidiReadObjectResponse response) { + redirectCounter.set(0); + delegate.onResponse(response); + } + + @Override + public void onError(Throwable t) { + BidiReadObjectRedirectedError error = GrpcUtils.getBidiReadObjectRedirectedError(t); + if (error == null) { + delegate.onError(t); + return; + } + requestStream = null; + int redirectCount = redirectCounter.incrementAndGet(); + if (redirectCount > maxRedirectsAllowed) { + // attach the fact we're ignoring the redirect to the original exception as a suppressed + // Exception. The lower level handler can then perform its usual handling, but if things + // bubble all the way up to the invoker we'll be able to see it in a bug report. 
+ t.addSuppressed(new MaxRedirectsExceededException(maxRedirectsAllowed, redirectCount)); + delegate.onError(t); + objectReadSessionResolveFuture.setException(t); + return; + } + if (error.hasReadHandle()) { + state.setBidiReadHandle(error.getReadHandle()); + } + if (error.hasRoutingToken()) { + state.setRoutingToken(error.getRoutingToken()); + } + executor.execute(ObjectReadSessionStream.this::restart); + } + + @Override + public void onComplete() { + delegate.onComplete(); + } + } + + static ObjectReadSessionStream create( + ScheduledExecutorService executor, + ZeroCopyBidiStreamingCallable callable, + ObjectReadSessionState state, + RetryContext retryContext) { + + int maxRedirectsAllowed = 3; // TODO: make this configurable in the ultimate public surface + return new ObjectReadSessionStream( + state, executor, callable, maxRedirectsAllowed, retryContext); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionStreamRead.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionStreamRead.java new file mode 100644 index 000000000000..92d902257ad7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionStreamRead.java @@ -0,0 +1,82 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.core.InternalApi; +import com.google.api.core.InternalExtensionOnly; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.AccumulatingRead; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.ByteArrayAccumulatingRead; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.StreamingRead; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.ZeroCopyByteStringAccumulatingRead; +import com.google.cloud.storage.ResponseContentLifecycleHandle.ChildRef; +import com.google.cloud.storage.RetryContext.OnFailure; +import com.google.cloud.storage.RetryContext.OnSuccess; +import com.google.storage.v2.ReadRange; +import java.io.IOException; + +@InternalApi +@InternalExtensionOnly +interface ObjectReadSessionStreamRead extends IOAutoCloseable { + + Projection project(); + + long readOffset(); + + boolean acceptingBytes(); + + void accept(ChildRef childRef) throws IOException; + + void eof() throws IOException; + + void preFail(); + + ApiFuture fail(Throwable t); + + ObjectReadSessionStreamRead withNewReadId(long newReadId); + + ReadRange makeReadRange(); + + void recordError(T t, OnSuccess onSuccess, OnFailure onFailure); + + boolean readyToSend(); + + Hasher hasher(); + + boolean canShareStreamWith(ObjectReadSessionStreamRead other); + + void setOnCloseCallback(IOAutoCloseable onCloseCallback); + + void internalClose() throws IOException; + + static AccumulatingRead createByteArrayAccumulatingRead( + long readId, RangeSpec rangeSpec, Hasher hasher, RetryContext retryContext) { + return new ByteArrayAccumulatingRead( + readId, rangeSpec, hasher, retryContext, IOAutoCloseable.noOp()); + } + + static ZeroCopyByteStringAccumulatingRead createZeroCopyByteStringAccumulatingRead( + long readId, RangeSpec rangeSpec, Hasher hasher, RetryContext retryContext) { + return new ZeroCopyByteStringAccumulatingRead( + readId, rangeSpec, hasher, 
retryContext, IOAutoCloseable.noOp()); + } + + static StreamingRead streamingRead( + long readId, RangeSpec rangeSpec, Hasher hasher, RetryContext retryContext) { + return new StreamingRead(readId, rangeSpec, hasher, retryContext, IOAutoCloseable.noOp()); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/OpenTelemetryBootstrappingUtils.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/OpenTelemetryBootstrappingUtils.java new file mode 100644 index 000000000000..53fea0b8b6c2 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/OpenTelemetryBootstrappingUtils.java @@ -0,0 +1,442 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFunction; +import com.google.api.gax.rpc.PermissionDeniedException; +import com.google.api.gax.rpc.UnavailableException; +import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; +import com.google.cloud.opentelemetry.metric.MetricConfiguration; +import com.google.cloud.opentelemetry.metric.MonitoredResourceDescription; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterables; +import io.grpc.ManagedChannelBuilder; +import io.grpc.opentelemetry.GrpcOpenTelemetry; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.api.internal.StringUtils; +import io.opentelemetry.contrib.gcp.resource.GCPResourceProvider; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.common.export.MemoryMode; +import io.opentelemetry.sdk.metrics.Aggregation; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.View; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.DefaultAggregationSelector; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import io.opentelemetry.sdk.resources.Resource; +import java.math.BigDecimal; +import java.math.MathContext; +import java.net.NoRouteToHostException; +import java.util.ArrayList; +import java.util.Collection; +import 
java.util.List; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class OpenTelemetryBootstrappingUtils { + private static final Collection METRICS_TO_ENABLE = + ImmutableList.of( + "grpc.lb.wrr.rr_fallback", + "grpc.lb.wrr.endpoint_weight_not_yet_usable", + "grpc.lb.wrr.endpoint_weight_stale", + "grpc.lb.wrr.endpoint_weights", + "grpc.lb.rls.cache_entries", + "grpc.lb.rls.cache_size", + "grpc.lb.rls.default_target_picks", + "grpc.lb.rls.target_picks", + "grpc.lb.rls.failed_picks", + "grpc.xds_client.connected", + "grpc.xds_client.server_failure", + "grpc.xds_client.resource_updates_valid", + "grpc.xds_client.resource_updates_invalid", + "grpc.xds_client.resources"); + + private static final Collection METRICS_ENABLED_BY_DEFAULT = + ImmutableList.of( + "grpc.client.attempt.sent_total_compressed_message_size", + "grpc.client.attempt.rcvd_total_compressed_message_size", + "grpc.client.attempt.started", + "grpc.client.attempt.duration", + "grpc.client.call.duration"); + + static final Logger log = Logger.getLogger(OpenTelemetryBootstrappingUtils.class.getName()); + + @NonNull + static ChannelConfigurator enableGrpcMetrics( + @Nullable ChannelConfigurator channelConfigurator, + String endpoint, + @Nullable String projectId, + String universeDomain, + boolean shouldSuppressExceptions) { + GCPResourceProvider resourceProvider = new GCPResourceProvider(); + Attributes detectedAttributes = resourceProvider.getAttributes(); + + @Nullable String detectedProjectId = + detectedAttributes.get(AttributeKey.stringKey("cloud.account.id")); + if (projectId == null && detectedProjectId == null) { + log.warning( + "Unable to determine the Project ID in order to report metrics. 
No gRPC client metrics" + + " will be reported."); + return channelConfigurator != null ? channelConfigurator : ChannelConfigurator.identity(); + } + + String projectIdToUse = detectedProjectId == null ? projectId : detectedProjectId; + if (!projectIdToUse.equals(projectId)) { + log.warning( + "The Project ID configured for gRPC client metrics is " + + projectIdToUse + + ", but the Project ID of the storage client is " + + projectId + + ". Make sure that the service account in use has the required metric writing role " + + "(roles/monitoring.metricWriter) in the project " + + projectIdToUse + + ", or metrics will not be written."); + } + + String metricServiceEndpoint = getCloudMonitoringEndpoint(endpoint, universeDomain); + SdkMeterProvider provider = + createMeterProvider( + metricServiceEndpoint, projectIdToUse, detectedAttributes, shouldSuppressExceptions); + + OpenTelemetrySdk openTelemetrySdk = + OpenTelemetrySdk.builder().setMeterProvider(provider).build(); + GrpcOpenTelemetry grpcOpenTelemetry = + GrpcOpenTelemetry.newBuilder() + .sdk(openTelemetrySdk) + .addOptionalLabel("grpc.lb.locality") + .enableMetrics(METRICS_TO_ENABLE) + .build(); + ChannelConfigurator otelConfigurator = + b -> { + grpcOpenTelemetry.configureChannelBuilder(b); + return b; + }; + return otelConfigurator.andThen(channelConfigurator); + } + + @SuppressWarnings("rawtypes") // ManagedChannelBuilder + @FunctionalInterface + interface ChannelConfigurator extends ApiFunction { + @NonNull + default ChannelConfigurator andThen(@Nullable ChannelConfigurator then) { + if (then == null) { + return this; + } + return b -> then.apply(this.apply(b)); + } + + static ChannelConfigurator identity() { + return IdentityChannelConfigurator.INSTANCE; + } + + static ChannelConfigurator lift( + @Nullable ApiFunction f) { + if (f == null) { + return identity(); + } + return f::apply; + } + } + + @SuppressWarnings("rawtypes") // ManagedChannelBuilder + private static final class IdentityChannelConfigurator 
implements ChannelConfigurator { + private static final IdentityChannelConfigurator INSTANCE = new IdentityChannelConfigurator(); + + private IdentityChannelConfigurator() {} + + @Override + public ManagedChannelBuilder apply(ManagedChannelBuilder input) { + return input; + } + } + + @VisibleForTesting + static String getCloudMonitoringEndpoint(String endpoint, String universeDomain) { + String metricServiceEndpoint = "monitoring.googleapis.com"; + + // use contains instead of equals because endpoint has a port in it + if (universeDomain != null && endpoint.contains("storage." + universeDomain)) { + metricServiceEndpoint = "monitoring." + universeDomain; + } else if (!endpoint.contains("storage.googleapis.com")) { + String canonicalEndpoint = "storage.googleapis.com"; + String privateEndpoint = "private.googleapis.com"; + String restrictedEndpoint = "restricted.googleapis.com"; + if (universeDomain != null) { + canonicalEndpoint = "storage." + universeDomain; + privateEndpoint = "private." + universeDomain; + restrictedEndpoint = "restricted." 
+ universeDomain; + } + String match = + ImmutableList.of(canonicalEndpoint, privateEndpoint, restrictedEndpoint).stream() + .filter(s -> endpoint.contains(s) || endpoint.contains("google-c2p:///" + s)) + .collect(Collectors.joining()); + if (!StringUtils.isNullOrEmpty(match)) { + metricServiceEndpoint = match; + } + } + return metricServiceEndpoint + ":" + endpoint.split(":")[1]; + } + + @VisibleForTesting + static SdkMeterProvider createMeterProvider( + String metricServiceEndpoint, + String projectIdToUse, + Attributes detectedAttributes, + boolean shouldSuppressExceptions) { + + MonitoredResourceDescription monitoredResourceDescription = + new MonitoredResourceDescription( + "storage.googleapis.com/Client", + ImmutableSet.of( + "project_id", "location", "cloud_platform", "host_id", "instance_id", "api")); + + MetricExporter cloudMonitoringExporter = + GoogleCloudMetricExporter.createWithConfiguration( + MetricConfiguration.builder() + .setMonitoredResourceDescription(monitoredResourceDescription) + .setInstrumentationLibraryLabelsEnabled(false) + .setMetricServiceEndpoint(metricServiceEndpoint) + .setPrefix("storage.googleapis.com/client") + .setUseServiceTimeSeries(true) + .setProjectId(projectIdToUse) + .build()); + + SdkMeterProviderBuilder providerBuilder = SdkMeterProvider.builder(); + + // This replaces the dots with slashes in each metric, which is the format needed for this + // monitored resource + for (String metric : + ImmutableList.copyOf(Iterables.concat(METRICS_TO_ENABLE, METRICS_ENABLED_BY_DEFAULT))) { + providerBuilder.registerView( + InstrumentSelector.builder().setName(metric).build(), + View.builder().setName(metric.replace(".", "/")).build()); + } + MetricExporter exporter = + shouldSuppressExceptions + ? 
new PermissionDeniedSingleReportMetricsExporter(cloudMonitoringExporter) + : cloudMonitoringExporter; + AttributesBuilder attributesBuilder = + Attributes.builder() + .put("gcp.resource_type", "storage.googleapis.com/Client") + .put("project_id", projectIdToUse) + .put("instance_id", UUID.randomUUID().toString()) + .put("api", "grpc"); + String detectedLocation = detectedAttributes.get(AttributeKey.stringKey("cloud.region")); + if (detectedLocation != null) { + attributesBuilder.put("location", detectedLocation); + } else { + attributesBuilder.put("location", "global"); + } + String detectedCloudPlatform = detectedAttributes.get(AttributeKey.stringKey("cloud.platform")); + if (detectedCloudPlatform != null) { + attributesBuilder.put("cloud_platform", detectedCloudPlatform); + } else { + attributesBuilder.put("cloud_platform", "unknown"); + } + String detectedHostId = detectedAttributes.get(AttributeKey.stringKey("host.id")); + if (detectedHostId != null) { + attributesBuilder.put("host_id", detectedHostId); + } else { + attributesBuilder.put("host_id", "unknown"); + } + providerBuilder + .registerMetricReader( + PeriodicMetricReader.builder(exporter) + .setInterval(java.time.Duration.ofSeconds(60)) + .build()) + .setResource(Resource.create(attributesBuilder.build())); + + addHistogramView( + providerBuilder, latencyHistogramBoundaries(), "grpc/client/attempt/duration", "s"); + addHistogramView( + providerBuilder, + sizeHistogramBoundaries(), + "grpc/client/attempt/rcvd_total_compressed_message_size", + "By"); + addHistogramView( + providerBuilder, + sizeHistogramBoundaries(), + "grpc/client/attempt/sent_total_compressed_message_size", + "By"); + + return providerBuilder.build(); + } + + private static void addHistogramView( + SdkMeterProviderBuilder provider, List boundaries, String name, String unit) { + InstrumentSelector instrumentSelector = + InstrumentSelector.builder() + .setType(InstrumentType.HISTOGRAM) + .setUnit(unit) + .setName(name) + 
.setMeterName("grpc-java") + .setMeterSchemaUrl("") + .build(); + View view = + View.builder() + .setName(name) + .setDescription( + "A view of " + + name + + " with histogram boundaries more appropriate for Google Cloud Storage RPCs") + .setAggregation(Aggregation.explicitBucketHistogram(boundaries)) + .build(); + provider.registerView(instrumentSelector, view); + } + + private static List latencyHistogramBoundaries() { + List boundaries = new ArrayList<>(); + BigDecimal boundary = new BigDecimal(0, MathContext.UNLIMITED); + BigDecimal increment = new BigDecimal("0.002", MathContext.UNLIMITED); // 2ms + + // 2ms buckets for the first 100ms, so we can have higher resolution for uploads and downloads + // in the 100 KiB range + for (int i = 0; i != 50; i++) { + boundaries.add(boundary.doubleValue()); + boundary = boundary.add(increment); + } + + // For the remaining buckets do 10 10ms, 10 20ms, and so on, up until 5 minutes + increment = new BigDecimal("0.01", MathContext.UNLIMITED); // 10 ms + for (int i = 0; i != 150 && boundary.compareTo(new BigDecimal(300)) < 1; i++) { + boundaries.add(boundary.doubleValue()); + if (i != 0 && i % 10 == 0) { + increment = increment.multiply(new BigDecimal(2)); + } + boundary = boundary.add(increment); + } + + return boundaries; + } + + private static List sizeHistogramBoundaries() { + long kb = 1024; + long mb = 1024 * kb; + long gb = 1024 * mb; + + List boundaries = new ArrayList<>(); + long boundary = 0; + long increment = 128 * kb; + + // 128 KiB increments up to 4MiB, then exponential growth + while (boundaries.size() < 200 && boundary <= 16 * gb) { + boundaries.add((double) boundary); + boundary += increment; + if (boundary >= 4 * mb) { + increment *= 2; + } + } + return boundaries; + } + + private static final class PermissionDeniedSingleReportMetricsExporter implements MetricExporter { + private final MetricExporter delegate; + private final AtomicBoolean seenPermissionDenied = new AtomicBoolean(false); + private final 
AtomicBoolean seenNoRouteToHost = new AtomicBoolean(false); + + private PermissionDeniedSingleReportMetricsExporter(MetricExporter delegate) { + this.delegate = delegate; + } + + @Override + public CompletableResultCode export(Collection metrics) { + if (seenPermissionDenied.get() && seenNoRouteToHost.get()) { + return CompletableResultCode.ofFailure(); + } + + try { + return delegate.export(metrics); + } catch (PermissionDeniedException e) { + if (!seenPermissionDenied.get()) { + seenPermissionDenied.set(true); + throw e; + } + return CompletableResultCode.ofFailure(); + } catch (UnavailableException e) { + if (seenPermissionDenied.get() + && !seenNoRouteToHost.get() + && ultimateCause(e, NoRouteToHostException.class)) { + seenNoRouteToHost.set(true); + throw e; + } + return CompletableResultCode.ofFailure(); + } + } + + @Override + public Aggregation getDefaultAggregation(InstrumentType instrumentType) { + return delegate.getDefaultAggregation(instrumentType); + } + + @Override + public MemoryMode getMemoryMode() { + return delegate.getMemoryMode(); + } + + @Override + public CompletableResultCode flush() { + return delegate.flush(); + } + + @Override + public CompletableResultCode shutdown() { + return delegate.shutdown(); + } + + @Override + public void close() { + delegate.close(); + } + + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return delegate.getAggregationTemporality(instrumentType); + } + + @Override + public DefaultAggregationSelector with(InstrumentType instrumentType, Aggregation aggregation) { + return delegate.with(instrumentType, aggregation); + } + + private static boolean ultimateCause(Throwable t, Class c) { + if (t == null) { + return false; + } + + Throwable cause = t.getCause(); + if (cause != null && c.isAssignableFrom(cause.getClass())) { + return true; + } else { + return ultimateCause(cause, c); + } + } + } +} diff --git 
a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Option.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Option.java new file mode 100644 index 000000000000..e526486ef356 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Option.java @@ -0,0 +1,65 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.UnifiedOpts.Opt; +import java.io.Serializable; +import java.util.Arrays; +import java.util.Collection; +import java.util.function.IntFunction; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** Base class for Storage operation option. */ +@Deprecated +public abstract class Option extends UnifiedOpts.OptionShim + implements Serializable { + + private static final long serialVersionUID = -7579883369516703936L; + + Option(O opt) { + super(opt); + } + + @SafeVarargs + static > O[] dedupe(IntFunction gen, O... os) { + return dedupe(gen, Arrays.stream(os)); + } + + @SafeVarargs + static > O[] dedupe(IntFunction gen, Collection collection, O... os) { + return dedupe(gen, Stream.of(collection.stream(), Arrays.stream(os)).flatMap(s -> s)); + } + + @SafeVarargs + static > O[] dedupe(IntFunction gen, O[] array, O... 
os) { + return dedupe(gen, Stream.of(Arrays.stream(array), Arrays.stream(os)).flatMap(s -> s)); + } + + /** + * All Options contain an {@link Opt}, {@code Opt}s are distinct classes allowing us to group + * based on those classes. Once grouped, we select the last element to provide last wins behavior. + * + *

Each of these helpers is an internal implementation detail, primarily due to the fact that + * generic arrays can not be instantiated in Java and requires a factory to be passed in. + */ + private static > O[] dedupe(IntFunction gen, Stream s) { + return s.collect(Collectors.groupingBy(o -> o.getOpt().getClass())).values().stream() + .map(l -> l.get(l.size() - 1)) + .toArray(gen); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelMultipartUploadClientDecorator.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelMultipartUploadClientDecorator.java new file mode 100644 index 000000000000..f5e7080fed75 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelMultipartUploadClientDecorator.java @@ -0,0 +1,190 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.BetaApi; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.multipartupload.model.AbortMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.AbortMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.ListMultipartUploadsRequest; +import com.google.cloud.storage.multipartupload.model.ListMultipartUploadsResponse; +import com.google.cloud.storage.multipartupload.model.ListPartsRequest; +import com.google.cloud.storage.multipartupload.model.ListPartsResponse; +import com.google.cloud.storage.multipartupload.model.UploadPartRequest; +import com.google.cloud.storage.multipartupload.model.UploadPartResponse; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.StatusCode; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Scope; + +/** + * A decorator for {@link MultipartUploadClient} that adds OpenTelemetry tracing. + * + * @since 2.62.0 This new api is in preview and is subject to breaking changes. 
+ */ +@BetaApi +final class OtelMultipartUploadClientDecorator extends MultipartUploadClient { + + private final MultipartUploadClient delegate; + private final Tracer tracer; + + private OtelMultipartUploadClientDecorator( + MultipartUploadClient delegate, OpenTelemetry otel, Attributes baseAttributes) { + this.delegate = delegate; + this.tracer = + OtelStorageDecorator.TracerDecorator.decorate( + null, otel, baseAttributes, MultipartUploadClient.class.getName() + "/"); + } + + @Override + public CreateMultipartUploadResponse createMultipartUpload(CreateMultipartUploadRequest request) { + Span span = + tracer + .spanBuilder("createMultipartUpload") + .setAttribute( + "gsutil.uri", String.format("gs://%s/%s", request.bucket(), request.key())) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.createMultipartUpload(request); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public ListPartsResponse listParts(ListPartsRequest request) { + Span span = + tracer + .spanBuilder("listParts") + .setAttribute( + "gsutil.uri", String.format("gs://%s/%s", request.bucket(), request.key())) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.listParts(request); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public AbortMultipartUploadResponse abortMultipartUpload(AbortMultipartUploadRequest request) { + Span span = + tracer + .spanBuilder("abortMultipartUpload") + .setAttribute( + "gsutil.uri", String.format("gs://%s/%s", request.bucket(), request.key())) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.abortMultipartUpload(request); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, 
t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public CompleteMultipartUploadResponse completeMultipartUpload( + CompleteMultipartUploadRequest request) { + Span span = + tracer + .spanBuilder("completeMultipartUpload") + .setAttribute( + "gsutil.uri", String.format("gs://%s/%s", request.bucket(), request.key())) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.completeMultipartUpload(request); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public UploadPartResponse uploadPart(UploadPartRequest request, RequestBody requestBody) { + Span span = + tracer + .spanBuilder("uploadPart") + .setAttribute( + "gsutil.uri", String.format("gs://%s/%s", request.bucket(), request.key())) + .setAttribute("partNumber", request.partNumber()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.uploadPart(request, requestBody); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public ListMultipartUploadsResponse listMultipartUploads(ListMultipartUploadsRequest request) { + Span span = + tracer + .spanBuilder("listMultipartUploads") + .setAttribute("gsutil.uri", String.format("gs://%s/", request.bucket())) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.listMultipartUploads(request); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + static MultipartUploadClient decorate( + MultipartUploadClient delegate, OpenTelemetry otel, Transport transport) { + if (otel == OpenTelemetry.noop()) { + return delegate; + } + Attributes baseAttributes = + Attributes.builder() + 
.put("gcp.client.service", "Storage") + .put("gcp.client.version", StorageOptions.getDefaultInstance().getLibraryVersion()) + .put("gcp.client.repo", "googleapis/java-storage") + .put("gcp.client.artifact", "com.google.cloud:google-cloud-storage") + .put("rpc.system", "XML") + .put("service.name", "storage.googleapis.com") + .build(); + return new OtelMultipartUploadClientDecorator(delegate, otel, baseAttributes); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java new file mode 100644 index 000000000000..291db00ae5d3 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java @@ -0,0 +1,2279 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.BetaApi; +import com.google.api.gax.paging.Page; +import com.google.cloud.Policy; +import com.google.cloud.ReadChannel; +import com.google.cloud.RestorableState; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.ApiFutureUtils.OnFailureApiFutureCallback; +import com.google.cloud.storage.HmacKey.HmacKeyMetadata; +import com.google.cloud.storage.HmacKey.HmacKeyState; +import com.google.cloud.storage.PostPolicyV4.PostConditionsV4; +import com.google.cloud.storage.PostPolicyV4.PostFieldsV4; +import com.google.cloud.storage.ReadProjectionConfigs.BaseConfig; +import com.google.cloud.storage.ResponseContentLifecycleHandle.ChildRef; +import com.google.cloud.storage.RetryContext.OnFailure; +import com.google.cloud.storage.RetryContext.OnSuccess; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.storage.v2.ReadRange; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.SpanBuilder; +import io.opentelemetry.api.trace.StatusCode; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Context; +import io.opentelemetry.context.Scope; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.channels.WritableByteChannel; +import java.nio.file.Path; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.TimeUnit; +import java.util.function.UnaryOperator; +import 
org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +@SuppressWarnings("DuplicatedCode") +final class OtelStorageDecorator implements Storage { + + /** Becomes the {@code otel.scope.name} attribute in a span */ + private static final String OTEL_SCOPE_NAME = "cloud.google.com/java/storage"; + + private static final String BLOB_READ_SESSION = "blobReadSession"; + + @VisibleForTesting final Storage delegate; + private final OpenTelemetry otel; + private final Attributes baseAttributes; + private final Tracer tracer; + + private OtelStorageDecorator(Storage delegate, OpenTelemetry otel, Attributes baseAttributes) { + this.delegate = delegate; + this.otel = otel; + this.baseAttributes = baseAttributes; + this.tracer = + TracerDecorator.decorate(null, otel, baseAttributes, Storage.class.getName() + "/"); + } + + @Override + public Bucket create(BucketInfo bucketInfo, BucketTargetOption... options) { + Span span = + tracer + .spanBuilder("create") + .setAttribute("gsutil.uri", fmtBucket(bucketInfo.getName())) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.create(bucketInfo, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob create(BlobInfo blobInfo, BlobTargetOption... options) { + Span span = + tracer + .spanBuilder("create") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.create(blobInfo, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob create(BlobInfo blobInfo, byte[] content, BlobTargetOption... 
options) { + Span span = + tracer + .spanBuilder("create") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.create(blobInfo, content, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob create( + BlobInfo blobInfo, byte[] content, int offset, int length, BlobTargetOption... options) { + Span span = + tracer + .spanBuilder("create") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.create(blobInfo, content, offset, length, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + @Deprecated + public Blob create(BlobInfo blobInfo, InputStream content, BlobWriteOption... options) { + Span span = + tracer + .spanBuilder("create") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.create(blobInfo, content, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob createFrom(BlobInfo blobInfo, Path path, BlobWriteOption... 
options) + throws IOException { + Span span = + tracer + .spanBuilder("createFrom") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.createFrom(blobInfo, path, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob createFrom(BlobInfo blobInfo, Path path, int bufferSize, BlobWriteOption... options) + throws IOException { + Span span = + tracer + .spanBuilder("createFrom") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.createFrom(blobInfo, path, bufferSize, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob createFrom(BlobInfo blobInfo, InputStream content, BlobWriteOption... options) + throws IOException { + Span span = + tracer + .spanBuilder("createFrom") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.createFrom(blobInfo, content, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob createFrom( + BlobInfo blobInfo, InputStream content, int bufferSize, BlobWriteOption... 
options) + throws IOException { + Span span = + tracer + .spanBuilder("createFrom") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.createFrom(blobInfo, content, bufferSize, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Bucket get(String bucket, BucketGetOption... options) { + Span span = tracer.spanBuilder("get").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.get(bucket, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Bucket lockRetentionPolicy(BucketInfo bucket, BucketTargetOption... options) { + Span span = + tracer + .spanBuilder("lockRetentionPolicy") + .setAttribute("gsutil.uri", fmtBucket(bucket.getName())) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.lockRetentionPolicy(bucket, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob get(String bucket, String blob, BlobGetOption... options) { + Span span = + tracer + .spanBuilder("get") + .setAttribute("gsutil.uri", String.format(Locale.US, "gs://%s/%s", bucket, blob)) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.get(bucket, blob, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob get(BlobId blob, BlobGetOption... 
options) { + Span span = + tracer + .spanBuilder("get") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.get(blob, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob get(BlobId blob) { + Span span = + tracer + .spanBuilder("get") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.get(blob); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob restore(BlobId blob, BlobRestoreOption... options) { + Span span = + tracer + .spanBuilder("restore") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.restore(blob, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Page list(BucketListOption... options) { + Span span = tracer.spanBuilder("list").setAttribute("gsutil.uri", "gs://").startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.list(options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Page list(String bucket, BlobListOption... 
options) { + Span span = + tracer.spanBuilder("list").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.list(bucket, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Bucket update(BucketInfo bucketInfo, BucketTargetOption... options) { + Span span = + tracer + .spanBuilder("update") + .setAttribute("gsutil.uri", fmtBucket(bucketInfo.getName())) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.update(bucketInfo, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob update(BlobInfo blobInfo, BlobTargetOption... options) { + Span span = + tracer + .spanBuilder("update") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.update(blobInfo, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob update(BlobInfo blobInfo) { + Span span = + tracer + .spanBuilder("update") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.update(blobInfo); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public boolean delete(String bucket, BucketSourceOption... 
options) { + Span span = + tracer.spanBuilder("delete").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.delete(bucket, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public boolean delete(String bucket, String blob, BlobSourceOption... options) { + Span span = + tracer.spanBuilder("delete").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.delete(bucket, blob, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public boolean delete(BlobId blob, BlobSourceOption... options) { + Span span = + tracer + .spanBuilder("delete") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.delete(blob, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public boolean delete(BlobId blob) { + Span span = + tracer + .spanBuilder("delete") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.delete(blob); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Blob compose(ComposeRequest composeRequest) { + Span span = + tracer + .spanBuilder("compose") + .setAttribute("gsutil.uri", composeRequest.getTarget().getBlobId().toGsUtilUri()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return 
delegate.compose(composeRequest); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public CopyWriter copy(CopyRequest copyRequest) { + Span span = + tracer + .spanBuilder("copy") + .setAttribute("gsutil.uri", copyRequest.getTarget().getBlobId().toGsUtilUri()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + CopyWriter copyWriter = delegate.copy(copyRequest); + return new OtelDecoratedCopyWriter(copyWriter, span); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + span.end(); + throw t; + } + } + + @Override + public byte[] readAllBytes(String bucket, String blob, BlobSourceOption... options) { + Span span = + tracer + .spanBuilder("readAllBytes") + .setAttribute("gsutil.uri", BlobId.of(bucket, blob).toGsUtilUri()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.readAllBytes(bucket, blob, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public byte[] readAllBytes(BlobId blob, BlobSourceOption... options) { + Span span = + tracer + .spanBuilder("readAllBytes") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.readAllBytes(blob, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public StorageBatch batch() { + return delegate.batch(); + } + + @Override + public ReadChannel reader(String bucket, String blob, BlobSourceOption... 
options) { + Span span = + tracer + .spanBuilder("reader") + .setAttribute("gsutil.uri", BlobId.of(bucket, blob).toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + ReadChannel reader = delegate.reader(bucket, blob, options); + return new OtelDecoratedReadChannel(reader, span); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + span.end(); + throw t; + } + } + + @Override + public ReadChannel reader(BlobId blob, BlobSourceOption... options) { + Span span = + tracer + .spanBuilder("reader") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + ReadChannel reader = delegate.reader(blob, options); + return new OtelDecoratedReadChannel(reader, span); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + span.end(); + throw t; + } + } + + @Override + public void downloadTo(BlobId blob, Path path, BlobSourceOption... options) { + Span span = + tracer + .spanBuilder("downloadTo") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + delegate.downloadTo(blob, path, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public void downloadTo(BlobId blob, OutputStream outputStream, BlobSourceOption... 
options) { + Span span = + tracer + .spanBuilder("downloadTo") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + delegate.downloadTo(blob, outputStream, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public WriteChannel writer(BlobInfo blobInfo, BlobWriteOption... options) { + Span sessionSpan = + tracer + .spanBuilder("writer") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = sessionSpan.makeCurrent()) { + WriteChannel writer = delegate.writer(blobInfo, options); + return new OtelDecoratedWriteChannel(writer, sessionSpan); + } catch (Throwable t) { + sessionSpan.recordException(t); + sessionSpan.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + sessionSpan.end(); + throw t; + } + } + + @Override + public WriteChannel writer(URL signedURL) { + Span sessionSpan = tracer.spanBuilder("writer").startSpan(); + try (Scope ignore = sessionSpan.makeCurrent()) { + WriteChannel writer = delegate.writer(signedURL); + return new OtelDecoratedWriteChannel(writer, sessionSpan); + } catch (Throwable t) { + sessionSpan.recordException(t); + sessionSpan.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + sessionSpan.end(); + throw t; + } + } + + @Override + public URL signUrl(BlobInfo blobInfo, long duration, TimeUnit unit, SignUrlOption... 
options) { + Span span = + tracer + .spanBuilder("signUrl") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.signUrl(blobInfo, duration, unit, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostFieldsV4 fields, + PostConditionsV4 conditions, + PostPolicyV4Option... options) { + Span span = + tracer + .spanBuilder("generateSignedPostPolicyV4") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.generateSignedPostPolicyV4( + blobInfo, duration, unit, fields, conditions, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostFieldsV4 fields, + PostPolicyV4Option... options) { + Span span = + tracer + .spanBuilder("generateSignedPostPolicyV4") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.generateSignedPostPolicyV4(blobInfo, duration, unit, fields, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostConditionsV4 conditions, + PostPolicyV4Option... 
options) { + Span span = + tracer + .spanBuilder("generateSignedPostPolicyV4") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.generateSignedPostPolicyV4(blobInfo, duration, unit, conditions, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, long duration, TimeUnit unit, PostPolicyV4Option... options) { + Span span = + tracer + .spanBuilder("generateSignedPostPolicyV4") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.generateSignedPostPolicyV4(blobInfo, duration, unit, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public List get(BlobId... blobIds) { + Span span = tracer.spanBuilder("get").startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.get(blobIds); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public List get(Iterable blobIds) { + Span span = tracer.spanBuilder("get").startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.get(blobIds); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public List update(BlobInfo... 
blobInfos) { + Span span = tracer.spanBuilder("update").startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.update(blobInfos); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public List update(Iterable blobInfos) { + Span span = tracer.spanBuilder("update").startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.update(blobInfos); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public List delete(BlobId... blobIds) { + Span span = tracer.spanBuilder("delete").startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.delete(blobIds); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public List delete(Iterable blobIds) { + Span span = tracer.spanBuilder("delete").startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.delete(blobIds); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Acl getAcl(String bucket, Entity entity, BucketSourceOption... 
options) { + Span span = + tracer.spanBuilder("getAcl").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.getAcl(bucket, entity, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Acl getAcl(String bucket, Entity entity) { + Span span = + tracer.spanBuilder("getAcl").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.getAcl(bucket, entity); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public boolean deleteAcl(String bucket, Entity entity, BucketSourceOption... options) { + Span span = + tracer.spanBuilder("deleteAcl").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.deleteAcl(bucket, entity, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public boolean deleteAcl(String bucket, Entity entity) { + Span span = + tracer.spanBuilder("deleteAcl").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.deleteAcl(bucket, entity); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Acl createAcl(String bucket, Acl acl, BucketSourceOption... 
options) { + Span span = + tracer.spanBuilder("createAcl").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.createAcl(bucket, acl, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Acl createAcl(String bucket, Acl acl) { + Span span = + tracer.spanBuilder("createAcl").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.createAcl(bucket, acl); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Acl updateAcl(String bucket, Acl acl, BucketSourceOption... options) { + Span span = + tracer.spanBuilder("updateAcl").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.updateAcl(bucket, acl, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Acl updateAcl(String bucket, Acl acl) { + Span span = + tracer.spanBuilder("updateAcl").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.updateAcl(bucket, acl); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public List listAcls(String bucket, BucketSourceOption... 
options) { + Span span = + tracer.spanBuilder("listAcls").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.listAcls(bucket, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public List listAcls(String bucket) { + Span span = + tracer.spanBuilder("listAcls").setAttribute("gsutil.uri", fmtBucket(bucket)).startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.listAcls(bucket); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Acl getDefaultAcl(String bucket, Entity entity) { + Span span = + tracer + .spanBuilder("getDefaultAcl") + .setAttribute("gsutil.uri", fmtBucket(bucket)) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.getDefaultAcl(bucket, entity); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public boolean deleteDefaultAcl(String bucket, Entity entity) { + Span span = + tracer + .spanBuilder("deleteDefaultAcl") + .setAttribute("gsutil.uri", fmtBucket(bucket)) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.deleteDefaultAcl(bucket, entity); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Acl createDefaultAcl(String bucket, Acl acl) { + Span span = + tracer + .spanBuilder("createDefaultAcl") + .setAttribute("gsutil.uri", fmtBucket(bucket)) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.createDefaultAcl(bucket, acl); + } catch 
(Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Acl updateDefaultAcl(String bucket, Acl acl) { + Span span = + tracer + .spanBuilder("updateDefaultAcl") + .setAttribute("gsutil.uri", fmtBucket(bucket)) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.updateDefaultAcl(bucket, acl); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public List listDefaultAcls(String bucket) { + Span span = + tracer + .spanBuilder("listDefaultAcls") + .setAttribute("gsutil.uri", fmtBucket(bucket)) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.listDefaultAcls(bucket); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Acl getAcl(BlobId blob, Entity entity) { + Span span = + tracer + .spanBuilder("getAcl") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.getAcl(blob, entity); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public boolean deleteAcl(BlobId blob, Entity entity) { + Span span = + tracer + .spanBuilder("deleteAcl") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.deleteAcl(blob, entity); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Acl createAcl(BlobId blob, Acl acl) 
{ + Span span = + tracer + .spanBuilder("createAcl") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.createAcl(blob, acl); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Acl updateAcl(BlobId blob, Acl acl) { + Span span = + tracer + .spanBuilder("updateAcl") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.updateAcl(blob, acl); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public List listAcls(BlobId blob) { + Span span = + tracer + .spanBuilder("listAcls") + .setAttribute("gsutil.uri", blob.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.listAcls(blob); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public HmacKey createHmacKey(ServiceAccount serviceAccount, CreateHmacKeyOption... options) { + Span span = tracer.spanBuilder("createHmacKey").startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.createHmacKey(serviceAccount, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Page listHmacKeys(ListHmacKeysOption... 
options) { + Span span = tracer.spanBuilder("listHmacKeys").startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.listHmacKeys(options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public HmacKeyMetadata getHmacKey(String accessId, GetHmacKeyOption... options) { + Span span = tracer.spanBuilder("getHmacKey").startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.getHmacKey(accessId, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public void deleteHmacKey(HmacKeyMetadata hmacKeyMetadata, DeleteHmacKeyOption... options) { + Span span = tracer.spanBuilder("deleteHmacKey").startSpan(); + try (Scope ignore = span.makeCurrent()) { + delegate.deleteHmacKey(hmacKeyMetadata, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public HmacKeyMetadata updateHmacKeyState( + HmacKeyMetadata hmacKeyMetadata, HmacKeyState state, UpdateHmacKeyOption... options) { + Span span = tracer.spanBuilder("updateHmacKeyState").startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.updateHmacKeyState(hmacKeyMetadata, state, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Policy getIamPolicy(String bucket, BucketSourceOption... 
options) { + Span span = + tracer + .spanBuilder("getIamPolicy") + .setAttribute("gsutil.uri", fmtBucket(bucket)) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.getIamPolicy(bucket, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Policy setIamPolicy(String bucket, Policy policy, BucketSourceOption... options) { + Span span = + tracer + .spanBuilder("setIamPolicy") + .setAttribute("gsutil.uri", fmtBucket(bucket)) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.setIamPolicy(bucket, policy, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public List testIamPermissions( + String bucket, List permissions, BucketSourceOption... options) { + Span span = + tracer + .spanBuilder("testIamPermissions") + .setAttribute("gsutil.uri", fmtBucket(bucket)) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.testIamPermissions(bucket, permissions, options); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public ServiceAccount getServiceAccount(String projectId) { + Span span = tracer.spanBuilder("getServiceAccount").startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.getServiceAccount(projectId); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Notification createNotification(String bucket, NotificationInfo notificationInfo) { + Span span = + tracer + .spanBuilder("createNotification") + .setAttribute("gsutil.uri", 
fmtBucket(bucket)) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.createNotification(bucket, notificationInfo); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public Notification getNotification(String bucket, String notificationId) { + Span span = + tracer + .spanBuilder("getNotification") + .setAttribute("gsutil.uri", fmtBucket(bucket)) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.getNotification(bucket, notificationId); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public List listNotifications(String bucket) { + Span span = + tracer + .spanBuilder("listNotifications") + .setAttribute("gsutil.uri", fmtBucket(bucket)) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.listNotifications(bucket); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public boolean deleteNotification(String bucket, String notificationId) { + Span span = + tracer + .spanBuilder("deleteNotification") + .setAttribute("gsutil.uri", fmtBucket(bucket)) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.deleteNotification(bucket, notificationId); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public void close() throws Exception { + delegate.close(); + } + + @Override + @BetaApi + public BlobWriteSession blobWriteSession(BlobInfo blobInfo, BlobWriteOption... 
options) { + Span sessionSpan = + tracer + .spanBuilder("blobWriteSession") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = sessionSpan.makeCurrent()) { + BlobWriteSession session = delegate.blobWriteSession(blobInfo, options); + return new OtelDecoratedBlobWriteSession(session, sessionSpan); + } catch (Throwable t) { + sessionSpan.recordException(t); + sessionSpan.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } + } + + @Override + public Blob moveBlob(MoveBlobRequest request) { + Span span = + tracer + .spanBuilder("moveBlob") + .setAttribute("gsutil.uri.source", request.getSource().toGsUtilUriWithGeneration()) + .setAttribute("gsutil.uri.target", request.getTarget().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + return delegate.moveBlob(request); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public ApiFuture blobReadSession(BlobId id, BlobSourceOption... 
options) { + Span blobReadSessionSpan = + tracer + .spanBuilder(BLOB_READ_SESSION) + .setAttribute("gsutil.uri", id.toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore1 = blobReadSessionSpan.makeCurrent()) { + Context blobReadSessionContext = Context.current(); + Span ready = tracer.spanBuilder(BLOB_READ_SESSION + "/ready").startSpan(); + ApiFuture blobReadSessionApiFuture = delegate.blobReadSession(id, options); + ApiFuture futureDecorated = + ApiFutures.transform( + blobReadSessionApiFuture, + delegate -> { + ready.end(); + return new OtelDecoratingBlobReadSession( + delegate, id, blobReadSessionContext, blobReadSessionSpan); + }, + MoreExecutors.directExecutor()); + ApiFutures.addCallback( + futureDecorated, + (OnFailureApiFutureCallback) + t -> { + blobReadSessionSpan.recordException(t); + blobReadSessionSpan.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + blobReadSessionSpan.end(); + ready.recordException(t); + ready.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + ready.end(); + }, + MoreExecutors.directExecutor()); + return futureDecorated; + } catch (Throwable t) { + blobReadSessionSpan.recordException(t); + blobReadSessionSpan.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + blobReadSessionSpan.end(); + throw t; + } + } + + @Override + public BlobAppendableUpload blobAppendableUpload( + BlobInfo blobInfo, BlobAppendableUploadConfig uploadConfig, BlobWriteOption... 
options) { + + Span span = + tracer + .spanBuilder("appendableBlobUpload") + .setAttribute("gsutil.uri", blobInfo.getBlobId().toGsUtilUriWithGeneration()) + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + + return new OtelDecoratingBlobAppendableUpload( + delegate.blobAppendableUpload(blobInfo, uploadConfig, options), span); + + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + span.end(); + throw t; + } + } + + @Override + public StorageOptions getOptions() { + return delegate.getOptions(); + } + + static Storage decorate(Storage delegate, OpenTelemetry otel, Transport transport) { + requireNonNull(delegate, "delegate must be non null"); + requireNonNull(otel, "otel must be non null"); + if (otel == OpenTelemetry.noop()) { + return delegate; + } + Attributes baseAttributes = + Attributes.builder() + .put("gcp.client.service", "Storage") + .put("gcp.client.version", StorageOptions.getDefaultInstance().getLibraryVersion()) + .put("gcp.client.repo", "googleapis/java-storage") + .put("gcp.client.artifact", "com.google.cloud:google-cloud-storage") + .put("rpc.system", transport.toString().toLowerCase(Locale.ROOT)) + .put("service.name", "storage.googleapis.com") + .build(); + return new OtelStorageDecorator(delegate, otel, baseAttributes); + } + + static UnaryOperator retryContextDecorator(OpenTelemetry otel) { + requireNonNull(otel, "otel must be non null"); + if (otel == OpenTelemetry.noop()) { + return UnaryOperator.identity(); + } + return ctx -> new OtelRetryContextDecorator(ctx, Span.current()); + } + + private static @NonNull String fmtBucket(String bucket) { + return String.format(Locale.US, "gs://%s/", bucket); + } + + static final class TracerDecorator implements Tracer { + @Nullable private final Context parentContextOverride; + private final Tracer delegate; + private final Attributes baseAttributes; + private final String spanNamePrefix; + + TracerDecorator( + @Nullable 
Context parentContextOverride, + Tracer delegate, + Attributes baseAttributes, + String spanNamePrefix) { + this.parentContextOverride = parentContextOverride; + this.delegate = delegate; + this.baseAttributes = baseAttributes; + this.spanNamePrefix = spanNamePrefix; + } + + static TracerDecorator decorate( + @Nullable Context parentContextOverride, + OpenTelemetry otel, + Attributes baseAttributes, + String spanNamePrefix) { + requireNonNull(otel, "otel must be non null"); + requireNonNull(baseAttributes, "baseAttributes must be non null"); + requireNonNull(spanNamePrefix, "spanNamePrefix must be non null"); + Tracer tracer = + otel.getTracer(OTEL_SCOPE_NAME, StorageOptions.getDefaultInstance().getLibraryVersion()); + return new TracerDecorator(parentContextOverride, tracer, baseAttributes, spanNamePrefix); + } + + @Override + public SpanBuilder spanBuilder(String spanName) { + SpanBuilder spanBuilder = + delegate.spanBuilder(spanNamePrefix + spanName).setAllAttributes(baseAttributes); + if (parentContextOverride != null) { + spanBuilder.setParent(parentContextOverride); + } + return spanBuilder; + } + } + + @VisibleForTesting + static final class OtelDecoratedReadChannel implements ReadChannel { + + @VisibleForTesting final ReadChannel reader; + private final Span span; + + private OtelDecoratedReadChannel(ReadChannel reader, Span span) { + this.reader = reader; + this.span = span; + } + + @Override + public void seek(long position) throws IOException { + reader.seek(position); + } + + @Override + public void setChunkSize(int chunkSize) { + reader.setChunkSize(chunkSize); + } + + @Override + public RestorableState capture() { + return reader.capture(); + } + + @Override + public ReadChannel limit(long limit) { + return reader.limit(limit); + } + + @Override + public long limit() { + return reader.limit(); + } + + @Override + public int read(ByteBuffer dst) throws IOException { + try (Scope ignore = span.makeCurrent()) { + return reader.read(dst); + } + } + + 
@Override + public boolean isOpen() { + return reader.isOpen(); + } + + @Override + public void close() { + try (Scope ignore = span.makeCurrent()) { + reader.close(); + } finally { + span.end(); + } + } + } + + private final class OtelDecoratedBlobWriteSession implements BlobWriteSession { + + private final BlobWriteSession delegate; + private final Span sessionSpan; + private final Tracer tracer; + + public OtelDecoratedBlobWriteSession(BlobWriteSession delegate, Span sessionSpan) { + this.delegate = delegate; + this.sessionSpan = sessionSpan; + this.tracer = + TracerDecorator.decorate( + Context.current(), + otel, + OtelStorageDecorator.this.baseAttributes, + BlobWriteSession.class.getName() + "/"); + } + + @Override + public WritableByteChannel open() throws IOException { + Span openSpan = tracer.spanBuilder("open").startSpan(); + try (Scope ignore = openSpan.makeCurrent()) { + WritableByteChannel delegate = this.delegate.open(); + return new OtelDecoratingWritableByteChannel(delegate, openSpan); + } catch (Throwable t) { + openSpan.recordException(t); + openSpan.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } + } + + @Override + public ApiFuture getResult() { + return delegate.getResult(); + } + + private class OtelDecoratingWritableByteChannel implements WritableByteChannel { + + private final WritableByteChannel delegate; + private final Span openSpan; + + private OtelDecoratingWritableByteChannel(WritableByteChannel delegate, Span openSpan) { + this.delegate = delegate; + this.openSpan = openSpan; + } + + @Override + public int write(ByteBuffer src) throws IOException { + try (Scope ignore = openSpan.makeCurrent()) { + return delegate.write(src); + } + } + + @Override + public boolean isOpen() { + return delegate.isOpen(); + } + + @Override + public void close() throws IOException { + try (Scope ignore = openSpan.makeCurrent()) { + delegate.close(); + } catch (IOException | RuntimeException e) { + openSpan.recordException(e); + 
openSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); + sessionSpan.recordException(e); + sessionSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); + throw e; + } finally { + openSpan.end(); + sessionSpan.end(); + } + } + } + } + + @VisibleForTesting + static final class OtelDecoratedWriteChannel implements WriteChannel { + @VisibleForTesting final WriteChannel delegate; + private final Span openSpan; + + private OtelDecoratedWriteChannel(WriteChannel delegate, Span openSpan) { + this.delegate = delegate; + this.openSpan = openSpan; + } + + @Override + public void setChunkSize(int chunkSize) { + delegate.setChunkSize(chunkSize); + } + + @Override + public RestorableState capture() { + return delegate.capture(); + } + + @Override + public int write(ByteBuffer src) throws IOException { + try (Scope ignore = openSpan.makeCurrent()) { + return delegate.write(src); + } + } + + @Override + public boolean isOpen() { + return delegate.isOpen(); + } + + @Override + public void close() throws IOException { + try (Scope ignore = openSpan.makeCurrent()) { + delegate.close(); + } catch (IOException | RuntimeException e) { + openSpan.recordException(e); + openSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); + throw e; + } finally { + openSpan.end(); + } + } + } + + private final class OtelDecoratedCopyWriter extends CopyWriter { + + private final CopyWriter copyWriter; + private final Span span; + private final Context parentContext; + private final Tracer tracer; + + public OtelDecoratedCopyWriter(CopyWriter copyWriter, Span span) { + this.copyWriter = copyWriter; + this.span = span; + this.parentContext = Context.current(); + this.tracer = + TracerDecorator.decorate( + Context.current(), + otel, + OtelStorageDecorator.this.baseAttributes, + CopyWriter.class.getName() + "/"); + } + + @Override + public Blob getResult() { + try { + return copyWriter.getResult(); + } catch (Throwable t) { + span.recordException(t); + 
span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + + @Override + public long getBlobSize() { + return copyWriter.getBlobSize(); + } + + @Override + public boolean isDone() { + boolean done = copyWriter.isDone(); + if (done) { + span.end(); + } + return done; + } + + @Override + public long getTotalBytesCopied() { + return copyWriter.getTotalBytesCopied(); + } + + @Override + public RestorableState capture() { + return copyWriter.capture(); + } + + @Override + public void copyChunk() { + Span copyChunkSpan = tracer.spanBuilder("copyChunk").setParent(parentContext).startSpan(); + try (Scope ignore = copyChunkSpan.makeCurrent()) { + copyWriter.copyChunk(); + } catch (Throwable t) { + copyChunkSpan.recordException(t); + copyChunkSpan.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + span.end(); + throw t; + } finally { + copyChunkSpan.end(); + } + } + } + + private static final class OtelReadProjectionConfig + extends ReadProjectionConfig { + private final ReadProjectionConfig delegate; + private final Span parentSpan; + + private OtelReadProjectionConfig(ReadProjectionConfig delegate, Span parentSpan) { + this.delegate = delegate; + this.parentSpan = parentSpan; + } + + @Override + BaseConfig cast() { + return new OtelBaseConfigDecorator(delegate.cast()); + } + + @Override + public ProjectionType getType() { + return delegate.getType(); + } + + @Override + Projection project(ObjectReadSession session, IOAutoCloseable closeAlongWith) { + try { + return delegate.project(session, closeAlongWith.andThen(parentSpan::end)); + } catch (Throwable t) { + parentSpan.recordException(t); + parentSpan.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + parentSpan.end(); + throw t; + } + } + + private class OtelBaseConfigDecorator + extends BaseConfig> { + private final BaseConfig delegate; + + private OtelBaseConfigDecorator(BaseConfig delegate) { + this.delegate = delegate; + } + + @Override + 
ObjectReadSessionStreamRead newRead(long readId, RetryContext retryContext) { + OtelRetryContextDecorator otelRetryContext = + new OtelRetryContextDecorator(retryContext, parentSpan); + ObjectReadSessionStreamRead read = delegate.newRead(readId, otelRetryContext); + read.setOnCloseCallback(parentSpan::end); + return new OtelDecoratingObjectReadSessionStreamRead<>(read, parentSpan); + } + + @Override + BaseConfig cast() { + return this; + } + } + } + + private static final class OtelRetryContextDecorator implements RetryContext { + private final RetryContext delegate; + private final Span span; + + private OtelRetryContextDecorator(RetryContext delegate, Span span) { + this.delegate = delegate; + this.span = span; + } + + @Override + public boolean inBackoff() { + return delegate.inBackoff(); + } + + @Override + public void reset() { + delegate.reset(); + } + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + span.recordException(t); + delegate.recordError( + t, + () -> { + span.addEvent("retrying"); + onSuccess.onSuccess(); + }, + (tt) -> { + span.addEvent("terminal_failure"); + onFailure.onFailure(tt); + }); + } + } + + @VisibleForTesting + class OtelDecoratingBlobReadSession implements BlobReadSession { + + @VisibleForTesting final BlobReadSession delegate; + private final BlobId id; + private final Context blobReadSessionContext; + private final Span blobReadSessionSpan; + + private OtelDecoratingBlobReadSession( + BlobReadSession delegate, + BlobId id, + Context blobReadSessionContext, + Span blobReadSessionSpan) { + this.delegate = delegate; + this.id = id; + this.blobReadSessionContext = blobReadSessionContext; + this.blobReadSessionSpan = blobReadSessionSpan; + } + + @Override + public BlobInfo getBlobInfo() { + return delegate.getBlobInfo(); + } + + @Override + public Projection readAs(ReadProjectionConfig config) { + Span readRangeSpan = + tracer + .spanBuilder(BLOB_READ_SESSION + "/readAs") + 
.setAttribute("gsutil.uri", id.toGsUtilUriWithGeneration()) + .setParent(blobReadSessionContext) + .startSpan(); + try (Scope ignore2 = readRangeSpan.makeCurrent()) { + OtelReadProjectionConfig c = + new OtelReadProjectionConfig<>(config, readRangeSpan); + return delegate.readAs(c); + } catch (Throwable t) { + readRangeSpan.recordException(t); + readRangeSpan.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + readRangeSpan.end(); + throw t; + } + } + + @Override + public void close() throws IOException { + try { + delegate.close(); + } finally { + blobReadSessionSpan.end(); + } + } + } + + @VisibleForTesting + static final class OtelDecoratingObjectReadSessionStreamRead + implements ObjectReadSessionStreamRead { + private final ObjectReadSessionStreamRead delegate; + private final Span parentSpan; + + @VisibleForTesting + OtelDecoratingObjectReadSessionStreamRead( + ObjectReadSessionStreamRead delegate, Span parentSpan) { + this.delegate = delegate; + this.parentSpan = parentSpan; + } + + @Override + public Projection project() { + return delegate.project(); + } + + @Override + public long readOffset() { + return delegate.readOffset(); + } + + @Override + public boolean acceptingBytes() { + return delegate.acceptingBytes(); + } + + @Override + public void accept(ChildRef childRef) throws IOException { + delegate.accept(childRef); + } + + @Override + public void eof() throws IOException { + delegate.eof(); + } + + @Override + public void preFail() { + delegate.preFail(); + } + + @Override + public ApiFuture fail(Throwable t) { + ApiFuture fail = delegate.fail(t); + ApiFutures.addCallback( + fail, + (OnFailureApiFutureCallback) + t1 -> { + parentSpan.recordException(t1); + parentSpan.setStatus(StatusCode.ERROR, t1.getClass().getSimpleName()); + }, + MoreExecutors.directExecutor()); + return fail; + } + + @Override + public Hasher hasher() { + return delegate.hasher(); + } + + @Override + public ObjectReadSessionStreamRead withNewReadId(long newReadId) { + 
return new OtelDecoratingObjectReadSessionStreamRead<>( + delegate.withNewReadId(newReadId), parentSpan); + } + + @Override + public ReadRange makeReadRange() { + return delegate.makeReadRange(); + } + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + delegate.recordError(t, onSuccess, onFailure); + } + + @Override + public boolean readyToSend() { + return delegate.readyToSend(); + } + + @Override + public boolean canShareStreamWith(ObjectReadSessionStreamRead other) { + if (other instanceof OtelDecoratingObjectReadSessionStreamRead) { + OtelDecoratingObjectReadSessionStreamRead dec = + (OtelDecoratingObjectReadSessionStreamRead) other; + return delegate.canShareStreamWith(dec.delegate); + } + return delegate.canShareStreamWith(other); + } + + @Override + public void setOnCloseCallback(IOAutoCloseable onCloseCallback) { + delegate.setOnCloseCallback(onCloseCallback); + } + + @Override + public void internalClose() throws IOException { + delegate.internalClose(); + } + + @Override + public void close() throws IOException { + delegate.close(); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("delegate", delegate) + // .add("parentSpan", parentSpan) + .toString(); + } + } + + final class OtelDecoratingBlobAppendableUpload implements BlobAppendableUpload { + private final BlobAppendableUpload delegate; + private final Span uploadSpan; + private final Tracer tracer; + + private OtelDecoratingBlobAppendableUpload(BlobAppendableUpload delegate, Span uploadSpan) { + this.delegate = delegate; + this.uploadSpan = uploadSpan; + this.tracer = + TracerDecorator.decorate( + Context.current(), + otel, + OtelStorageDecorator.this.baseAttributes, + BlobAppendableUpload.class.getName() + "/"); + } + + @Override + public AppendableUploadWriteableByteChannel open() throws IOException { + Span openSpan = tracer.spanBuilder("open").startSpan(); + try (Scope ignore = openSpan.makeCurrent()) { + 
AppendableUploadWriteableByteChannel delegate = this.delegate.open(); + return new OtelDecoratingAppendableUploadWriteableByteChannel(delegate, openSpan); + } catch (Throwable t) { + openSpan.recordException(t); + openSpan.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } + } + + @Override + public ApiFuture getResult() { + return delegate.getResult(); + } + + private final class OtelDecoratingAppendableUploadWriteableByteChannel + implements AppendableUploadWriteableByteChannel { + private final AppendableUploadWriteableByteChannel delegate; + private final Span openSpan; + private final Tracer tracer; + + private OtelDecoratingAppendableUploadWriteableByteChannel( + AppendableUploadWriteableByteChannel delegate, Span openSpan) { + this.delegate = delegate; + this.openSpan = openSpan; + this.tracer = + TracerDecorator.decorate( + Context.current(), + otel, + OtelStorageDecorator.this.baseAttributes, + AppendableUploadWriteableByteChannel.class.getName() + "/"); + } + + @Override + @BetaApi + public void finalizeAndClose() throws IOException { + try (Scope ignore = openSpan.makeCurrent()) { + Span span = tracer.spanBuilder("finalizeAndClose").startSpan(); + try (Scope ignore2 = span.makeCurrent()) { + delegate.finalizeAndClose(); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } catch (IOException | RuntimeException e) { + openSpan.recordException(e); + openSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); + uploadSpan.recordException(e); + uploadSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); + throw e; + } finally { + openSpan.end(); + uploadSpan.end(); + } + } + + @Override + @BetaApi + public void closeWithoutFinalizing() throws IOException { + try (Scope ignore = openSpan.makeCurrent()) { + Span span = tracer.spanBuilder("closeWithoutFinalizing").startSpan(); + try (Scope ignore2 = 
span.makeCurrent()) { + delegate.closeWithoutFinalizing(); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } catch (IOException | RuntimeException e) { + openSpan.recordException(e); + openSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); + uploadSpan.recordException(e); + uploadSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); + throw e; + } finally { + openSpan.end(); + uploadSpan.end(); + } + } + + @Override + @BetaApi + public void close() throws IOException { + try (Scope ignore = openSpan.makeCurrent()) { + Span span = tracer.spanBuilder("close").startSpan(); + try (Scope ignore2 = span.makeCurrent()) { + delegate.close(); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } catch (IOException | RuntimeException e) { + openSpan.recordException(e); + openSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); + uploadSpan.recordException(e); + uploadSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); + throw e; + } finally { + openSpan.end(); + uploadSpan.end(); + } + } + + @Override + public void flush() throws IOException { + try (Scope ignore = openSpan.makeCurrent()) { + Span span = tracer.spanBuilder("flush").startSpan(); + try (Scope ignore2 = span.makeCurrent()) { + delegate.flush(); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + } + } + } + + @Override + public int write(ByteBuffer src) throws IOException { + try (Scope ignore = openSpan.makeCurrent()) { + return delegate.write(src); + } + } + + @Override + public boolean isOpen() { + return delegate.isOpen(); + } + } + } +} diff --git 
a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ParallelCompositeUploadBlobWriteSessionConfig.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ParallelCompositeUploadBlobWriteSessionConfig.java new file mode 100644 index 000000000000..d3f8764aed9f --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ParallelCompositeUploadBlobWriteSessionConfig.java @@ -0,0 +1,1073 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.BetaApi; +import com.google.api.core.InternalApi; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import com.google.cloud.storage.MetadataField.PartRange; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.hash.HashCode; +import com.google.common.hash.HashFunction; +import com.google.common.hash.Hashing; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.io.IOException; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.nio.charset.StandardCharsets; +import java.security.SecureRandom; +import java.time.Clock; +import java.time.Duration; +import java.time.OffsetDateTime; +import java.util.Base64; +import java.util.Base64.Encoder; +import java.util.Objects; +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.UnaryOperator; +import javax.annotation.concurrent.Immutable; +import org.checkerframework.checker.nullness.qual.NonNull; + +/** + * Immutable config builder to configure BlobWriteSession instances to perform Parallel Composite + * Uploads. + * + *

Parallel Composite Uploads can yield higher throughput when uploading large objects. However, + * there are some things which must be kept in mind when choosing to use this strategy. + * + *

    + *
  1. Performing parallel composite uploads costs more money. Class A operations + * are performed to create each part and to perform each compose. If a storage tier other than + * STANDARD + * is used, early deletion fees apply to deletion of the parts. + *

    An illustrative example. Upload a 5GiB object using 64MiB as the max size per part.
    + *

      + *
    1. 80 Parts will be created (Class A) + *
    2. 3 compose calls will be performed (Class A) + *
    3. Delete 80 Parts along with 2 intermediary Compose objects (Free tier as long as + * {@code STANDARD} class) + *
    + * Once the parts and intermediary compose objects are deleted, there will be no storage + * charges related to those temporary objects. + *
  2. The service account/credentials used to perform the parallel composite upload require {@code + * storage.objects.delete} in order to cleanup the temporary part and intermediary compose + * objects.
    + * To handle handle part and intermediary compose object deletion out of band passing + * {@link PartCleanupStrategy#never()} to {@link + * ParallelCompositeUploadBlobWriteSessionConfig#withPartCleanupStrategy(PartCleanupStrategy)} + * will prevent automatic cleanup. + *
  3. Please see the + * Parallel composite uploads documentation for a more in depth explanation of the + * limitations of Parallel composite uploads. + *
  4. A failed upload can leave part and intermediary compose objects behind which will count as + * storage usage, and you will be billed for it.
    + * By default if an upload fails, an attempt to cleanup the part and intermediary compose will + * be made. However if the program were to crash there is no means for the client to perform + * the cleanup.
    + * Every part and intermediary compose object will be created with a name which ends in {@code + * .part}. An Object Lifecycle Management rule can be setup on your bucket to automatically + * cleanup objects with the suffix after some period of time. See Object Lifecycle Management for + * full details and a guide on how to setup a Delete rule with a suffix + * match condition. + *
  5. Using parallel composite uploads are not a a one size fits all solution. They have very + * real overhead until uploading a large enough object. The inflection point is dependent upon + * many factors, and there is no one size fits all value. You will need to experiment with + * your deployment and workload to determine if parallel composite uploads are useful to you. + *
+ * + *

In general if you object sizes are smaller than several hundred megabytes it is unlikely + * parallel composite uploads will be beneficial to overall throughput. + * + * @see GrpcStorageOptions.Builder#setBlobWriteSessionConfig(BlobWriteSessionConfig) + * @see BlobWriteSessionConfigs#parallelCompositeUpload() + * @see Storage#blobWriteSession(BlobInfo, BlobWriteOption...) + * @see https://cloud.google.com/storage/docs/parallel-composite-uploads + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ +@Immutable +@BetaApi +@TransportCompatibility({Transport.GRPC, Transport.HTTP}) +public final class ParallelCompositeUploadBlobWriteSessionConfig extends BlobWriteSessionConfig + implements BlobWriteSessionConfig.HttpCompatible, BlobWriteSessionConfig.GrpcCompatible { + + private static final int MAX_PARTS_PER_COMPOSE = 32; + private final int maxPartsPerCompose; + private final ExecutorSupplier executorSupplier; + private final BufferAllocationStrategy bufferAllocationStrategy; + private final PartNamingStrategy partNamingStrategy; + private final PartCleanupStrategy partCleanupStrategy; + private final PartMetadataFieldDecorator partMetadataFieldDecorator; + + private ParallelCompositeUploadBlobWriteSessionConfig( + int maxPartsPerCompose, + ExecutorSupplier executorSupplier, + BufferAllocationStrategy bufferAllocationStrategy, + PartNamingStrategy partNamingStrategy, + PartCleanupStrategy partCleanupStrategy, + PartMetadataFieldDecorator partMetadataFieldDecorator) { + this.maxPartsPerCompose = maxPartsPerCompose; + this.executorSupplier = executorSupplier; + this.bufferAllocationStrategy = bufferAllocationStrategy; + this.partNamingStrategy = partNamingStrategy; + this.partCleanupStrategy = partCleanupStrategy; + this.partMetadataFieldDecorator = partMetadataFieldDecorator; + } + + @InternalApi + ParallelCompositeUploadBlobWriteSessionConfig withMaxPartsPerCompose(int maxPartsPerCompose) { + checkArgument( + 2 <= maxPartsPerCompose && 
maxPartsPerCompose <= 32, + "2 <= maxPartsPerCompose <= 32 (2 <= %s <= 32)", + maxPartsPerCompose); + return new ParallelCompositeUploadBlobWriteSessionConfig( + maxPartsPerCompose, + executorSupplier, + bufferAllocationStrategy, + partNamingStrategy, + partCleanupStrategy, + partMetadataFieldDecorator); + } + + /** + * Specify a specific executor supplier where work will be submitted when performing a parallel + * composite upload. + * + *

Default: {@link ExecutorSupplier#cachedPool()} + * + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public ParallelCompositeUploadBlobWriteSessionConfig withExecutorSupplier( + ExecutorSupplier executorSupplier) { + checkNotNull(executorSupplier, "executorSupplier must be non null"); + return new ParallelCompositeUploadBlobWriteSessionConfig( + maxPartsPerCompose, + executorSupplier, + bufferAllocationStrategy, + partNamingStrategy, + partCleanupStrategy, + partMetadataFieldDecorator); + } + + /** + * Specify a specific buffering strategy which will dictate how buffers are allocated and used + * when performing a parallel composite upload. + * + *

Default: {@link BufferAllocationStrategy#simple(int) + * BufferAllocationStrategy#simple(16MiB)} + * + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public ParallelCompositeUploadBlobWriteSessionConfig withBufferAllocationStrategy( + BufferAllocationStrategy bufferAllocationStrategy) { + checkNotNull(bufferAllocationStrategy, "bufferAllocationStrategy must be non null"); + return new ParallelCompositeUploadBlobWriteSessionConfig( + maxPartsPerCompose, + executorSupplier, + bufferAllocationStrategy, + partNamingStrategy, + partCleanupStrategy, + partMetadataFieldDecorator); + } + + /** + * Specify a specific naming strategy which will dictate how individual part and intermediary + * compose objects will be named when performing a parallel composite upload. + * + *

Default: {@link PartNamingStrategy#noPrefix()} + * + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public ParallelCompositeUploadBlobWriteSessionConfig withPartNamingStrategy( + PartNamingStrategy partNamingStrategy) { + checkNotNull(partNamingStrategy, "partNamingStrategy must be non null"); + return new ParallelCompositeUploadBlobWriteSessionConfig( + maxPartsPerCompose, + executorSupplier, + bufferAllocationStrategy, + partNamingStrategy, + partCleanupStrategy, + partMetadataFieldDecorator); + } + + /** + * Specify a specific cleanup strategy which will dictate what cleanup operations are performed + * automatically when performing a parallel composite upload. + * + *

Default: {@link PartCleanupStrategy#always()} + * + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public ParallelCompositeUploadBlobWriteSessionConfig withPartCleanupStrategy( + PartCleanupStrategy partCleanupStrategy) { + checkNotNull(partCleanupStrategy, "partCleanupStrategy must be non null"); + return new ParallelCompositeUploadBlobWriteSessionConfig( + maxPartsPerCompose, + executorSupplier, + bufferAllocationStrategy, + partNamingStrategy, + partCleanupStrategy, + partMetadataFieldDecorator); + } + + /** + * Specify a Part Metadata Field decorator, this will manipulate the metadata associated with part + * objects, the ultimate object metadata will remain unchanged. + * + *

Default: {@link PartMetadataFieldDecorator#noOp()} + * + * @since 2.36.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public ParallelCompositeUploadBlobWriteSessionConfig withPartMetadataFieldDecorator( + PartMetadataFieldDecorator partMetadataFieldDecorator) { + checkNotNull(partMetadataFieldDecorator, "partMetadataFieldDecorator must be non null"); + return new ParallelCompositeUploadBlobWriteSessionConfig( + maxPartsPerCompose, + executorSupplier, + bufferAllocationStrategy, + partNamingStrategy, + partCleanupStrategy, + partMetadataFieldDecorator); + } + + @BetaApi + static ParallelCompositeUploadBlobWriteSessionConfig withDefaults() { + return new ParallelCompositeUploadBlobWriteSessionConfig( + MAX_PARTS_PER_COMPOSE, + ExecutorSupplier.cachedPool(), + BufferAllocationStrategy.simple(ByteSizeConstants._16MiB), + PartNamingStrategy.noPrefix(), + PartCleanupStrategy.always(), + PartMetadataFieldDecorator.noOp()); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ParallelCompositeUploadBlobWriteSessionConfig)) { + return false; + } + ParallelCompositeUploadBlobWriteSessionConfig that = + (ParallelCompositeUploadBlobWriteSessionConfig) o; + return maxPartsPerCompose == that.maxPartsPerCompose + && Objects.equals(executorSupplier, that.executorSupplier) + && Objects.equals(bufferAllocationStrategy, that.bufferAllocationStrategy) + && Objects.equals(partNamingStrategy, that.partNamingStrategy) + && Objects.equals(partCleanupStrategy, that.partCleanupStrategy) + && Objects.equals(partMetadataFieldDecorator, that.partMetadataFieldDecorator); + } + + @Override + public int hashCode() { + return Objects.hash( + maxPartsPerCompose, + executorSupplier, + bufferAllocationStrategy, + partNamingStrategy, + partCleanupStrategy, + partMetadataFieldDecorator); + } + + @InternalApi + @Override + WriterFactory createFactory(Clock clock) throws IOException { + Executor executor = 
executorSupplier.get(); + BufferHandlePool bufferHandlePool = bufferAllocationStrategy.get(); + PartMetadataFieldDecoratorInstance partMetadataFieldDecoratorInstance = + partMetadataFieldDecorator.newInstance(clock); + return new ParallelCompositeUploadWriterFactory( + clock, executor, bufferHandlePool, partMetadataFieldDecoratorInstance); + } + + /** + * A strategy which dictates how buffers are to be used for individual parts. The chosen strategy + * will apply to all instances of {@link BlobWriteSession} created from a single instance of + * {@link Storage}. + * + * @see #withBufferAllocationStrategy(BufferAllocationStrategy) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @Immutable + public abstract static class BufferAllocationStrategy extends Factory + implements Serializable { + + private BufferAllocationStrategy() {} + + /** + * Create a buffer strategy which will rely upon standard garbage collection. Each buffer will + * be used once and then garbage collected. + * + * @param capacity the number of bytes each buffer should be + * @see #withBufferAllocationStrategy(BufferAllocationStrategy) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static BufferAllocationStrategy simple(int capacity) { + checkArgument(capacity > 0, "bufferCapacity must be > 0"); + return new SimpleBufferAllocationStrategy(capacity); + } + + /** + * Create a buffer strategy which will have a fixed size pool of buffers. Each buffer will be + * lazily allocated. + * + * @param bufferCount the number of buffers the pool will be + * @param bufferCapacity the number of bytes each buffer should be + * @see #withBufferAllocationStrategy(BufferAllocationStrategy) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. 
+ */ + @BetaApi + public static BufferAllocationStrategy fixedPool(int bufferCount, int bufferCapacity) { + checkArgument(bufferCount > 0, "bufferCount must be > 0"); + checkArgument(bufferCapacity > 0, "bufferCapacity must be > 0"); + return new FixedPoolBufferAllocationStrategy(bufferCount, bufferCapacity); + } + + private static class SimpleBufferAllocationStrategy extends BufferAllocationStrategy { + private static final long serialVersionUID = 8884826090481043434L; + + private final int capacity; + + private SimpleBufferAllocationStrategy(int capacity) { + this.capacity = capacity; + } + + @Override + BufferHandlePool get() { + return BufferHandlePool.simple(capacity); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof SimpleBufferAllocationStrategy)) { + return false; + } + SimpleBufferAllocationStrategy that = (SimpleBufferAllocationStrategy) o; + return capacity == that.capacity; + } + + @Override + public int hashCode() { + return Objects.hashCode(capacity); + } + } + + private static class FixedPoolBufferAllocationStrategy extends BufferAllocationStrategy { + private static final long serialVersionUID = 3288902741819257066L; + + private final int bufferCount; + private final int bufferCapacity; + + private FixedPoolBufferAllocationStrategy(int bufferCount, int bufferCapacity) { + this.bufferCount = bufferCount; + this.bufferCapacity = bufferCapacity; + } + + @Override + BufferHandlePool get() { + return BufferHandlePool.fixedPool(bufferCount, bufferCapacity); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof FixedPoolBufferAllocationStrategy)) { + return false; + } + FixedPoolBufferAllocationStrategy that = (FixedPoolBufferAllocationStrategy) o; + return bufferCount == that.bufferCount && bufferCapacity == that.bufferCapacity; + } + + @Override + public int hashCode() { + return Objects.hash(bufferCount, bufferCapacity); + } + } + } 
+ + /** + * Class which will be used to supply an Executor where work will be submitted when performing a + * parallel composite upload. + * + * @see #withExecutorSupplier(ExecutorSupplier) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @Immutable + public abstract static class ExecutorSupplier extends Factory implements Serializable { + private static final AtomicInteger INSTANCE_COUNTER = new AtomicInteger(1); + + private ExecutorSupplier() {} + + /** + * Create a cached thread pool for submitting work + * + * @see #withExecutorSupplier(ExecutorSupplier) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static ExecutorSupplier cachedPool() { + return CachedSupplier.INSTANCE; + } + + /** + * Create a fixed size thread pool for submitting work + * + * @param poolSize the number of threads in the pool + * @see #withExecutorSupplier(ExecutorSupplier) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static ExecutorSupplier fixedPool(int poolSize) { + checkArgument(poolSize > 0, "poolSize must be > 0"); + return new FixedSupplier(poolSize); + } + + /** + * Wrap an existing executor instance which will be used for submitting work + * + *

Choosing to use this supplier type will make your instance of {@link StorageOptions} + * unable to be serialized. + * + * @param executor the executor to use + * @see #withExecutorSupplier(ExecutorSupplier) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static ExecutorSupplier useExecutor(Executor executor) { + requireNonNull(executor, "executor must be non null"); + return new SuppliedExecutorSupplier(executor); + } + + @NonNull + private static ThreadFactory newThreadFactory() { + return new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("c.g.c:g-c-s:pcu-" + INSTANCE_COUNTER.getAndIncrement() + "-%d") + .build(); + } + + private static class SuppliedExecutorSupplier extends ExecutorSupplier { + + private final Executor executor; + + public SuppliedExecutorSupplier(Executor executor) { + this.executor = executor; + } + + @Override + Executor get() { + return executor; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof SuppliedExecutorSupplier)) { + return false; + } + SuppliedExecutorSupplier that = (SuppliedExecutorSupplier) o; + return Objects.equals(executor, that.executor); + } + + @Override + public int hashCode() { + return Objects.hashCode(executor); + } + + private void writeObject(ObjectOutputStream out) throws IOException { + throw new java.io.InvalidClassException(this.getClass().getName() + "; Not serializable"); + } + } + + private static class CachedSupplier extends ExecutorSupplier implements Serializable { + private static final long serialVersionUID = 7768210719775319260L; + private static final CachedSupplier INSTANCE = new CachedSupplier(); + + @Override + Executor get() { + ThreadFactory threadFactory = newThreadFactory(); + return Executors.newCachedThreadPool(threadFactory); + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + } + + private 
static class FixedSupplier extends ExecutorSupplier implements Serializable { + private static final long serialVersionUID = 7771825977551614347L; + + private final int poolSize; + + public FixedSupplier(int poolSize) { + this.poolSize = poolSize; + } + + @Override + Executor get() { + ThreadFactory threadFactory = newThreadFactory(); + return Executors.newFixedThreadPool(poolSize, threadFactory); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof FixedSupplier)) { + return false; + } + FixedSupplier that = (FixedSupplier) o; + return poolSize == that.poolSize; + } + + @Override + public int hashCode() { + return Objects.hashCode(poolSize); + } + } + } + + /** + * A naming strategy which will be used to generate a name for a part or intermediary compose + * object. + * + * @see #withPartNamingStrategy(PartNamingStrategy) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @Immutable + public abstract static class PartNamingStrategy implements Serializable { + private static final long serialVersionUID = 8343436026774231869L; + private static final String FIELD_SEPARATOR = ";"; + private static final Encoder B64 = Base64.getUrlEncoder().withoutPadding(); + private static final HashFunction OBJECT_NAME_HASH_FUNCTION = Hashing.goodFastHash(128); + private final SecureRandom rand; + + @VisibleForTesting + @InternalApi + PartNamingStrategy(SecureRandom rand) { + this.rand = rand; + } + + String fmtName(String ultimateObjectName, PartRange partRange) { + // generate 128 bits of random data + byte[] bytes = new byte[16]; + rand.nextBytes(bytes); + + // encode it to base 64, yielding 22 characters + String randomKey = B64.encodeToString(bytes); + return fmtFields(randomKey, ultimateObjectName, partRange.encode()); + } + + abstract String fmtFields(String randomKey, String nameDigest, String partRange); + + /** + * Default strategy in which no stable prefix is 
defined. + * + *

General format is + * + *


+     *   {randomKeyDigest};{objectInfoDigest};{partIndex}.part
+     * 
+ * + *

{@code {objectInfoDigest}} will be fixed for an individual {@link BlobWriteSession}. + * + *

NOTE:The way in which both {@code randomKeyDigest} and {@code + * objectInfoDigest} are generated is undefined and subject to change at any time. + * + * @see #withPartNamingStrategy(PartNamingStrategy) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static PartNamingStrategy noPrefix() { + SecureRandom rand = new SecureRandom(); + return new NoPrefix(rand); + } + + /** + * Strategy in which an explicit stable prefix is present on each part and intermediary compose + * object. + * + *

General format is + * + *


+     *   {prefixPattern}/{randomKeyDigest};{objectInfoDigest};{partIndex}.part
+     * 
+ * + *

{@code {objectInfoDigest}} will be fixed for an individual {@link BlobWriteSession}. + * + *

NOTE:The way in which both {@code randomKeyDigest} and {@code + * objectInfoDigest} are generated is undefined and subject to change at any time. + * + *

Care must be taken when choosing to specify a stable prefix as this can create hotspots in + * the keyspace for object names. See Object Naming + * Convention Guidelines for more details. + * + * @see #withPartNamingStrategy(PartNamingStrategy) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static PartNamingStrategy prefix(String prefixPattern) { + checkNotNull(prefixPattern, "prefixPattern must be non null"); + SecureRandom rand = new SecureRandom(); + return new WithPrefix(rand, prefixPattern); + } + + /** + * Strategy in which the end object name is the prefix included and is present on each part and + * intermediary compose object. + * + *

General format is + * + *


+     *   {objectName}-{randomKeyDigest};{objectInfoDigest};{partIndex}.part
+     * 
+ * + *

{@code {objectInfoDigest}} will be fixed for an individual {@link BlobWriteSession}. + * + *

NOTE:The way in which both {@code randomKeyDigest} and {@code + * objectInfoDigest} are generated is undefined and subject to change at any time. + * + * @see #withPartNamingStrategy(PartNamingStrategy) + * @since 2.30.2 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static PartNamingStrategy useObjectNameAsPrefix() { + return useObjectNameAsPrefix(""); + } + + private static PartNamingStrategy useObjectNameAsPrefix(String prefixPattern) { + checkNotNull(prefixPattern, "prefixPattern must be non null"); + SecureRandom rand = new SecureRandom(); + return new WithObjectLevelPrefix(rand, prefixPattern); + } + + static final class WithPrefix extends PartNamingStrategy { + private static final long serialVersionUID = 5709330763161570411L; + + private final String prefix; + + private WithPrefix(SecureRandom rand, String prefix) { + super(rand); + this.prefix = prefix; + } + + @Override + protected String fmtFields(String randomKey, String ultimateObjectName, String partRange) { + HashCode hashCode = + OBJECT_NAME_HASH_FUNCTION.hashString(ultimateObjectName, StandardCharsets.UTF_8); + String nameDigest = B64.encodeToString(hashCode.asBytes()); + return prefix + + "/" + + randomKey + + FIELD_SEPARATOR + + nameDigest + + FIELD_SEPARATOR + + partRange + + ".part"; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof WithPrefix)) { + return false; + } + WithPrefix that = (WithPrefix) o; + return Objects.equals(prefix, that.prefix); + } + + @Override + public int hashCode() { + return Objects.hashCode(prefix); + } + } + + static final class WithObjectLevelPrefix extends PartNamingStrategy { + + private static final long serialVersionUID = 5157942020618764450L; + private final String prefix; + + private WithObjectLevelPrefix(SecureRandom rand, String prefix) { + super(rand); + // If no prefix is specified we will create the part files under the same directory as the + // 
ultimate object. + this.prefix = prefix.isEmpty() ? prefix : prefix + "/"; + } + + @Override + protected String fmtFields(String randomKey, String ultimateObjectName, String partRange) { + HashCode hashCode = + OBJECT_NAME_HASH_FUNCTION.hashString(ultimateObjectName, StandardCharsets.UTF_8); + String nameDigest = B64.encodeToString(hashCode.asBytes()); + return prefix + + ultimateObjectName + + "-" + + randomKey + + FIELD_SEPARATOR + + nameDigest + + FIELD_SEPARATOR + + partRange + + ".part"; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof WithObjectLevelPrefix)) { + return false; + } + WithObjectLevelPrefix that = (WithObjectLevelPrefix) o; + return Objects.equals(prefix, that.prefix); + } + + @Override + public int hashCode() { + return Objects.hashCode(prefix); + } + } + + static final class NoPrefix extends PartNamingStrategy { + private static final long serialVersionUID = 5202415556658566017L; + + public NoPrefix(SecureRandom rand) { + super(rand); + } + + @Override + protected String fmtFields(String randomKey, String ultimateObjectName, String partRange) { + HashCode hashCode = + OBJECT_NAME_HASH_FUNCTION.hashString(ultimateObjectName, StandardCharsets.UTF_8); + String nameDigest = B64.encodeToString(hashCode.asBytes()); + return randomKey + + FIELD_SEPARATOR + // todo: do we want to + // include a hint where the object came from, similar to gcloud + // https://cloud.google.com/storage/docs/parallel-composite-uploads#gcloud-pcu + // + "com.google.cloud:google-cloud-storage" + // + FIELD_SEPARATOR + + nameDigest + + FIELD_SEPARATOR + + partRange + + ".part"; + } + } + } + + /** + * A Decorator which is used to manipulate metadata fields, specifically on the part objects + * created in a Parallel Composite Upload + * + * @see #withPartMetadataFieldDecorator(PartMetadataFieldDecorator) + * @since 2.36.0 This new api is in preview and is subject to breaking changes. 
+ */ + @BetaApi + @Immutable + public abstract static class PartMetadataFieldDecorator implements Serializable { + + abstract PartMetadataFieldDecoratorInstance newInstance(Clock clock); + + /** + * A decorator that is used to manipulate the Custom Time Metadata field of part files. {@link + * BlobInfo#getCustomTimeOffsetDateTime()} + * + *

When provided with a duration, a time in the future will be calculated for each part + * object upon upload, this new value can be used in OLM rules to cleanup abandoned part files. + * + *

See [CustomTime OLM + * documentation](https://cloud.google.com/storage/docs/lifecycle#dayssincecustomtime) + * + * @see #withPartMetadataFieldDecorator(PartMetadataFieldDecorator) + * @since 2.36.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static PartMetadataFieldDecorator setCustomTimeInFuture(Duration timeInFuture) { + checkNotNull(timeInFuture, "timeInFuture must not be null"); + return new CustomTimeInFuture(timeInFuture); + } + + @BetaApi + public static PartMetadataFieldDecorator noOp() { + return NoOp.INSTANCE; + } + + @BetaApi + private static final class CustomTimeInFuture extends PartMetadataFieldDecorator { + private static final long serialVersionUID = -6213742554954751892L; + private final Duration duration; + + CustomTimeInFuture(Duration duration) { + this.duration = duration; + } + + @Override + PartMetadataFieldDecoratorInstance newInstance(Clock clock) { + return builder -> { + OffsetDateTime futureTime = + OffsetDateTime.from( + clock.instant().plus(duration).atZone(clock.getZone()).toOffsetDateTime()); + return builder.setCustomTimeOffsetDateTime(futureTime); + }; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof CustomTimeInFuture)) { + return false; + } + CustomTimeInFuture that = (CustomTimeInFuture) o; + return Objects.equals(duration, that.duration); + } + + @Override + public int hashCode() { + return Objects.hashCode(duration); + } + } + + private static final class NoOp extends PartMetadataFieldDecorator { + private static final long serialVersionUID = -4569486383992999205L; + private static final NoOp INSTANCE = new NoOp(); + + @Override + PartMetadataFieldDecoratorInstance newInstance(Clock clock) { + return builder -> builder; + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + } + } + + /** + * A cleanup strategy which will dictate what cleanup 
operations are performed automatically when + * performing a parallel composite upload. + * + * @see #withPartCleanupStrategy(PartCleanupStrategy) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @Immutable + public static class PartCleanupStrategy implements Serializable { + private static final long serialVersionUID = -1434253614347199051L; + private final boolean deletePartsOnSuccess; + private final boolean deleteAllOnError; + + private PartCleanupStrategy(boolean deletePartsOnSuccess, boolean deleteAllOnError) { + this.deletePartsOnSuccess = deletePartsOnSuccess; + this.deleteAllOnError = deleteAllOnError; + } + + boolean isDeletePartsOnSuccess() { + return deletePartsOnSuccess; + } + + boolean isDeleteAllOnError() { + return deleteAllOnError; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof PartCleanupStrategy)) { + return false; + } + PartCleanupStrategy that = (PartCleanupStrategy) o; + return deletePartsOnSuccess == that.deletePartsOnSuccess + && deleteAllOnError == that.deleteAllOnError; + } + + @Override + public int hashCode() { + return Objects.hash(deletePartsOnSuccess, deleteAllOnError); + } + + /** + * If an unrecoverable error is encountered, define whether to attempt to delete any objects + * already uploaded. + * + *

Default: {@code true} + * + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + PartCleanupStrategy withDeleteAllOnError(boolean deleteAllOnError) { + return new PartCleanupStrategy(deletePartsOnSuccess, deleteAllOnError); + } + + /** + * Cleanup strategy which will always attempt to clean up part and intermediary compose objects + * either on success or on error. + * + * @see #withPartCleanupStrategy(PartCleanupStrategy) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static PartCleanupStrategy always() { + return new PartCleanupStrategy(true, true); + } + + /** + * Cleanup strategy which will only attempt to clean up parts and intermediary compose objects + * either on success. + * + * @see #withPartCleanupStrategy(PartCleanupStrategy) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static PartCleanupStrategy onlyOnSuccess() { + return new PartCleanupStrategy(true, false); + } + + /** + * Cleanup strategy which will never attempt to clean up parts or intermediary compose objects + * either on success or on error. + * + * @see #withPartCleanupStrategy(PartCleanupStrategy) + * @since 2.28.0 This new api is in preview and is subject to breaking changes. 
+ */ + @BetaApi + public static PartCleanupStrategy never() { + return new PartCleanupStrategy(false, false); + } + } + + interface PartMetadataFieldDecoratorInstance extends UnaryOperator {} + + private abstract static class Factory implements Serializable { + private static final long serialVersionUID = 271806144227661056L; + + private Factory() {} + + abstract T get(); + } + + private class ParallelCompositeUploadWriterFactory implements WriterFactory { + + private final Clock clock; + private final Executor executor; + private final BufferHandlePool bufferHandlePool; + private final PartMetadataFieldDecoratorInstance partMetadataFieldDecoratorInstance; + + private ParallelCompositeUploadWriterFactory( + Clock clock, + Executor executor, + BufferHandlePool bufferHandlePool, + PartMetadataFieldDecoratorInstance partMetadataFieldDecoratorInstance) { + this.clock = clock; + this.executor = executor; + this.bufferHandlePool = bufferHandlePool; + this.partMetadataFieldDecoratorInstance = partMetadataFieldDecoratorInstance; + } + + @Override + public WritableByteChannelSession writeSession( + StorageInternal s, BlobInfo info, Opts opts) { + // if crc32cMatch or md5Match were specified, they will already be in opts + BlobInfo trimmed = info.toBuilder().clearCrc32c().clearMd5().build(); + return new PCUSession(s, trimmed, opts); + } + + private final class PCUSession + implements WritableByteChannelSession { + + private final SettableApiFuture result; + private final StorageInternal storageInternal; + private final BlobInfo info; + private final Opts opts; + + private PCUSession( + StorageInternal storageInternal, BlobInfo info, Opts opts) { + this.storageInternal = storageInternal; + this.info = info; + this.opts = opts; + result = SettableApiFuture.create(); + } + + @Override + public ApiFuture openAsync() { + ParallelCompositeUploadWritableByteChannel channel = + new ParallelCompositeUploadWritableByteChannel( + bufferHandlePool, + executor, + partNamingStrategy, + 
partCleanupStrategy, + maxPartsPerCompose, + partMetadataFieldDecoratorInstance, + result, + storageInternal, + info, + opts); + return ApiFutures.immediateFuture( + StorageByteChannels.writable().createSynchronized(channel)); + } + + @Override + public ApiFuture getResult() { + return result; + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ParallelCompositeUploadException.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ParallelCompositeUploadException.java new file mode 100644 index 000000000000..2cc791e16e04 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ParallelCompositeUploadException.java @@ -0,0 +1,78 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ErrorDetails; +import com.google.api.gax.rpc.StatusCode; +import io.grpc.Status.Code; +import java.util.List; + +/** + * An exception which provides access to created objects during a Parallel Composite Upload that did + * not finish successfully. + * + *

This exception can occur when calling any method on the {@link + * java.nio.channels.WritableByteChannel} returned from {@link BlobWriteSession#open()}, in which + * case it will be the cause of a {@link StorageException}. + * + *

Similarly, this exception will be the cause of a {@link + * java.util.concurrent.CancellationException} thrown by the {@link BlobWriteSession#getResult()}. + */ +public final class ParallelCompositeUploadException extends ApiException { + + private final ApiFuture> createdObjects; + + private ParallelCompositeUploadException( + Throwable cause, + StatusCode statusCode, + ErrorDetails errorDetails, + ApiFuture> createdObjects) { + super(cause, statusCode, false, errorDetails); + this.createdObjects = createdObjects; + } + + /** + * A future list of the {@link BlobId}s which were created during the Parallel Composite Upload + * but may not have successfully been cleaned up. + */ + public ApiFuture> getCreatedObjects() { + return createdObjects; + } + + static ParallelCompositeUploadException of(Throwable t, ApiFuture> createdObjects) { + StatusCode statusCode; + ErrorDetails errorDetails; + + Throwable cause = t; + if (t instanceof StorageException && t.getCause() != null) { + cause = t.getCause(); + } + + if (cause instanceof ApiException) { + ApiException apiException = (ApiException) cause; + statusCode = apiException.getStatusCode(); + errorDetails = apiException.getErrorDetails(); + } else { + statusCode = GrpcStatusCode.of(Code.UNKNOWN); + errorDetails = ErrorDetails.builder().build(); + } + return new ParallelCompositeUploadException(cause, statusCode, errorDetails, createdObjects); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ParallelCompositeUploadWritableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ParallelCompositeUploadWritableByteChannel.java new file mode 100644 index 000000000000..464c6b2372d4 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ParallelCompositeUploadWritableByteChannel.java @@ -0,0 +1,615 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * 
you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.NotFoundException; +import com.google.cloud.BaseServiceException; +import com.google.cloud.storage.ApiFutureUtils.OnFailureApiFutureCallback; +import com.google.cloud.storage.ApiFutureUtils.OnSuccessApiFutureCallback; +import com.google.cloud.storage.AsyncAppendingQueue.ShortCircuitException; +import com.google.cloud.storage.BufferHandlePool.PooledBuffer; +import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import com.google.cloud.storage.MetadataField.PartRange; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartCleanupStrategy; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartMetadataFieldDecoratorInstance; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartNamingStrategy; +import com.google.cloud.storage.Storage.ComposeRequest; +import com.google.cloud.storage.UnifiedOpts.Crc32cMatch; +import com.google.cloud.storage.UnifiedOpts.GenerationMatch; +import com.google.cloud.storage.UnifiedOpts.GenerationNotMatch; +import com.google.cloud.storage.UnifiedOpts.Md5Match; +import 
com.google.cloud.storage.UnifiedOpts.MetagenerationMatch; +import com.google.cloud.storage.UnifiedOpts.MetagenerationNotMatch; +import com.google.cloud.storage.UnifiedOpts.ObjectSourceOpt; +import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.cloud.storage.UnifiedOpts.ResumableUploadExpectedObjectSize; +import com.google.cloud.storage.UnifiedOpts.SourceGenerationMatch; +import com.google.cloud.storage.UnifiedOpts.SourceGenerationNotMatch; +import com.google.cloud.storage.UnifiedOpts.SourceMetagenerationMatch; +import com.google.cloud.storage.UnifiedOpts.SourceMetagenerationNotMatch; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.hash.Hasher; +import com.google.common.hash.Hashing; +import io.grpc.Status.Code; +import io.opentelemetry.context.Context; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.AsynchronousCloseException; +import java.nio.channels.ClosedChannelException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Objects; +import java.util.concurrent.CancellationException; +import java.util.concurrent.Executor; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +@SuppressWarnings("UnstableApiUsage") // guava hashing +final class ParallelCompositeUploadWritableByteChannel implements BufferedWritableByteChannel { + + private static final MetadataField FINAL_OBJECT_NAME = + MetadataField.forString("pcu_finalObjectName"); + private 
static final MetadataField PART_INDEX = + MetadataField.forPartRange("pcu_partIndex"); + private static final MetadataField OBJECT_OFFSET = + MetadataField.forLong("pcu_objectOffset"); + private static final Comparator comparator = + Comparator.comparing(PART_INDEX::readFrom, PartRange.COMP); + private static final Predicate TO_EXCLUDE_FROM_PARTS; + // when creating a part or composing we include a precondition so that it can be retried + private static final Opts DOES_NOT_EXIST = + Opts.from(UnifiedOpts.generationMatch(0)); + + static { + //noinspection deprecation + Predicate tmp = + o -> + o instanceof GenerationMatch + || o instanceof GenerationNotMatch + || o instanceof MetagenerationMatch + || o instanceof MetagenerationNotMatch + || o instanceof SourceGenerationMatch + || o instanceof SourceGenerationNotMatch + || o instanceof SourceMetagenerationMatch + || o instanceof SourceMetagenerationNotMatch + || o instanceof Crc32cMatch + || o instanceof Md5Match + || o instanceof ResumableUploadExpectedObjectSize; + TO_EXCLUDE_FROM_PARTS = tmp.negate(); + } + + // immutable provided values + private final BufferHandlePool bufferPool; + private final Executor exec; + private final PartNamingStrategy partNamingStrategy; + private final PartCleanupStrategy partCleanupStrategy; + private final int maxElementsPerCompact; + private final PartMetadataFieldDecoratorInstance partMetadataFieldDecorator; + private final SettableApiFuture finalObject; + private final StorageInternal storage; + private final BlobInfo ultimateObject; + private final Opts opts; + + // immutable bootstrapped state + private final Opts partOpts; + private final Opts srcOpts; + private final AsyncAppendingQueue queue; + private final FailureForwarder failureForwarder; + // mutable running state + private final List> pendingParts; + private final List successfulParts; + private final Hasher cumulativeHasher; + private boolean open; + private long totalObjectOffset; + private PooledBuffer current; + + 
ParallelCompositeUploadWritableByteChannel( + BufferHandlePool bufferPool, + Executor exec, + PartNamingStrategy partNamingStrategy, + PartCleanupStrategy partCleanupStrategy, + int maxElementsPerCompact, + PartMetadataFieldDecoratorInstance partMetadataFieldDecorator, + SettableApiFuture finalObject, + StorageInternal storage, + BlobInfo ultimateObject, + Opts opts) { + this.bufferPool = bufferPool; + this.exec = Context.current().wrap(exec); + this.partNamingStrategy = partNamingStrategy; + this.partCleanupStrategy = partCleanupStrategy; + this.maxElementsPerCompact = maxElementsPerCompact; + this.partMetadataFieldDecorator = partMetadataFieldDecorator; + this.finalObject = finalObject; + this.storage = storage; + this.ultimateObject = ultimateObject; + this.opts = opts; + this.queue = AsyncAppendingQueue.of(this.exec, maxElementsPerCompact, this::compose); + this.pendingParts = new ArrayList<>(); + // this can be modified by another thread + this.successfulParts = Collections.synchronizedList(new ArrayList<>()); + this.open = true; + this.totalObjectOffset = 0; + + this.partOpts = getPartOpts(opts); + this.srcOpts = partOpts.transformTo(ObjectSourceOpt.class); + this.cumulativeHasher = Hashing.crc32c().newHasher(); + this.failureForwarder = new FailureForwarder(); + } + + @Override + public int write(ByteBuffer src) throws IOException { + if (!open) { + throw new ClosedChannelException(); + } + + int remaining = src.remaining(); + cumulativeHasher.putBytes(src.duplicate()); + while (src.hasRemaining()) { + if (current == null) { + current = bufferPool.getBuffer(); + } + + ByteBuffer buf = current.getBufferHandle().get(); + Buffers.copy(src, buf); + + if (!buf.hasRemaining()) { + internalFlush(buf); + } + } + + return remaining; + } + + @Override + public boolean isOpen() { + return open; + } + + @Override + public void flush() throws IOException { + if (current != null) { + ByteBuffer buf = current.getBufferHandle().get(); + internalFlush(buf); + } + } + + 
  /**
   * Finalizes the upload: drains any buffered bytes, closes the compose queue, validates the
   * cumulative CRC32C of everything written against the final object's checksum, and optionally
   * deletes part objects per the configured {@link PartCleanupStrategy}.
   *
   * <p>If nothing was ever written, an empty object is created directly instead.
   *
   * @throws AsynchronousCloseException wrapping whatever error caused finalization to fail
   */
  @Override
  public void close() throws IOException {
    if (!open) {
      return;
    }
    open = false;

    flush();

    try {
      queue.close();
    } catch (NoSuchElementException e) {
      // We never created any parts
      // create an empty object
      try {
        BlobInfo blobInfo = storage.internalDirectUpload(ultimateObject, opts, Buffers.allocate(0));
        finalObject.set(blobInfo);
        return;
      } catch (StorageException se) {
        finalObject.setException(se);
        throw se;
      }
    }

    String expectedCrc32c = Utils.crc32cCodec.encode(cumulativeHasher.hash().asInt());
    ApiFuture<BlobInfo> closingTransform =
        ApiFutures.transformAsync(queue.getResult(), this::cleanupParts, exec);
    ApiFuture<BlobInfo> validatingTransform =
        ApiFutures.transformAsync(
            closingTransform,
            finalInfo -> {
              // compare server-reported crc32c of the composed object to our running hash
              String crc32c = finalInfo.getCrc32c();
              if (expectedCrc32c.equals(crc32c)) {
                return ApiFutures.immediateFuture(finalInfo);
              } else {
                return ApiFutures.immediateFailedFuture(
                    StorageException.coalesce(
                        buildParallelCompositeUploadException(
                            ApiExceptionFactory.createException(
                                String.format(
                                    Locale.US,
                                    "CRC32C Checksum mismatch. expected: [%s] but was: [%s]",
                                    expectedCrc32c,
                                    crc32c),
                                null,
                                GrpcStatusCode.of(Code.DATA_LOSS),
                                false),
                            exec,
                            pendingParts,
                            successfulParts)));
              }
            },
            exec);

    if (partCleanupStrategy.isDeleteAllOnError()) {
      ApiFuture cleaningFuture =
          ApiFutures.catchingAsync(
              validatingTransform, Throwable.class, this::asyncCleanupAfterFailure, exec);
      ApiFutures.addCallback(cleaningFuture, failureForwarder, exec);
    } else {
      ApiFutures.addCallback(validatingTransform, failureForwarder, exec);
    }

    // we don't need the value from this, but we do need any exception that might be present
    try {
      ApiFutureUtils.await(validatingTransform);
    } catch (Throwable t) {
      AsynchronousCloseException e = new AsynchronousCloseException();
      e.initCause(t);
      throw e;
    }
  }

  /**
   * Uploads the contents of {@code buf} as the next part object and appends the resulting future
   * to the compose queue.
   *
   * <p>On {@link ShortCircuitException} from the queue, cancels outstanding part uploads and fails
   * the upload, optionally deleting all already-created objects per the cleanup strategy.
   */
  private void internalFlush(ByteBuffer buf) {
    Buffers.flip(buf);
    int pendingByteCount = buf.remaining();
    int partIndex = pendingParts.size() + 1;
    BlobInfo partInfo = definePart(ultimateObject, PartRange.of(partIndex), totalObjectOffset);
    ApiFuture<BlobInfo> partFuture =
        ApiFutures.transform(
            ApiFutures.immediateFuture(partInfo),
            info -> {
              try {
                return storage.internalDirectUpload(info, partOpts, buf);
              } catch (StorageException e) {
                // a precondition failure usually means the part was created, but we didn't get the
                // response. And when we tried to retry the object already exists.
                if (e.getCode() == 412) {
                  return storage.internalObjectGet(info.getBlobId(), srcOpts);
                } else {
                  throw e;
                }
              }
            },
            exec);

    // release the pooled buffer whether the part upload succeeds or fails
    ApiFutures.addCallback(
        partFuture,
        new BufferHandleReleaser<>(
            bufferPool,
            current,
            (OnSuccessApiFutureCallback<BlobInfo>)
                result -> successfulParts.add(result.getBlobId())),
        exec);

    pendingParts.add(partFuture);
    try {
      queue.append(partFuture);
      totalObjectOffset += pendingByteCount;
    } catch (ShortCircuitException e) {
      open = false;
      bufferPool.returnBuffer(current);

      // attempt to cancel any pending requests which haven't started yet
      for (ApiFuture<BlobInfo> pendingPart : pendingParts) {
        pendingPart.cancel(false);
      }

      Throwable cause = e.getCause();
      BaseServiceException storageException;
      if (partCleanupStrategy.isDeleteAllOnError()) {
        storageException = StorageException.coalesce(cause);
        ApiFuture cleanupFutures = asyncCleanupAfterFailure(storageException);
        // asynchronously fail the finalObject future
        CancellationException cancellationException =
            new CancellationException(storageException.getMessage());
        cancellationException.initCause(storageException);
        ApiFutures.addCallback(
            cleanupFutures,
            new ApiFutureCallback<Object>() {
              @Override
              public void onFailure(Throwable throwable) {
                cancellationException.addSuppressed(throwable);
                failureForwarder.onFailure(cancellationException);
              }

              @Override
              public void onSuccess(Object o) {
                failureForwarder.onFailure(cancellationException);
              }
            },
            exec);
        // this will throw out if anything fails
        ApiFutureUtils.await(cleanupFutures);
      } else {
        // create our exception containing information about the upload context
        ParallelCompositeUploadException pcue =
            buildParallelCompositeUploadException(cause, exec, pendingParts, successfulParts);
        storageException = StorageException.coalesce(pcue);
        // asynchronously fail the finalObject future
        CancellationException cancellationException =
            new CancellationException(storageException.getMessage());
        cancellationException.initCause(storageException);
        ApiFutures.addCallback(
            ApiFutures.immediateFailedFuture(cancellationException),
            (OnFailureApiFutureCallback<BlobInfo>) failureForwarder::onFailure,
            exec);
        throw storageException;
      }
    } finally {
      current = null;
    }
  }

  /**
   * Composes {@code parts} into a single object. When the batch is full (size equals
   * maxElementsPerCompact) the target is an intermediary part spanning the batch's range;
   * otherwise this compose produces the ultimate object.
   */
  @SuppressWarnings("DataFlowIssue")
  private BlobInfo compose(ImmutableList<BlobInfo> parts) {
    ComposeRequest.Builder builder = ComposeRequest.newBuilder();

    // sources must be appended in object order
    List<BlobInfo> sorted = parts.stream().sorted(comparator).collect(Collectors.toList());

    sorted.stream()
        .map(BlobInfo::getBlobId)
        .forEach(id -> builder.addSource(id.getName(), id.getGeneration()));

    if (parts.size() == maxElementsPerCompact) {
      // perform an intermediary compose
      BlobInfo first = sorted.get(0);
      BlobInfo last = sorted.get(sorted.size() - 1);

      long firstIdx = PART_INDEX.readFrom(first).getBegin();
      long lastIdx = PART_INDEX.readFrom(last).getEnd();
      long offset = OBJECT_OFFSET.readFrom(first);
      BlobInfo newPart = definePart(ultimateObject, PartRange.of(firstIdx, lastIdx), offset);
      builder.setTarget(newPart);
      builder.setTargetOpts(partOpts);
    } else {
      // with this compose create the ultimate object
      builder.setTarget(ultimateObject);
      builder.setTargetOpts(opts);
    }

    ComposeRequest composeRequest = builder.build();
    BlobInfo compose = storage.compose(composeRequest);
    successfulParts.add(compose.getBlobId());
    return compose;
  }

  /**
   * Deletes all successfully created part objects (excluding the final object itself) when the
   * cleanup strategy calls for it; resolves to {@code finalInfo} on success.
   */
  private ApiFuture<BlobInfo> cleanupParts(BlobInfo finalInfo) {
    if (!partCleanupStrategy.isDeletePartsOnSuccess()) {
      return ApiFutures.immediateFuture(finalInfo);
    }
    List<ApiFuture<Boolean>> deletes =
        successfulParts.stream()
            // make sure we don't delete the object we're wanting to create
            .filter(id -> !id.equals(finalInfo.getBlobId()))
            .map(this::deleteAsync)
            .collect(Collectors.toList());

    ApiFuture<List<Boolean>> deletes2 = ApiFutureUtils.quietAllAsList(deletes);

    return ApiFutures.catchingAsync(
        ApiFutures.transform(deletes2, ignore -> finalInfo, exec),
        Throwable.class,
        cause -> ApiFutures.immediateFailedFuture(StorageException.coalesce(cause)),
        exec);
  }

  /**
   * Derives the {@link BlobInfo} for a part object from the ultimate object: part-specific name,
   * positional metadata, and no checksums (they would not match the part).
   */
  private BlobInfo definePart(BlobInfo ultimateObject, PartRange partRange, long offset) {
    BlobId id = ultimateObject.getBlobId();
    BlobInfo.Builder b = ultimateObject.toBuilder().clearCrc32c().clearMd5();
    String partName = partNamingStrategy.fmtName(id.getName(), partRange);
    b.setBlobId(BlobId.of(id.getBucket(), partName));
    ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
    Map<@NonNull String, @Nullable String> metadata = ultimateObject.getMetadata();
    if (metadata != null) {
      builder.putAll(metadata);
    }
    FINAL_OBJECT_NAME.appendTo(id.getName(), builder);
    PART_INDEX.appendTo(partRange, builder);
    OBJECT_OFFSET.appendTo(offset, builder);
    b.setMetadata(builder.build());
    // the value of a kms key name will contain the exact version when read from gcs
    // however, gcs will not accept that version resource identifier when creating a new object
    // strip it out, so it can be included as a query string parameter instead
    b.setKmsKeyName(null);
    b = partMetadataFieldDecorator.apply(b);
    return b.build();
  }

  /**
   * Best-effort deletion of every pending and successful object after a failure. Any objects that
   * could not be deleted are reported as a suppressed exception on {@code originalFailure}; the
   * returned future always fails with {@code originalFailure}.
   */
  private ApiFuture asyncCleanupAfterFailure(Throwable originalFailure) {
    ApiFuture<ImmutableList<BlobId>> pendingAndSuccessfulBlobIds =
        getPendingAndSuccessfulBlobIds(exec, pendingParts, successfulParts);
    return ApiFutures.transformAsync(
        pendingAndSuccessfulBlobIds,
        blobIds -> {
          ImmutableList<ApiFuture<Boolean>> pendingDeletes =
              blobIds.stream().map(this::deleteAsync).collect(ImmutableList.toImmutableList());

          ApiFuture<List<Boolean>> futureDeleteResults =
              ApiFutures.successfulAsList(pendingDeletes);

          return ApiFutures.transformAsync(
              futureDeleteResults,
              deleteResults -> {
                List<BlobId> failedDeletes = new ArrayList<>();
                for (int i = 0; i < blobIds.size(); i++) {
                  BlobId id = blobIds.get(i);
                  Boolean deleteResult = deleteResults.get(i);
                  // deleteResult not equal to true means the request completed but was
                  // unsuccessful
                  // deleteResult being null means the future failed
                  if (!Boolean.TRUE.equals(deleteResult)) {
                    failedDeletes.add(id);
                  }
                }

                if (!failedDeletes.isEmpty()) {
                  String failedGsUris =
                      failedDeletes.stream()
                          .map(BlobId::toGsUtilUriWithGeneration)
                          .collect(Collectors.joining(",\n", "[\n", "\n]"));

                  String message =
                      String.format(
                          Locale.US,
                          "Incomplete parallel composite upload cleanup after previous error."
                              + " Unknown object ids: %s",
                          failedGsUris);
                  StorageException storageException = new StorageException(0, message, null);
                  originalFailure.addSuppressed(storageException);
                }
                return ApiFutures.immediateFailedFuture(originalFailure);
              },
              exec);
        },
        exec);
  }

  /**
   * Asynchronously deletes {@code id}, treating "not found" (the part no longer exists) as
   * success.
   */
  @NonNull
  private ApiFuture<Boolean> deleteAsync(BlobId id) {
    return ApiFutures.transform(
        ApiFutures.immediateFuture(id),
        v -> {
          try {
            storage.internalObjectDelete(v, srcOpts);
            return true;
          } catch (NotFoundException e) {
            // not found means the part doesn't exist, which is what we want
            return true;
          } catch (StorageException e) {
            if (e.getCode() == 404) {
              return true;
            } else {
              throw e;
            }
          }
        },
        exec);
  }

  /**
   * Filters out options that must not apply to part objects and prepends the generation-match-0
   * precondition (so part creation is retryable) and disables gzip content encoding.
   */
  @VisibleForTesting
  @NonNull
  static Opts getPartOpts(Opts opts) {
    return opts.filter(TO_EXCLUDE_FROM_PARTS)
        .prepend(DOES_NOT_EXIST)
        // disable gzip transfer encoding for HTTP, it causes a significant bottleneck uploading
        // the parts
        .prepend(Opts.from(UnifiedOpts.disableGzipContent()));
  }

  /**
   * Callback decorator that always returns the pooled buffer to the pool after delegating,
   * regardless of success or failure.
   */
  @VisibleForTesting
  static final class BufferHandleReleaser<T> implements ApiFutureCallback<T> {
    private final BufferHandlePool bufferManager;
    private final ApiFutureCallback<T> delegate;
    private final PooledBuffer toRelease;

    @VisibleForTesting
    BufferHandleReleaser(
        BufferHandlePool bufferPool, PooledBuffer toRelease, ApiFutureCallback<T> delegate) {
      this.bufferManager = bufferPool;
      this.delegate = delegate;
      this.toRelease = toRelease;
    }

    @Override
    public void onFailure(Throwable t) {
      try {
        delegate.onFailure(t);
      } finally {
        bufferManager.returnBuffer(toRelease);
      }
    }

    @Override
    public void onSuccess(T result) {
      try {
        delegate.onSuccess(result);
      } finally {
        bufferManager.returnBuffer(toRelease);
      }
    }
  }

  /** Forwards the terminal result (value or failure) to the externally visible future. */
  private class FailureForwarder implements ApiFutureCallback<BlobInfo> {

    @Override
    public void onFailure(Throwable t) {
      finalObject.setException(t);
    }

    @Override
    public void onSuccess(BlobInfo result) {
      finalObject.set(result);
    }
  }

  /**
   * Wraps {@code cause} in a {@link ParallelCompositeUploadException} carrying the set of objects
   * known to have been created, so a caller can clean up manually.
   */
  @VisibleForTesting
  @NonNull
  static ParallelCompositeUploadException buildParallelCompositeUploadException(
      Throwable cause,
      Executor exec,
      List<ApiFuture<BlobInfo>> pendingParts,
      List<BlobId> successfulParts) {
    ApiFuture<ImmutableList<BlobId>> fCreatedObjects =
        getPendingAndSuccessfulBlobIds(exec, pendingParts, successfulParts);

    return ParallelCompositeUploadException.of(cause, fCreatedObjects);
  }

  /**
   * Resolves the distinct set of object ids from both pending part futures and already-recorded
   * successful parts, suppressing any part-future failures.
   */
  @NonNull
  private static ApiFuture<ImmutableList<BlobId>> getPendingAndSuccessfulBlobIds(
      Executor exec, List<ApiFuture<BlobInfo>> pendingParts, List<BlobId> successfulParts) {
    ApiFuture<List<BlobInfo>> successfulList = ApiFutures.successfulAsList(pendingParts);
    // suppress any failure that might happen when waiting for any pending futures to resolve
    ApiFuture<List<BlobInfo>> catching =
        ApiFutures.catching(successfulList, Throwable.class, t2 -> ImmutableList.of(), exec);

    ApiFuture<ImmutableList<BlobId>> fCreatedObjects =
        ApiFutures.transform(
            catching,
            l ->
                Stream.of(
                        l.stream()
                            // any failed future from successfulList will contain a null value
                            // filter out as that means an object wasn't created
                            .filter(Objects::nonNull)
                            .map(BlobInfo::getBlobId),
                        successfulParts.stream())
                    .flatMap(Function.identity()) // .flatten()
                    // a successful value could be in successfulParts and pendingParts if
                    // pendingParts haven't compacted yet
                    .distinct()
                    .collect(ImmutableList.toImmutableList()),
            exec);
    return fCreatedObjects;
  }
}
diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/PostPolicyV4.java
b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/PostPolicyV4.java new file mode 100644 index 000000000000..d606c1492c04 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/PostPolicyV4.java @@ -0,0 +1,529 @@
/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import java.net.URI;
import java.net.URISyntaxException;
import java.text.SimpleDateFormat;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;

/**
 * Presigned V4 post policy. Instances of {@code PostPolicyV4} include a URL and a map of fields
 * that can be specified in an HTML form to submit a POST request to upload an object.
 *
 * <p>See <a href="https://cloud.google.com/storage/docs/xml-api/post-object">POST Object</a> for
 * details of upload by using HTML forms.
 *
 * <p>See {@link Storage#generateSignedPostPolicyV4(BlobInfo, long, TimeUnit,
 * PostPolicyV4.PostFieldsV4, PostPolicyV4.PostConditionsV4, Storage.PostPolicyV4Option...)} for
 * example of usage.
 */
public final class PostPolicyV4 {
  private final String url;
  private final Map fields;

  private PostPolicyV4(String url, Map fields) {
    // reject relative URLs and syntactically invalid URLs up front
    try {
      if (!new URI(url).isAbsolute()) {
        throw new IllegalArgumentException(url + " is not an absolute URL");
      }
    } catch (URISyntaxException e) {
      throw new IllegalArgumentException(e);
    }

    this.url = url;
    this.fields = Collections.unmodifiableMap(fields);
  }

  /**
   * Constructs {@code PostPolicyV4} instance of the given URL and fields map.
   *
   * @param url URL for the HTTP POST request
   * @param fields HTML form fields
   * @return constructed object
   * @throws IllegalArgumentException if URL is malformed or fields are not valid
   */
  public static PostPolicyV4 of(String url, Map fields) {
    return new PostPolicyV4(url, fields);
  }

  /** Returns the URL for the HTTP POST request */
  public String getUrl() {
    return url;
  }

  /** Returns the HTML form fields */
  public Map getFields() {
    return fields;
  }

  /**
   * A helper class to define fields to be specified in a V4 POST request. Instance of this class
   * helps to construct {@code PostPolicyV4} objects. Used in: {@link
   * Storage#generateSignedPostPolicyV4(BlobInfo, long, TimeUnit, PostPolicyV4.PostFieldsV4,
   * PostPolicyV4.PostConditionsV4, Storage.PostPolicyV4Option...)}.
   *
   * @see <a href="https://cloud.google.com/storage/docs/xml-api/post-object#form_fields">POST
   *     Object Form fields</a>
   */
  public static final class PostFieldsV4 {
    private final Map fieldsMap;

    private PostFieldsV4(Builder builder) {
      this(builder.fieldsMap);
    }

    private PostFieldsV4(Map fields) {
      this.fieldsMap = Collections.unmodifiableMap(fields);
    }

    /**
     * Constructs {@code PostPolicyV4.PostFieldsV4} object of the given field map.
     *
     * @param fields a map of the HTML form fields
     * @return constructed object
     * @throws IllegalArgumentException if an unsupported field is specified
     */
    public static PostFieldsV4 of(Map fields) {
      return new PostFieldsV4(fields);
    }

    public static Builder newBuilder() {
      return new Builder();
    }

    public Map getFieldsMap() {
      return fieldsMap;
    }

    /** Builder for {@link PostFieldsV4}; field names map to the documented form field names. */
    public static class Builder {
      private static final String CUSTOM_FIELD_PREFIX = "x-goog-meta-";
      private final Map fieldsMap;

      private Builder() {
        this.fieldsMap = new HashMap<>();
      }

      public PostFieldsV4 build() {
        return new PostFieldsV4(this);
      }

      public Builder setAcl(String acl) {
        fieldsMap.put("acl", acl);
        return this;
      }

      public Builder setCacheControl(String cacheControl) {
        fieldsMap.put("cache-control", cacheControl);
        return this;
      }

      public Builder setContentDisposition(String contentDisposition) {
        fieldsMap.put("content-disposition", contentDisposition);
        return this;
      }

      public Builder setContentEncoding(String contentEncoding) {
        fieldsMap.put("content-encoding", contentEncoding);
        return this;
      }

      /**
       * @deprecated Invocation of this method has no effect, because all valid HTML form fields
       *     except Content-Length can use exact matching. Use {@link
       *     PostPolicyV4.PostConditionsV4.Builder#addContentLengthRangeCondition(int, int)} to
       *     specify a range for the content-length.
       */
      @Deprecated
      public Builder setContentLength(int contentLength) {
        return this;
      }

      public Builder setContentType(String contentType) {
        fieldsMap.put("content-type", contentType);
        return this;
      }

      /**
       * @deprecated Use {@link #setExpires(String)}.
       */
      @Deprecated
      public Builder Expires(String expires) {
        return setExpires(expires);
      }

      public Builder setExpires(String expires) {
        fieldsMap.put("expires", expires);
        return this;
      }

      public Builder setSuccessActionRedirect(String successActionRedirect) {
        fieldsMap.put("success_action_redirect", successActionRedirect);
        return this;
      }

      public Builder setSuccessActionStatus(int successActionStatus) {
        fieldsMap.put("success_action_status", "" + successActionStatus);
        return this;
      }

      /**
       * @deprecated Use {@link #setCustomMetadataField(String, String)}.
       */
      @Deprecated
      public Builder AddCustomMetadataField(String field, String value) {
        return setCustomMetadataField(field, value);
      }

      // custom metadata fields must carry the x-goog-meta- prefix; add it if absent
      public Builder setCustomMetadataField(String field, String value) {
        if (!field.startsWith(CUSTOM_FIELD_PREFIX)) {
          field = CUSTOM_FIELD_PREFIX + field;
        }
        fieldsMap.put(field, value);
        return this;
      }
    }
  }

  /**
   * A helper class for specifying conditions in a V4 POST Policy document. Used in: {@link
   * Storage#generateSignedPostPolicyV4(BlobInfo, long, TimeUnit, PostPolicyV4.PostFieldsV4,
   * PostPolicyV4.PostConditionsV4, Storage.PostPolicyV4Option...)}.
   *
   * @see <a href="https://cloud.google.com/storage/docs/authentication/signatures#policy-document">
   *     Policy document</a>
   */
  public static final class PostConditionsV4 {
    // defensive copy: the builder's set must not alias this instance's state
    private final Set conditions;

    // NOTE(review): not thread-safe and formats in the default time zone; retained as-is to
    // preserve existing formatting behavior -- confirm before use from multiple threads.
    private static final SimpleDateFormat dateFormat =
        new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");

    public PostConditionsV4(Builder builder) {
      // copy so subsequent mutations of the builder do not leak into this instance
      this.conditions = new LinkedHashSet(builder.conditions);
    }

    public Builder toBuilder() {
      // hand the builder its own copy for the same reason
      return new Builder(new LinkedHashSet(conditions));
    }

    public static Builder newBuilder() {
      return new Builder();
    }

    public Set getConditions() {
      return Collections.unmodifiableSet(conditions);
    }

    /** Builder for {@link PostConditionsV4}; each add* method appends one policy condition. */
    public static class Builder {
      private final Set conditions;

      private Builder() {
        this(new LinkedHashSet());
      }

      private Builder(Set conditions) {
        this.conditions = conditions;
      }

      public static Builder newBuilder() {
        return new Builder();
      }

      public PostConditionsV4 build() {
        return new PostConditionsV4(this);
      }

      public Builder addAclCondition(ConditionV4Type type, String acl) {
        checkType(type, "acl");
        conditions.add(new ConditionV4(type, "acl", acl));
        return this;
      }

      public Builder addBucketCondition(ConditionV4Type type, String bucket) {
        checkType(type, "bucket");
        conditions.add(new ConditionV4(type, "bucket", bucket));
        return this;
      }

      public Builder addCacheControlCondition(ConditionV4Type type, String cacheControl) {
        checkType(type, "cache-control");
        conditions.add(new ConditionV4(type, "cache-control", cacheControl));
        return this;
      }

      public Builder addContentDispositionCondition(
          ConditionV4Type type, String contentDisposition) {
        checkType(type, "content-disposition");
        conditions.add(new ConditionV4(type, "content-disposition", contentDisposition));
        return this;
      }

      public Builder addContentEncodingCondition(ConditionV4Type type, String contentEncoding) {
        checkType(type, "content-encoding");
        conditions.add(new ConditionV4(type, "content-encoding", contentEncoding));
        return this;
      }

      /**
       * @deprecated Invocation of this method has no effect. Use {@link
       *     #addContentLengthRangeCondition(int, int)} to specify a range for the content-length.
       */
      @Deprecated // annotation added to match the documented deprecation
      public Builder addContentLengthCondition(ConditionV4Type type, int contentLength) {
        return this;
      }

      public Builder addContentTypeCondition(ConditionV4Type type, String contentType) {
        checkType(type, "content-type");
        conditions.add(new ConditionV4(type, "content-type", contentType));
        return this;
      }

      /**
       * @deprecated Use {@link #addExpiresCondition(long)}
       */
      @Deprecated
      public Builder addExpiresCondition(ConditionV4Type type, long expires) {
        return addExpiresCondition(expires);
      }

      /**
       * @deprecated Use {@link #addExpiresCondition(String)}
       */
      @Deprecated
      public Builder addExpiresCondition(ConditionV4Type type, String expires) {
        return addExpiresCondition(expires);
      }

      public Builder addExpiresCondition(long expires) {
        // expires is epoch millis; DateFormat.format(Object) accepts a Number
        return addExpiresCondition(dateFormat.format(expires));
      }

      public Builder addExpiresCondition(String expires) {
        conditions.add(new ConditionV4(ConditionV4Type.MATCHES, "expires", expires));
        return this;
      }

      public Builder addKeyCondition(ConditionV4Type type, String key) {
        checkType(type, "key");
        conditions.add(new ConditionV4(type, "key", key));
        return this;
      }

      public Builder addSuccessActionRedirectUrlCondition(
          ConditionV4Type type, String successActionRedirectUrl) {
        checkType(type, "success_action_redirect");
        conditions.add(new ConditionV4(type, "success_action_redirect", successActionRedirectUrl));
        return this;
      }

      /**
       * @deprecated Use {@link #addSuccessActionStatusCondition(int)}
       */
      @Deprecated
      public Builder addSuccessActionStatusCondition(ConditionV4Type type, int status) {
        return addSuccessActionStatusCondition(status);
      }

      public Builder addSuccessActionStatusCondition(int status) {
        conditions.add(
            new ConditionV4(ConditionV4Type.MATCHES, "success_action_status", "" + status));
        return this;
      }

      public Builder addContentLengthRangeCondition(int min, int max) {
        conditions.add(new ConditionV4(ConditionV4Type.CONTENT_LENGTH_RANGE, "" + min, "" + max));
        return this;
      }

      Builder addCustomCondition(ConditionV4Type type, String field, String value) {
        conditions.add(new ConditionV4(type, field, value));
        return this;
      }

      // only MATCHES and STARTS_WITH are valid for named form fields
      private void checkType(ConditionV4Type type, String field) {
        if (type != ConditionV4Type.MATCHES && type != ConditionV4Type.STARTS_WITH) {
          throw new IllegalArgumentException("Field " + field + " can't use " + type);
        }
      }
    }
  }

  /**
   * Class for a V4 POST Policy document. Used by Storage to construct {@code PostPolicyV4} objects.
   *
   * @see <a href="https://cloud.google.com/storage/docs/authentication/signatures#policy-document">
   *     Policy document</a>
   */
  public static final class PostPolicyV4Document {
    private final String expiration;
    private final PostConditionsV4 conditions;

    private PostPolicyV4Document(String expiration, PostConditionsV4 conditions) {
      this.expiration = expiration;
      this.conditions = conditions;
    }

    public static PostPolicyV4Document of(String expiration, PostConditionsV4 conditions) {
      return new PostPolicyV4Document(expiration, conditions);
    }

    /**
     * Serializes this policy document to the JSON form GCS expects, escaping characters the
     * policy-document signing rules require to be escaped.
     */
    public String toJson() {
      JsonObject object = new JsonObject();
      JsonArray conditions = new JsonArray();
      for (ConditionV4 condition : this.conditions.conditions) {
        switch (condition.type) {
          case MATCHES:
            JsonObject match = new JsonObject();
            match.addProperty(condition.operand1, condition.operand2);
            conditions.add(match);
            break;
          case STARTS_WITH:
            JsonArray startsWith = new JsonArray();
            startsWith.add("starts-with");
            startsWith.add("$" + condition.operand1);
            startsWith.add(condition.operand2);
            conditions.add(startsWith);
            break;
          case CONTENT_LENGTH_RANGE:
            JsonArray contentLengthRange = new JsonArray();
            contentLengthRange.add("content-length-range");
            contentLengthRange.add(Integer.parseInt(condition.operand1));
            contentLengthRange.add(Integer.parseInt(condition.operand2));
            conditions.add(contentLengthRange);
            break;
        }
      }
      object.add("conditions", conditions);
      object.addProperty("expiration", expiration);

      String json = object.toString();
      StringBuilder escapedJson = new StringBuilder();

      // Certain characters in a policy must be escaped
      char[] jsonArray = json.toCharArray();
      for (int i = 0; i < jsonArray.length; i++) {
        char c = jsonArray[i];
        if (c >= 128) { // is a unicode character
          escapedJson.append(String.format(Locale.US, "\\u%04x", (int) c));
        } else {
          switch (c) {
            case '\\':
              // The JsonObject/JsonArray operations above handle quote escapes, so leave any "\""
              // found alone. Bounds guard added: a trailing backslash (cannot occur in valid
              // gson output, which ends with '}') is escaped rather than read out of bounds.
              if (i + 1 < jsonArray.length && jsonArray[i + 1] == '"') {
                escapedJson.append("\\");
              } else {
                escapedJson.append("\\\\");
              }
              break;
            case '\b':
              escapedJson.append("\\b");
              break;
            case '\f':
              escapedJson.append("\\f");
              break;
            case '\n':
              escapedJson.append("\\n");
              break;
            case '\r':
              escapedJson.append("\\r");
              break;
            case '\t':
              escapedJson.append("\\t");
              break;
            case '\u000b':
              escapedJson.append("\\v");
              break;
            default:
              escapedJson.append(c);
          }
        }
      }
      return escapedJson.toString();
    }
  }

  /** Condition kinds allowed in a V4 policy document. */
  public enum ConditionV4Type {
    MATCHES("eq"),
    STARTS_WITH("starts-with"),
    CONTENT_LENGTH_RANGE("content-length-range");

    private final String name;

    ConditionV4Type(String name) {
      this.name = name;
    }

    @Override
    public String toString() {
      return name;
    }
  }

  /**
   * Class for a specific POST policy document condition.
   *
   * @see <a href="https://cloud.google.com/storage/docs/authentication/signatures#policy-document">
   *     Policy document</a>
   */
  public static final class ConditionV4 {
    public final ConditionV4Type type;
    public final String operand1;
    public final String operand2;

    ConditionV4(ConditionV4Type type, String operand1, String operand2) {
      this.type = type;
      this.operand1 = operand1;
      this.operand2 = operand2;
    }

    /**
     * {@inheritDoc}
     *
     * <p>Fixed to honor the {@link Object#equals(Object)} contract: previously an unchecked cast
     * threw {@link ClassCastException} for non-{@code ConditionV4} arguments and NPE for null;
     * it now returns {@code false} for both.
     */
    @Override
    public boolean equals(Object other) {
      if (this == other) {
        return true;
      }
      if (!(other instanceof ConditionV4)) {
        return false;
      }
      ConditionV4 condition = (ConditionV4) other;
      return this.type == condition.type
          && this.operand1.equals(condition.operand1)
          && this.operand2.equals(condition.operand2);
    }

    @Override
    public int hashCode() {
      return Objects.hash(type, operand1, operand2);
    }

    /**
     * Examples of returned strings: {@code ["eq", "$key", "test-object"]}, {@code ["starts-with",
     * "$acl", "public"]}, {@code ["content-length-range", 246, 266]}.
     */
    @Override
    public String toString() {
      String body =
          type == ConditionV4Type.CONTENT_LENGTH_RANGE
              ? operand1 + ", " + operand2
              : "\"$" + operand1 + "\", \"" + operand2 + "\"";
      return "[\"" + type + "\", " + body + "]";
    }
  }
}
diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RangeSpec.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RangeSpec.java new file mode 100644 index 000000000000..a945e3af7156 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RangeSpec.java @@ -0,0 +1,233 @@
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.api.core.BetaApi; +import com.google.common.base.MoreObjects; +import java.util.Objects; +import java.util.OptionalLong; +import javax.annotation.concurrent.Immutable; +import org.checkerframework.checker.nullness.qual.NonNull; + +/** + * Defines a range with a begin offset and optional maximum length. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ +@BetaApi +@Immutable +public abstract class RangeSpec { + // seal this class to extension + private RangeSpec() {} + + /** + * The beginning of the range. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public abstract long begin(); + + /** + * The max length of the range if defined. + * + * @see RangeSpecWithMaxLength + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public abstract OptionalLong maxLength(); + + /** + * Create a new instance of {@link RangeSpec} keeping {@code this.begin()} and with {@code + * maxLength} as its new maxLength. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @NonNull + @BetaApi + public abstract RangeSpec withMaxLength(long maxLength); + + /** + * {@inheritDoc} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @Override + public abstract boolean equals(Object o); + + /** + * {@inheritDoc} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @Override + public abstract int hashCode(); + + /** + * {@inheritDoc} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. 
+ */ + @Override + public abstract String toString(); + + /** + * Create a new RangeSpec with the provided {@code begin}. + * + * @param begin The beginning of the range, must be >= 0 + * @throws IllegalArgumentException if begin is < 0 + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @NonNull + @BetaApi + public static RangeSpec beginAt(long begin) { + checkArgument(begin >= 0, "range being must be >= 0 (range begin = %s)", begin); + return new RangeSpecWithoutLimit(begin); + } + + /** + * Create a new RangeSpec with the provided {@code begin} and {@code maxLength}. + * + * @param begin The beginning of the range, must be >= 0 + * @param maxLength The max length of the range, must be >= 0. 0 means no limit. + * @throws IllegalArgumentException if begin is < 0, or if maxLength is < 0 + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @NonNull + @BetaApi + public static RangeSpec of(long begin, long maxLength) { + checkArgument(begin >= 0, "range being must be >= 0 (range begin = %s)", begin); + checkArgument(maxLength >= 0, "range maxLength must be >= 0 (range maxLength = %s)", maxLength); + if (maxLength == 0) { + return new RangeSpecWithoutLimit(begin); + } + return new RangeSpecWithMaxLength(begin, maxLength); + } + + /** + * A RangeSpec that represents to read from {@code 0} to {@code EOF} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. 
+ */ + @NonNull + @BetaApi + public static RangeSpec all() { + return RangeSpecWithoutLimit.ALL; + } + + static final class RangeSpecWithoutLimit extends RangeSpec { + private static final RangeSpecWithoutLimit ALL = new RangeSpecWithoutLimit(0); + private final long begin; + + private RangeSpecWithoutLimit(long begin) { + this.begin = begin; + } + + @Override + public long begin() { + return begin; + } + + @Override + public OptionalLong maxLength() { + return OptionalLong.empty(); + } + + @Override + @NonNull + public RangeSpec withMaxLength(long maxLength) { + checkArgument(maxLength >= 0, "range maxLength must be >= 0 (range limit = %s)", maxLength); + return new RangeSpecWithMaxLength(begin, maxLength); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof RangeSpecWithoutLimit)) { + return false; + } + RangeSpecWithoutLimit that = (RangeSpecWithoutLimit) o; + return begin == that.begin; + } + + @Override + public int hashCode() { + return Objects.hashCode(begin); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("begin", begin).toString(); + } + } + + static final class RangeSpecWithMaxLength extends RangeSpec { + private final long begin; + private final long maxLength; + + private RangeSpecWithMaxLength(long begin, long maxLength) { + this.begin = begin; + this.maxLength = maxLength; + } + + @Override + public long begin() { + return begin; + } + + @Override + public OptionalLong maxLength() { + return OptionalLong.of(maxLength); + } + + @Override + @NonNull + public RangeSpec withMaxLength(long maxLength) { + checkArgument(maxLength >= 0, "range maxLength must be >= 0 (range limit = %s)", maxLength); + return new RangeSpecWithMaxLength(begin, maxLength); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof RangeSpecWithMaxLength)) { + return false; + } + RangeSpecWithMaxLength that = 
(RangeSpecWithMaxLength) o; + return begin == that.begin && maxLength == that.maxLength; + } + + @Override + public int hashCode() { + return Objects.hash(begin, maxLength); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(RangeSpec.class) + .add("begin", begin) + .add("maxLength", maxLength) + .toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RangeSpecFunction.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RangeSpecFunction.java new file mode 100644 index 000000000000..7fa1c0357491 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RangeSpecFunction.java @@ -0,0 +1,89 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static java.util.Objects.requireNonNull; + +import com.google.api.core.BetaApi; +import com.google.api.core.InternalExtensionOnly; +import javax.annotation.concurrent.Immutable; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * A specialized BiFunction to produce a {@link RangeSpec} given an offset and a possible previous + * {@code RangeSpec}. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. 
+ */ +@BetaApi +@Immutable +@InternalExtensionOnly +public abstract class RangeSpecFunction { + + RangeSpecFunction() {} + + /** + * Given an offset to read from, and the previously read {@link RangeSpec} return a new {@code + * RangeSpec} representing the range to read next. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + abstract RangeSpec apply(long offset, @Nullable RangeSpec prev); + + /** + * Returns a composed function that first applies this function to its input, and then applies the + * {@code then} function to the result. + * + *

Both functions will be called with the same {@code offset}. + * + *

The returned instance is equivalent to the following: + * + *

+   * {@code then.apply(offset, this.apply(offset, prev))}
+   * 
+ * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public RangeSpecFunction andThen(RangeSpecFunction then) { + requireNonNull(then, "then must be non null"); + return new AndThenRangeSpecFunction(this, then); + } + + /** + * Get the default instance of {@link LinearExponentialRangeSpecFunction}. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static LinearExponentialRangeSpecFunction linearExponential() { + return LinearExponentialRangeSpecFunction.INSTANCE; + } + + /** + * Produce a new {@link MaxLengthRangeSpecFunction} where the maximum possible length of any + * returned {@link RangeSpec} is set to the lesser of {@code prev.maxLength} and {@code + * this.maxLength}. + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static MaxLengthRangeSpecFunction maxLength(long maxLength) { + return MaxLengthRangeSpecFunction.INSTANCE.withMaxLength(maxLength); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadAsChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadAsChannel.java new file mode 100644 index 000000000000..48f2e1582777 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadAsChannel.java @@ -0,0 +1,156 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static java.util.Objects.requireNonNull; + +import com.google.api.core.BetaApi; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.StreamingRead; +import com.google.cloud.storage.ReadProjectionConfigs.BaseConfig; +import com.google.common.base.MoreObjects; +import java.nio.channels.ScatteringByteChannel; +import java.util.Objects; +import javax.annotation.concurrent.Immutable; + +/** + * Read a range of {@code byte}s as a non-blocking {@link ScatteringByteChannel} + * + *

The returned channel will be non-blocking for all read calls. If bytes have not yet + * asynchronously been delivered from Google Cloud Storage the method will return rather than + * waiting for the bytes to arrive. + * + *

The resulting {@link ScatteringByteChannel} MUST be {@link ScatteringByteChannel#close() + * close()}ed to avoid leaking memory + * + *

Instances of this class are immutable and thread safe. + * + * @see ReadProjectionConfigs#asChannel() + * @see BlobReadSession#readAs(ReadProjectionConfig) + * @see ScatteringByteChannel + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ +@BetaApi +@Immutable +public final class ReadAsChannel extends BaseConfig { + static final ReadAsChannel INSTANCE = new ReadAsChannel(RangeSpec.all(), Hasher.enabled()); + + private final RangeSpec range; + private final Hasher hasher; + + private ReadAsChannel(RangeSpec range, Hasher hasher) { + super(); + this.range = range; + this.hasher = hasher; + } + + /** + * The {@link RangeSpec} to be used for any read using this instance. + * + *

Default: {@link RangeSpec#all()} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public RangeSpec getRange() { + return range; + } + + /** + * Return an instance with the {@link RangeSpec} set to the specified value. + * + *

Default: {@link RangeSpec#all()} + * + * @param range The {@link RangeSpec} to be used for any read using the returned instance. Must be + * non-null. + * @see #getRange() + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public ReadAsChannel withRangeSpec(RangeSpec range) { + requireNonNull(range, "range must be non null"); + if (this.range.equals(range)) { + return this; + } + return new ReadAsChannel(range, hasher); + } + + /** + * Whether crc32c validation will be performed for bytes returned by Google Cloud Storage + * + *

Default: {@code true} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + boolean getCrc32cValidationEnabled() { + return Hasher.enabled().equals(hasher); + } + + /** + * Return an instance with crc32c validation enabled based on {@code enabled}. + * + *

Default: {@code true} + * + * @param enabled Whether crc32c validation will be performed for bytes returned by Google Cloud + * Storage + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + ReadAsChannel withCrc32cValidationEnabled(boolean enabled) { + if (enabled && Hasher.enabled().equals(hasher)) { + return this; + } else if (!enabled && Hasher.noop().equals(hasher)) { + return this; + } + return new ReadAsChannel(range, enabled ? Hasher.enabled() : Hasher.noop()); + } + + @Override + BaseConfig cast() { + return this; + } + + @Override + StreamingRead newRead(long readId, RetryContext retryContext) { + return ObjectReadSessionStreamRead.streamingRead(readId, range, hasher, retryContext); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ReadAsChannel)) { + return false; + } + ReadAsChannel that = (ReadAsChannel) o; + return Objects.equals(range, that.range) && Objects.equals(hasher, that.hasher); + } + + @Override + public int hashCode() { + return Objects.hash(range, hasher); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("range", range) + .add("crc32cValidationEnabled", getCrc32cValidationEnabled()) + .toString(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadAsFutureByteString.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadAsFutureByteString.java new file mode 100644 index 000000000000..aa7eee536d1c --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadAsFutureByteString.java @@ -0,0 +1,157 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.AccumulatingRead; +import com.google.cloud.storage.ReadProjectionConfigs.BaseConfig; +import com.google.cloud.storage.ZeroCopySupport.DisposableByteString; +import com.google.common.base.MoreObjects; +import java.util.Objects; +import javax.annotation.concurrent.Immutable; + +/** + * Read a range of {@code byte}s as an {@link ApiFuture}{@code <}{@link DisposableByteString}{@code + * >} + * + *

The resulting {@link DisposableByteString} MUST be {@link DisposableByteString#close() + * close()}ed to avoid leaking memory + * + *

Instances of this class are immutable and thread safe. + * + * @see ReadProjectionConfigs#asFutureBytes() + * @see BlobReadSession#readAs(ReadProjectionConfig) + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ +@BetaApi +@Immutable +public final class ReadAsFutureByteString + extends BaseConfig, AccumulatingRead> { + + static final ReadAsFutureByteString INSTANCE = + new ReadAsFutureByteString(RangeSpec.all(), Hasher.enabled()); + + private final RangeSpec range; + private final Hasher hasher; + + private ReadAsFutureByteString(RangeSpec range, Hasher hasher) { + super(); + this.range = range; + this.hasher = hasher; + } + + /** + * The {@link RangeSpec} to be used for any read using this instance. + * + *

Default: {@link RangeSpec#all()} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public RangeSpec getRange() { + return range; + } + + /** + * Return an instance with the {@link RangeSpec} set to the specified value. + * + *

Default: {@link RangeSpec#all()} + * + * @param range The {@link RangeSpec} to be used for any read using the returned instance. Must be + * non-null. + * @see #getRange() + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public ReadAsFutureByteString withRangeSpec(RangeSpec range) { + requireNonNull(range, "range must be non null"); + if (this.range.equals(range)) { + return this; + } + return new ReadAsFutureByteString(range, hasher); + } + + /** + * Whether crc32c validation will be performed for bytes returned by Google Cloud Storage + * + *

Default: {@code true} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + boolean getCrc32cValidationEnabled() { + return Hasher.enabled().equals(hasher); + } + + /** + * Return an instance with crc32c validation enabled based on {@code enabled}. + * + *

Default: {@code true} + * + * @param enabled Whether crc32c validation will be performed for bytes returned by Google Cloud + * Storage + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + ReadAsFutureByteString withCrc32cValidationEnabled(boolean enabled) { + if (enabled && Hasher.enabled().equals(hasher)) { + return this; + } else if (!enabled && Hasher.noop().equals(hasher)) { + return this; + } + return new ReadAsFutureByteString(range, enabled ? Hasher.enabled() : Hasher.noop()); + } + + @Override + BaseConfig, ?> cast() { + return this; + } + + @Override + AccumulatingRead newRead(long readId, RetryContext retryContext) { + return ObjectReadSessionStreamRead.createZeroCopyByteStringAccumulatingRead( + readId, range, hasher, retryContext); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ReadAsFutureByteString)) { + return false; + } + ReadAsFutureByteString that = (ReadAsFutureByteString) o; + return Objects.equals(range, that.range) && Objects.equals(hasher, that.hasher); + } + + @Override + public int hashCode() { + return Objects.hash(range, hasher); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("range", range) + .add("crc32cValidationEnabled", getCrc32cValidationEnabled()) + .toString(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadAsFutureBytes.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadAsFutureBytes.java new file mode 100644 index 000000000000..722b01bc2972 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadAsFutureBytes.java @@ -0,0 +1,152 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.AccumulatingRead; +import com.google.cloud.storage.ReadProjectionConfigs.BaseConfig; +import com.google.common.base.MoreObjects; +import java.util.Objects; +import javax.annotation.concurrent.Immutable; + +/** + * Read a range of {@code byte}s as an {@link ApiFuture}{@code } + * + *

Instances of this class are immutable and thread safe. + * + * @see ReadProjectionConfigs#asFutureBytes() + * @see BlobReadSession#readAs(ReadProjectionConfig) + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ +@BetaApi +@Immutable +public final class ReadAsFutureBytes + extends BaseConfig, AccumulatingRead> { + + static final ReadAsFutureBytes INSTANCE = + new ReadAsFutureBytes(RangeSpec.all(), Hasher.enabled()); + + private final RangeSpec range; + private final Hasher hasher; + + private ReadAsFutureBytes(RangeSpec range, Hasher hasher) { + super(); + this.range = range; + this.hasher = hasher; + } + + /** + * The {@link RangeSpec} to be used for any read using this instance. + * + *

Default: {@link RangeSpec#all()} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public RangeSpec getRange() { + return range; + } + + /** + * Return an instance with the {@link RangeSpec} set to the specified value. + * + *

Default: {@link RangeSpec#all()} + * + * @param range The {@link RangeSpec} to be used for any read using the returned instance. Must be + * non-null. + * @see #getRange() + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public ReadAsFutureBytes withRangeSpec(RangeSpec range) { + requireNonNull(range, "range must be non null"); + if (this.range.equals(range)) { + return this; + } + return new ReadAsFutureBytes(range, hasher); + } + + /** + * Whether crc32c validation will be performed for bytes returned by Google Cloud Storage + * + *

Default: {@code true} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + boolean getCrc32cValidationEnabled() { + return Hasher.enabled().equals(hasher); + } + + /** + * Return an instance with crc32c validation enabled based on {@code enabled}. + * + *

Default: {@code true} + * + * @param enabled Whether crc32c validation will be performed for bytes returned by Google Cloud + * Storage + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + ReadAsFutureBytes withCrc32cValidationEnabled(boolean enabled) { + if (enabled && Hasher.enabled().equals(hasher)) { + return this; + } else if (!enabled && Hasher.noop().equals(hasher)) { + return this; + } + return new ReadAsFutureBytes(range, enabled ? Hasher.enabled() : Hasher.noop()); + } + + @Override + BaseConfig, ?> cast() { + return this; + } + + @Override + AccumulatingRead newRead(long readId, RetryContext retryContext) { + return ObjectReadSessionStreamRead.createByteArrayAccumulatingRead( + readId, range, hasher, retryContext); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ReadAsFutureBytes)) { + return false; + } + ReadAsFutureBytes that = (ReadAsFutureBytes) o; + return Objects.equals(range, that.range) && Objects.equals(hasher, that.hasher); + } + + @Override + public int hashCode() { + return Objects.hash(range, hasher); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("range", range) + .add("crc32cValidationEnabled", getCrc32cValidationEnabled()) + .toString(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadAsSeekableChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadAsSeekableChannel.java new file mode 100644 index 000000000000..b0b098e7afee --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadAsSeekableChannel.java @@ -0,0 +1,151 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static java.util.Objects.requireNonNull; + +import com.google.api.core.BetaApi; +import com.google.common.base.MoreObjects; +import java.nio.channels.SeekableByteChannel; +import java.util.Objects; +import javax.annotation.concurrent.Immutable; + +/** + * Read from the object as a {@link SeekableByteChannel} + * + *

The returned channel will be non-blocking for all read calls. If bytes have not yet + * asynchronously been delivered from Google Cloud Storage the method will return rather than + * waiting for the bytes to arrive. + * + *

The resulting {@link SeekableByteChannel} MUST be {@link SeekableByteChannel#close() + * close()}ed to avoid leaking memory + * + *

Instances of this class are immutable and thread safe. + * + * @see ReadProjectionConfigs#asSeekableChannel() + * @see BlobReadSession#readAs(ReadProjectionConfig) + * @see SeekableByteChannel + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ +@BetaApi +@Immutable +public final class ReadAsSeekableChannel extends ReadProjectionConfig { + + static final ReadAsSeekableChannel INSTANCE = + new ReadAsSeekableChannel(Hasher.enabled(), LinearExponentialRangeSpecFunction.INSTANCE); + + private final Hasher hasher; + private final RangeSpecFunction rangeSpecFunction; + + private ReadAsSeekableChannel(Hasher hasher, RangeSpecFunction rangeSpecFunction) { + this.hasher = hasher; + this.rangeSpecFunction = rangeSpecFunction; + } + + /** + * Get the {@link RangeSpecFunction} this instance will use to generate {@link RangeSpec}s for + * reading from an object. + * + *

Default: {@link RangeSpecFunction#linearExponential()} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public RangeSpecFunction getRangeSpecFunction() { + return rangeSpecFunction; + } + + /** + * Return an instance with the {@code rangeSpecFunction} set to the specified value. + * + *

Default: {@link RangeSpecFunction#linearExponential()} + * + * @param rangeSpecFunction The {@link RangeSpecFunction} to use to generate {@link RangeSpec}s + * for reading from an object. + * @see #getRangeSpecFunction() + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public ReadAsSeekableChannel withRangeSpecFunction(RangeSpecFunction rangeSpecFunction) { + requireNonNull(rangeSpecFunction, "rangeSpecFunction must be non null"); + return new ReadAsSeekableChannel(hasher, rangeSpecFunction); + } + + /** + * Whether crc32c validation will be performed for bytes returned by Google Cloud Storage + * + *

Default: {@code true} + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + boolean getCrc32cValidationEnabled() { + return Hasher.enabled().equals(hasher); + } + + /** + * Return an instance with crc32c validation enabled based on {@code enabled}. + * + *

Default: {@code true} + * + * @param enabled Whether crc32c validation will be performed for bytes returned by Google Cloud + * Storage + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + ReadAsSeekableChannel withCrc32cValidationEnabled(boolean enabled) { + if (enabled && Hasher.enabled().equals(hasher)) { + return this; + } else if (!enabled && Hasher.noop().equals(hasher)) { + return this; + } + return new ReadAsSeekableChannel(enabled ? Hasher.enabled() : Hasher.noop(), rangeSpecFunction); + } + + @Override + SeekableByteChannel project(ObjectReadSession session, IOAutoCloseable closeAlongWith) { + return StorageByteChannels.seekable( + new ObjectReadSessionSeekableByteChannel(session, this, closeAlongWith)); + } + + @Override + ProjectionType getType() { + return ProjectionType.SESSION_USER; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ReadAsSeekableChannel)) { + return false; + } + ReadAsSeekableChannel that = (ReadAsSeekableChannel) o; + return Objects.equals(rangeSpecFunction, that.rangeSpecFunction); + } + + @Override + public int hashCode() { + return Objects.hashCode(rangeSpecFunction); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("rangeSpecFunction", rangeSpecFunction).toString(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadCursor.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadCursor.java new file mode 100644 index 000000000000..286fe147b90b --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadCursor.java @@ -0,0 +1,71 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.cloud.storage;

import static com.google.common.base.Preconditions.checkArgument;

import java.util.Locale;

/**
 * Shrink wraps a beginning, offset and ending for tracking state of an individual invocation of
 * {@link #read}
 */
final class ReadCursor {
  // Inclusive start of the range; fixed for the lifetime of the cursor.
  private final long begin;
  // Current offset within [begin, end); advances as bytes are consumed.
  private long position;
  // Exclusive end of the range; fixed for the lifetime of the cursor.
  private final long end;

  ReadCursor(long begin, long end) {
    this.end = end;
    this.begin = begin;
    // A fresh cursor starts at the beginning of its range.
    this.position = begin;
  }

  /** True while the position has not yet reached {@code end}. */
  public boolean hasRemaining() {
    return remaining() > 0;
  }

  /** Number of bytes between the current position and the end of the range. */
  public long remaining() {
    return end - position;
  }

  /** Advance the position by {@code incr} bytes; {@code incr} must be non-negative. */
  public void advance(long incr) {
    checkArgument(incr >= 0);
    position += incr;
  }

  /** Total bytes consumed so far (distance from {@code begin} to the current position). */
  public long read() {
    return position - begin;
  }

  public long begin() {
    return begin;
  }

  public long position() {
    return position;
  }

  public long end() {
    return end;
  }

  @Override
  public String toString() {
    // Locale.US keeps the formatted digits stable regardless of the default locale.
    return String.format(
        Locale.US, "ReadCursor{begin=%d, position=%d, end=%d}", begin, position, end);
  }
}
diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadProjectionConfig.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadProjectionConfig.java
new file mode 100644
index 000000000000..4812adaa0ec1
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadProjectionConfig.java
@@ -0,0 +1,60 @@
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use
this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.BetaApi; +import com.google.api.core.InternalExtensionOnly; +import com.google.cloud.storage.ReadProjectionConfigs.BaseConfig; +import com.google.cloud.storage.Storage.BlobSourceOption; +import java.util.Locale; + +/** + * Base class to represent a config for reading from a {@link BlobReadSession}. + * + * @param The type used to provide access to the bytes being read + * @see ReadProjectionConfigs + * @see BlobReadSession + * @see Storage#blobReadSession(BlobId, BlobSourceOption...) + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ +@BetaApi +@InternalExtensionOnly +public abstract class ReadProjectionConfig { + + ReadProjectionConfig() {} + + BaseConfig cast() { + throw new UnsupportedOperationException(String.format("%s#cast()", this.getClass().getName())); + } + + abstract ProjectionType getType(); + + Projection project(ObjectReadSession session, IOAutoCloseable closeAlongWith) { + throw new UnsupportedOperationException( + String.format(Locale.US, "%s#project()", this.getClass().getName())); + } + + enum ProjectionType { + /** Those projections which translate to a direct read registered in the state of the stream */ + STREAM_READ, + /** + * Those projections which use an ObjectReadSession rather than directly registering a read in + * the stream state. 
+ */ + SESSION_USER + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadProjectionConfigs.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadProjectionConfigs.java new file mode 100644 index 000000000000..1156cf7e39b9 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadProjectionConfigs.java @@ -0,0 +1,128 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.ZeroCopySupport.DisposableByteString; +import java.nio.channels.ScatteringByteChannel; +import java.nio.channels.SeekableByteChannel; + +/** + * Factory class to select {@link ReadProjectionConfig}s. + * + *

There are multiple projections which can be used to access the content of a {@link BlobInfo} + * in Google Cloud Storage. + * + * @see Storage#blobReadSession(BlobId, BlobSourceOption...) + * @see BlobReadSession + * @see ReadProjectionConfig + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ +@BetaApi +public final class ReadProjectionConfigs { + + private ReadProjectionConfigs() {} + + abstract static class BaseConfig> + extends ReadProjectionConfig { + + BaseConfig() {} + + abstract Read newRead(long readId, RetryContext retryContext); + + @Override + ProjectionType getType() { + return ProjectionType.STREAM_READ; + } + } + + /** + * Read a range as a non-blocking {@link ScatteringByteChannel}. + * + *

The returned channel will be non-blocking for all read calls. If bytes have not yet + * asynchronously been delivered from Google Cloud Storage the method will return rather than + * waiting for the bytes to arrive. + * + *

The resulting {@link ScatteringByteChannel} MUST be {@link ScatteringByteChannel#close() + * close()}ed to avoid leaking memory + * + * @see ReadAsChannel + * @see ScatteringByteChannel + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static ReadAsChannel asChannel() { + return ReadAsChannel.INSTANCE; + } + + /** + * Read a range of {@code byte}s as an {@link ApiFuture}{@code } + * + *

The entire range will be accumulated in memory before the future will resolve. + * + *

If you do not want the entire range accumulated in memory, please use one of the other + * {@link ReadProjectionConfig}s available. + * + * @see ApiFuture + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static ReadAsFutureBytes asFutureBytes() { + return ReadAsFutureBytes.INSTANCE; + } + + /** + * Read a range of {@code byte}s as an {@link ApiFuture}{@code <}{@link + * DisposableByteString}{@code >} + * + *

The resulting {@link DisposableByteString} MUST be {@link DisposableByteString#close() + * close()}ed to avoid leaking memory + * + *

The entire range will be accumulated in memory before the future will resolve. + * + *

If you do not want the entire range accumulated in memory, please use one of the other + * {@link ReadProjectionConfig}s available. + * + * @see ApiFuture + * @see com.google.protobuf.ByteString + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static ReadAsFutureByteString asFutureByteString() { + return ReadAsFutureByteString.INSTANCE; + } + + /** + * Read from the object as a {@link SeekableByteChannel} + * + *

The returned channel will be non-blocking for all read calls. If bytes have not yet + * asynchronously been delivered from Google Cloud Storage the method will return rather than + * waiting for the bytes to arrive. + * + *

The resulting {@link SeekableByteChannel} MUST be {@link SeekableByteChannel#close() + * close()}ed to avoid leaking memory + * + * @see SeekableByteChannel + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public static ReadAsSeekableChannel asSeekableChannel() { + return ReadAsSeekableChannel.INSTANCE; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadableByteChannelSession.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadableByteChannelSession.java new file mode 100644 index 000000000000..21899d867a2c --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ReadableByteChannelSession.java @@ -0,0 +1,32 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.rpc.ApiExceptions; +import java.nio.channels.ReadableByteChannel; + +interface ReadableByteChannelSession { + + default RBC open() { + return ApiExceptions.callAndTranslateApiException(openAsync()); + } + + ApiFuture openAsync(); + + ApiFuture getResult(); +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RecoveryFile.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RecoveryFile.java new file mode 100644 index 000000000000..75884657f797 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RecoveryFile.java @@ -0,0 +1,126 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableSet; +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.channels.GatheringByteChannel; +import java.nio.channels.SeekableByteChannel; +import java.nio.channels.WritableByteChannel; +import java.nio.file.Files; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Set; + +/** + * When uploading to GCS, there are times when memory buffers are not preferable. 
This class + * encapsulates the logic and lifecycle for a file written to local disk which can be used for + * upload recovery in the case an upload is interrupted. + */ +final class RecoveryFile implements AutoCloseable { + private static final Set writeOpsNew = + ImmutableSet.of( + StandardOpenOption.CREATE, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING); + private static final Set writeOpsExisting = + ImmutableSet.of( + StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.APPEND); + private static final Set readOps = ImmutableSet.of(StandardOpenOption.READ); + + private final Path path; + private final ThroughputSink throughputSink; + private final Runnable onCloseCallback; + + private boolean newFile; + + RecoveryFile(Path path, ThroughputSink throughputSink, Runnable onCloseCallback) { + this.path = path; + this.throughputSink = throughputSink; + this.onCloseCallback = onCloseCallback; + this.newFile = true; + } + + public Path getPath() { + return path; + } + + public Path touch() throws IOException { + newFile = false; + return Files.createFile(path); + } + + public SeekableByteChannel reader() throws IOException { + return Files.newByteChannel(path, readOps); + } + + public WritableByteChannel writer() throws IOException { + try { + return throughputSink.decorate( + Files.newByteChannel(path, newFile ? writeOpsNew : writeOpsExisting)); + } finally { + newFile = false; + } + } + + public GatheringByteChannel syncingChannel() throws IOException { + try { + return throughputSink.decorate( + new SyncingFileChannel(FileChannel.open(path, newFile ? 
writeOpsNew : writeOpsExisting))); + } finally { + newFile = false; + } + } + + @Override + public void close() throws IOException { + Files.deleteIfExists(path); + onCloseCallback.run(); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("path", path) + .add("throughputSink", throughputSink) + .add("onCloseCallback", onCloseCallback) + .toString(); + } + + Unsafe unsafe() { + return new Unsafe(); + } + + final class Unsafe { + public Path touch() throws UnsafeIOException { + try { + return RecoveryFile.this.touch(); + } catch (IOException e) { + throw new UnsafeIOException(e); + } + } + } + + static final class UnsafeIOException extends RuntimeException { + private UnsafeIOException(IOException cause) { + super(cause); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RecoveryFileManager.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RecoveryFileManager.java new file mode 100644 index 000000000000..51b17128b1ed --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RecoveryFileManager.java @@ -0,0 +1,119 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.common.collect.ImmutableList; +import com.google.common.hash.HashCode; +import com.google.common.hash.HashFunction; +import com.google.common.hash.Hasher; +import com.google.common.hash.Hashing; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +final class RecoveryFileManager { + + private final ImmutableList volumes; + + /** Keep track of active info and file */ + private final Map files; + + private final HashFunction hashFunction; + + /** + * Round-robin assign recovery files to the configured volumes. Use this index to keep track of + * which volume to assign to next. + */ + private int nextVolumeIndex; + + private RecoveryFileManager(List volumes) { + this.volumes = ImmutableList.copyOf(volumes); + this.files = Collections.synchronizedMap(new HashMap<>()); + this.nextVolumeIndex = 0; + this.hashFunction = Hashing.goodFastHash(64); + } + + @SuppressWarnings("UnstableApiUsage") + public RecoveryFile newRecoveryFile(BlobInfo info) { + int i = getNextVolumeIndex(); + RecoveryVolume v = volumes.get(i); + UUID uuid = UUID.randomUUID(); + String string = uuid.toString(); + Hasher hasher = hashFunction.newHasher(); + HashCode hash = hasher.putString(string, StandardCharsets.UTF_8).hash(); + String fileName = Base64.getUrlEncoder().encodeToString(hash.asBytes()); + Path path = v.basePath.resolve(fileName); + RecoveryFile recoveryFile = new RecoveryFile(path, v.sink, () -> files.remove(info)); + files.put(info, recoveryFile); + return recoveryFile; + } + + private synchronized int getNextVolumeIndex() { + return nextVolumeIndex = (nextVolumeIndex + 1) % volumes.size(); + } + + static RecoveryFileManager 
of(List volumes) throws IOException { + return of(volumes, p -> ThroughputSink.nullSink()); + } + + static RecoveryFileManager of(List volumes, RecoveryVolumeSinkFactory factory) + throws IOException { + checkArgument(!volumes.isEmpty(), "At least one volume must be specified"); + checkArgument( + volumes.stream().allMatch(p -> !Files.exists(p) || Files.isDirectory(p)), + "All provided volumes must either:\n1. Not yet exists\n2. Be directories"); + + for (Path v : volumes) { + if (!Files.exists(v)) { + Files.createDirectories(v); + } + } + ImmutableList recoveryVolumes = + volumes.stream() + .map(p -> RecoveryVolume.of(p, factory.apply(p))) + .collect(ImmutableList.toImmutableList()); + return new RecoveryFileManager(recoveryVolumes); + } + + @FunctionalInterface + interface RecoveryVolumeSinkFactory { + ThroughputSink apply(Path p); + } + + static final class RecoveryVolume { + private final Path basePath; + private final ThroughputSink sink; + + private RecoveryVolume(Path basePath, ThroughputSink sink) { + this.basePath = basePath; + this.sink = sink; + } + + public static RecoveryVolume of(Path basePath, ThroughputSink sink) { + return new RecoveryVolume(basePath, sink); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RequestBody.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RequestBody.java new file mode 100644 index 000000000000..6dfbff2eccdd --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RequestBody.java @@ -0,0 +1,64 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.InternalExtensionOnly; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Path; + +/** + * The data of a single {@code UploadPart} in a GCS XML MPU. + * + *

Instances of this class are thread-safe and immutable. + * + * @see https://cloud.google.com/storage/docs/multipart-uploads#upload_parts + */ +@InternalExtensionOnly +public final class RequestBody { + + private final RewindableContent content; + + private RequestBody(RewindableContent content) { + this.content = content; + } + + RewindableContent getContent() { + return content; + } + + /** Create a new empty RequestBody. */ + public static RequestBody empty() { + return new RequestBody(RewindableContent.empty()); + } + + /** Create a new RequestBody from the given {@link ByteBuffer}s. */ + public static RequestBody of(ByteBuffer... buffers) { + return new RequestBody(RewindableContent.of(buffers)); + } + + /** Create a new RequestBody from the given {@link ByteBuffer}s. */ + public static RequestBody of(ByteBuffer[] srcs, int srcsOffset, int srcsLength) { + return new RequestBody(RewindableContent.of(srcs, srcsOffset, srcsLength)); + } + + /** Create a new RequestBody from the given {@link Path}. */ + public static RequestBody of(Path path) throws IOException { + return new RequestBody(RewindableContent.of(path)); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResponseContentLifecycleHandle.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResponseContentLifecycleHandle.java new file mode 100644 index 000000000000..8b466f9b7a72 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResponseContentLifecycleHandle.java @@ -0,0 +1,93 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.storage; + +import com.google.cloud.storage.ZeroCopySupport.DisposableByteString; +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import java.io.Closeable; +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class ResponseContentLifecycleHandle implements Closeable { + + private final Response response; + @Nullable private final Closeable dispose; + + private final AtomicBoolean open; + private final AtomicInteger refs; + + private ResponseContentLifecycleHandle(Response response, @Nullable Closeable dispose) { + this.response = response; + this.dispose = dispose; + this.open = new AtomicBoolean(true); + this.refs = new AtomicInteger(1); + } + + static ResponseContentLifecycleHandle create( + Response response, @Nullable Closeable dispose) { + return new ResponseContentLifecycleHandle<>(response, dispose); + } + + ChildRef borrow(Function toByteStringFunction) { + Preconditions.checkState(open.get(), "only able to borrow when open"); + Preconditions.checkNotNull(toByteStringFunction); + ChildRef childRef = new ChildRef(toByteStringFunction); + refs.incrementAndGet(); + return childRef; + } + + @Override + public void close() throws IOException { + if (open.getAndSet(false)) { + int newCount = refs.decrementAndGet(); + if (newCount == 0) { + dispose(); + } + } + } + + private void dispose() throws 
IOException { + if (dispose != null) { + dispose.close(); + } + } + + final class ChildRef implements Closeable, DisposableByteString { + + private final Function toByteStringFunction; + + private ChildRef(Function toByteStringFunction) { + this.toByteStringFunction = toByteStringFunction; + } + + @Override + public ByteString byteString() { + return toByteStringFunction.apply(response); + } + + @Override + public void close() throws IOException { + int newCount = refs.decrementAndGet(); + if (newCount == 0) { + ResponseContentLifecycleHandle.this.dispose(); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResponseContentLifecycleManager.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResponseContentLifecycleManager.java new file mode 100644 index 000000000000..c18d6327a8d7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResponseContentLifecycleManager.java @@ -0,0 +1,46 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.storage; + +import com.google.storage.v2.BidiReadObjectResponse; +import com.google.storage.v2.ReadObjectResponse; +import java.io.Closeable; +import java.io.IOException; + +interface ResponseContentLifecycleManager extends Closeable { + ResponseContentLifecycleHandle get(Response response); + + @Override + default void close() throws IOException {} + + static ResponseContentLifecycleManager noop() { + return response -> + ResponseContentLifecycleHandle.create( + response, + () -> { + // no-op + }); + } + + static ResponseContentLifecycleManager noopBidiReadObjectResponse() { + return response -> + ResponseContentLifecycleHandle.create( + response, + () -> { + // no-op + }); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableMedia.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableMedia.java new file mode 100644 index 000000000000..4d1a53bba25a --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableMedia.java @@ -0,0 +1,107 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.spi.v1.StorageRpc; +import java.net.URL; +import java.util.Map; +import java.util.function.Supplier; + +final class ResumableMedia { + + static Supplier startUploadForBlobInfo( + final HttpStorageOptions storageOptions, + final BlobInfo blob, + final Map optionsMap, + final RetrierWithAlg retrier) { + return () -> + retrier.run( + () -> + storageOptions + .getStorageRpcV1() + .open(Conversions.json().blobInfo().encode(blob), optionsMap), + Decoder.identity()); + } + + static Supplier startUploadForSignedUrl( + final HttpStorageOptions storageOptions, final URL signedURL, final RetrierWithAlg retrier) { + if (!isValidSignedURL(signedURL.getQuery())) { + throw new StorageException(2, "invalid signedURL"); + } + return () -> + retrier.run( + () -> storageOptions.getStorageRpcV1().open(signedURL.toString()), Decoder.identity()); + } + + static GapicMediaSession gapic() { + return GapicMediaSession.INSTANCE; + } + + static HttpMediaSession http() { + return HttpMediaSession.INSTANCE; + } + + private static boolean isValidSignedURL(String signedURLQuery) { + boolean isValid = true; + if (signedURLQuery.startsWith("X-Goog-Algorithm=")) { + if (!signedURLQuery.contains("&X-Goog-Credential=") + || !signedURLQuery.contains("&X-Goog-Date=") + || !signedURLQuery.contains("&X-Goog-Expires=") + || !signedURLQuery.contains("&X-Goog-SignedHeaders=") + || !signedURLQuery.contains("&X-Goog-Signature=")) { + isValid = false; + } + } else if (signedURLQuery.startsWith("GoogleAccessId=")) { + if (!signedURLQuery.contains("&Expires=") || !signedURLQuery.contains("&Signature=")) { + isValid = false; + } + } else { + isValid = false; + } + return isValid; + } + + static final class GapicMediaSession { + private static final GapicMediaSession INSTANCE = new GapicMediaSession(); + + private 
GapicMediaSession() {} + + GapicUploadSessionBuilder write() { + return GapicUploadSessionBuilder.create(); + } + + GapicDownloadSessionBuilder read() { + return GapicDownloadSessionBuilder.create(); + } + } + + static final class HttpMediaSession { + private static final HttpMediaSession INSTANCE = new HttpMediaSession(); + + private HttpMediaSession() {} + + HttpUploadSessionBuilder write() { + return HttpUploadSessionBuilder.create(); + } + + HttpDownloadSessionBuilder read() { + return HttpDownloadSessionBuilder.create(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableOperationResult.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableOperationResult.java new file mode 100644 index 000000000000..88b5b7565eaa --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableOperationResult.java @@ -0,0 +1,90 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.common.base.MoreObjects; +import org.checkerframework.checker.nullness.qual.Nullable; + +abstract class ResumableOperationResult<@Nullable T> { + + private ResumableOperationResult() {} + + abstract @Nullable T getObject(); + + abstract long getPersistedSize(); + + static ResumableOperationResult complete(T t, long persistedSize) { + return new CompletedResult<>(t, persistedSize); + } + + static <@Nullable T> ResumableOperationResult incremental(long persistedSize) { + return new IncrementalResult<>(persistedSize); + } + + private static final class CompletedResult extends ResumableOperationResult { + + private final long persistedSize; + private final T entity; + + private CompletedResult(T entity, long persistedSize) { + this.entity = entity; + this.persistedSize = persistedSize; + } + + @Override + public @Nullable T getObject() { + return entity; + } + + @Override + public long getPersistedSize() { + return persistedSize; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("persistedSize", persistedSize) + .add("entity", entity) + .toString(); + } + } + + private static final class IncrementalResult<@Nullable T> extends ResumableOperationResult { + + private final long persistedSize; + + private IncrementalResult(long persistedSize) { + this.persistedSize = persistedSize; + } + + @Override + public @Nullable T getObject() { + return null; + } + + @Override + public long getPersistedSize() { + return persistedSize; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("persistedSize", persistedSize).toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableSession.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableSession.java new file mode 100644 index 000000000000..e2829d313b4b --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableSession.java @@ -0,0 +1,45 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.storage.v2.QueryWriteStatusRequest; +import com.google.storage.v2.QueryWriteStatusResponse; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; + +final class ResumableSession { + + private ResumableSession() {} + + static JsonResumableSession json( + HttpClientContext context, RetrierWithAlg retrier, JsonResumableWrite resumableWrite) { + return new JsonResumableSession(context, retrier, resumableWrite); + } + + static GrpcResumableSession grpc( + RetrierWithAlg retrier, + ClientStreamingCallable writeCallable, + UnaryCallable queryWriteStatusCallable, + ResumableWrite resumableWrite, + Hasher hasher) { + return new GrpcResumableSession( + retrier, writeCallable, queryWriteStatusCallable, resumableWrite, hasher); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableWrite.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableWrite.java new file mode 100644 index 000000000000..b7bee9854bc7 --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableWrite.java @@ -0,0 +1,87 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto; + +import com.google.cloud.storage.WriteCtx.WriteObjectRequestBuilderFactory; +import com.google.storage.v2.StartResumableWriteRequest; +import com.google.storage.v2.StartResumableWriteResponse; +import com.google.storage.v2.WriteObjectRequest; +import java.util.Objects; +import java.util.function.Function; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class ResumableWrite implements WriteObjectRequestBuilderFactory { + + private final StartResumableWriteRequest req; + private final StartResumableWriteResponse res; + + private final WriteObjectRequest writeRequest; + + public ResumableWrite( + StartResumableWriteRequest req, + StartResumableWriteResponse res, + Function f) { + this.req = req; + this.res = res; + this.writeRequest = f.apply(res.getUploadId()); + } + + public StartResumableWriteRequest getReq() { + return req; + } + + public StartResumableWriteResponse getRes() { + return res; + } + + @Override + public WriteObjectRequest.Builder newBuilder() { + return writeRequest.toBuilder(); + } + + @Override + public @Nullable String bucketName() { + if (req.hasWriteObjectSpec() && req.getWriteObjectSpec().hasResource()) { + 
return req.getWriteObjectSpec().getResource().getBucket(); + } + return null; + } + + @Override + public String toString() { + return "ResumableWrite{" + "req=" + fmtProto(req) + ", res=" + fmtProto(res) + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ResumableWrite)) { + return false; + } + ResumableWrite resumableWrite = (ResumableWrite) o; + return Objects.equals(req, resumableWrite.req) && Objects.equals(res, resumableWrite.res); + } + + @Override + public int hashCode() { + return Objects.hash(req, res); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RetryContext.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RetryContext.java new file mode 100644 index 000000000000..caf814416858 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/RetryContext.java @@ -0,0 +1,314 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.storage;

import static java.util.Objects.requireNonNull;

import com.google.api.client.util.Sleeper;
import com.google.api.core.ApiClock;
import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.api.core.InternalApi;
import com.google.api.core.InternalExtensionOnly;
import com.google.api.core.NanoClock;
import com.google.api.gax.retrying.ResultRetryAlgorithm;
import com.google.cloud.storage.Backoff.BackoffResult;
import com.google.cloud.storage.Backoff.Jitterer;
import com.google.cloud.storage.Retrying.RetryingDependencies;
import com.google.common.primitives.Longs;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Delayed;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.checkerframework.checker.nullness.qual.NonNull;

/**
 * Per-operation retry state: callers report each attempt's failure via {@link #recordError}, and
 * the context consults its {@link ResultRetryAlgorithm} and backoff to decide whether another
 * attempt is permitted.
 *
 * <p>NOTE(review): generic type parameters in this file were reconstructed from usage (the source
 * under review had them stripped); confirm against the canonical signatures before merging.
 */
@InternalApi
@InternalExtensionOnly
interface RetryContext {

  /** Whether this context is currently inside a backoff window between attempts. */
  boolean inBackoff();

  /** Clears accumulated attempt/error state so this context can be reused for a new operation. */
  void reset();

  /**
   * Records {@code t} as the failure of the current attempt.
   *
   * <p>Exactly one of the callbacks is invoked: {@code onSuccess} when another attempt may be made
   * (after any applicable backoff), {@code onFailure} when retries are exhausted.
   */
  <T extends Throwable> void recordError(T t, OnSuccess onSuccess, OnFailure<T> onFailure);

  static RetryContext of(
      ScheduledExecutorService scheduledExecutorService,
      RetryingDependencies retryingDependencies,
      ResultRetryAlgorithm<?> algorithm,
      Jitterer jitterer) {
    return new DefaultRetryContext(
        scheduledExecutorService, retryingDependencies, algorithm, jitterer);
  }

  /** A context that permits exactly one attempt and never retries. */
  static RetryContext neverRetry() {
    return new DefaultRetryContext(
        directScheduledExecutorService(),
        RetryingDependencies.attemptOnce(),
        Retrying.neverRetry(),
        Jitterer.threadLocalRandom());
  }

  static RetryContextProvider providerFrom(
      ScheduledExecutorService scheduledExecutorService,
      RetryingDependencies deps,
      ResultRetryAlgorithm<?> alg) {
    return () -> of(scheduledExecutorService, deps, alg, Jitterer.threadLocalRandom());
  }

  /**
   * An executor that runs scheduled work on the calling thread, sleeping synchronously instead of
   * deferring; useful where retries must remain on the invoking thread.
   */
  static ScheduledExecutorService directScheduledExecutorService() {
    return DirectScheduledExecutorService.INSTANCE;
  }

  @FunctionalInterface
  interface RetryContextProvider {
    RetryContext create();
  }

  @FunctionalInterface
  interface OnSuccess {
    void onSuccess();
  }

  @FunctionalInterface
  interface OnFailure<T extends Throwable> {
    void onFailure(T t);
  }

  /**
   * Define a custom exception which can carry a comment about the budget exhaustion, so we can
   * include it as a suppressed exception, but don't fill in any stack frames. This is a throwable
   * only because it is the only way we can include it into an exception that will by default print
   * with the exception stacktrace.
   *
   * @see Throwable#addSuppressed(Throwable)
   */
  final class RetryBudgetExhaustedComment extends Throwable {
    RetryBudgetExhaustedComment(String comment) {
      super(
          comment,
          /* cause= */ null,
          /* enableSuppression= */ true,
          /* writableStackTrace= */ false);
    }
  }

  /** Stack-frame-free "comment" throwable describing a backoff that is about to be taken. */
  final class BackoffComment extends Throwable {
    private BackoffComment(String message) {
      super(
          message,
          /* cause= */ null,
          /* enableSuppression= */ true,
          /* writableStackTrace= */ false);
    }

    static BackoffComment fromResult(BackoffResult result) {
      return new BackoffComment(
          String.format("backing off %s before next attempt", result.errorString()));
    }

    static BackoffComment of(String message) {
      return new BackoffComment(message);
    }
  }

  /** Stack-frame-free "comment" throwable recording that a backoff sleep was interrupted. */
  final class InterruptedBackoffComment extends Throwable {
    InterruptedBackoffComment(@NonNull String message) {
      super(
          requireNonNull(message, "message must be non null"),
          /* cause= */ null,
          /* enableSuppression= */ true,
          /* writableStackTrace= */ false);
    }
  }

  /**
   * A {@link ScheduledExecutorService} that executes everything synchronously on the calling
   * thread. {@link #schedule(Runnable, long, TimeUnit)} sleeps for the requested delay and then
   * runs the command inline; every other executor operation is unsupported.
   */
  final class DirectScheduledExecutorService implements ScheduledExecutorService {
    private static final DirectScheduledExecutorService INSTANCE =
        new DirectScheduledExecutorService(Sleeper.DEFAULT, NanoClock.getDefaultClock());

    private static final Comparator<Delayed> COMP =
        Comparator.comparingLong(delayed -> delayed.getDelay(TimeUnit.NANOSECONDS));

    private final Sleeper sleeper;
    private final ApiClock apiClock;

    private DirectScheduledExecutorService(Sleeper sleeper, ApiClock apiClock) {
      this.sleeper = sleeper;
      this.apiClock = apiClock;
    }

    @Override
    public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
      return new DirectScheduledFuture(unit, delay, command);
    }

    @Override
    public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ScheduledFuture<?> scheduleAtFixedRate(
        Runnable command, long initialDelay, long period, TimeUnit unit) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ScheduledFuture<?> scheduleWithFixedDelay(
        Runnable command, long initialDelay, long delay, TimeUnit unit) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void shutdown() {
      throw new UnsupportedOperationException();
    }

    @Override
    public List<Runnable> shutdownNow() {
      throw new UnsupportedOperationException();
    }

    @Override
    public boolean isShutdown() {
      throw new UnsupportedOperationException();
    }

    @Override
    public boolean isTerminated() {
      throw new UnsupportedOperationException();
    }

    @Override
    public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
      throw new UnsupportedOperationException();
    }

    @Override
    public <T> Future<T> submit(Callable<T> task) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <T> Future<T> submit(Runnable task, T result) {
      throw new UnsupportedOperationException();
    }

    @Override
    public Future<?> submit(Runnable task) {
      throw new UnsupportedOperationException();
    }

    @Override
    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
        throws InterruptedException {
      throw new UnsupportedOperationException();
    }

    @Override
    public <T> List<Future<T>> invokeAll(
        Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
        throws InterruptedException {
      throw new UnsupportedOperationException();
    }

    @Override
    public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
        throws InterruptedException, ExecutionException {
      throw new UnsupportedOperationException();
    }

    @Override
    public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
        throws InterruptedException, ExecutionException, TimeoutException {
      throw new UnsupportedOperationException();
    }

    @Override
    public void execute(Runnable command) {
      command.run();
    }

    /**
     * Future returned by {@link #schedule(Runnable, long, TimeUnit)}. The underlying work runs
     * synchronously (sleep + run) via a direct-executor {@link ApiFuture}.
     */
    private final class DirectScheduledFuture implements ScheduledFuture<Object> {

      private final long origDelayNs;
      private final long beginNs;
      private final ApiFuture<?> delegate;

      DirectScheduledFuture(TimeUnit unit, long delay, Runnable command) {
        origDelayNs = unit.toNanos(delay);
        beginNs = apiClock.nanoTime();
        delegate =
            ApiFutures.transformAsync(
                ApiFutures.immediateFuture(null),
                ignore -> {
                  sleeper.sleep(unit.toMillis(delay));
                  command.run();
                  return ApiFutures.immediateFuture(null);
                },
                MoreExecutors.directExecutor());
      }

      @Override
      public long getDelay(TimeUnit unit) {
        // Delayed#getDelay must return the REMAINING delay, in the caller's requested unit.
        // The previous implementation computed (elapsed - originalDelay) -- i.e. how far PAST
        // the deadline we are, growing without bound -- and always returned nanoseconds.
        long elapsedNs = apiClock.nanoTime() - beginNs;
        return unit.convert(Longs.max(0L, origDelayNs - elapsedNs), TimeUnit.NANOSECONDS);
      }

      @Override
      public int compareTo(Delayed o) {
        return COMP.compare(this, o);
      }

      @Override
      public boolean cancel(boolean mayInterruptIfRunning) {
        return delegate.cancel(mayInterruptIfRunning);
      }

      @Override
      public boolean isCancelled() {
        return delegate.isCancelled();
      }

      @Override
      public boolean isDone() {
        return delegate.isDone();
      }

      @Override
      public Object get() throws InterruptedException, ExecutionException {
        return delegate.get();
      }

      @Override
      public Object get(long timeout, TimeUnit unit)
          throws InterruptedException, ExecutionException, TimeoutException {
        return delegate.get(timeout, unit);
      }
    }
  }
}
package com.google.cloud.storage;

import com.google.api.core.ApiClock;
import com.google.api.core.NanoClock;
import com.google.api.gax.grpc.GrpcCallContext;
import com.google.api.gax.retrying.BasicResultRetryAlgorithm;
import com.google.api.gax.retrying.ResultRetryAlgorithm;
import com.google.api.gax.retrying.RetrySettings;
import com.google.cloud.storage.Backoff.Jitterer;
import com.google.cloud.storage.Conversions.Decoder;
import com.google.cloud.storage.spi.v1.HttpRpcContext;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.UnaryOperator;
import org.checkerframework.checker.nullness.qual.NonNull;

/**
 * Utility types for running an operation and conditionally retrying it on failure.
 *
 * <p>NOTE(review): generic type parameters in this file were reconstructed from usage (the source
 * under review had them stripped); confirm against the canonical signatures before merging.
 */
final class Retrying {

  private Retrying() {}

  /**
   * A simple interface to abstract the lifecycle of running an operation, and conditionally
   * retrying if an error is encountered.
   */
  interface Retrier {
    /**
     * A convenience method to invoke a callable, and possibly retry it again if an exception is
     * thrown. If the result of {@code c} is non-null, Decoder {@code d} will be applied to the
     * result before returning.
     *
     * @param <Response> The result type of {@code c}
     * @param <Model> The result type of any mapping that takes place via {@code d}
     * @param alg The {@link ResultRetryAlgorithm} to use when determining if a retry is possible
     * @param c The {@link Callable} which will be invoked producing some {@code Response}, can
     *     optionally return null
     * @param d A post process mapping {@link Decoder} which can be used to transform the result
     *     from {@code c} if it is successful and non-null
     * @return A {@code Model} (possibly null) after applying {@code d} to the result of {@code c}
     * @throws StorageException if {@code c} fails due to any retry exhaustion
     */
    <Response, Model> Model run(
        ResultRetryAlgorithm<?> alg, Callable<Response> c, Decoder<Response, Model> d);

    /**
     * Produce a new {@link RetrierWithAlg} where the provided ResultRetryAlgorithm is bound for the
     * life of the produced instance.
     */
    default RetrierWithAlg withAlg(ResultRetryAlgorithm<?> alg) {
      return new RetrierWithAlgImpl(this, alg);
    }

    static Retrier attemptOnce() {
      return AttemptOnceRetrier.INSTANCE;
    }
  }

  /**
   * A specialization of {@link Retrier} where the {@link ResultRetryAlgorithm} is bound to the
   * instance of this interface, and does not need to be supplied to the {@link #run(Callable,
   * Decoder)} method.
   */
  interface RetrierWithAlg extends Retrier {

    /**
     * A convenience method to invoke a callable, and possibly retry it again if an exception is
     * thrown. If the result of {@code c} is non-null, Decoder {@code d} will be applied to the
     * result before returning.
     *
     * @param <Response> The result type of {@code c}
     * @param <Model> The result type of any mapping that takes place via {@code d}
     * @param c The {@link Callable} which will be invoked producing some {@code Response}, can
     *     optionally return null
     * @param d A post process mapping {@link Decoder} which can be used to transform the result
     *     from {@code c} if it is successful and non-null
     * @return A {@code Model} (possibly null) after applying {@code d} to the result of {@code c}
     * @throws StorageException if {@code c} fails due to any retry exhaustion
     */
    <Response, Model> Model run(Callable<Response> c, Decoder<Response, Model> d);

    static RetrierWithAlg attemptOnce() {
      return AttemptOnceRetrier.INSTANCE_WITH_ALG;
    }
  }

  /** Retrier that performs a single attempt; any exception is coalesced and rethrown. */
  static final class AttemptOnceRetrier implements Retrier {
    private static final AttemptOnceRetrier INSTANCE = new AttemptOnceRetrier();
    private static final RetrierWithAlg INSTANCE_WITH_ALG = INSTANCE.withAlg(neverRetry());

    @Override
    public <Response, Model> Model run(
        ResultRetryAlgorithm<?> alg, Callable<Response> c, Decoder<Response, Model> d) {
      try {
        Response call = c.call();
        return call == null ? null : d.decode(call);
      } catch (Exception e) {
        throw StorageException.coalesce(e);
      }
    }
  }

  /** Binds a {@link ResultRetryAlgorithm} to a delegate {@link Retrier}. */
  static final class RetrierWithAlgImpl implements RetrierWithAlg {
    private final Retrier retrier;
    private final ResultRetryAlgorithm<?> alg;

    private RetrierWithAlgImpl(Retrier retrier, ResultRetryAlgorithm<?> alg) {
      this.retrier = retrier;
      this.alg = alg;
    }

    @Override
    public <Response, Model> Model run(
        ResultRetryAlgorithm<?> alg, Callable<Response> c, Decoder<Response, Model> d) {
      return retrier.run(alg, c, d);
    }

    @Override
    public <Response, Model> Model run(Callable<Response> c, Decoder<Response, Model> d) {
      return retrier.run(alg, c, d);
    }
  }

  /**
   * Default retrier: loops attempts, feeding every failure into a fresh {@link RetryContext} which
   * decides whether to attempt again (possibly after a synchronous backoff) or to give up.
   */
  static final class DefaultRetrier implements Retrier {
    // hook allowing tests/call-sites to wrap or replace the per-run RetryContext
    private final UnaryOperator<RetryContext> decorator;
    private final RetryingDependencies deps;

    DefaultRetrier(UnaryOperator<RetryContext> decorator, RetryingDependencies deps) {
      this.decorator = decorator;
      this.deps = deps;
    }

    @Override
    public <Response, Model> Model run(
        ResultRetryAlgorithm<?> alg, Callable<Response> c, Decoder<Response, Model> d) {
      RetryContext ctx =
          decorator.apply(
              RetryContext.of(
                  RetryContext.directScheduledExecutorService(),
                  deps,
                  alg,
                  Jitterer.threadLocalRandom()));
      AtomicReference<Exception> failure = new AtomicReference<>();
      AtomicBoolean attemptAgain = new AtomicBoolean(false);
      do {
        attemptAgain.set(false);
        try {
          Response result = c.call();
          return result == null ? null : d.decode(result);
        } catch (StorageException se) {
          // we hope for this case
          ctx.recordError(se, () -> attemptAgain.set(true), failure::set);
        } catch (IllegalArgumentException iae) {
          // IllegalArgumentException can happen if there is no json in the body and we try
          // to parse it Our retry algorithms have special case for this, so in an effort to
          // keep compatibility with those existing behaviors, explicitly rethrow an
          // IllegalArgumentException that may have happened
          ctx.recordError(iae, () -> attemptAgain.set(true), failure::set);
        } catch (Exception e) {
          // Wire in this fall through just in case.
          // all of our retry algorithms are centered around StorageException so this helps
          // those be more effective
          ctx.recordError(StorageException.coalesce(e), () -> attemptAgain.set(true), failure::set);
        }
      } while (attemptAgain.get());

      Exception throwable = failure.get();
      if (throwable instanceof StorageException) {
        throw (StorageException) throwable;
      } else {
        throw StorageException.coalesce(throwable);
      }
    }
  }

  /**
   * Decorating retrier that scopes an HTTP invocation id to each run (set before, cleared after)
   * so the x-goog-api-client invocation header stays stable across the retry attempts of one call.
   */
  static final class HttpRetrier implements Retrier {
    private final Retrier delegate;

    HttpRetrier(Retrier delegate) {
      this.delegate = delegate;
    }

    @Override
    public <Response, Model> Model run(
        ResultRetryAlgorithm<?> alg, Callable<Response> c, Decoder<Response, Model> d) {
      HttpRpcContext httpRpcContext = HttpRpcContext.getInstance();
      try {
        httpRpcContext.newInvocationId();
        return delegate.run(alg, c, d);
      } finally {
        httpRpcContext.clearInvocationId();
      }
    }
  }

  /** New gRPC call context carrying a fresh idempotency token header. */
  @NonNull
  static GrpcCallContext newCallContext() {
    return GrpcCallContext.createDefault()
        .withExtraHeaders(
            ImmutableMap.of(
                "x-goog-gcs-idempotency-token", ImmutableList.of(UUID.randomUUID().toString())));
  }

  static ResultRetryAlgorithm<?> neverRetry() {
    return new BasicResultRetryAlgorithm<Object>() {
      @Override
      public boolean shouldRetry(Throwable previousThrowable, Object previousResponse) {
        return false;
      }
    };
  }

  static ResultRetryAlgorithm<?> alwaysRetry() {
    return new BasicResultRetryAlgorithm<Object>() {
      @Override
      public boolean shouldRetry(Throwable previousThrowable, Object previousResponse) {
        return true;
      }
    };
  }

  /**
   * Rather than requiring a full set of {@link StorageOptions} to be passed specify what we
   * actually need and have StorageOptions implement this interface.
   */
  interface RetryingDependencies {

    RetrySettings getRetrySettings();

    ApiClock getClock();

    static RetryingDependencies attemptOnce() {
      return RetryingDependencies.simple(
          NanoClock.getDefaultClock(), RetrySettings.newBuilder().setMaxAttempts(1).build());
    }

    static RetryingDependencies simple(ApiClock clock, RetrySettings retrySettings) {
      return new SimpleRetryingDependencies(clock, retrySettings);
    }
  }

  private static final class SimpleRetryingDependencies implements RetryingDependencies {
    private final ApiClock clock;
    private final RetrySettings retrySettings;

    private SimpleRetryingDependencies(ApiClock clock, RetrySettings retrySettings) {
      this.retrySettings = retrySettings;
      this.clock = clock;
    }

    @Override
    public ApiClock getClock() {
      return clock;
    }

    @Override
    public RetrySettings getRetrySettings() {
      return retrySettings;
    }

    @Override
    public String toString() {
      return MoreObjects.toStringHelper(this)
          .add("clock", clock)
          .add("retrySettings", retrySettings)
          .toString();
    }
  }
}
package com.google.cloud.storage;

import com.google.api.client.http.AbstractHttpContent;
import com.google.api.client.http.HttpMediaType;
import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown;
import com.google.cloud.storage.Hasher.GuavaHasher;
import com.google.cloud.storage.Hasher.NoOpHasher;
import com.google.common.base.Preconditions;
import com.google.common.io.ByteStreams;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.GatheringByteChannel;
import java.nio.channels.SeekableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;
import java.util.Locale;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Content for an upload which can be rewound to an earlier offset and replayed, supporting retries
 * of partially-sent requests. Implementations exist for empty content, a file {@link Path}, and an
 * array of {@link ByteBuffer}s.
 */
abstract class RewindableContent extends AbstractHttpContent {

  private RewindableContent() {
    super((HttpMediaType) null);
  }

  /** Number of bytes remaining from the current offset. */
  @Override
  public abstract long getLength();

  /** Reposition the content so the next write replays from {@code offset}. */
  abstract void rewindTo(long offset);

  abstract long writeTo(WritableByteChannel gbc) throws IOException;

  abstract long writeTo(GatheringByteChannel gbc) throws IOException;

  /** Mark state as consumed so a subsequent {@link #rewindTo} restores buffer positions. */
  abstract void flagDirty();

  @Override
  public final boolean retrySupported() {
    // retries are coordinated externally via rewindTo, not by the http client
    return false;
  }

  /** CRC32C of the full content, or null when hashing is disabled. */
  @Nullable
  abstract Crc32cLengthKnown getCrc32c();

  /** Shared hasher selection: null signals hashing is disabled via {@link NoOpHasher}. */
  @Nullable
  private static GuavaHasher crc32cHasher() {
    Hasher defaultHasher = Hasher.defaultHasher();
    return defaultHasher instanceof NoOpHasher ? null : Hasher.enabled();
  }

  static RewindableContent empty() {
    return EmptyRewindableContent.INSTANCE;
  }

  static RewindableContent of(ByteBuffer... buffers) {
    return new ByteBufferContent(buffers);
  }

  public static RewindableContent of(ByteBuffer[] srcs, int srcsOffset, int srcsLength) {
    Preconditions.checkNotNull(srcs, "srcs must be non null");
    if (!(0 <= srcsOffset && srcsOffset <= srcs.length)) {
      throw new ArrayIndexOutOfBoundsException(
          String.format(
              Locale.US,
              "srcsOffset out of bounds (0 <= %d && %d <= %d)",
              srcsOffset,
              srcsOffset,
              srcs.length));
    }
    // Guava's checkArgument template only substitutes %s placeholders; %d would be emitted
    // literally in the failure message.
    Preconditions.checkArgument(srcsLength >= 0, "srcsLength >= 0 (%s >= 0)", srcsLength);
    int end = srcsOffset + srcsLength;
    return new ByteBufferContent(Arrays.copyOfRange(srcs, srcsOffset, end));
  }

  static RewindableContent of(Path path) throws IOException {
    return new PathRewindableContent(path);
  }

  private static final class EmptyRewindableContent extends RewindableContent {
    private static final EmptyRewindableContent INSTANCE = new EmptyRewindableContent();

    @Override
    public long getLength() {
      return 0L;
    }

    @Override
    public void writeTo(OutputStream out) throws IOException {
      out.flush();
    }

    @Override
    long writeTo(WritableByteChannel gbc) {
      return 0;
    }

    @Override
    long writeTo(GatheringByteChannel gbc) {
      return 0;
    }

    @Override
    protected void rewindTo(long offset) {}

    @Override
    void flagDirty() {}

    @Override
    @Nullable Crc32cLengthKnown getCrc32c() {
      return Hasher.defaultHasher().initialValue();
    }
  }

  private static final class PathRewindableContent extends RewindableContent {

    private final Path path;
    private final long size;

    // offset within the file from which the next write replays
    private long readOffset;

    private PathRewindableContent(Path path) throws IOException {
      this.path = path;
      this.size = Files.size(path);
      this.readOffset = 0;
    }

    @Override
    public long getLength() {
      return size - readOffset;
    }

    @Override
    void rewindTo(long offset) {
      // Guava's checkArgument template only substitutes %s placeholders; %d would be emitted
      // literally in the failure message.
      Preconditions.checkArgument(
          offset < size, "provided offset must be less than size (%s < %s)", offset, size);
      this.readOffset = offset;
    }

    @Override
    public void writeTo(OutputStream out) throws IOException {
      try (SeekableByteChannel in = Files.newByteChannel(path, StandardOpenOption.READ)) {
        in.position(readOffset);
        ByteStreams.copy(in, Channels.newChannel(out));
        out.flush();
      }
    }

    @Override
    long writeTo(WritableByteChannel gbc) throws IOException {
      try (SeekableByteChannel in = Files.newByteChannel(path, StandardOpenOption.READ)) {
        in.position(readOffset);
        return ByteStreams.copy(in, gbc);
      }
    }

    @Override
    long writeTo(GatheringByteChannel gbc) throws IOException {
      try (SeekableByteChannel in = Files.newByteChannel(path, StandardOpenOption.READ)) {
        in.position(readOffset);
        return ByteStreams.copy(in, gbc);
      }
    }

    @Override
    void flagDirty() {}

    @Override
    @Nullable Crc32cLengthKnown getCrc32c() {
      GuavaHasher hasher = crc32cHasher();
      if (hasher == null) {
        return null;
      }
      Crc32cLengthKnown cumulative = Crc32cValue.zero();

      int bufferSize = 8192; // 8KiB buffer for reading chunks
      ByteBuffer buffer = ByteBuffer.allocate(bufferSize);

      try (SeekableByteChannel channel = Files.newByteChannel(path, StandardOpenOption.READ)) {
        while (channel.read(buffer) != -1) {
          buffer.flip();
          if (buffer.hasRemaining()) {
            cumulative = cumulative.concat(hasher.hash(buffer::duplicate));
          }
          buffer.clear();
        }
      } catch (IOException e) {
        throw new RuntimeException("Failed to read file for CRC32C calculation: " + path, e);
      }
      return cumulative;
    }
  }

  private static final class ByteBufferContent extends RewindableContent {

    private final ByteBuffer[] buffers;
    // keep an array of the positions in case we need to rewind them for retries
    // doing this is simpler than duplicating the buffers and using marks, as we don't need to
    // advance the position of the original buffers upon success.
    // We generally expect success, and in this case are planning in case of failure.
    private final int[] positions;
    private final long totalLength;
    // track whether we have changed any state
    private boolean dirty;

    private long offset;

    private ByteBufferContent(ByteBuffer[] buffers) {
      this.buffers = buffers;
      this.positions = Arrays.stream(buffers).mapToInt(Buffers::position).toArray();
      this.totalLength = Buffers.totalRemaining(buffers, 0, buffers.length);
      this.dirty = false;
    }

    @Override
    public long getLength() {
      return totalLength - offset;
    }

    @Override
    public void writeTo(OutputStream out) throws IOException {
      dirty = true;
      WritableByteChannel c = Channels.newChannel(out);
      for (ByteBuffer buffer : buffers) {
        c.write(buffer);
      }
      out.flush();
    }

    @Override
    long writeTo(WritableByteChannel gbc) throws IOException {
      dirty = true;
      // accumulate in a long: this method returns long, and total buffered content can
      // exceed Integer.MAX_VALUE (the previous int accumulator could overflow)
      long retVal = 0;
      for (ByteBuffer buffer : buffers) {
        retVal += gbc.write(buffer);
      }
      return retVal;
    }

    @Override
    long writeTo(GatheringByteChannel gbc) throws IOException {
      dirty = true;
      return gbc.write(buffers);
    }

    @Override
    void rewindTo(long offset) {
      Preconditions.checkArgument(
          offset <= totalLength,
          "provided offset must be less than or equal to totalLength (%s <= %s)",
          offset,
          totalLength);
      if (dirty || offset != this.offset) {
        // starting from the end of our data, walk back the buffers updating their position
        // to coincide with the rewind of the overall content
        int idx = buffers.length - 1;
        for (long currentOffset = totalLength; currentOffset > 0; ) {
          int position = positions[idx];
          ByteBuffer buf = buffers[idx];

          int origRemaining = buf.limit() - position;

          long begin = currentOffset - origRemaining;

          if (begin <= offset && offset < currentOffset) {
            // target offset falls inside this buffer: position it part-way through
            long diff = offset - begin;
            Buffers.position(buf, position + Math.toIntExact(diff));
          } else if (offset >= currentOffset) {
            // the desired offset is after this buf
            // ensure it does not have any available
            Buffers.position(buf, buf.limit());
          } else {
            // the desired offset is before this buf: restore its original position
            Buffers.position(buf, position);
          }

          currentOffset = begin;
          idx -= 1;
        }
      }
      this.offset = offset;
    }

    @Override
    void flagDirty() {
      this.dirty = true;
    }

    @Override
    @Nullable Crc32cLengthKnown getCrc32c() {
      GuavaHasher hasher = crc32cHasher();
      if (hasher == null) {
        return null;
      }
      Crc32cLengthKnown cumulative = Crc32cValue.zero();
      for (ByteBuffer buffer : buffers) {
        cumulative = cumulative.concat(hasher.hash(buffer::duplicate));
      }
      return cumulative;
    }
  }
}
package com.google.cloud.storage;

import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.GatheringByteChannel;

/**
 * Facade which makes an instance of {@link RewindableContent} appear as an input stream.
 *
 * <p>It does this by calling {@link RewindableContent#writeTo(GatheringByteChannel)} on an
 * anonymous channel which closes over the read destination.
 */
final class RewindableContentInputStream extends InputStream {

  private final RewindableContent content;

  RewindableContentInputStream(RewindableContent content) {
    this.content = content;
  }

  @Override
  public int read() throws IOException {
    // single-byte reads are serviced via the array overload
    byte[] single = new byte[1];
    int result = read(single, 0, 1);
    return result == -1 ? -1 : (single[0] & 0xFF);
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    // the destination of the write is the caller's array, viewed as a buffer
    ByteBuffer sink = ByteBuffer.wrap(b, off, len);
    if (!sink.hasRemaining()) {
      return 0;
    }
    long copied =
        content.writeTo(
            new AnonWritableByteChannel() {
              @Override
              public long write(ByteBuffer[] srcs, int offset, int length) {
                // NOTE(review): iterates [offset, length) rather than [offset, offset+length);
                // the writeTo path observed here always supplies offset 0, for which the two are
                // identical -- confirm before adding other callers.
                long moved = 0;
                for (int i = offset; i < length; i++) {
                  // drain as much of each src as the sink can take
                  moved += Buffers.copy(srcs[i], sink);
                }
                return moved;
              }
            });
    // space was available but nothing was produced: the content is exhausted
    return copied == 0 ? -1 : Math.toIntExact(copied);
  }

  private abstract static class AnonWritableByteChannel implements UnbufferedWritableByteChannel {

    @Override
    public boolean isOpen() {
      return true;
    }

    @Override
    public void close() {}
  }
}
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.storage; + +import com.google.api.core.ApiFunction; +import com.google.cloud.StringEnumType; +import com.google.cloud.StringEnumValue; + +/** + * Enums for the Recovery Point Objective (RPO) of dual-region buckets, which determines how fast + * data is replicated between regions. + * + * @see https://cloud.google.com/storage/docs/turbo-replication + */ +public final class Rpo extends StringEnumValue { + + private static final long serialVersionUID = -2916656819456559679L; + + private Rpo(String constant) { + super(constant); + } + + private static final ApiFunction CONSTRUCTOR = Rpo::new; + + private static final StringEnumType type = new StringEnumType<>(Rpo.class, CONSTRUCTOR); + + /** + * Default recovery point objective. With this setting, there is no guarantee on the amount of + * time it takes for data to replicate between regions. + */ + public static final Rpo DEFAULT = type.createAndRegister("DEFAULT"); + + /** + * Turbo recovery point objective. With this setting, data in a dual-region bucket will replicate + * between regions within 15 minutes. + */ + public static final Rpo ASYNC_TURBO = type.createAndRegister("ASYNC_TURBO"); + + /** + * Get the Rpo for the given String constant, and throw an exception if the constant is not + * recognized. 
+ */ + public static Rpo valueOfStrict(String constant) { + return type.valueOfStrict(constant); + } + + /** Get the Rpo for the given String constant, and allow unrecognized values. */ + public static Rpo valueOf(String constant) { + return type.valueOf(constant); + } + + /** Return the known values for Rpo. */ + public static Rpo[] values() { + return type.values(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ServiceAccount.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ServiceAccount.java new file mode 100644 index 000000000000..36be4f907500 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ServiceAccount.java @@ -0,0 +1,70 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.common.base.MoreObjects; +import java.io.Serializable; +import java.util.Objects; + +/** + * A service account, with its specified scopes, authorized for this instance. + * + * @see Authenticating from Google + * Cloud Storage + */ +public final class ServiceAccount implements Serializable { + + private static final long serialVersionUID = -6492243440372543799L; + + private final String email; + + private ServiceAccount(String email) { + this.email = email; + } + + /** Returns the email address of the service account. 
*/ + public String getEmail() { + return email; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("email", email).toString(); + } + + @Override + public int hashCode() { + return Objects.hash(email); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ServiceAccount)) { + return false; + } + ServiceAccount that = (ServiceAccount) o; + return Objects.equals(email, that.email); + } + + /** Returns a {@code ServiceAccount} object for the provided email. */ + public static ServiceAccount of(String email) { + return new ServiceAccount(email); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/SignatureInfo.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/SignatureInfo.java new file mode 100644 index 000000000000..9799496352d1 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/SignatureInfo.java @@ -0,0 +1,410 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.SignedUrlEncodingHelper.Rfc3986UriEncode; +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.hash.Hashing; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TimeZone; +import java.util.TreeMap; + +/** + * Signature Info holds payload components of the string that requires signing. + * + * @see + * Components + */ +public class SignatureInfo { + + public static final char COMPONENT_SEPARATOR = '\n'; + public static final String GOOG4_RSA_SHA256 = "GOOG4-RSA-SHA256"; + public static final String SCOPE = "/auto/storage/goog4_request"; + private static final List RESERVED_PARAMS_LOWER = + ImmutableList.of( + // V2: + "expires", + "googleaccessid", + // V4: + "x-goog-algorithm", + "x-goog-credential", + "x-goog-date", + "x-goog-expires", + "x-goog-signedheaders"); + + private final HttpMethod httpVerb; + private final String contentMd5; + private final String contentType; + private final long expiration; + private final Map canonicalizedExtensionHeaders; + private final Map queryParams; + private final URI canonicalizedResource; + private final Storage.SignUrlOption.SignatureVersion signatureVersion; + private final String accountEmail; + private final long timestamp; + + private final String yearMonthDay; + private final String exactDate; + + private SignatureInfo(Builder builder) { + this.httpVerb = builder.httpVerb; + this.contentMd5 = builder.contentMd5; + this.contentType = builder.contentType; + this.expiration = builder.expiration; + this.canonicalizedResource = builder.canonicalizedResource; + this.signatureVersion = builder.signatureVersion; + this.accountEmail = 
builder.accountEmail; + this.timestamp = builder.timestamp; + + ImmutableMap.Builder headerBuilder = + new ImmutableMap.Builder().putAll(builder.canonicalizedExtensionHeaders); + // The "host" header only needs to be present and signed if using V4. + if (Storage.SignUrlOption.SignatureVersion.V4.equals(signatureVersion) + && (!builder.canonicalizedExtensionHeaders.containsKey("host"))) { + headerBuilder.put("host", "storage.googleapis.com"); + } + canonicalizedExtensionHeaders = headerBuilder.build(); + + queryParams = ImmutableMap.copyOf(builder.queryParams); + + Date date = new Date(timestamp); + + SimpleDateFormat yearMonthDayFormat = new SimpleDateFormat("yyyyMMdd"); + SimpleDateFormat exactDateFormat = new SimpleDateFormat("yyyyMMdd'T'HHmmss'Z'"); + + yearMonthDayFormat.setTimeZone(TimeZone.getTimeZone("UTC")); + exactDateFormat.setTimeZone(TimeZone.getTimeZone("UTC")); + + yearMonthDay = yearMonthDayFormat.format(date); + exactDate = exactDateFormat.format(date); + } + + /** + * Constructs payload to be signed. 
+ * + * @return payload to sign + * @see Signed URLs + */ + public String constructUnsignedPayload() { + // TODO reverse order when V4 becomes default + if (Storage.SignUrlOption.SignatureVersion.V4.equals(signatureVersion)) { + return constructV4UnsignedPayload(); + } + return constructV2UnsignedPayload(); + } + + private String constructV2UnsignedPayload() { + StringBuilder payload = new StringBuilder(); + + payload.append(httpVerb.name()).append(COMPONENT_SEPARATOR); + if (contentMd5 != null) { + payload.append(contentMd5); + } + payload.append(COMPONENT_SEPARATOR); + + if (contentType != null) { + payload.append(contentType); + } + payload.append(COMPONENT_SEPARATOR); + payload.append(expiration).append(COMPONENT_SEPARATOR); + + if (canonicalizedExtensionHeaders.size() > 0) { + payload.append( + new CanonicalExtensionHeadersSerializer(Storage.SignUrlOption.SignatureVersion.V2) + .serialize(canonicalizedExtensionHeaders)); + } + + payload.append(canonicalizedResource); + + return payload.toString(); + } + + private String constructV4UnsignedPayload() { + StringBuilder payload = new StringBuilder(); + + payload.append(GOOG4_RSA_SHA256).append(COMPONENT_SEPARATOR); + payload.append(exactDate).append(COMPONENT_SEPARATOR); + payload.append(yearMonthDay).append(SCOPE).append(COMPONENT_SEPARATOR); + payload.append(constructV4CanonicalRequestHash()); + + return payload.toString(); + } + + private String constructV4CanonicalRequestHash() { + StringBuilder canonicalRequest = new StringBuilder(); + + CanonicalExtensionHeadersSerializer serializer = + new CanonicalExtensionHeadersSerializer(Storage.SignUrlOption.SignatureVersion.V4); + + canonicalRequest.append(httpVerb.name()).append(COMPONENT_SEPARATOR); + canonicalRequest.append(canonicalizedResource).append(COMPONENT_SEPARATOR); + canonicalRequest.append(constructV4QueryString()).append(COMPONENT_SEPARATOR); + canonicalRequest + .append(serializer.serialize(canonicalizedExtensionHeaders)) + 
.append(COMPONENT_SEPARATOR); + canonicalRequest + .append(serializer.serializeHeaderNames(canonicalizedExtensionHeaders)) + .append(COMPONENT_SEPARATOR); + + String userProvidedHash = null; + for (Map.Entry entry : canonicalizedExtensionHeaders.entrySet()) { + if ("X-Goog-Content-SHA256".equalsIgnoreCase(entry.getKey())) { + userProvidedHash = entry.getValue(); + break; + } + } + canonicalRequest.append(userProvidedHash == null ? "UNSIGNED-PAYLOAD" : userProvidedHash); + + return Hashing.sha256() + .hashString(canonicalRequest.toString(), StandardCharsets.UTF_8) + .toString(); + } + + /** + * Returns a TreeMap containing the user-supplied query parameters that do not have reserved keys. + */ + private TreeMap getNonReservedUserQueryParams() { + TreeMap sortedParamMap = new TreeMap(); + + // Skip any instances of well-known required headers that might have been supplied by the + // caller. + for (Map.Entry entry : queryParams.entrySet()) { + // Convert to (and check for the existence of) lowercase keys to prevent cases like a user + // supplying "x-goog-algorithm", in order to prevent the resulting query string from + // containing "x-goog-algorithm" and "X-Goog-Algorithm". + if (!RESERVED_PARAMS_LOWER.contains(entry.getKey().toLowerCase())) { + // URI encode user-supplied parameter, both the name and the value. + sortedParamMap.put( + Rfc3986UriEncode(entry.getKey(), true), Rfc3986UriEncode(entry.getValue(), true)); + } + } + + return sortedParamMap; + } + + private String queryStringFromParamMap(Map map) { + StringBuilder queryStringBuilder = new StringBuilder(); + + String sep = ""; + for (Map.Entry entry : map.entrySet()) { + queryStringBuilder.append(sep); + sep = "&"; + queryStringBuilder.append(entry.getKey()).append('=').append(entry.getValue()); + } + + return queryStringBuilder.toString(); + } + + /** + * Returns a query string constructed from this object's stored query parameters, sorted in code + * point order. 
Note that these query parameters are not used when constructing the URL's + * signature. The returned value does not include the leading ? character, as this is not part of + * a query string. + * + * @return A URI query string. Returns an empty string if the user supplied no query parameters. + */ + public String constructV2QueryString() { + TreeMap sortedParamMap = getNonReservedUserQueryParams(); + // The "GoogleAccessId", "Expires", and "Signature" params are not included here. + return queryStringFromParamMap(sortedParamMap); + } + + /** + * Returns a query string constructed from this object's stored query parameters, sorted in code + * point order so that the query string can be used in a V4 canonical request string. The returned + * value does not include the leading ? character, as this is not part of a query string. + * + * @see + * Canonical Requests + */ + public String constructV4QueryString() { + TreeMap sortedParamMap = getNonReservedUserQueryParams(); + + // Add in the reserved auth-specific query params. + sortedParamMap.put("X-Goog-Algorithm", Rfc3986UriEncode(GOOG4_RSA_SHA256, true)); + sortedParamMap.put( + "X-Goog-Credential", Rfc3986UriEncode(accountEmail + "/" + yearMonthDay + SCOPE, true)); + sortedParamMap.put("X-Goog-Date", Rfc3986UriEncode(exactDate, true)); + sortedParamMap.put("X-Goog-Expires", Rfc3986UriEncode(Long.toString(expiration), true)); + StringBuilder signedHeadersBuilder = + new CanonicalExtensionHeadersSerializer(Storage.SignUrlOption.SignatureVersion.V4) + .serializeHeaderNames(canonicalizedExtensionHeaders); + sortedParamMap.put( + "X-Goog-SignedHeaders", Rfc3986UriEncode(signedHeadersBuilder.toString(), true)); + + // The "X-Goog-Signature" param is not included here. 
+ return queryStringFromParamMap(sortedParamMap); + } + + public HttpMethod getHttpVerb() { + return httpVerb; + } + + public String getContentMd5() { + return contentMd5; + } + + public String getContentType() { + return contentType; + } + + public long getExpiration() { + return expiration; + } + + public Map getCanonicalizedExtensionHeaders() { + return canonicalizedExtensionHeaders; + } + + public Map getQueryParams() { + return queryParams; + } + + public URI getCanonicalizedResource() { + return canonicalizedResource; + } + + public Storage.SignUrlOption.SignatureVersion getSignatureVersion() { + return signatureVersion; + } + + public long getTimestamp() { + return timestamp; + } + + public String getAccountEmail() { + return accountEmail; + } + + public static final class Builder { + + private final HttpMethod httpVerb; + private String contentMd5; + private String contentType; + private final long expiration; + private Map canonicalizedExtensionHeaders; + private Map queryParams; + private final URI canonicalizedResource; + private Storage.SignUrlOption.SignatureVersion signatureVersion; + private String accountEmail; + private long timestamp; + + /** + * Constructs builder. + * + * @param httpVerb the HTTP method + * @param expiration the EPOX expiration date + * @param canonicalizedResource the resource URI + * @throws IllegalArgumentException if required field is not provided. 
+ */ + public Builder(HttpMethod httpVerb, long expiration, URI canonicalizedResource) { + this.httpVerb = httpVerb; + this.expiration = expiration; + this.canonicalizedResource = canonicalizedResource; + } + + public Builder(SignatureInfo signatureInfo) { + this.httpVerb = signatureInfo.httpVerb; + this.contentMd5 = signatureInfo.contentMd5; + this.contentType = signatureInfo.contentType; + this.expiration = signatureInfo.expiration; + this.canonicalizedExtensionHeaders = signatureInfo.canonicalizedExtensionHeaders; + this.queryParams = signatureInfo.queryParams; + this.canonicalizedResource = signatureInfo.canonicalizedResource; + this.signatureVersion = signatureInfo.signatureVersion; + this.accountEmail = signatureInfo.accountEmail; + this.timestamp = signatureInfo.timestamp; + } + + public Builder setContentMd5(String contentMd5) { + this.contentMd5 = contentMd5; + + return this; + } + + public Builder setContentType(String contentType) { + this.contentType = contentType; + + return this; + } + + public Builder setCanonicalizedExtensionHeaders( + Map canonicalizedExtensionHeaders) { + this.canonicalizedExtensionHeaders = canonicalizedExtensionHeaders; + + return this; + } + + public Builder setCanonicalizedQueryParams(Map queryParams) { + this.queryParams = queryParams; + + return this; + } + + public Builder setSignatureVersion(Storage.SignUrlOption.SignatureVersion signatureVersion) { + this.signatureVersion = signatureVersion; + + return this; + } + + public Builder setAccountEmail(String accountEmail) { + this.accountEmail = accountEmail; + + return this; + } + + public Builder setTimestamp(long timestamp) { + this.timestamp = timestamp; + + return this; + } + + /** Creates an {@code SignatureInfo} object from this builder. 
*/ + public SignatureInfo build() { + checkArgument(httpVerb != null, "Required HTTP method"); + checkArgument(canonicalizedResource != null, "Required canonicalized resource"); + checkArgument(expiration >= 0, "Expiration must be greater than or equal to zero"); + + if (Storage.SignUrlOption.SignatureVersion.V4.equals(signatureVersion)) { + checkArgument(accountEmail != null, "Account email required to use V4 signing"); + checkArgument(timestamp > 0, "Timestamp required to use V4 signing"); + checkArgument( + expiration <= 604800, "Expiration can't be longer than 7 days to use V4 signing"); + } + + if (canonicalizedExtensionHeaders == null) { + canonicalizedExtensionHeaders = new HashMap<>(); + } + + if (queryParams == null) { + queryParams = new HashMap<>(); + } + + return new SignatureInfo(this); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/SignedUrlEncodingHelper.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/SignedUrlEncodingHelper.java new file mode 100644 index 000000000000..ed6954edb1a7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/SignedUrlEncodingHelper.java @@ -0,0 +1,46 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; + +/** Helper for encoding URI segments appropriately when creating a Signed URL. */ +class SignedUrlEncodingHelper { + + static String Rfc3986UriEncode(final String segment, final boolean encodeForwardSlash) { + String encodedSegment; + try { + encodedSegment = URLEncoder.encode(segment, "UTF-8"); + } catch (UnsupportedEncodingException exception) { + throw new RuntimeException(exception); + } + // URLEncoder.encode() does mostly what we want, with the exception of a few characters that + // we fix in a second phase: + encodedSegment = + encodedSegment + .replace("*", "%2A") // Asterisks should be encoded. + .replace("+", "%20") // Spaces should be encoded as %20 instead of a plus sign. + .replace("%7E", "~"); // Tildes should not be encoded. + // Forward slashes should NOT be encoded in the segment of the URI that represents the + // object's name, but should be encoded for all other segments. + if (!encodeForwardSlash) { + encodedSegment = encodedSegment.replace("%2F", "/"); + } + return encodedSegment; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Storage.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Storage.java new file mode 100644 index 000000000000..12ac95dff7c4 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Storage.java @@ -0,0 +1,5990 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.CrossTransportUtils.fmtMethodName; +import static com.google.cloud.storage.CrossTransportUtils.throwGrpcOnly; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.api.core.InternalApi; +import com.google.api.core.InternalExtensionOnly; +import com.google.api.gax.paging.Page; +import com.google.auth.ServiceAccountSigner; +import com.google.auth.ServiceAccountSigner.SigningException; +import com.google.cloud.FieldSelector; +import com.google.cloud.Policy; +import com.google.cloud.ReadChannel; +import com.google.cloud.Service; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.HmacKey.HmacKeyMetadata; +import com.google.cloud.storage.PostPolicyV4.PostConditionsV4; +import com.google.cloud.storage.PostPolicyV4.PostFieldsV4; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.UnifiedOpts.BucketListOpt; +import com.google.cloud.storage.UnifiedOpts.BucketSourceOpt; +import com.google.cloud.storage.UnifiedOpts.BucketTargetOpt; +import com.google.cloud.storage.UnifiedOpts.HmacKeyListOpt; +import com.google.cloud.storage.UnifiedOpts.HmacKeySourceOpt; +import com.google.cloud.storage.UnifiedOpts.HmacKeyTargetOpt; +import 
com.google.cloud.storage.UnifiedOpts.NamedField; +import com.google.cloud.storage.UnifiedOpts.NestedNamedField; +import com.google.cloud.storage.UnifiedOpts.ObjectListOpt; +import com.google.cloud.storage.UnifiedOpts.ObjectSourceOpt; +import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterables; +import com.google.common.collect.Streams; +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInputStream; +import java.io.OutputStream; +import java.io.Serializable; +import java.math.BigInteger; +import java.net.URL; +import java.net.URLConnection; +import java.nio.file.Path; +import java.security.Key; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * An interface for Google Cloud Storage. 
 *
 * @see <a href="https://cloud.google.com/storage/docs">Google Cloud Storage</a>
 */
@InternalExtensionOnly
public interface Storage extends Service, AutoCloseable {

  /**
   * Predefined ("canned") ACL configurations that can be applied to a bucket or object in place
   * of individual grants. Each value carries both its JSON API name and its XML API name.
   */
  @TransportCompatibility({Transport.HTTP, Transport.GRPC})
  enum PredefinedAcl {
    AUTHENTICATED_READ("authenticatedRead", "authenticated-read"),
    ALL_AUTHENTICATED_USERS("allAuthenticatedUsers", "all-authenticated-users"),
    PRIVATE("private", "private"),
    PROJECT_PRIVATE("projectPrivate", "project-private"),
    PUBLIC_READ("publicRead", "public-read"),
    PUBLIC_READ_WRITE("publicReadWrite", "public-read-write"),
    BUCKET_OWNER_READ("bucketOwnerRead", "bucket-owner-read"),
    BUCKET_OWNER_FULL_CONTROL("bucketOwnerFullControl", "bucket-owner-full-control");

    private final String entry; // JSON API representation
    private final String xmlEntry; // XML API representation

    PredefinedAcl(String entry, String xmlEntry) {
      this.entry = entry;
      this.xmlEntry = xmlEntry;
    }

    String getEntry() {
      return entry;
    }

    String getXmlEntry() {
      return xmlEntry;
    }
  }

  /**
   * Selectable metadata fields of a bucket. Each value maps the JSON (Apiary) field name, the
   * gRPC (protobuf) field name where it differs, and the JSON model class of the field's value.
   * NOTE(review): generic type arguments appear to have been stripped from this excerpt during
   * extraction (e.g. raw {@code List}/{@code Map}); confirm against the canonical source.
   */
  enum BucketField implements FieldSelector, NamedField {
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    ID("id", "bucket_id", String.class),
    @TransportCompatibility(Transport.HTTP)
    SELF_LINK("selfLink", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    NAME("name", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    TIME_CREATED("timeCreated", "create_time", com.google.api.client.util.DateTime.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    METAGENERATION("metageneration", Long.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    ACL("acl", ArrayList.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    DEFAULT_OBJECT_ACL("defaultObjectAcl", "default_object_acl", ArrayList.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    OWNER("owner", com.google.api.services.storage.model.Bucket.Owner.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    LABELS("labels", HashMap.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    LOCATION("location", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    LOCATION_TYPE("locationType", "location_type", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    WEBSITE("website", com.google.api.services.storage.model.Bucket.Website.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    VERSIONING("versioning", com.google.api.services.storage.model.Bucket.Versioning.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    CORS("cors", ArrayList.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    LIFECYCLE("lifecycle", com.google.api.services.storage.model.Bucket.Lifecycle.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    STORAGE_CLASS("storageClass", "storage_class", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    ETAG("etag", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    ENCRYPTION("encryption", com.google.api.services.storage.model.Bucket.Encryption.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    BILLING("billing", com.google.api.services.storage.model.Bucket.Billing.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    DEFAULT_EVENT_BASED_HOLD("defaultEventBasedHold", "default_event_based_hold", Boolean.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    RETENTION_POLICY(
        "retentionPolicy",
        "retention_policy",
        com.google.api.services.storage.model.Bucket.RetentionPolicy.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    IAMCONFIGURATION(
        "iamConfiguration",
        "iam_config",
        com.google.api.services.storage.model.Bucket.IamConfiguration.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    LOGGING("logging", com.google.api.services.storage.model.Bucket.Logging.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    UPDATED("updated", "update_time", com.google.api.client.util.DateTime.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    RPO("rpo", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    CUSTOM_PLACEMENT_CONFIG(
        "customPlacementConfig",
        "custom_placement_config",
        com.google.api.services.storage.model.Bucket.CustomPlacementConfig.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    AUTOCLASS("autoclass", com.google.api.services.storage.model.Bucket.Autoclass.class),

    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    HIERARCHICAL_NAMESPACE(
        "hierarchicalNamespace",
        "hierarchical_namespace",
        com.google.api.services.storage.model.Bucket.HierarchicalNamespace.class),
    @TransportCompatibility({Transport.HTTP})
    OBJECT_RETENTION(
        "objectRetention", com.google.api.services.storage.model.Bucket.ObjectRetention.class),

    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    SOFT_DELETE_POLICY(
        "softDeletePolicy",
        "soft_delete_policy",
        com.google.api.services.storage.model.Bucket.SoftDeletePolicy.class),

    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    PROJECT("projectNumber", "project", BigInteger.class),
    /**
     * @since 2.54.0
     */
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    IP_FILTER("ipFilter", "ip_filter", com.google.api.services.storage.model.Bucket.IpFilter.class),
    ;

    // Fields that are always selected so returned buckets remain addressable.
    static final List REQUIRED_FIELDS = ImmutableList.of(NAME);
    // Index from JSON (Apiary) field name to enum value, built once at class load.
    private static final Map JSON_FIELD_NAME_INDEX;

    static {
      ImmutableMap.Builder tmp = ImmutableMap.builder();
      for (BucketField field : values()) {
        tmp.put(field.selector, field);
      }
      JSON_FIELD_NAME_INDEX = Utils.mapBuild(tmp);
    }

    private final String selector; // JSON API field name
    private final String grpcFieldName; // gRPC (protobuf) field name
    private final Class jsonClass; // JSON model class of the field's value

    // Convenience constructor for fields whose JSON and gRPC names coincide.
    BucketField(String selector, Class jsonClass) {
      this(selector, selector, jsonClass);
    }

    BucketField(String selector, String grpcFieldName, Class jsonClass) {
      this.selector = selector;
      this.grpcFieldName = grpcFieldName;
      this.jsonClass = jsonClass;
    }

    @Override
    public String getSelector() {
      return selector;
    }

    @Override
    public String getApiaryName() {
      return selector;
    }

    @Override
    public String getGrpcName() {
      return grpcFieldName;
    }

    Class getJsonClass() {
      return jsonClass;
    }

    // Resolves a (possibly nested) field reference to its top-level BucketField; returns null
    // when the name is not a known bucket field.
    @Nullable
    static BucketField lookup(NamedField nf) {
      NamedField lookup = nf;
      if (nf instanceof NestedNamedField) {
        NestedNamedField nested = (NestedNamedField) nf;
        lookup = nested.getParent();
      }
      return JSON_FIELD_NAME_INDEX.get(lookup.getApiaryName());
    }
  }

  /**
   * Selectable metadata fields of an object (blob). Structure parallels {@link BucketField}:
   * JSON name, optional distinct gRPC name, and the JSON model class of the value.
   */
  enum BlobField implements FieldSelector, NamedField {
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    ACL("acl", com.google.api.services.storage.model.ObjectAccessControl.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    BUCKET("bucket", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    CACHE_CONTROL("cacheControl", "cache_control", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    COMPONENT_COUNT("componentCount", "component_count", Integer.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    CONTENT_DISPOSITION("contentDisposition", "content_disposition", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    CONTENT_ENCODING("contentEncoding", "content_encoding", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    CONTENT_LANGUAGE("contentLanguage", "content_language", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    CONTENT_TYPE("contentType", "content_type", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    CRC32C("crc32c", "checksums.crc32c", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    ETAG("etag", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    GENERATION("generation", Long.class),
    @TransportCompatibility(Transport.HTTP)
    ID("id", String.class),
    /** {@code kind} is not exposed in {@link BlobInfo} or {@link Blob} no need to select it */
    @Deprecated
    @TransportCompatibility(Transport.HTTP)
    KIND("kind", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    MD5HASH("md5Hash", "checksums.md5_hash", String.class),
    @TransportCompatibility(Transport.HTTP)
    MEDIA_LINK("mediaLink", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    METADATA("metadata", HashMap.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    METAGENERATION("metageneration", Long.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    NAME("name", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    OWNER("owner", com.google.api.services.storage.model.StorageObject.Owner.class),
    @TransportCompatibility(Transport.HTTP)
    SELF_LINK("selfLink", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    SIZE("size", java.math.BigInteger.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    STORAGE_CLASS("storageClass", "storage_class", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    TIME_DELETED("timeDeleted", "delete_time", com.google.api.client.util.DateTime.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    TIME_CREATED("timeCreated", "create_time", com.google.api.client.util.DateTime.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    KMS_KEY_NAME("kmsKeyName", "kms_key", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    EVENT_BASED_HOLD("eventBasedHold", "event_based_hold", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    TEMPORARY_HOLD("temporaryHold", "temporary_hold", String.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    RETENTION_EXPIRATION_TIME(
        "retentionExpirationTime",
        "retention_expire_time",
        com.google.api.client.util.DateTime.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    UPDATED("updated", "update_time", com.google.api.client.util.DateTime.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    CUSTOM_TIME("customTime", "custom_time", com.google.api.client.util.DateTime.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    TIME_STORAGE_CLASS_UPDATED(
        "timeStorageClassUpdated",
        "update_storage_class_time",
        com.google.api.client.util.DateTime.class),
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    CUSTOMER_ENCRYPTION("customerEncryption", "customer_encryption", String.class),
    @TransportCompatibility({Transport.HTTP})
    RETENTION("retention", com.google.api.services.storage.model.StorageObject.Retention.class),

    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    SOFT_DELETE_TIME(
        "softDeleteTime", "soft_delete_time", com.google.api.client.util.DateTime.class),

    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    HARD_DELETE_TIME(
        "hardDeleteTime", "hard_delete_time", com.google.api.client.util.DateTime.class),

    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    OBJECT_CONTEXTS(
        "contexts", "contexts", com.google.api.services.storage.model.StorageObject.Contexts.class);

    // Fields that are always selected so returned blobs remain addressable.
    static final List REQUIRED_FIELDS = ImmutableList.of(BUCKET, NAME);
    // Index from JSON (Apiary) field name to enum value, built once at class load.
    private static final Map JSON_FIELD_NAME_INDEX;

    static {
      ImmutableMap.Builder tmp = ImmutableMap.builder();
      for (BlobField field : values()) {
        tmp.put(field.selector, field);
      }
      JSON_FIELD_NAME_INDEX = Utils.mapBuild(tmp);
    }

    private final String selector; // JSON API field name
    private final String grpcFieldName; // gRPC (protobuf) field name
    private final Class jsonClass; // JSON model class of the field's value

    // Convenience constructor for fields whose JSON and gRPC names coincide.
    BlobField(String selector, Class jsonClass) {
      this(selector, selector, jsonClass);
    }

    BlobField(String selector, String grpcFieldName, Class jsonClass) {
      this.selector = selector;
      this.grpcFieldName = grpcFieldName;
      this.jsonClass = jsonClass;
    }

    @Override
    public String getSelector() {
      return selector;
    }

    @Override
    public String getApiaryName() {
      return selector;
    }

    @Override
    public String getGrpcName() {
      return grpcFieldName;
    }

    Class getJsonClass() {
      return jsonClass;
    }

    // Resolves a (possibly nested) field reference to its top-level BlobField; returns null when
    // the name is not a known blob field.
    @Nullable
    static BlobField lookup(NamedField nf) {
      NamedField lookup = nf;
      if (nf instanceof NestedNamedField) {
        NestedNamedField nested = (NestedNamedField) nf;
        lookup = nested.getParent();
      }
      return JSON_FIELD_NAME_INDEX.get(lookup.getApiaryName());
    }
  }

  /** URI schemes usable when building storage URLs. */
  enum UriScheme {
    HTTP("http"),
    HTTPS("https");

    private final String scheme;

    UriScheme(String scheme) {
      this.scheme = scheme;
    }

    public String getScheme() {
      return scheme;
    }
  }

  /** Class for specifying bucket target options. */
  class BucketTargetOption extends Option {

    private static final long serialVersionUID = 6699243191830059404L;

    private BucketTargetOption(BucketTargetOpt opt) {
      super(opt);
    }

    /** Returns an option for specifying bucket's predefined ACL configuration. */
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    public static BucketTargetOption predefinedAcl(@NonNull PredefinedAcl acl) {
      return new BucketTargetOption(UnifiedOpts.predefinedAcl(acl));
    }

    /** Returns an option for specifying bucket's default ACL configuration for blobs. */
    @TransportCompatibility({Transport.HTTP, Transport.GRPC})
    public static BucketTargetOption predefinedDefaultObjectAcl(@NonNull PredefinedAcl acl) {
      return new BucketTargetOption(UnifiedOpts.predefinedDefaultObjectAcl(acl));
    }

    /**
     * Returns an option for enabling Object Retention on this bucket.
Enabling this will create an + * ObjectRetention object in the created bucket (You must use this option, creating your own + * ObjectRetention object in the request won't work). + */ + @TransportCompatibility({Transport.HTTP}) + public static BucketTargetOption enableObjectRetention(boolean enable) { + return new BucketTargetOption(UnifiedOpts.enableObjectRetention(enable)); + } + + /** + * Returns an option for bucket's metageneration match. If this option is used the request will + * fail if metageneration does not match. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketTargetOption metagenerationMatch() { + return new BucketTargetOption(UnifiedOpts.metagenerationMatchExtractor()); + } + + /** + * Returns an option for bucket's metageneration mismatch. If this option is used the request + * will fail if metageneration matches. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketTargetOption metagenerationNotMatch() { + return new BucketTargetOption(UnifiedOpts.metagenerationNotMatchExtractor()); + } + + /** + * Returns an option to define the billing user project. This option is required by buckets with + * `requester_pays` flag enabled to assign operation costs. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketTargetOption userProject(@NonNull String userProject) { + return new BucketTargetOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Returns an option to define the projection in the API request. In some cases this option may + * be needed to be set to `noAcl` to omit ACL data from the response. 
The default value is + * `full` + * + * @see Buckets: + * patch + */ + @TransportCompatibility({Transport.HTTP}) + public static BucketTargetOption projection(@NonNull String projection) { + return new BucketTargetOption(UnifiedOpts.projection(projection)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketTargetOption extraHeaders( + @NonNull ImmutableMap extraHeaders) { + return new BucketTargetOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static BucketTargetOption[] dedupe(BucketTargetOption... os) { + return Option.dedupe(BucketTargetOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BucketTargetOption[] dedupe( + Collection collection, BucketTargetOption... os) { + return Option.dedupe(BucketTargetOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code array} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * array}. + */ + public static BucketTargetOption[] dedupe( + BucketTargetOption[] array, BucketTargetOption... os) { + return Option.dedupe(BucketTargetOption[]::new, array, os); + } + } + + /** Class for specifying bucket source options. */ + class BucketSourceOption extends Option { + + private static final long serialVersionUID = 3808812145390746748L; + + BucketSourceOption(BucketSourceOpt opt) { + super(opt); + } + + /** + * Returns an option for bucket's metageneration match. If this option is used the request will + * fail if bucket's metageneration does not match the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketSourceOption metagenerationMatch(long metageneration) { + return new BucketSourceOption(UnifiedOpts.metagenerationMatch(metageneration)); + } + + /** + * Returns an option for bucket's metageneration mismatch. If this option is used the request + * will fail if bucket's metageneration matches the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketSourceOption metagenerationNotMatch(long metageneration) { + return new BucketSourceOption(UnifiedOpts.metagenerationNotMatch(metageneration)); + } + + /** + * Returns an option for bucket's billing user project. This option is only used by the buckets + * with 'requester_pays' flag. 
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketSourceOption userProject(@NonNull String userProject) { + return new BucketSourceOption(UnifiedOpts.userProject(userProject)); + } + + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketSourceOption requestedPolicyVersion(long version) { + return new BucketSourceOption(UnifiedOpts.requestedPolicyVersion(version)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketSourceOption extraHeaders( + @NonNull ImmutableMap extraHeaders) { + return new BucketSourceOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static BucketSourceOption[] dedupe(BucketSourceOption... os) { + return Option.dedupe(BucketSourceOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BucketSourceOption[] dedupe( + Collection collection, BucketSourceOption... os) { + return Option.dedupe(BucketSourceOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code array} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * array}. + */ + public static BucketSourceOption[] dedupe( + BucketSourceOption[] array, BucketSourceOption... os) { + return Option.dedupe(BucketSourceOption[]::new, array, os); + } + } + + /** Class for specifying listHmacKeys options */ + class ListHmacKeysOption extends Option { + + private ListHmacKeysOption(HmacKeyListOpt opt) { + super(opt); + } + + /** + * Returns an option for the Service Account whose keys to list. If this option is not used, + * keys for all accounts will be listed. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static ListHmacKeysOption serviceAccount(@NonNull ServiceAccount serviceAccount) { + return new ListHmacKeysOption(UnifiedOpts.serviceAccount(serviceAccount)); + } + + /** Returns an option for the maximum amount of HMAC keys returned per page. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static ListHmacKeysOption maxResults(long pageSize) { + return new ListHmacKeysOption(UnifiedOpts.pageSize(pageSize)); + } + + /** Returns an option to specify the page token from which to start listing HMAC keys. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static ListHmacKeysOption pageToken(@NonNull String pageToken) { + return new ListHmacKeysOption(UnifiedOpts.pageToken(pageToken)); + } + + /** + * Returns an option to specify whether to show deleted keys in the result. This option is false + * by default. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static ListHmacKeysOption showDeletedKeys(boolean showDeletedKeys) { + return new ListHmacKeysOption(UnifiedOpts.showDeletedKeys(showDeletedKeys)); + } + + /** + * Returns an option to specify the project to be billed for this request. Required for + * Requester Pays buckets. 
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static ListHmacKeysOption userProject(@NonNull String userProject) { + return new ListHmacKeysOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Returns an option to specify the Project ID for this request. If not specified, defaults to + * Application Default Credentials. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static ListHmacKeysOption projectId(@NonNull String projectId) { + return new ListHmacKeysOption(UnifiedOpts.projectId(projectId)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static ListHmacKeysOption extraHeaders( + @NonNull ImmutableMap extraHeaders) { + return new ListHmacKeysOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static ListHmacKeysOption[] dedupe(ListHmacKeysOption... os) { + return Option.dedupe(ListHmacKeysOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static ListHmacKeysOption[] dedupe( + Collection collection, ListHmacKeysOption... os) { + return Option.dedupe(ListHmacKeysOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code array} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * array}. + */ + public static ListHmacKeysOption[] dedupe( + ListHmacKeysOption[] array, ListHmacKeysOption... os) { + return Option.dedupe(ListHmacKeysOption[]::new, array, os); + } + } + + /** Class for specifying createHmacKey options */ + class CreateHmacKeyOption extends Option { + + private CreateHmacKeyOption(HmacKeyTargetOpt opt) { + super(opt); + } + + /** + * Returns an option to specify the project to be billed for this request. Required for + * Requester Pays buckets. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static CreateHmacKeyOption userProject(@NonNull String userProject) { + return new CreateHmacKeyOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Returns an option to specify the Project ID for this request. If not specified, defaults to + * Application Default Credentials. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static CreateHmacKeyOption projectId(@NonNull String projectId) { + return new CreateHmacKeyOption(UnifiedOpts.projectId(projectId)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static CreateHmacKeyOption extraHeaders( + @NonNull ImmutableMap extraHeaders) { + return new CreateHmacKeyOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static CreateHmacKeyOption[] dedupe(CreateHmacKeyOption... os) { + return Option.dedupe(CreateHmacKeyOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static CreateHmacKeyOption[] dedupe( + Collection collection, CreateHmacKeyOption... os) { + return Option.dedupe(CreateHmacKeyOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code array} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * array}. + */ + public static CreateHmacKeyOption[] dedupe( + CreateHmacKeyOption[] array, CreateHmacKeyOption... os) { + return Option.dedupe(CreateHmacKeyOption[]::new, array, os); + } + } + + /** Class for specifying getHmacKey options */ + class GetHmacKeyOption extends Option { + + private GetHmacKeyOption(HmacKeySourceOpt opt) { + super(opt); + } + + /** + * Returns an option to specify the project to be billed for this request. Required for + * Requester Pays buckets. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static GetHmacKeyOption userProject(@NonNull String userProject) { + return new GetHmacKeyOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Returns an option to specify the Project ID for this request. If not specified, defaults to + * Application Default Credentials. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static GetHmacKeyOption projectId(@NonNull String projectId) { + return new GetHmacKeyOption(UnifiedOpts.projectId(projectId)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static GetHmacKeyOption extraHeaders( + @NonNull ImmutableMap extraHeaders) { + return new GetHmacKeyOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static GetHmacKeyOption[] dedupe(GetHmacKeyOption... os) { + return Option.dedupe(GetHmacKeyOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static GetHmacKeyOption[] dedupe( + Collection collection, GetHmacKeyOption... os) { + return Option.dedupe(GetHmacKeyOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code array} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * array}. + */ + public static GetHmacKeyOption[] dedupe(GetHmacKeyOption[] array, GetHmacKeyOption... os) { + return Option.dedupe(GetHmacKeyOption[]::new, array, os); + } + } + + /** Class for specifying deleteHmacKey options */ + class DeleteHmacKeyOption extends Option { + + private DeleteHmacKeyOption(HmacKeyTargetOpt opt) { + super(opt); + } + + /** + * Returns an option to specify the project to be billed for this request. Required for + * Requester Pays buckets. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static DeleteHmacKeyOption userProject(@NonNull String userProject) { + return new DeleteHmacKeyOption(UnifiedOpts.userProject(userProject)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static DeleteHmacKeyOption extraHeaders( + @NonNull ImmutableMap extraHeaders) { + return new DeleteHmacKeyOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static DeleteHmacKeyOption[] dedupe(DeleteHmacKeyOption... os) { + return Option.dedupe(DeleteHmacKeyOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static DeleteHmacKeyOption[] dedupe( + Collection collection, DeleteHmacKeyOption... os) { + return Option.dedupe(DeleteHmacKeyOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code array} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * array}. + */ + public static DeleteHmacKeyOption[] dedupe( + DeleteHmacKeyOption[] array, DeleteHmacKeyOption... os) { + return Option.dedupe(DeleteHmacKeyOption[]::new, array, os); + } + } + + /** Class for specifying updateHmacKey options */ + class UpdateHmacKeyOption extends Option { + + private UpdateHmacKeyOption(HmacKeyTargetOpt opt) { + super(opt); + } + + /** + * Returns an option to specify the project to be billed for this request. Required for + * Requester Pays buckets. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static UpdateHmacKeyOption userProject(@NonNull String userProject) { + return new UpdateHmacKeyOption(UnifiedOpts.userProject(userProject)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static UpdateHmacKeyOption extraHeaders( + @NonNull ImmutableMap extraHeaders) { + return new UpdateHmacKeyOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static UpdateHmacKeyOption[] dedupe(UpdateHmacKeyOption... os) { + return Option.dedupe(UpdateHmacKeyOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static UpdateHmacKeyOption[] dedupe( + Collection collection, UpdateHmacKeyOption... os) { + return Option.dedupe(UpdateHmacKeyOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static UpdateHmacKeyOption[] dedupe( + UpdateHmacKeyOption[] array, UpdateHmacKeyOption... os) { + return Option.dedupe(UpdateHmacKeyOption[]::new, array, os); + } + } + + /** Class for specifying bucket get options. */ + class BucketGetOption extends Option { + + private static final long serialVersionUID = -669900932880354035L; + + BucketGetOption(BucketSourceOpt opt) { + super(opt); + } + + /** + * Returns an option for bucket's metageneration match. If this option is used the request will + * fail if bucket's metageneration does not match the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketGetOption metagenerationMatch(long metageneration) { + return new BucketGetOption(UnifiedOpts.metagenerationMatch(metageneration)); + } + + /** + * Returns an option for bucket's metageneration mismatch. If this option is used the request + * will fail if bucket's metageneration matches the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketGetOption metagenerationNotMatch(long metageneration) { + return new BucketGetOption(UnifiedOpts.metagenerationNotMatch(metageneration)); + } + + /** + * Returns an option for bucket's billing user project. This option is only used by the buckets + * with 'requester_pays' flag. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketGetOption userProject(@NonNull String userProject) { + return new BucketGetOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Returns an option to specify the bucket's fields to be returned by the RPC call. If this + * option is not provided all bucket's fields are returned. 
{@code BucketGetOption.fields} can + * be used to specify only the fields of interest. Bucket name is always returned, even if not + * specified. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketGetOption fields(BucketField... fields) { + requireNonNull(fields, "fields must be non null"); + ImmutableSet set = + ImmutableSet.builder() + .addAll(BucketField.REQUIRED_FIELDS) + .add(fields) + .build(); + return new BucketGetOption(UnifiedOpts.fields(set)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketGetOption extraHeaders(@NonNull ImmutableMap extraHeaders) { + return new BucketGetOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static BucketGetOption[] dedupe(BucketGetOption... os) { + return Option.dedupe(BucketGetOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BucketGetOption[] dedupe( + Collection collection, BucketGetOption... os) { + return Option.dedupe(BucketGetOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BucketGetOption[] dedupe(BucketGetOption[] array, BucketGetOption... os) { + return Option.dedupe(BucketGetOption[]::new, array, os); + } + } + + /** Class for specifying blob target options. */ + class BlobTargetOption extends Option { + + private static final long serialVersionUID = -5554842495450599563L; + + BlobTargetOption(ObjectTargetOpt opt) { + super(opt); + } + + /** Returns an option for specifying blob's predefined ACL configuration. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption predefinedAcl(@NonNull PredefinedAcl acl) { + return new BlobTargetOption(UnifiedOpts.predefinedAcl(acl)); + } + + /** + * Returns an option that causes an operation to succeed only if the target blob does not exist. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption doesNotExist() { + return new BlobTargetOption(UnifiedOpts.doesNotExist()); + } + + /** + * Returns an option for blob's data generation match. If this option is used the request will + * fail if generation does not match. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption generationMatch() { + return new BlobTargetOption(UnifiedOpts.generationMatchExtractor()); + } + + /** + * Returns an option for blob's data generation match. If this option is used the request will + * fail if blob's generation does not match the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption generationMatch(long generation) { + return new BlobTargetOption(UnifiedOpts.generationMatch(generation)); + } + + /** + * Returns an option for blob's data generation mismatch. 
If this option is used the request + * will fail if generation matches the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption generationNotMatch(long generation) { + return new BlobTargetOption(UnifiedOpts.generationNotMatch(generation)); + } + + /** + * Returns an option for blob's data generation mismatch. If this option is used the request + * will fail if generation matches. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption generationNotMatch() { + return new BlobTargetOption(UnifiedOpts.generationNotMatchExtractor()); + } + + /** + * Returns an option for blob's metageneration match. If this option is used the request will + * fail if metageneration does not match. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption metagenerationMatch() { + return new BlobTargetOption(UnifiedOpts.metagenerationMatchExtractor()); + } + + /** + * Returns an option for blob's metageneration match. If this option is used the request will + * fail if blob's metageneration does not match the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption metagenerationMatch(long metageneration) { + return new BlobTargetOption(UnifiedOpts.metagenerationMatch(metageneration)); + } + + /** + * Returns an option for blob's metageneration mismatch. If this option is used the request will + * fail if metageneration matches the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption metagenerationNotMatch(long metageneration) { + return new BlobTargetOption(UnifiedOpts.metagenerationNotMatch(metageneration)); + } + + /** + * Returns an option for blob's metageneration mismatch. If this option is used the request will + * fail if metageneration matches. 
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption metagenerationNotMatch() { + return new BlobTargetOption(UnifiedOpts.metagenerationNotMatchExtractor()); + } + + /** + * Returns an option for blob's data disabledGzipContent. If this option is used, the request + * will create a blob with disableGzipContent; at present, this is only for upload. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption disableGzipContent() { + return new BlobTargetOption(UnifiedOpts.disableGzipContent()); + } + + /** + * Returns an option for detecting content type. If this option is used, the content type is + * detected from the blob name if not explicitly set. This option is on the client side only, it + * does not appear in a RPC call. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption detectContentType() { + return new BlobTargetOption(UnifiedOpts.detectContentType()); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side encryption of the + * blob. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption encryptionKey(@NonNull Key key) { + return new BlobTargetOption(UnifiedOpts.encryptionKey(key)); + } + + /** + * Returns an option for blob's billing user project. This option is only used by the buckets + * with 'requester_pays' flag. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption userProject(@NonNull String userProject) { + return new BlobTargetOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side encryption of the + * blob. 
+ * + * @param key the AES256 encoded in base64 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption encryptionKey(@NonNull String key) { + return new BlobTargetOption(UnifiedOpts.encryptionKey(key)); + } + + /** Returns an option to set a customer-managed key for server-side encryption of the blob. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption kmsKeyName(@NonNull String kmsKeyName) { + return new BlobTargetOption(UnifiedOpts.kmsKeyName(kmsKeyName)); + } + + /** + * Returns an option for overriding an Unlocked Retention policy. This must be set to true in + * order to change a policy from Unlocked to Locked, to set it to null, or to reduce its + * retainUntilTime attribute. + */ + @TransportCompatibility({Transport.HTTP}) + public static BlobTargetOption overrideUnlockedRetention(boolean overrideUnlockedRetention) { + return new BlobTargetOption(UnifiedOpts.overrideUnlockedRetention(overrideUnlockedRetention)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobTargetOption extraHeaders( + @NonNull ImmutableMap extraHeaders) { + return new BlobTargetOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static BlobTargetOption[] dedupe(BlobTargetOption... os) { + return Option.dedupe(BlobTargetOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobTargetOption[] dedupe( + Collection collection, BlobTargetOption... os) { + return Option.dedupe(BlobTargetOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobTargetOption[] dedupe(BlobTargetOption[] array, BlobTargetOption... os) { + return Option.dedupe(BlobTargetOption[]::new, array, os); + } + } + + /** Class for specifying blob write options. */ + class BlobWriteOption extends Option implements Serializable { + + private static final long serialVersionUID = 5536338021856320475L; + + BlobWriteOption(ObjectTargetOpt opt) { + super(opt); + } + + /** Returns an option for specifying blob's predefined ACL configuration. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption predefinedAcl(@NonNull PredefinedAcl acl) { + return new BlobWriteOption(UnifiedOpts.predefinedAcl(acl)); + } + + /** + * Returns an option that causes an operation to succeed only if the target blob does not exist. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption doesNotExist() { + return new BlobWriteOption(UnifiedOpts.doesNotExist()); + } + + /** + * Returns an option for blob's data generation match. If this option is used the request will + * fail if generation does not match. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption generationMatch() { + return new BlobWriteOption(UnifiedOpts.generationMatchExtractor()); + } + + /** + * Returns an option for blob's data generation match. If this option is used the request will + * fail if blob's generation does not match the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption generationMatch(long generation) { + return new BlobWriteOption(UnifiedOpts.generationMatch(generation)); + } + + /** + * Returns an option for blob's data generation mismatch. 
If this option is used the request + * will fail if generation matches. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption generationNotMatch() { + return new BlobWriteOption(UnifiedOpts.generationNotMatchExtractor()); + } + + /** + * Returns an option for blob's data generation mismatch. If this option is used the request + * will fail if blob's generation matches the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption generationNotMatch(long generation) { + return new BlobWriteOption(UnifiedOpts.generationNotMatch(generation)); + } + + /** + * Returns an option for blob's metageneration match. If this option is used the request will + * fail if metageneration does not match. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption metagenerationMatch() { + return new BlobWriteOption(UnifiedOpts.metagenerationMatchExtractor()); + } + + /** + * Returns an option for blob's metageneration match. If this option is used the request will + * fail if blob's metageneration does not match the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption metagenerationMatch(long metageneration) { + return new BlobWriteOption(UnifiedOpts.metagenerationMatch(metageneration)); + } + + /** + * Returns an option for blob's metageneration mismatch. If this option is used the request will + * fail if metageneration matches. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption metagenerationNotMatch() { + return new BlobWriteOption(UnifiedOpts.metagenerationNotMatchExtractor()); + } + + /** + * Returns an option for blob's metageneration mismatch. If this option is used the request will + * fail if blob's metageneration matches the provided value. 
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption metagenerationNotMatch(long metageneration) { + return new BlobWriteOption(UnifiedOpts.metagenerationNotMatch(metageneration)); + } + + /** + * Returns an option for blob's data MD5 hash match. If this option is used the request will + * fail if blobs' data MD5 hash does not match. + * + * @deprecated Please compute and use a crc32c checksum instead. {@link #crc32cMatch()} + */ + @Deprecated + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption md5Match() { + return new BlobWriteOption(UnifiedOpts.md5MatchExtractor()); + } + + /** + * Returns an option for blob's data CRC32C checksum match. If this option is used the request + * will fail if blobs' data CRC32C checksum does not match. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption crc32cMatch() { + return new BlobWriteOption(UnifiedOpts.crc32cMatchExtractor()); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side encryption of the + * blob. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption encryptionKey(@NonNull Key key) { + return new BlobWriteOption(UnifiedOpts.encryptionKey(key)); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side encryption of the + * blob. + * + * @param key the AES256 encoded in base64 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption encryptionKey(@NonNull String key) { + return new BlobWriteOption(UnifiedOpts.encryptionKey(key)); + } + + /** + * Returns an option to set a customer-managed KMS key for server-side encryption of the blob. 
+ * + * @param kmsKeyName the KMS key resource id + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption kmsKeyName(@NonNull String kmsKeyName) { + return new BlobWriteOption(UnifiedOpts.kmsKeyName(kmsKeyName)); + } + + /** + * Returns an option for blob's billing user project. This option is only used by the buckets + * with 'requester_pays' flag. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption userProject(@NonNull String userProject) { + return new BlobWriteOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Returns an option that signals automatic gzip compression should not be performed en route to + * the bucket. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption disableGzipContent() { + return new BlobWriteOption(UnifiedOpts.disableGzipContent()); + } + + /** + * Returns an option for detecting content type. If this option is used, the content type is + * detected from the blob name if not explicitly set. This option is on the client side only, it + * does not appear in a RPC call. + * + *

Content type detection is based on the database presented by {@link + * URLConnection#getFileNameMap()} + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption detectContentType() { + return new BlobWriteOption(UnifiedOpts.detectContentType()); + } + + /** + * Set a precondition on the number of bytes that GCS should expect for a resumable upload. See + * the docs for X-Upload-Content-Length + * for more detail. + * + *

If the method invoked with this option does not perform a resumable upload, this option + * will be ignored. + * + * @since 2.42.0 + */ + @BetaApi + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption expectedObjectSize(long objectContentSize) { + return new BlobWriteOption(UnifiedOpts.resumableUploadExpectedObjectSize(objectContentSize)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobWriteOption extraHeaders(@NonNull ImmutableMap extraHeaders) { + return new BlobWriteOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static BlobWriteOption[] dedupe(BlobWriteOption... os) { + return Option.dedupe(BlobWriteOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobWriteOption[] dedupe( + Collection collection, BlobWriteOption... os) { + return Option.dedupe(BlobWriteOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobWriteOption[] dedupe(BlobWriteOption[] array, BlobWriteOption... os) { + return Option.dedupe(BlobWriteOption[]::new, array, os); + } + } + + /** Class for specifying blob source options. */ + class BlobSourceOption extends Option { + + private static final long serialVersionUID = -8626355836092280204L; + + BlobSourceOption(ObjectSourceOpt opt) { + super(opt); + } + + /** + * Returns an option for blob's data generation match. If this option is used the request will + * fail if blob's generation does not match. The generation value to compare with the actual + * blob's generation is taken from a source {@link BlobId} object. When this option is passed to + * a {@link Storage} method and {@link BlobId#getGeneration()} is {@code null} or no {@link + * BlobId} is provided an exception is thrown. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption generationMatch() { + return new BlobSourceOption(UnifiedOpts.generationMatchExtractor()); + } + + /** + * Returns an option for blob's data generation match. If this option is used the request will + * fail if blob's generation does not match the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption generationMatch(long generation) { + return new BlobSourceOption(UnifiedOpts.generationMatch(generation)); + } + + /** + * Returns an option for blob's data generation mismatch. If this option is used the request + * will fail if blob's generation matches. The generation value to compare with the actual + * blob's generation is taken from a source {@link BlobId} object. 
When this option is passed to + * a {@link Storage} method and {@link BlobId#getGeneration()} is {@code null} or no {@link + * BlobId} is provided an exception is thrown. + * + * @deprecated This option is invalid, and can never result in a valid response from the server. + * use {@link #generationNotMatch(long)} instead. + */ + @Deprecated + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption generationNotMatch() { + return new BlobSourceOption(UnifiedOpts.generationNotMatchExtractor()); + } + + /** + * Returns an option for blob's data generation mismatch. If this option is used the request + * will fail if blob's generation matches the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption generationNotMatch(long generation) { + return new BlobSourceOption(UnifiedOpts.generationNotMatch(generation)); + } + + /** + * Returns an option for blob's metageneration match. If this option is used the request will + * fail if blob's metageneration does not match the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption metagenerationMatch(long metageneration) { + return new BlobSourceOption(UnifiedOpts.metagenerationMatch(metageneration)); + } + + /** + * Returns an option for blob's metageneration mismatch. If this option is used the request will + * fail if blob's metageneration matches the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption metagenerationNotMatch(long metageneration) { + return new BlobSourceOption(UnifiedOpts.metagenerationNotMatch(metageneration)); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side encryption of the + * blob. 
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption decryptionKey(@NonNull Key key) { + return new BlobSourceOption(UnifiedOpts.decryptionKey(key)); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side encryption of the + * blob. + * + * @param key the AES256 encoded in base64 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption decryptionKey(@NonNull String key) { + return new BlobSourceOption(UnifiedOpts.decryptionKey(key)); + } + + /** + * Returns an option for blob's billing user project. This option is only used by the buckets + * with 'requester_pays' flag. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption userProject(@NonNull String userProject) { + return new BlobSourceOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Returns an option for whether the request should return the raw input stream, instead of + * automatically decompressing the content. By default, this is false for Blob.downloadTo(), but + * true for ReadChannel.read(). + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption shouldReturnRawInputStream(boolean shouldReturnRawInputStream) { + return new BlobSourceOption(UnifiedOpts.returnRawInputStream(shouldReturnRawInputStream)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobSourceOption extraHeaders( + @NonNull ImmutableMap extraHeaders) { + return new BlobSourceOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static BlobSourceOption[] dedupe(BlobSourceOption... os) { + return Option.dedupe(BlobSourceOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobSourceOption[] dedupe( + Collection collection, BlobSourceOption... os) { + return Option.dedupe(BlobSourceOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobSourceOption[] dedupe(BlobSourceOption[] array, BlobSourceOption... os) { + return Option.dedupe(BlobSourceOption[]::new, array, os); + } + } + + /** Class for specifying blob get options. */ + class BlobGetOption extends Option { + + private static final long serialVersionUID = -2857961421224394114L; + + BlobGetOption(ObjectSourceOpt opt) { + super(opt); + } + + /** + * Returns an option for blob's data generation match. If this option is used the request will + * fail if blob's generation does not match. The generation value to compare with the actual + * blob's generation is taken from a source {@link BlobId} object. When this option is passed to + * a {@link Storage} method and {@link BlobId#getGeneration()} is {@code null} or no {@link + * BlobId} is provided an exception is thrown. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobGetOption generationMatch() { + return new BlobGetOption(UnifiedOpts.generationMatchExtractor()); + } + + /** + * Returns an option for blob's data generation match. If this option is used the request will + * fail if blob's generation does not match the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobGetOption generationMatch(long generation) { + return new BlobGetOption(UnifiedOpts.generationMatch(generation)); + } + + /** + * Returns an option for blob's data generation mismatch. If this option is used the request + * will fail if blob's generation matches. The generation value to compare with the actual + * blob's generation is taken from a source {@link BlobId} object. 
When this option is passed to + * a {@link Storage} method and {@link BlobId#getGeneration()} is {@code null} or no {@link + * BlobId} is provided an exception is thrown. + * + * @deprecated This option is invalid, and can never result in a valid response from the server. + * use {@link #generationNotMatch(long)} instead. + */ + @Deprecated + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobGetOption generationNotMatch() { + return new BlobGetOption(UnifiedOpts.generationNotMatchExtractor()); + } + + /** + * Returns an option for blob's data generation mismatch. If this option is used the request + * will fail if blob's generation matches the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobGetOption generationNotMatch(long generation) { + return new BlobGetOption(UnifiedOpts.generationNotMatch(generation)); + } + + /** + * Returns an option for blob's metageneration match. If this option is used the request will + * fail if blob's metageneration does not match the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobGetOption metagenerationMatch(long metageneration) { + return new BlobGetOption(UnifiedOpts.metagenerationMatch(metageneration)); + } + + /** + * Returns an option for blob's metageneration mismatch. If this option is used the request will + * fail if blob's metageneration matches the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobGetOption metagenerationNotMatch(long metageneration) { + return new BlobGetOption(UnifiedOpts.metagenerationNotMatch(metageneration)); + } + + /** + * Returns an option to specify the blob's fields to be returned by the RPC call. If this option + * is not provided all blob's fields are returned. {@code BlobGetOption.fields}) can be used to + * specify only the fields of interest. 
Blob name and bucket are always returned, even if not + * specified. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobGetOption fields(BlobField... fields) { + requireNonNull(fields, "fields must be non null"); + ImmutableSet set = + ImmutableSet.builder().addAll(BlobField.REQUIRED_FIELDS).add(fields).build(); + return new BlobGetOption(UnifiedOpts.fields(set)); + } + + /** + * Returns an option for blob's billing user project. This option is only used by the buckets + * with 'requester_pays' flag. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobGetOption userProject(@NonNull String userProject) { + return new BlobGetOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side decryption of the + * blob. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobGetOption decryptionKey(@NonNull Key key) { + return new BlobGetOption(UnifiedOpts.decryptionKey(key)); + } + + /** + * Returns an option to set a customer-supplied AES256 key for server-side decryption of the + * blob. + * + * @param key the AES256 encoded in base64 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobGetOption decryptionKey(@NonNull String key) { + return new BlobGetOption(UnifiedOpts.decryptionKey(key)); + } + + /** + * Returns an option for whether the request should return the raw input stream, instead of + * automatically decompressing the content. By default, this is false for Blob.downloadTo(), but + * true for ReadChannel.read(). + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobGetOption shouldReturnRawInputStream(boolean shouldReturnRawInputStream) { + return new BlobGetOption(UnifiedOpts.returnRawInputStream(shouldReturnRawInputStream)); + } + + /** + * Returns an option for whether the request should return a soft-deleted object. 
If an object + * has been soft-deleted (Deleted while a Soft Delete Policy) is in place, this must be true or + * the request will return null. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobGetOption softDeleted(boolean softDeleted) { + return new BlobGetOption(UnifiedOpts.softDeleted(softDeleted)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobGetOption extraHeaders(@NonNull ImmutableMap extraHeaders) { + return new BlobGetOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static BlobGetOption[] dedupe(BlobGetOption... os) { + return Option.dedupe(BlobGetOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobGetOption[] dedupe( + Collection collection, BlobGetOption... os) { + return Option.dedupe(BlobGetOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobGetOption[] dedupe(BlobGetOption[] array, BlobGetOption... os) { + return Option.dedupe(BlobGetOption[]::new, array, os); + } + } + + /** Class for specifying blob restore options * */ + class BlobRestoreOption extends Option { + + private static final long serialVersionUID = 1922118465380110958L; + + BlobRestoreOption(ObjectSourceOpt opt) { + super(opt); + } + + /** + * Returns an option for blob's data generation match. If this option is used the request will + * fail if generation does not match. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobRestoreOption generationMatch(long generation) { + return new BlobRestoreOption(UnifiedOpts.generationMatch(generation)); + } + + /** + * Returns an option for blob's data generation mismatch. If this option is used the request + * will fail if blob's generation matches the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobRestoreOption generationNotMatch(long generation) { + return new BlobRestoreOption(UnifiedOpts.generationNotMatch(generation)); + } + + /** + * Returns an option for blob's metageneration match. If this option is used the request will + * fail if blob's metageneration does not match the provided value. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobRestoreOption metagenerationMatch(long generation) { + return new BlobRestoreOption(UnifiedOpts.metagenerationMatch(generation)); + } + + /** + * Returns an option for blob's metageneration mismatch. If this option is used the request will + * fail if blob's metageneration matches the provided value. 
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobRestoreOption metagenerationNotMatch(long generation) { + return new BlobRestoreOption(UnifiedOpts.metagenerationNotMatch(generation)); + } + + /** + * Returns an option for whether the restored object should copy the access controls of the + * source object. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobRestoreOption copySourceAcl(boolean copySourceAcl) { + return new BlobRestoreOption(UnifiedOpts.copySourceAcl(copySourceAcl)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobRestoreOption extraHeaders( + @NonNull ImmutableMap extraHeaders) { + return new BlobRestoreOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + } + + /** Class for specifying bucket list options. */ + class BucketListOption extends Option { + + private static final long serialVersionUID = 6388807550815607557L; + + private BucketListOption(BucketListOpt opt) { + super(opt); + } + + /** Returns an option to specify the maximum number of buckets returned per page. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketListOption pageSize(long pageSize) { + return new BucketListOption(UnifiedOpts.pageSize(pageSize)); + } + + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketListOption returnPartialSuccess(boolean returnPartialSuccess) { + return new BucketListOption(UnifiedOpts.returnPartialSuccess(returnPartialSuccess)); + } + + /** Returns an option to specify the page token from which to start listing buckets. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketListOption pageToken(@NonNull String pageToken) { + return new BucketListOption(UnifiedOpts.pageToken(pageToken)); + } + + /** + * Returns an option to set a prefix to filter results to buckets whose names begin with this + * prefix. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketListOption prefix(@NonNull String prefix) { + return new BucketListOption(UnifiedOpts.prefix(prefix)); + } + + /** + * Returns an option for bucket's billing user project. This option is only used by the buckets + * with 'requester_pays' flag. 
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketListOption userProject(@NonNull String userProject) { + return new BucketListOption(UnifiedOpts.userProject(userProject)); + } + + /** + * Returns an option to specify the bucket's fields to be returned by the RPC call. If this + * option is not provided all bucket's fields are returned. {@code BucketListOption.fields}) can + * be used to specify only the fields of interest. Bucket name is always returned, even if not + * specified. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketListOption fields(BucketField... fields) { + requireNonNull(fields, "fields must be non null"); + ImmutableSet set = + Streams.concat( + Stream.of(NamedField.literal("nextPageToken")), + Streams.concat(BucketField.REQUIRED_FIELDS.stream(), Arrays.stream(fields)) + .map(f -> NamedField.prefixed("items/", f))) + .collect(ImmutableSet.toImmutableSet()); + return new BucketListOption(UnifiedOpts.fields(set)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BucketListOption extraHeaders( + @NonNull ImmutableMap extraHeaders) { + return new BucketListOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static BucketListOption[] dedupe(BucketListOption... os) { + return Option.dedupe(BucketListOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BucketListOption[] dedupe( + Collection collection, BucketListOption... os) { + return Option.dedupe(BucketListOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BucketListOption[] dedupe(BucketListOption[] array, BucketListOption... os) { + return Option.dedupe(BucketListOption[]::new, array, os); + } + } + + /** Class for specifying blob list options. */ + class BlobListOption extends Option { + + private static final long serialVersionUID = 5216908055423927281L; + + private BlobListOption(ObjectListOpt opt) { + super(opt); + } + + /** Returns an option to specify the maximum number of blobs returned per page. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption pageSize(long pageSize) { + return new BlobListOption(UnifiedOpts.pageSize(pageSize)); + } + + /** Returns an option to specify the page token from which to start listing blobs. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption pageToken(@NonNull String pageToken) { + return new BlobListOption(UnifiedOpts.pageToken(pageToken)); + } + + /** + * Returns an option to set a prefix to filter results to blobs whose names begin with this + * prefix. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption prefix(@NonNull String prefix) { + return new BlobListOption(UnifiedOpts.prefix(prefix)); + } + + /** + * If specified, results are returned in a directory-like mode. Blobs whose names, after a + * possible {@link #prefix(String)}, do not contain the '/' delimiter are returned as is. Blobs + * whose names, after a possible {@link #prefix(String)}, contain the '/' delimiter, will have + * their name truncated after the delimiter and will be returned as {@link Blob} objects where + * only {@link Blob#getBlobId()}, {@link Blob#getSize()} and {@link Blob#isDirectory()} are set. 
+ * For such directory blobs, ({@link BlobId#getGeneration()} returns {@code null}), {@link + * Blob#getSize()} returns {@code 0} while {@link Blob#isDirectory()} returns {@code true}. + * Duplicate directory blobs are omitted. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption currentDirectory() { + return new BlobListOption(UnifiedOpts.currentDirectory()); + } + + /** + * Returns an option to set a delimiter. + * + * @param delimiter generally '/' is the one used most often, but you can used other delimiters + * as well. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption delimiter(@NonNull String delimiter) { + return new BlobListOption(UnifiedOpts.delimiter(delimiter)); + } + + /** + * Returns an option to set a startOffset to filter results to objects whose names are + * lexicographically equal to or after startOffset. If endOffset is also set, the objects listed + * have names between startOffset (inclusive) and endOffset (exclusive). + * + * @param startOffset startOffset to filter the results + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption startOffset(@NonNull String startOffset) { + return new BlobListOption(UnifiedOpts.startOffset(startOffset)); + } + + /** + * Returns an option to set a endOffset to filter results to objects whose names are + * lexicographically before endOffset. If startOffset is also set, the objects listed have names + * between startOffset (inclusive) and endOffset (exclusive). + * + * @param endOffset endOffset to filter the results + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption endOffset(@NonNull String endOffset) { + return new BlobListOption(UnifiedOpts.endOffset(endOffset)); + } + + /** + * Returns an option to set a glob pattern to filter results to blobs that match the pattern. 
+ * + * @see List + * Objects + */ + @BetaApi + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption matchGlob(@NonNull String glob) { + return new BlobListOption(UnifiedOpts.matchGlob(glob)); + } + + /** + * Returns an option for whether to include all Folders (including empty Folders) in response. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption includeFolders(boolean includeFolders) { + return new BlobListOption(UnifiedOpts.includeFoldersAsPrefixes(includeFolders)); + } + + /** + * Returns an option which will cause blobs that end in exactly one instance of `delimiter` will + * have their metadata included rather than being synthetic objects. + * + * @since 2.52.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption includeTrailingDelimiter() { + return new BlobListOption(UnifiedOpts.includeTrailingDelimiter()); + } + + /** + * Returns an option to define the billing user project. This option is required by buckets with + * `requester_pays` flag enabled to assign operation costs. + * + * @param userProject projectId of the billing user project. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption userProject(@NonNull String userProject) { + return new BlobListOption(UnifiedOpts.userProject(userProject)); + } + + /** + * If set to {@code true}, lists all versions of a blob. The default is {@code false}. + * + * @see Object Versioning + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption versions(boolean versions) { + return new BlobListOption(UnifiedOpts.versionsFilter(versions)); + } + + /** + * Returns an option to specify the blob's fields to be returned by the RPC call. If this option + * is not provided all blob's fields are returned. {@code BlobListOption.fields}) can be used to + * specify only the fields of interest. 
Blob name and bucket are always returned, even if not + * specified. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption fields(BlobField... fields) { + requireNonNull(fields, "fields must be non null"); + ImmutableSet set = + Streams.concat( + Stream.of(NamedField.literal("nextPageToken"), NamedField.literal("prefixes")), + Streams.concat(BlobField.REQUIRED_FIELDS.stream(), Arrays.stream(fields)) + .map(f -> NamedField.prefixed("items/", f))) + .collect(ImmutableSet.toImmutableSet()); + return new BlobListOption(UnifiedOpts.fields(set)); + } + + /** Returns an option for whether the list result should include soft-deleted objects. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption softDeleted(boolean softDeleted) { + return new BlobListOption(UnifiedOpts.softDeleted(softDeleted)); + } + + /** + * Returns an option to filter list results based on object attributes, such as object contexts. + * + * @param filter The filter string. + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption filter(String filter) { + return new BlobListOption(UnifiedOpts.objectFilter(filter)); + } + + /** + * A set of extra headers to be set for all requests performed within the scope of the operation + * this option is passed to (a get, read, resumable upload etc). + * + *

If the same header name is specified across multiple options provided to a method, the + * first occurrence will be the value included in the request(s). + * + *

The following headers are not allowed to be specified, and will result in an {@link + * IllegalArgumentException}. + * + *

    + *
  1. {@code Accept-Encoding} + *
  2. {@code Cache-Control} + *
  3. {@code Connection} + *
  4. {@code Content-ID} + *
  5. {@code Content-Length} + *
  6. {@code Content-Range} + *
  7. {@code Content-Transfer-Encoding} + *
  8. {@code Content-Type} + *
  9. {@code Date} + *
  10. {@code ETag} + *
  11. {@code If-Match} + *
  12. {@code If-None-Match} + *
  13. {@code Keep-Alive} + *
  14. {@code Range} + *
  15. {@code TE} + *
  16. {@code Trailer} + *
  17. {@code Transfer-Encoding} + *
  18. {@code User-Agent} + *
  19. {@code X-Goog-Api-Client} + *
  20. {@code X-Goog-Content-Length-Range} + *
  21. {@code X-Goog-Copy-Source-Encryption-Algorithm} + *
  22. {@code X-Goog-Copy-Source-Encryption-Key} + *
  23. {@code X-Goog-Copy-Source-Encryption-Key-Sha256} + *
  24. {@code X-Goog-Encryption-Algorithm} + *
  25. {@code X-Goog-Encryption-Key} + *
  26. {@code X-Goog-Encryption-Key-Sha256} + *
  27. {@code X-Goog-Gcs-Idempotency-Token} + *
  28. {@code X-Goog-Meta-*} + *
  29. {@code X-Goog-User-Project} + *
  30. {@code X-HTTP-Method-Override} + *
  31. {@code X-Upload-Content-Length} + *
  32. {@code X-Upload-Content-Type} + *
+ * + * @since 2.49.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static BlobListOption extraHeaders(@NonNull ImmutableMap extraHeaders) { + return new BlobListOption(UnifiedOpts.extraHeaders(extraHeaders)); + } + + /** + * Deduplicate any options which are the same parameter. The value which comes last in {@code + * os} will be the value included in the return. + */ + public static BlobListOption[] dedupe(BlobListOption... os) { + return Option.dedupe(BlobListOption[]::new, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobListOption[] dedupe( + Collection collection, BlobListOption... os) { + return Option.dedupe(BlobListOption[]::new, collection, os); + } + + /** + * Deduplicate any options which are the same parameter. + * + *

The value which comes last in {@code collection} and {@code os} will be the value included + * in the return. All options from {@code os} will override their counterparts in {@code + * collection}. + */ + public static BlobListOption[] dedupe(BlobListOption[] array, BlobListOption... os) { + return Option.dedupe(BlobListOption[]::new, array, os); + } + } + + /** Class for specifying Post Policy V4 options. * */ + class PostPolicyV4Option implements Serializable { + private static final long serialVersionUID = -1592545784993528897L; + private final PostPolicyV4Option.Option option; + private final Object value; + + @TransportCompatibility(Transport.HTTP) + enum Option { + PATH_STYLE, + VIRTUAL_HOSTED_STYLE, + BUCKET_BOUND_HOST_NAME, + SERVICE_ACCOUNT_CRED + } + + private PostPolicyV4Option(Option option, Object value) { + this.option = option; + this.value = value; + } + + PostPolicyV4Option.Option getOption() { + return option; + } + + Object getValue() { + return value; + } + + /** + * Provides a service account signer to sign the policy. If not provided an attempt is made to + * get it from the environment. + * + * @see Service + * Accounts + */ + @TransportCompatibility(Transport.HTTP) + public static PostPolicyV4Option signWith(ServiceAccountSigner signer) { + return new PostPolicyV4Option(PostPolicyV4Option.Option.SERVICE_ACCOUNT_CRED, signer); + } + + /** + * Use a virtual hosted-style hostname, which adds the bucket into the host portion of the URI + * rather than the path, e.g. 'https://mybucket.storage.googleapis.com/...'. The bucket name is + * obtained from the resource passed in. 
+ * + * @see Request Endpoints + */ + @TransportCompatibility(Transport.HTTP) + public static PostPolicyV4Option withVirtualHostedStyle() { + return new PostPolicyV4Option(PostPolicyV4Option.Option.VIRTUAL_HOSTED_STYLE, ""); + } + + /** + * Generates a path-style URL, which places the bucket name in the path portion of the URL + * instead of in the hostname, e.g 'https://storage.googleapis.com/mybucket/...'. Note that this + * cannot be used alongside {@code withVirtualHostedStyle()}. Virtual hosted-style URLs, which + * can be used via the {@code withVirtualHostedStyle()} method, should generally be preferred + * instead of path-style URLs. + * + * @see Request Endpoints + */ + @TransportCompatibility(Transport.HTTP) + public static PostPolicyV4Option withPathStyle() { + return new PostPolicyV4Option(PostPolicyV4Option.Option.PATH_STYLE, ""); + } + + /** + * Use a bucket-bound hostname, which replaces the storage.googleapis.com host with the name of + * a CNAME bucket, e.g. a bucket named 'gcs-subdomain.my.domain.tld', or a Google Cloud Load + * Balancer which routes to a bucket you own, e.g. 'my-load-balancer-domain.tld'. Note that this + * cannot be used alongside {@code withVirtualHostedStyle()} or {@code withPathStyle()}. This + * method signature uses HTTP for the URI scheme, and is equivalent to calling {@code + * withBucketBoundHostname("...", UriScheme.HTTP).} + * + * @see CNAME + * Redirects + * @see + * GCLB Redirects + */ + @TransportCompatibility(Transport.HTTP) + public static PostPolicyV4Option withBucketBoundHostname(String bucketBoundHostname) { + return withBucketBoundHostname(bucketBoundHostname, Storage.UriScheme.HTTP); + } + + /** + * Use a bucket-bound hostname, which replaces the storage.googleapis.com host with the name of + * a CNAME bucket, e.g. a bucket named 'gcs-subdomain.my.domain.tld', or a Google Cloud Load + * Balancer which routes to a bucket you own, e.g. 'my-load-balancer-domain.tld'. 
Note that this + * cannot be used alongside {@code withVirtualHostedStyle()} or {@code withPathStyle()}. The + * bucket name itself should not include the URI scheme (http or https), so it is specified via + * a local enum. + * + * @see CNAME + * Redirects + * @see + * GCLB Redirects + */ + @TransportCompatibility(Transport.HTTP) + public static PostPolicyV4Option withBucketBoundHostname( + String bucketBoundHostname, Storage.UriScheme uriScheme) { + return new PostPolicyV4Option( + PostPolicyV4Option.Option.BUCKET_BOUND_HOST_NAME, + uriScheme.getScheme() + "://" + bucketBoundHostname); + } + } + + /** Class for specifying signed URL options. */ + class SignUrlOption implements Serializable { + + private static final long serialVersionUID = -3165388740755311106L; + + private final Option option; + private final Object value; + + @TransportCompatibility(Transport.HTTP) + enum Option { + HTTP_METHOD, + CONTENT_TYPE, + MD5, + EXT_HEADERS, + SERVICE_ACCOUNT_CRED, + SIGNATURE_VERSION, + HOST_NAME, + PATH_STYLE, + VIRTUAL_HOSTED_STYLE, + BUCKET_BOUND_HOST_NAME, + QUERY_PARAMS + } + + @TransportCompatibility(Transport.HTTP) + enum SignatureVersion { + V2, + V4 + } + + private SignUrlOption(Option option, Object value) { + this.option = option; + this.value = value; + } + + Option getOption() { + return option; + } + + Object getValue() { + return value; + } + + /** + * The HTTP method to be used with the signed URL. If this method is not called, defaults to + * GET. + */ + @TransportCompatibility(Transport.HTTP) + public static SignUrlOption httpMethod(HttpMethod httpMethod) { + return new SignUrlOption(Option.HTTP_METHOD, httpMethod); + } + + /** + * Use it if signature should include the blob's content-type. When used, users of the signed + * URL should include the blob's content-type with their request. If using this URL from a + * browser, you must include a content type that matches what the browser will send. 
+ */ + @TransportCompatibility(Transport.HTTP) + public static SignUrlOption withContentType() { + return new SignUrlOption(Option.CONTENT_TYPE, true); + } + + /** + * Use it if signature should include the blob's md5. When used, users of the signed URL should + * include the blob's md5 with their request. + */ + @TransportCompatibility(Transport.HTTP) + public static SignUrlOption withMd5() { + return new SignUrlOption(Option.MD5, true); + } + + /** + * Use it if signature should include the blob's canonicalized extended headers. When used, + * users of the signed URL should include the canonicalized extended headers with their request. + * + * @see Request + * Headers + */ + @TransportCompatibility(Transport.HTTP) + public static SignUrlOption withExtHeaders(Map extHeaders) { + return new SignUrlOption(Option.EXT_HEADERS, extHeaders); + } + + /** + * Use if signature version should be V2. This is the default if neither this or {@code + * withV4Signature()} is called. + */ + @TransportCompatibility(Transport.HTTP) + public static SignUrlOption withV2Signature() { + return new SignUrlOption(Option.SIGNATURE_VERSION, SignatureVersion.V2); + } + + /** + * Use if signature version should be V4. Note that V4 Signed URLs can't have an expiration + * longer than 7 days. V2 will be the default if neither this or {@code withV2Signature()} is + * called. + */ + @TransportCompatibility(Transport.HTTP) + public static SignUrlOption withV4Signature() { + return new SignUrlOption(Option.SIGNATURE_VERSION, SignatureVersion.V4); + } + + /** + * Provides a service account signer to sign the URL. If not provided an attempt is made to get + * it from the environment. + * + * @see Service + * Accounts + */ + @TransportCompatibility(Transport.HTTP) + public static SignUrlOption signWith(ServiceAccountSigner signer) { + return new SignUrlOption(Option.SERVICE_ACCOUNT_CRED, signer); + } + + /** + * Use a different host name than the default host name 'storage.googleapis.com'. 
This option is + * particularly useful for developers to point requests to an alternate endpoint (e.g. a staging + * environment or sending requests through VPC). If using this with the {@code + * withVirtualHostedStyle()} method, you should omit the bucket name from the hostname, as it + * automatically gets prepended to the hostname for virtual hosted-style URLs. + */ + @TransportCompatibility(Transport.HTTP) + public static SignUrlOption withHostName(String hostName) { + return new SignUrlOption(Option.HOST_NAME, hostName); + } + + /** + * Use a virtual hosted-style hostname, which adds the bucket into the host portion of the URI + * rather than the path, e.g. 'https://mybucket.storage.googleapis.com/...'. The bucket name is + * obtained from the resource passed in. For V4 signing, this also sets the "host" header in the + * canonicalized extension headers to the virtual hosted-style host, unless that header is + * supplied via the {@code withExtHeaders()} method. + * + * @see Request Endpoints + */ + @TransportCompatibility(Transport.HTTP) + public static SignUrlOption withVirtualHostedStyle() { + return new SignUrlOption(Option.VIRTUAL_HOSTED_STYLE, ""); + } + + /** + * Generates a path-style URL, which places the bucket name in the path portion of the URL + * instead of in the hostname, e.g 'https://storage.googleapis.com/mybucket/...'. This cannot be + * used alongside {@code withVirtualHostedStyle()}. Virtual hosted-style URLs, which can be used + * via the {@code withVirtualHostedStyle()} method, should generally be preferred instead of + * path-style URLs. + * + * @see Request Endpoints + */ + @TransportCompatibility(Transport.HTTP) + public static SignUrlOption withPathStyle() { + return new SignUrlOption(Option.PATH_STYLE, ""); + } + + /** + * Use a bucket-bound hostname, which replaces the storage.googleapis.com host with the name of + * a CNAME bucket, e.g. 
a bucket named 'gcs-subdomain.my.domain.tld', or a Google Cloud Load + * Balancer which routes to a bucket you own, e.g. 'my-load-balancer-domain.tld'. This cannot be + * used alongside {@code withVirtualHostedStyle()} or {@code withPathStyle()}. This method + * signature uses HTTP for the URI scheme, and is equivalent to calling {@code + * withBucketBoundHostname("...", UriScheme.HTTP).} + * + * @see CNAME + * Redirects + * @see + * GCLB Redirects + */ + @TransportCompatibility(Transport.HTTP) + public static SignUrlOption withBucketBoundHostname(String bucketBoundHostname) { + return withBucketBoundHostname(bucketBoundHostname, UriScheme.HTTP); + } + + /** + * Use a bucket-bound hostname, which replaces the storage.googleapis.com host with the name of + * a CNAME bucket, e.g. a bucket named 'gcs-subdomain.my.domain.tld', or a Google Cloud Load + * Balancer which routes to a bucket you own, e.g. 'my-load-balancer-domain.tld'. Note that this + * cannot be used alongside {@code withVirtualHostedStyle()} or {@code withPathStyle()}. The + * bucket name itself should not include the URI scheme (http or https), so it is specified via + * a local enum. + * + * @see CNAME + * Redirects + * @see + * GCLB Redirects + */ + @TransportCompatibility(Transport.HTTP) + public static SignUrlOption withBucketBoundHostname( + String bucketBoundHostname, UriScheme uriScheme) { + return new SignUrlOption( + Option.BUCKET_BOUND_HOST_NAME, uriScheme.getScheme() + "://" + bucketBoundHostname); + } + + /** + * Use if the URL should contain additional query parameters. + * + *

Warning: For V2 Signed URLs, it is possible for query parameters to be altered after the + * URL has been signed, as the parameters are not used to compute the signature. The V4 signing + * method should be preferred when supplying additional query parameters, as the parameters + * cannot be added, removed, or otherwise altered after a V4 signature is generated. + * + * @see + * Canonical Requests + * @see V2 Signing + * Process + */ + @TransportCompatibility(Transport.HTTP) + public static SignUrlOption withQueryParams(Map queryParams) { + return new SignUrlOption(Option.QUERY_PARAMS, queryParams); + } + } + + /** + * A class to contain all information needed for a Google Cloud Storage Compose operation. + * + * @see Compose + * Operation + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + class ComposeRequest implements Serializable { + + private static final long serialVersionUID = 6612204553167273919L; + + private final List sourceBlobs; + private final BlobInfo target; + private final List targetOptions; + + private transient Opts targetOpts; + + /** Class for Compose source blobs. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static class SourceBlob implements Serializable { + + private static final long serialVersionUID = -157636474404489874L; + + final String name; + final Long generation; + + SourceBlob(String name) { + this(name, null); + } + + SourceBlob(String name, Long generation) { + this.name = name; + this.generation = generation; + } + + public String getName() { + return name; + } + + public Long getGeneration() { + return generation; + } + } + + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static class Builder { + + private final List sourceBlobs = new LinkedList<>(); + private final Set targetOptions = new LinkedHashSet<>(); + private BlobInfo target; + private Opts opts = Opts.empty(); + + /** Add source blobs for compose operation. 
*/ + public Builder addSource(Iterable blobs) { + for (String blob : blobs) { + sourceBlobs.add(new SourceBlob(blob)); + } + return this; + } + + /** Add source blobs for compose operation. */ + public Builder addSource(String... blobs) { + return addSource(Arrays.asList(blobs)); + } + + /** Add a source with a specific generation to match. */ + public Builder addSource(String blob, long generation) { + sourceBlobs.add(new SourceBlob(blob, generation)); + return this; + } + + /** Sets compose operation's target blob. */ + public Builder setTarget(BlobInfo target) { + this.target = target; + return this; + } + + Builder setTargetOpts(Opts opts) { + this.opts = opts; + return this; + } + + /** Sets compose operation's target blob options. */ + public Builder setTargetOptions(BlobTargetOption... options) { + Collections.addAll(targetOptions, options); + return this; + } + + /** Sets compose operation's target blob options. */ + public Builder setTargetOptions(Iterable options) { + Iterables.addAll(targetOptions, options); + return this; + } + + /** Creates a {@code ComposeRequest} object. */ + public ComposeRequest build() { + checkArgument(!sourceBlobs.isEmpty()); + checkNotNull(target); + checkNotNull(opts); + return new ComposeRequest(this); + } + } + + private ComposeRequest(Builder builder) { + sourceBlobs = ImmutableList.copyOf(builder.sourceBlobs); + target = builder.target; + // keep targetOptions for serialization even though we will read targetOpts + targetOptions = ImmutableList.copyOf(builder.targetOptions); + targetOpts = builder.opts.prepend(Opts.unwrap(targetOptions).resolveFrom(target)); + } + + /** Returns compose operation's source blobs. */ + public List getSourceBlobs() { + return sourceBlobs; + } + + /** Returns compose operation's target blob. */ + public BlobInfo getTarget() { + return target; + } + + /** Returns compose operation's target blob's options. 
*/ + public List getTargetOptions() { + return targetOptions; + } + + @InternalApi + Opts getTargetOpts() { + return targetOpts; + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + if (this.targetOptions != null) { + this.targetOpts = Opts.unwrap(this.targetOptions); + } else { + this.targetOpts = Opts.empty(); + } + } + + /** + * Creates a {@code ComposeRequest} object. + * + * @param sources source blobs names + * @param target target blob + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static ComposeRequest of(Iterable sources, BlobInfo target) { + return newBuilder().setTarget(target).addSource(sources).build(); + } + + /** + * Creates a {@code ComposeRequest} object. + * + * @param bucket name of the bucket where the compose operation takes place + * @param sources source blobs names + * @param target target blob name + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static ComposeRequest of(String bucket, Iterable sources, String target) { + return of(sources, BlobInfo.newBuilder(BlobId.of(bucket, target)).build()); + } + + /** Returns a {@code ComposeRequest} builder. */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + public static Builder newBuilder() { + return new Builder(); + } + } + + /** A class to contain all information needed for a Google Cloud Storage Copy operation. 
*/ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + class CopyRequest implements Serializable { + + private static final long serialVersionUID = 5670794463350011330L; + + private final BlobId source; + private final List sourceOptions; + private final boolean overrideInfo; + private final BlobInfo target; + private final List targetOptions; + private final Long megabytesCopiedPerChunk; + + public static class Builder { + + private final Set sourceOptions = new LinkedHashSet<>(); + private final Set targetOptions = new LinkedHashSet<>(); + private BlobId source; + private boolean overrideInfo; + private BlobInfo target; + private Long megabytesCopiedPerChunk; + + /** + * Sets the blob to copy given bucket and blob name. + * + * @return the builder + */ + public Builder setSource(String bucket, String blob) { + this.source = BlobId.of(bucket, blob); + return this; + } + + /** + * Sets the blob to copy given a {@link BlobId}. + * + * @return the builder + */ + public Builder setSource(BlobId source) { + this.source = source; + return this; + } + + /** + * Sets blob's source options. + * + * @return the builder + */ + public Builder setSourceOptions(BlobSourceOption... options) { + Collections.addAll(sourceOptions, options); + return this; + } + + /** + * Sets blob's source options. + * + * @return the builder + */ + public Builder setSourceOptions(Iterable options) { + Iterables.addAll(sourceOptions, options); + return this; + } + + /** + * Sets the copy target. Target blob information is copied from source. + * + * @return the builder + */ + public Builder setTarget(BlobId targetId) { + this.overrideInfo = false; + this.target = BlobInfo.newBuilder(targetId).build(); + return this; + } + + /** + * Sets the copy target. Target blob information is copied from source, except for those + * options specified in {@code options}. + * + * @return the builder + */ + public Builder setTarget(BlobId targetId, BlobTargetOption... 
options) { + this.overrideInfo = false; + this.target = BlobInfo.newBuilder(targetId).build(); + Collections.addAll(targetOptions, options); + return this; + } + + /** + * Sets the copy target and target options. {@code target} parameter is used to override + * source blob information (e.g. {@code contentType}, {@code contentLanguage}). Target blob + * information is set exactly to {@code target}, no information is inherited from the source + * blob. + * + * @return the builder + */ + public Builder setTarget(BlobInfo target, BlobTargetOption... options) { + this.overrideInfo = true; + this.target = checkNotNull(target); + Collections.addAll(targetOptions, options); + return this; + } + + /** + * Sets the copy target and target options. {@code target} parameter is used to override + * source blob information (e.g. {@code contentType}, {@code contentLanguage}). Target blob + * information is set exactly to {@code target}, no information is inherited from the source + * blob. + * + * @return the builder + */ + public Builder setTarget(BlobInfo target, Iterable options) { + this.overrideInfo = true; + this.target = checkNotNull(target); + Iterables.addAll(targetOptions, options); + return this; + } + + /** + * Sets the copy target and target options. Target blob information is copied from source, + * except for those options specified in {@code options}. + * + * @return the builder + */ + public Builder setTarget(BlobId targetId, Iterable options) { + this.overrideInfo = false; + this.target = BlobInfo.newBuilder(targetId).build(); + Iterables.addAll(targetOptions, options); + return this; + } + + /** + * Sets the maximum number of megabytes to copy for each RPC call. This parameter is ignored + * if source and target blob share the same location and storage class as copy is made with + * one single RPC. 
+ * + * @return the builder + */ + public Builder setMegabytesCopiedPerChunk(Long megabytesCopiedPerChunk) { + this.megabytesCopiedPerChunk = megabytesCopiedPerChunk; + return this; + } + + /** Creates a {@code CopyRequest} object. */ + public CopyRequest build() { + return new CopyRequest(this); + } + } + + private CopyRequest(Builder builder) { + source = checkNotNull(builder.source); + sourceOptions = ImmutableList.copyOf(builder.sourceOptions); + overrideInfo = builder.overrideInfo; + target = checkNotNull(builder.target); + targetOptions = ImmutableList.copyOf(builder.targetOptions); + megabytesCopiedPerChunk = builder.megabytesCopiedPerChunk; + } + + /** Returns the blob to copy, as a {@link BlobId}. */ + public BlobId getSource() { + return source; + } + + /** Returns blob's source options. */ + public List getSourceOptions() { + return sourceOptions; + } + + /** Returns the {@link BlobInfo} for the target blob. */ + public BlobInfo getTarget() { + return target; + } + + /** + * Returns whether to override the target blob information with {@link #getTarget()}. If {@code + * true}, the value of {@link #getTarget()} is used to replace source blob information (e.g. + * {@code contentType}, {@code contentLanguage}). Target blob information is set exactly to this + * value, no information is inherited from the source blob. If {@code false}, target blob + * information is inherited from the source blob. + */ + public boolean overrideInfo() { + return overrideInfo; + } + + /** Returns blob's target options. */ + public List getTargetOptions() { + return targetOptions; + } + + /** + * Returns the maximum number of megabytes to copy for each RPC call. This parameter is ignored + * if source and target blob share the same location and storage class as copy is made with one + * single RPC. + */ + public Long getMegabytesCopiedPerChunk() { + return megabytesCopiedPerChunk; + } + + /** + * Creates a copy request. 
{@code target} parameter is used to override source blob information + * (e.g. {@code contentType}, {@code contentLanguage}). + * + * @param sourceBucket name of the bucket containing the source blob + * @param sourceBlob name of the source blob + * @param target a {@code BlobInfo} object for the target blob + * @return a copy request + */ + public static CopyRequest of(String sourceBucket, String sourceBlob, BlobInfo target) { + return newBuilder().setSource(sourceBucket, sourceBlob).setTarget(target).build(); + } + + /** + * Creates a copy request. {@code target} parameter is used to replace source blob information + * (e.g. {@code contentType}, {@code contentLanguage}). Target blob information is set exactly + * to {@code target}, no information is inherited from the source blob. + * + * @param sourceBlobId a {@code BlobId} object for the source blob + * @param target a {@code BlobInfo} object for the target blob + * @return a copy request + */ + public static CopyRequest of(BlobId sourceBlobId, BlobInfo target) { + return newBuilder().setSource(sourceBlobId).setTarget(target).build(); + } + + /** + * Creates a copy request. Target blob information is copied from source. + * + * @param sourceBucket name of the bucket containing both the source and the target blob + * @param sourceBlob name of the source blob + * @param targetBlob name of the target blob + * @return a copy request + */ + public static CopyRequest of(String sourceBucket, String sourceBlob, String targetBlob) { + return CopyRequest.newBuilder() + .setSource(sourceBucket, sourceBlob) + .setTarget(BlobId.of(sourceBucket, targetBlob)) + .build(); + } + + /** + * Creates a copy request. Target blob information is copied from source. 
+ * + * @param sourceBucket name of the bucket containing the source blob + * @param sourceBlob name of the source blob + * @param target a {@code BlobId} object for the target blob + * @return a copy request + */ + public static CopyRequest of(String sourceBucket, String sourceBlob, BlobId target) { + return newBuilder().setSource(sourceBucket, sourceBlob).setTarget(target).build(); + } + + /** + * Creates a copy request. Target blob information is copied from source. + * + * @param sourceBlobId a {@code BlobId} object for the source blob + * @param targetBlob name of the target blob, in the same bucket of the source blob + * @return a copy request + */ + public static CopyRequest of(BlobId sourceBlobId, String targetBlob) { + return CopyRequest.newBuilder() + .setSource(sourceBlobId) + .setTarget(BlobId.of(sourceBlobId.getBucket(), targetBlob)) + .build(); + } + + /** + * Creates a copy request. Target blob information is copied from source. + * + * @param sourceBlobId a {@code BlobId} object for the source blob + * @param targetBlobId a {@code BlobId} object for the target blob + * @return a copy request + */ + public static CopyRequest of(BlobId sourceBlobId, BlobId targetBlobId) { + return CopyRequest.newBuilder().setSource(sourceBlobId).setTarget(targetBlobId).build(); + } + + /** Creates a builder for {@code CopyRequest} objects. */ + public static Builder newBuilder() { + return new Builder(); + } + } + + /** + * A class to contain all information needed for a Google Cloud Storage Object Move. 
+ * + * @since 2.48.0 + * @see Storage#moveBlob(MoveBlobRequest) + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + final class MoveBlobRequest { + private final BlobId source; + private final BlobId target; + private final ImmutableList sourceOptions; + private final ImmutableList targetOptions; + + MoveBlobRequest( + BlobId source, + BlobId target, + ImmutableList sourceOptions, + ImmutableList targetOptions) { + this.source = source; + this.target = target; + this.sourceOptions = sourceOptions; + this.targetOptions = targetOptions; + } + + public BlobId getSource() { + return source; + } + + public BlobId getTarget() { + return target; + } + + public List getSourceOptions() { + return sourceOptions; + } + + public List getTargetOptions() { + return targetOptions; + } + + public Builder toBuilder() { + return new Builder(source, target, sourceOptions, targetOptions); + } + + public static Builder newBuilder() { + return new Builder(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof MoveBlobRequest)) { + return false; + } + MoveBlobRequest that = (MoveBlobRequest) o; + return Objects.equals(source, that.source) + && Objects.equals(target, that.target) + && Objects.equals(sourceOptions, that.sourceOptions) + && Objects.equals(targetOptions, that.targetOptions); + } + + @Override + public int hashCode() { + return Objects.hash(source, target, sourceOptions, targetOptions); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("source", source) + .add("target", target) + .add("sourceOptions", sourceOptions) + .add("targetOptions", targetOptions) + .toString(); + } + + public static final class Builder { + + private BlobId source; + private BlobId target; + private ImmutableList sourceOptions; + private ImmutableList targetOptions; + + private Builder() { + this(null, null, ImmutableList.of(), ImmutableList.of()); + } + + private Builder( + BlobId 
source, + BlobId target, + ImmutableList sourceOptions, + ImmutableList targetOptions) { + this.source = source; + this.target = target; + this.sourceOptions = sourceOptions; + this.targetOptions = targetOptions; + } + + public Builder setSource(BlobId source) { + this.source = requireNonNull(source, "source must be non null"); + return this; + } + + public Builder setTarget(BlobId target) { + this.target = requireNonNull(target, "target must be non null"); + return this; + } + + public Builder setSourceOptions(Iterable sourceOptions) { + this.sourceOptions = + ImmutableList.copyOf(requireNonNull(sourceOptions, "sourceOptions must be non null")); + return this; + } + + public Builder setTargetOptions(Iterable targetOptions) { + this.targetOptions = + ImmutableList.copyOf(requireNonNull(targetOptions, "targetOptions must be non null")); + return this; + } + + public Builder setSourceOptions(BlobSourceOption... sourceOptions) { + this.sourceOptions = + ImmutableList.copyOf(requireNonNull(sourceOptions, "sourceOptions must be non null")); + return this; + } + + public Builder setTargetOptions(BlobTargetOption... targetOptions) { + this.targetOptions = + ImmutableList.copyOf(requireNonNull(targetOptions, "targetOptions must be non null")); + return this; + } + + public MoveBlobRequest build() { + return new MoveBlobRequest( + requireNonNull(source, "source must be non null"), + requireNonNull(target, "target must be non null"), + sourceOptions, + targetOptions); + } + } + } + + /** + * Creates a new bucket. + * + *

Accepts an optional userProject {@link BucketTargetOption} option which defines the project + * id to assign operational costs. + * + *

Example of creating a bucket. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * Bucket bucket = storage.create(BucketInfo.of(bucketName));
+   * }
+ * + *

Example of creating a bucket with storage class and location. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * Bucket bucket = storage.create(BucketInfo.newBuilder(bucketName)
+   *     // See here for possible values: http://g.co/cloud/storage/docs/storage-classes
+   *     .setStorageClass(StorageClass.COLDLINE)
+   *     // Possible values: http://g.co/cloud/storage/docs/bucket-locations#location-mr
+   *     .setLocation("asia")
+   *     .build());
+   * }
+ * + * @return a complete bucket + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Bucket create(BucketInfo bucketInfo, BucketTargetOption... options); + + /** + * Creates a new blob with no content. Note that all non-editable metadata, such as + * generation or metageneration, will be ignored even if it's present in the provided BlobInfo + * object. + * + *

Example of creating a blob with no content. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build();
+   * Blob blob = storage.create(blobInfo);
+   * }
+ * + * @return a {@code Blob} with complete information + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob create(BlobInfo blobInfo, BlobTargetOption... options); + + /** + * Creates a new blob. Direct upload is used to upload {@code content}. For large content, {@link + * #writer} is recommended as it uses resumable upload. MD5 and CRC32C hashes of {@code content} + * are computed and used for validating transferred data. Accepts an optional userProject {@link + * BlobGetOption} option which defines the project id to assign operational costs. The content + * type is detected from the blob name if not explicitly set. Note that all non-editable metadata, such as + * generation or metageneration, will be ignored even if it's present in the provided BlobInfo + * object. + * + *

Example of creating a blob from a byte array: + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build();
+   * Blob blob = storage.create(blobInfo, "Hello, World!".getBytes(UTF_8));
+   * }
+ * + * @return a {@code Blob} with complete information + * @throws StorageException upon failure + * @see Hashes and ETags + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob create(BlobInfo blobInfo, byte[] content, BlobTargetOption... options); + + /** + * Creates a new blob with the sub array of the given byte array. Direct upload is used to upload + * {@code content}. For large content, {@link #writer} is recommended as it uses resumable upload. + * MD5 and CRC32C hashes of {@code content} are computed and used for validating transferred data. + * Accepts a userProject {@link BlobGetOption} option, which defines the project id to assign + * operational costs. Note that all non-editable metadata, such as + * generation or metageneration, will be ignored even if it's present in the provided BlobInfo + * object. + * + *

Example of creating a blob from a byte array: + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build();
+   * Blob blob = storage.create(blobInfo, "Hello, World!".getBytes(UTF_8), 7, 5);
+   * }
+ * + * @return a {@code Blob} with complete information + * @throws StorageException upon failure + * @see Hashes and ETags + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob create( + BlobInfo blobInfo, byte[] content, int offset, int length, BlobTargetOption... options); + + /** + * Creates a new blob. Direct upload is used to upload {@code content}. For large content, {@link + * #writer} is recommended as it uses resumable upload. By default any MD5 and CRC32C values in + * the given {@code blobInfo} are ignored unless requested via the {@code + * BlobWriteOption.md5Match} and {@code BlobWriteOption.crc32cMatch} options. The given input + * stream is closed upon success. Note that all non-editable metadata, such as + * generation or metageneration, will be ignored even if it's present in the provided BlobInfo + * object. + * + *

This method is marked as {@link Deprecated} because it cannot safely retry, given that it + * accepts an {@link InputStream} which can only be consumed once. + * + *

Example of creating a blob from an input stream. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * InputStream content = new ByteArrayInputStream("Hello, World!".getBytes(UTF_8));
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build();
+   * Blob blob = storage.create(blobInfo, content);
+   * }
+ * + *

Example of uploading an encrypted blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * String encryptionKey = "my_encryption_key";
+   * InputStream content = new ByteArrayInputStream("Hello, World!".getBytes(UTF_8));
+   *
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId)
+   *     .setContentType("text/plain")
+   *     .build();
+   * Blob blob = storage.create(blobInfo, content, BlobWriteOption.encryptionKey(encryptionKey));
+   * }
+ * + * @return a {@code Blob} with complete information + * @throws StorageException upon failure + */ + @Deprecated + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob create(BlobInfo blobInfo, InputStream content, BlobWriteOption... options); + + /** + * Uploads {@code path} to the blob using {@link #writer}. By default any MD5 and CRC32C values in + * the given {@code blobInfo} are ignored unless requested via the {@link + * BlobWriteOption#md5Match()} and {@link BlobWriteOption#crc32cMatch()} options. Folder upload is + * not supported. Note that all + * non-editable metadata, such as generation or metageneration, will be ignored even if it's + * present in the provided BlobInfo object. + * + *

Example of uploading a file: + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String fileName = "readme.txt";
+   * BlobId blobId = BlobId.of(bucketName, fileName);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build();
+   * storage.createFrom(blobInfo, Paths.get(fileName));
+   * }
+ * + * @param blobInfo blob to create + * @param path file to upload + * @param options blob write options + * @return a {@code Blob} with complete information + * @throws IOException on I/O error + * @throws StorageException on server side error + * @see #createFrom(BlobInfo, Path, int, BlobWriteOption...) + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob createFrom(BlobInfo blobInfo, Path path, BlobWriteOption... options) throws IOException; + + /** + * Uploads {@code path} to the blob using {@code ResumableSession}. By default any MD5 and CRC32C + * values in the given {@code blobInfo} are ignored unless requested via the {@link + * BlobWriteOption#md5Match()} and {@link BlobWriteOption#crc32cMatch()} options. Folder upload is + * not supported. Note that all + * non-editable metadata, such as generation or metageneration, will be ignored even if it's + * present in the provided BlobInfo object. + * + *

This method used to preallocate a buffer, but since v2.25.0, it uses a ResumableSession and + * no longer needs it. The bufferSize parameter is still present for binary compatibility, but is + * now ignored. + * + *

Example of uploading a humongous file: + * + *

{@code
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("video/webm").build();
+   *
+   * Path file = Paths.get("humongous.file");
+   * storage.createFrom(blobInfo, file, 0);
+   * }
+ * + * @param blobInfo blob to create + * @param path file to upload + * @param bufferSize ignored field, still present for compatibility purposes + * @param options blob write options + * @return a {@code Blob} with complete information + * @throws IOException on I/O error + * @throws StorageException on server side error + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob createFrom(BlobInfo blobInfo, Path path, int bufferSize, BlobWriteOption... options) + throws IOException; + + /** + * Reads bytes from an input stream and uploads those bytes to the blob using {@link #writer}. By + * default any MD5 and CRC32C values in the given {@code blobInfo} are ignored unless requested + * via the {@link BlobWriteOption#md5Match()} and {@link BlobWriteOption#crc32cMatch()} options. + * Note that all non-editable + * metadata, such as generation or metageneration, will be ignored even if it's present in the + * provided BlobInfo object. + * + *

Example of uploading data with CRC32C checksum: + * + *

{@code
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * byte[] content = "Hello, world".getBytes(StandardCharsets.UTF_8);
+   * Hasher hasher = Hashing.crc32c().newHasher().putBytes(content);
+   * String crc32c = BaseEncoding.base64().encode(Ints.toByteArray(hasher.hash().asInt()));
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setCrc32c(crc32c).build();
+   * storage.createFrom(blobInfo, new ByteArrayInputStream(content), Storage.BlobWriteOption.crc32cMatch());
+   * }
+ * + * @param blobInfo blob to create + * @param content input stream to read from + * @param options blob write options + * @return a {@code Blob} with complete information + * @throws IOException on I/O error + * @throws StorageException on server side error + * @see #createFrom(BlobInfo, InputStream, int, BlobWriteOption...) + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob createFrom(BlobInfo blobInfo, InputStream content, BlobWriteOption... options) + throws IOException; + + /** + * Reads bytes from an input stream and uploads those bytes to the blob using {@link #writer} and + * {@code bufferSize}. By default any MD5 and CRC32C values in the given {@code blobInfo} are + * ignored unless requested via the {@link BlobWriteOption#md5Match()} and {@link + * BlobWriteOption#crc32cMatch()} options. Note that all non-editable metadata, such as + * generation or metageneration, will be ignored even if it's present in the provided BlobInfo + * object. + * + *

{@link #createFrom(BlobInfo, InputStream, BlobWriteOption...)} invokes this method with a + * buffer size of 15 MiB. Users can pass alternative values. Larger buffer sizes might improve the + * upload performance but require more memory. This can cause an OutOfMemoryError or add + * significant garbage collection overhead. Smaller buffer sizes reduce memory consumption, that + * is noticeable when uploading many objects in parallel. Buffer sizes less than 256 KiB are + * treated as 256 KiB. + * + * @param blobInfo blob to create + * @param content input stream to read from + * @param bufferSize size of the buffer I/O operations + * @param options blob write options + * @return a {@code Blob} with complete information + * @throws IOException on I/O error + * @throws StorageException on server side error + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob createFrom( + BlobInfo blobInfo, InputStream content, int bufferSize, BlobWriteOption... options) + throws IOException; + + /** + * Returns the requested bucket or {@code null} if not found. + * + *

Accepts an optional userProject {@link BucketGetOption} option which defines the project id + * to assign operational costs. + * + *

Example of getting information on a bucket, only if its metageneration matches a value, + * otherwise a {@link StorageException} is thrown. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * long bucketMetageneration = 42;
+   * Bucket bucket = storage.get(bucketName,
+   *     BucketGetOption.metagenerationMatch(bucketMetageneration));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Bucket get(String bucket, BucketGetOption... options); + + /** + * Locks bucket retention policy. Requires a local metageneration value in the request. Review + * example below. + * + *

Accepts an optional userProject {@link BucketTargetOption} option which defines the project + * id to assign operational costs. + * + *

Warning: Once a retention policy is locked, it can't be unlocked, removed, or shortened. + * + *

Example of locking a retention policy on a bucket, only if its local metageneration value + * matches the bucket's service metageneration otherwise a {@link StorageException} is thrown. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * Bucket bucket = storage.get(bucketName, BucketGetOption.fields(BucketField.METAGENERATION));
+   * storage.lockRetentionPolicy(bucket, BucketTargetOption.metagenerationMatch());
+   * }
+ * + * @return a {@code Bucket} object of the locked bucket + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Bucket lockRetentionPolicy(BucketInfo bucket, BucketTargetOption... options); + + /** + * Returns the requested blob or {@code null} if not found. + * + *

Accepts an optional userProject {@link BlobGetOption} option which defines the project id to + * assign operational costs. + * + *

Example of getting information on a blob, only if its metageneration matches a value, + * otherwise a {@link StorageException} is thrown. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long blobMetageneration = 42;
+   * Blob blob = storage.get(bucketName, blobName,
+   *     BlobGetOption.metagenerationMatch(blobMetageneration));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob get(String bucket, String blob, BlobGetOption... options); + + /** + * Returns the requested blob or {@code null} if not found. + * + *

Accepts an optional userProject {@link BlobGetOption} option which defines the project id to + * assign operational costs. + * + *

Example of getting information on a blob, only if its metageneration matches a value, + * otherwise a {@link StorageException} is thrown. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long blobMetageneration = 42;
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * Blob blob = storage.get(blobId, BlobGetOption.metagenerationMatch(blobMetageneration));
+   * }
+ * + *

Example of getting information on a blob encrypted using Customer Supplied Encryption Keys, + * only if supplied Decryption Key decrypts the blob successfully, otherwise a {@link + * StorageException} is thrown. For more information review + * + * @see Encrypted + * Elements + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * String blobEncryptionKey = "";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * Blob blob = storage.get(blobId, BlobGetOption.decryptionKey(blobEncryptionKey));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob get(BlobId blob, BlobGetOption... options); + + /** + * Returns the requested blob or {@code null} if not found. + * + *

Example of getting information on a blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * Blob blob = storage.get(blobId);
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob get(BlobId blob); + + /** + * Restores a soft-deleted object to full object status and returns the object. Note that you must + * specify a generation to use this method. + * + *

Example of restoring an object. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long generation = 42;
+   * BlobId blobId = BlobId.of(bucketName, blobName, generation);
+   * Blob blob = storage.restore(blobId);
+   * }
+ */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob restore(BlobId blob, BlobRestoreOption... options); + + /** + * Lists the project's buckets. + * + *

Example of listing buckets, specifying the page size and a name prefix. + * + *

{@code
+   * String prefix = "bucket_";
+   * Page buckets = storage.list(BucketListOption.pageSize(100),
+   *     BucketListOption.prefix(prefix));
+   * Iterator bucketIterator = buckets.iterateAll().iterator();
+   * while (bucketIterator.hasNext()) {
+   *   Bucket bucket = bucketIterator.next();
+   *   // do something with the bucket
+   * }
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Page list(BucketListOption... options); + + /** + * Lists the bucket's blobs. If the {@link BlobListOption#currentDirectory()} option is provided, + * results are returned in a directory-like mode. + * + *

Example of listing blobs in a provided directory. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String directory = "my_directory/";
+   * Page blobs = storage.list(bucketName, BlobListOption.currentDirectory(),
+   *     BlobListOption.prefix(directory));
+   * Iterator blobIterator = blobs.iterateAll().iterator();
+   * while (blobIterator.hasNext()) {
+   *   Blob blob = blobIterator.next();
+   *   // do something with the blob
+   * }
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Page list(String bucket, BlobListOption... options); + + /** + * Updates bucket information. + * + *

Accepts an optional userProject {@link BucketTargetOption} option which defines the project + * id to assign operational costs. + * + *

Example of updating bucket information. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * BucketInfo bucketInfo = BucketInfo.newBuilder(bucketName).setVersioningEnabled(true).build();
+   * Bucket bucket = storage.update(bucketInfo);
+   * }
+ * + * @return the updated bucket + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Bucket update(BucketInfo bucketInfo, BucketTargetOption... options); + + /** + * Updates the blob properties if the preconditions specified by {@code options} are met. The + * property update works as described in {@link #update(BlobInfo)}. + * + *

{@code options} parameter can contain the preconditions for applying the update. E.g. update + * of the blob properties might be required only if the properties have not been updated + * externally. {@code StorageException} with the code {@code 412} is thrown if preconditions fail. + * + *

Example of updating the content type only if the properties are not updated externally: + * + *

{@code
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build();
+   * Blob blob = storage.create(blobInfo);
+   *
+   * doSomething();
+   *
+   * BlobInfo update = blob.toBuilder().setContentType("multipart/form-data").build();
+   * Storage.BlobTargetOption option = Storage.BlobTargetOption.metagenerationMatch();
+   * try {
+   *   storage.update(update, option);
+   * } catch (StorageException e) {
+   *   if (e.getCode() == 412) {
+   *     // the properties were updated externally
+   *   } else {
+   *     throw e;
+   *   }
+   * }
+   * }
+ * + * @param blobInfo information to update + * @param options preconditions to apply the update + * @return the updated blob + * @throws StorageException upon failure + * @see https://cloud.google.com/storage/docs/json_api/v1/objects/update + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob update(BlobInfo blobInfo, BlobTargetOption... options); + + /** + * Updates the properties of the blob. This method issues an RPC request to merge the current blob + * properties with the properties in the provided {@code blobInfo}. Properties not defined in + * {@code blobInfo} will not be updated. To unset a blob property this property in {@code + * blobInfo} should be explicitly set to {@code null}. + * + *

Bucket or blob's name cannot be changed by this method. If you want to rename the blob or + * move it to a different bucket use the {@link Blob#copyTo} and {@link #delete} operations. + * + *

Property update alters the blob metadata generation and doesn't alter the blob generation. + * + *

Example of how to update blob's user provided metadata and unset the content type: + * + *

{@code
+   * Map metadataUpdate = new HashMap<>();
+   * metadataUpdate.put("keyToAdd", "new value");
+   * metadataUpdate.put("keyToRemove", null);
+   * BlobInfo blobUpdate = BlobInfo.newBuilder(bucketName, blobName)
+   *     .setMetadata(metadataUpdate)
+   *     .setContentType(null)
+   *     .build();
+   * Blob blob = storage.update(blobUpdate);
+   * }
+ * + * @param blobInfo information to update + * @return the updated blob + * @throws StorageException upon failure + * @see https://cloud.google.com/storage/docs/json_api/v1/objects/update + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob update(BlobInfo blobInfo); + + /** + * Deletes the requested bucket. + * + *

Accepts an optional userProject {@link BucketSourceOption} option which defines the project + * id to assign operational costs. + * + *

Example of deleting a bucket, only if its metageneration matches a value, otherwise a {@link + * StorageException} is thrown. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * long bucketMetageneration = 42;
+   * boolean deleted = storage.delete(bucketName,
+   *     BucketSourceOption.metagenerationMatch(bucketMetageneration));
+   * if (deleted) {
+   *   // the bucket was deleted
+   * } else {
+   *   // the bucket was not found
+   * }
+   * }
+ * + * @return {@code true} if bucket was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + boolean delete(String bucket, BucketSourceOption... options); + + /** + * Deletes the requested blob. + * + *

Example of deleting a blob, only if its generation matches a value, otherwise a {@link + * StorageException} is thrown. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long blobGeneration = 42;
+   * boolean deleted = storage.delete(bucketName, blobName,
+   *     BlobSourceOption.generationMatch(blobGeneration));
+   * if (deleted) {
+   *   // the blob was deleted
+   * } else {
+   *   // the blob was not found
+   * }
+   * }
+ * + * @return {@code true} if blob was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + boolean delete(String bucket, String blob, BlobSourceOption... options); + + /** + * Deletes the requested blob. + * + *

Accepts an optional userProject {@link BlobSourceOption} option which defines the project id + * to assign operational costs. + * + *

Example of deleting a blob, only if its generation matches a value, otherwise a {@link + * StorageException} is thrown. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long blobGeneration = 42;
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * boolean deleted = storage.delete(blobId, BlobSourceOption.generationMatch(blobGeneration));
+   * if (deleted) {
+   *   // the blob was deleted
+   * } else {
+   *   // the blob was not found
+   * }
+   * }
+ * + * @return {@code true} if blob was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + boolean delete(BlobId blob, BlobSourceOption... options); + + /** + * Deletes the requested blob. + * + *

Example of deleting a blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * boolean deleted = storage.delete(blobId);
+   * if (deleted) {
+   *   // the blob was deleted
+   * } else {
+   *   // the blob was not found
+   * }
+   * }
+ * + * @return {@code true} if blob was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + boolean delete(BlobId blob); + + /** + * Sends a compose request. + * + *

Accepts an optional userProject {@link BlobTargetOption} option which defines the project id + * to assign operational costs. + * + *

Example of composing two blobs. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * String sourceBlob1 = "source_blob_1";
+   * String sourceBlob2 = "source_blob_2";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build();
+   * ComposeRequest request = ComposeRequest.newBuilder()
+   *     .setTarget(blobInfo)
+   *     .addSource(sourceBlob1)
+   *     .addSource(sourceBlob2)
+   *     .build();
+   * Blob blob = storage.compose(request);
+   * }
+ * + * @return the composed blob + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob compose(ComposeRequest composeRequest); + + /** + * Sends a copy request. This method copies both blob's data and information. To override source + * blob's information supply a {@code BlobInfo} to the {@code CopyRequest} using either {@link + * Storage.CopyRequest.Builder#setTarget(BlobInfo, Storage.BlobTargetOption...)} or {@link + * Storage.CopyRequest.Builder#setTarget(BlobInfo, Iterable)}. + * + *

This method returns a {@link CopyWriter} object for the provided {@code CopyRequest}. If + * source and destination objects share the same location and storage class the source blob is + * copied with one request and {@link CopyWriter#getResult()} immediately returns, regardless of + * the {@link CopyRequest#megabytesCopiedPerChunk} parameter. If source and destination have + * different location or storage class {@link CopyWriter#getResult()} might issue multiple RPC + * calls depending on blob's size. + * + *

Example of copying a blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * String copyBlobName = "copy_blob_name";
+   * CopyRequest request = CopyRequest.newBuilder()
+   *     .setSource(BlobId.of(bucketName, blobName))
+   *     .setTarget(BlobId.of(bucketName, copyBlobName))
+   *     .build();
+   * Blob blob = storage.copy(request).getResult();
+   * }
+ * + *

Example of copying a blob in chunks. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * String copyBlobName = "copy_blob_name";
+   * CopyRequest request = CopyRequest.newBuilder()
+   *     .setSource(BlobId.of(bucketName, blobName))
+   *     .setTarget(BlobId.of(bucketName, copyBlobName))
+   *     .build();
+   * CopyWriter copyWriter = storage.copy(request);
+   * while (!copyWriter.isDone()) {
+   *   copyWriter.copyChunk();
+   * }
+   * Blob blob = copyWriter.getResult();
+   * }
+ * + *

Example of rotating the encryption key of a blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * String oldEncryptionKey = "old_encryption_key";
+   * String newEncryptionKey = "new_encryption_key";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * CopyRequest request = CopyRequest.newBuilder()
+   *     .setSource(blobId)
+   *     .setSourceOptions(BlobSourceOption.decryptionKey(oldEncryptionKey))
+   *     .setTarget(blobId, BlobTargetOption.encryptionKey(newEncryptionKey))
+   *     .build();
+   * Blob blob = storage.copy(request).getResult();
+   * }
+ * + * @return a {@link CopyWriter} object that can be used to get information on the newly created + * blob or to complete the copy if more than one RPC request is needed + * @throws StorageException upon failure + * @see Rewrite + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + CopyWriter copy(CopyRequest copyRequest); + + /** + * Reads all the bytes from a blob. + * + *

Example of reading all bytes of a blob, if generation matches a value, otherwise a {@link + * StorageException} is thrown. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long blobGeneration = 42;
+   * byte[] content = storage.readAllBytes(bucketName, blobName,
+   *     BlobSourceOption.generationMatch(blobGeneration));
+   * }
+ * + * @return the blob's content + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + byte[] readAllBytes(String bucket, String blob, BlobSourceOption... options); + + /** + * Reads all the bytes from a blob. + * + *

Example of reading all bytes of a blob's specific generation, otherwise a {@link + * StorageException} is thrown. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long blobGeneration = 42;
+   * BlobId blobId = BlobId.of(bucketName, blobName, blobGeneration);
+   * byte[] content = storage.readAllBytes(blobId);
+   * }
+ * + *

Example of reading all bytes of an encrypted blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * String decryptionKey = "my_encryption_key";
+   * byte[] content = storage.readAllBytes(
+   *     bucketName, blobName, BlobSourceOption.decryptionKey(decryptionKey));
+   * }
+ * + * @return the blob's content + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + byte[] readAllBytes(BlobId blob, BlobSourceOption... options); + + /** + * Creates a new empty batch for grouping multiple service calls in one underlying RPC call. + * + *

Example of using a batch request to delete, update and get a blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName1 = "my-blob-name1";
+   * String blobName2 = "my-blob-name2";
+   * StorageBatch batch = storage.batch();
+   * BlobId firstBlob = BlobId.of(bucketName, blobName1);
+   * BlobId secondBlob = BlobId.of(bucketName, blobName2);
+   * batch.delete(firstBlob).notify(new BatchResult.Callback() {
+   *   public void success(Boolean result) {
+   *     // deleted successfully
+   *   }
+   *
+   *   public void error(StorageException exception) {
+   *     // delete failed
+   *   }
+   * });
+   * batch.update(BlobInfo.newBuilder(secondBlob).setContentType("text/plain").build());
+   * StorageBatchResult result = batch.get(secondBlob);
+   * batch.submit();
+   * Blob blob = result.get(); // returns get result or throws StorageException
+   * }
+ */ + @TransportCompatibility(Transport.HTTP) + StorageBatch batch(); + + /** + * Returns a channel for reading the blob's content. The blob's latest generation is read. If the + * blob changes while reading (i.e. {@link BlobInfo#getEtag()} changes), subsequent calls to + * {@code blobReadChannel.read(ByteBuffer)} may throw {@link StorageException}. + * + *

Example of reading a blob's content through a reader. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * try (ReadChannel reader = storage.reader(bucketName, blobName)) {
+   *   ByteBuffer bytes = ByteBuffer.allocate(64 * 1024);
+   *   while (reader.read(bytes) > 0) {
+   *     bytes.flip();
+   *     // do something with bytes
+   *     bytes.clear();
+   *   }
+   * }
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + ReadChannel reader(String bucket, String blob, BlobSourceOption... options); + + /** + * Returns a channel for reading the blob's content. If {@code blob.generation()} is set data + * corresponding to that generation is read. If {@code blob.generation()} is {@code null} the + * blob's latest generation is read. If the blob changes while reading (i.e. {@link + * BlobInfo#getEtag()} changes), subsequent calls to {@code blobReadChannel.read(ByteBuffer)} may + * throw {@link StorageException}. + * + *

The {@link BlobSourceOption#generationMatch()} and {@link + * BlobSourceOption#generationMatch(long)} options can be used to ensure that {@code + * blobReadChannel.read(ByteBuffer)} calls will throw {@link StorageException} if the blob's + * generation differs from the expected one. + * + *

Example of reading a blob's content through a reader. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * try (ReadChannel reader = storage.reader(blobId)) {
+   *   ByteBuffer bytes = ByteBuffer.allocate(64 * 1024);
+   *   while (reader.read(bytes) > 0) {
+   *     bytes.flip();
+   *     // do something with bytes
+   *     bytes.clear();
+   *   }
+   * }
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + ReadChannel reader(BlobId blob, BlobSourceOption... options); + + /** + * Downloads the given blob to the given path using specified blob read options. + * + *
{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * Path destination = Paths.get("my-blob-destination.txt");
+   * downloadTo(blobId, destination);
+   * // do stuff with destination
+   * }
+ * + * @param blob + * @param path + * @param options + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + void downloadTo(BlobId blob, Path path, BlobSourceOption... options); + + /** + * Downloads the given blob to the given output stream using specified blob read options. + * + *
{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * Path destination = Paths.get("my-blob-destination.txt");
+   * try (OutputStream outputStream = Files.newOutputStream(destination)) {
+   *  downloadTo(blobId, outputStream);
+   *  // do stuff with destination
+   * }
+   * }
+ * + * @param blob + * @param outputStream + * @param options + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + void downloadTo(BlobId blob, OutputStream outputStream, BlobSourceOption... options); + + /** + * Creates a blob and returns a channel for writing its content. By default any MD5 and CRC32C + * values in the given {@code blobInfo} are ignored unless requested via the {@code + * BlobWriteOption.md5Match} and {@code BlobWriteOption.crc32cMatch} options. + * + *

Example of writing a blob's content through a writer: + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * byte[] content = "Hello, World!".getBytes(UTF_8);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build();
+   * try (WriteChannel writer = storage.writer(blobInfo)) {
+   *     writer.write(ByteBuffer.wrap(content, 0, content.length));
+   * } catch (IOException ex) {
+   *   // handle exception
+   * }
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + WriteChannel writer(BlobInfo blobInfo, BlobWriteOption... options); + + /** + * Accepts signed URL and return a channel for writing content. + * + *

Example of writing content through a writer using signed URL. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * byte[] content = "Hello, World!".getBytes(UTF_8);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build();
+   * URL signedURL = storage.signUrl(
+   *     blobInfo,
+   *     1, TimeUnit.HOURS,
+   *     Storage.SignUrlOption.httpMethod(HttpMethod.POST));
+   * try (WriteChannel writer = storage.writer(signedURL)) {
+   *    writer.write(ByteBuffer.wrap(content, 0, content.length));
+   * }
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility(Transport.HTTP) + WriteChannel writer(URL signedURL); + + /** + * Generates a signed URL for a blob. If you have a blob that you want to allow access to for a + * fixed amount of time, you can use this method to generate a URL that is only valid within a + * certain time period. This is particularly useful if you don't want publicly accessible blobs, + * but also don't want to require users to explicitly log in. Signing a URL requires a service + * account signer. If an instance of {@link com.google.auth.ServiceAccountSigner} was passed to + * {@link StorageOptions}' builder via {@code setCredentials(Credentials)} or the default + * credentials are being used and the environment variable {@code GOOGLE_APPLICATION_CREDENTIALS} + * is set or your application is running in App Engine, then {@code signUrl} will use that + * credentials to sign the URL. If the credentials passed to {@link StorageOptions} do not + * implement {@link ServiceAccountSigner} (this is the case, for instance, for Google Cloud SDK + * credentials) then {@code signUrl} will throw an {@link IllegalStateException} unless an + * implementation of {@link ServiceAccountSigner} is passed using the {@link + * SignUrlOption#signWith(ServiceAccountSigner)} option. + * + *

A service account signer is looked for in the following order: + * + *

    + *
  1. The signer passed with the option {@link SignUrlOption#signWith(ServiceAccountSigner)} + *
  2. The credentials passed to {@link StorageOptions} + *
  3. The default credentials, if no credentials were passed to {@link StorageOptions} + *
+ * + *

Example of creating a signed URL that is valid for 1 week, using the default credentials for + * signing the URL, the default signing method (V2), and the default URL style (path-style): + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * URL signedUrl = storage.signUrl(
+   *     BlobInfo.newBuilder(bucketName, blobName).build(),
+   *     7, TimeUnit.DAYS);
+   * }
+ * + *

Example of creating a signed URL passing the {@link SignUrlOption#withV4Signature()} option, + * which enables V4 signing: + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * URL signedUrl = storage.signUrl(
+   *     BlobInfo.newBuilder(bucketName, blobName).build(),
+   *     7, TimeUnit.DAYS,
+   *     Storage.SignUrlOption.withV4Signature());
+   * }
+ * + *

Example of creating a signed URL passing the {@link SignUrlOption#withVirtualHostedStyle()} + * option, which specifies the bucket name in the hostname of the URI, rather than in the path: + * + *

{@code
+   * URL signedUrl = storage.signUrl(
+   *     BlobInfo.newBuilder(bucketName, blobName).build(),
+   *     1, TimeUnit.DAYS,
+   *     Storage.SignUrlOption.withVirtualHostedStyle());
+   * }
+ * + *

Example of creating a signed URL passing the {@link SignUrlOption#withPathStyle()} option, + * which specifies the bucket name in path portion of the URI, rather than in the hostname: + * + *

{@code
+   * URL signedUrl = storage.signUrl(
+   *     BlobInfo.newBuilder(bucketName, blobName).build(),
+   *     1, TimeUnit.DAYS,
+   *     Storage.SignUrlOption.withPathStyle());
+   * }
+ * + *

Example of creating a signed URL passing the {@link + * SignUrlOption#signWith(ServiceAccountSigner)} option, that will be used for signing the URL: + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * String kfPath = "/path/to/keyfile.json";
+   * URL signedUrl = storage.signUrl(
+   *     BlobInfo.newBuilder(bucketName, blobName).build(),
+   *     7, TimeUnit.DAYS,
+   *     SignUrlOption.signWith(ServiceAccountCredentials.fromStream(new FileInputStream(kfPath))));
+   * }
+ * + *

Note that the {@link ServiceAccountSigner} may require additional configuration to enable + * URL signing. See the documentation for the implementation for more details. + * + *

Example of creating a signed URL for a blob with generation: + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long generation = 1576656755290328L;
+   *
+   * URL signedUrl = storage.signUrl(
+   *     BlobInfo.newBuilder(bucketName, blobName, generation).build(),
+   *     7, TimeUnit.DAYS,
+   *     SignUrlOption.withQueryParams(ImmutableMap.of("generation", String.valueOf(generation))));
+   * }
+ * + * @param blobInfo the blob associated with the signed URL + * @param duration time until the signed URL expires, expressed in {@code unit}. The finest + * granularity supported is 1 second, finer granularities will be truncated + * @param unit time unit of the {@code duration} parameter + * @param options optional URL signing options + * @throws IllegalStateException if {@link SignUrlOption#signWith(ServiceAccountSigner)} was not + * used and no implementation of {@link ServiceAccountSigner} was provided to {@link + * StorageOptions} + * @throws IllegalArgumentException if {@code SignUrlOption.withMd5()} option is used and {@code + * blobInfo.md5()} is {@code null} + * @throws IllegalArgumentException if {@code SignUrlOption.withContentType()} option is used and + * {@code blobInfo.contentType()} is {@code null} + * @throws SigningException if the attempt to sign the URL failed + * @see Signed-URLs + */ + @TransportCompatibility(Transport.HTTP) + URL signUrl(BlobInfo blobInfo, long duration, TimeUnit unit, SignUrlOption... options); + + /** + * Generates a URL and a map of fields that can be specified in an HTML form to submit a POST + * request. The returned map includes a signature which must be provided with the request. + * Generating a presigned POST policy requires a service account signer. If an instance of {@link + * com.google.auth.ServiceAccountSigner} was passed to {@link StorageOptions}' builder via {@code + * setCredentials(Credentials)} or the default credentials are being used and the environment + * variable {@code GOOGLE_APPLICATION_CREDENTIALS} is set, generateSignedPostPolicyV4 will use + * those credentials to sign the URL. 
If the credentials passed to {@link StorageOptions} do not + * implement {@link ServiceAccountSigner} (this is the case, for instance, for Google Cloud SDK + * credentials) then {@code signUrl} will throw an {@link IllegalStateException} unless an + * implementation of {@link ServiceAccountSigner} is passed using the {@link + * PostPolicyV4Option#signWith(ServiceAccountSigner)} option. + * + *

Example of generating a presigned post policy which has the condition that only jpeg images + * can be uploaded, and applies the public read acl to each image uploaded, and making the POST + * request: + * + *

{@code
+   * PostFieldsV4 fields = PostFieldsV4.newBuilder().setAcl("public-read").build();
+   * PostConditionsV4 conditions = PostConditionsV4.newBuilder().addContentTypeCondition(ConditionV4Type.MATCHES, "image/jpeg").build();
+   *
+   * PostPolicyV4 policy = storage.generateSignedPostPolicyV4(
+   *     BlobInfo.newBuilder("my-bucket", "my-object").build(),
+   *     7, TimeUnit.DAYS, fields, conditions);
+   *
+   * HttpClient client = HttpClientBuilder.create().build();
+   * HttpPost request = new HttpPost(policy.getUrl());
+   * MultipartEntityBuilder builder = MultipartEntityBuilder.create();
+   *
+   * for (Map.Entry entry : policy.getFields().entrySet()) {
+   *     builder.addTextBody(entry.getKey(), entry.getValue());
+   * }
+   * File file = new File("path/to/your/file/to/upload");
+   * builder.addBinaryBody("file", new FileInputStream(file), ContentType.APPLICATION_OCTET_STREAM, file.getName());
+   * request.setEntity(builder.build());
+   * client.execute(request);
+   * }
+ * + * @param blobInfo the blob uploaded in the form + * @param duration time before expiration + * @param unit duration time unit + * @param fields the fields specified in the form + * @param conditions which conditions every upload must satisfy + * @param options optional post policy options + * @see POST + * Object + */ + @TransportCompatibility(Transport.HTTP) + PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostFieldsV4 fields, + PostConditionsV4 conditions, + PostPolicyV4Option... options); + + /** + * Generates a presigned post policy without any conditions. Automatically creates required + * conditions. See full documentation for {@link #generateSignedPostPolicyV4(BlobInfo, long, + * TimeUnit, PostPolicyV4.PostFieldsV4, PostPolicyV4.PostConditionsV4, PostPolicyV4Option...)}. + */ + @TransportCompatibility(Transport.HTTP) + PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostFieldsV4 fields, + PostPolicyV4Option... options); + + /** + * Generates a presigned post policy without any fields. Automatically creates required fields. + * See full documentation for {@link #generateSignedPostPolicyV4(BlobInfo, long, TimeUnit, + * PostPolicyV4.PostFieldsV4, PostPolicyV4.PostConditionsV4, PostPolicyV4Option...)}. + */ + @TransportCompatibility(Transport.HTTP) + PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostConditionsV4 conditions, + PostPolicyV4Option... options); + + /** + * Generates a presigned post policy without any fields or conditions. Automatically creates + * required fields and conditions. See full documentation for {@link + * #generateSignedPostPolicyV4(BlobInfo, long, TimeUnit, PostPolicyV4.PostFieldsV4, + * PostPolicyV4.PostConditionsV4, PostPolicyV4Option...)}. 
+ */ + @TransportCompatibility(Transport.HTTP) + PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, long duration, TimeUnit unit, PostPolicyV4Option... options); + + /** + * Gets the requested blobs. A batch request is used to perform this call. + * + *

Example of getting information on several blobs using a single batch request. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName1 = "my-blob-name1";
+   * String blobName2 = "my-blob-name2";
+   * BlobId firstBlob = BlobId.of(bucketName, blobName1);
+   * BlobId secondBlob = BlobId.of(bucketName, blobName2);
+   * List blobs = storage.get(firstBlob, secondBlob);
+   * }
+ * + * @param blobIds blobs to get + * @return an immutable list of {@code Blob} objects. If a blob does not exist or access to it has + * been denied the corresponding item in the list is {@code null}. + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + List get(BlobId... blobIds); + + /** + * Gets the requested blobs. A batch request is used to perform this call. + * + *

Example of getting information on several blobs using a single batch request. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName1 = "my-blob-name1";
+   * String blobName2 = "my-blob-name2";
+   * List blobIds = new LinkedList<>();
+   * blobIds.add(BlobId.of(bucketName, blobName1));
+   * blobIds.add(BlobId.of(bucketName, blobName2));
+   * List blobs = storage.get(blobIds);
+   * }
+ * + * @param blobIds blobs to get + * @return an immutable list of {@code Blob} objects. If a blob does not exist or access to it has + * been denied the corresponding item in the list is {@code null}. + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + List get(Iterable blobIds); + + /** + * Updates the requested blobs. A batch request is used to perform this call. The original + * properties are merged with the properties in the provided {@code BlobInfo} objects. Unsetting a + * property can be done by setting the property of the provided {@code BlobInfo} objects to {@code + * null}. See {@link #update(BlobInfo)} for a code example. + * + *

Example of updating information on several blobs using a single batch request. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName1 = "my-blob-name1";
+   * String blobName2 = "my-blob-name2";
+   * Blob firstBlob = storage.get(bucketName, blobName1);
+   * Blob secondBlob = storage.get(bucketName, blobName2);
+   * List updatedBlobs = storage.update(
+   *     firstBlob.toBuilder().setContentType("text/plain").build(),
+   *     secondBlob.toBuilder().setContentType("text/plain").build());
+   * }
+ * + * @param blobInfos blobs to update + * @return an immutable list of {@code Blob} objects. If a blob does not exist or access to it has + * been denied the corresponding item in the list is {@code null}. + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + List update(BlobInfo... blobInfos); + + /** + * Updates the requested blobs. A batch request is used to perform this call. The original + * properties are merged with the properties in the provided {@code BlobInfo} objects. Unsetting a + * property can be done by setting the property of the provided {@code BlobInfo} objects to {@code + * null}. See {@link #update(BlobInfo)} for a code example. + * + *

Example of updating information on several blobs using a single batch request. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName1 = "my-blob-name1";
+   * String blobName2 = "my-blob-name2";
+   * Blob firstBlob = storage.get(bucketName, blobName1);
+   * Blob secondBlob = storage.get(bucketName, blobName2);
+   * List blobs = new LinkedList<>();
+   * blobs.add(firstBlob.toBuilder().setContentType("text/plain").build());
+   * blobs.add(secondBlob.toBuilder().setContentType("text/plain").build());
+   * List updatedBlobs = storage.update(blobs);
+   * }
+ * + * @param blobInfos blobs to update + * @return an immutable list of {@code Blob} objects. If a blob does not exist or access to it has + * been denied the corresponding item in the list is {@code null}. + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + List update(Iterable blobInfos); + + /** + * Deletes the requested blobs. A batch request is used to perform this call. + * + *

Example of deleting several blobs using a single batch request. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName1 = "my-blob-name1";
+   * String blobName2 = "my-blob-name2";
+   * BlobId firstBlob = BlobId.of(bucketName, blobName1);
+   * BlobId secondBlob = BlobId.of(bucketName, blobName2);
+   * List deleted = storage.delete(firstBlob, secondBlob);
+   * }
+ * + * @param blobIds blobs to delete + * @return an immutable list of booleans. If a blob has been deleted the corresponding item in the + * list is {@code true}. If a blob was not found, deletion failed or access to the resource + * was denied the corresponding item is {@code false}. + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + List delete(BlobId... blobIds); + + /** + * Deletes the requested blobs. A batch request is used to perform this call. + * + *

Example of deleting several blobs using a single batch request. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName1 = "my-blob-name1";
+   * String blobName2 = "my-blob-name2";
+   * List blobIds = new LinkedList<>();
+   * blobIds.add(BlobId.of(bucketName, blobName1));
+   * blobIds.add(BlobId.of(bucketName, blobName2));
+   * List deleted = storage.delete(blobIds);
+   * }
+ * + * @param blobIds blobs to delete + * @return an immutable list of booleans. If a blob has been deleted the corresponding item in the + * list is {@code true}. If a blob was not found, deletion failed or access to the resource + * was denied the corresponding item is {@code false}. + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + List delete(Iterable blobIds); + + /** + * Returns the ACL entry for the specified entity on the specified bucket or {@code null} if not + * found. + * + *

Example of getting the ACL entry for an entity on a bucket. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * Acl acl = storage.getAcl(bucketName, User.ofAllAuthenticatedUsers());
+   * }
+ * + *

Example of getting the ACL entry for a specific user on a requester_pays bucket with a + * user_project option. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String userEmail = "google-cloud-java-tests@java-docs-samples-tests.iam.gserviceaccount.com";
+   * BucketSourceOption userProjectOption = BucketSourceOption.userProject("myProject");
+   * Acl acl = storage.getAcl(bucketName, new User(userEmail), userProjectOption);
+   * }
+ * + *

Behavioral Differences between HTTP and gRPC

+ * + *
    + *
1. Calling this method for a Bucket which has Uniform + * bucket-level access enabled exhibits different behavior depending on which {@link + * Transport} is used. For JSON, an HTTP 400 Bad Request error will be thrown, whereas for + * gRPC, an empty list will be returned. + *
+ * + * @param bucket name of the bucket where the getAcl operation takes place + * @param entity ACL entity to fetch + * @param options extra parameters to apply to this operation + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Acl getAcl(String bucket, Entity entity, BucketSourceOption... options); + + /** + * @see #getAcl(String, Entity, BucketSourceOption...) + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Acl getAcl(String bucket, Entity entity); + + /** + * Deletes the ACL entry for the specified entity on the specified bucket. + * + *

Example of deleting the ACL entry for an entity on a bucket. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * boolean deleted = storage.deleteAcl(bucketName, User.ofAllAuthenticatedUsers());
+   * if (deleted) {
+   *   // the acl entry was deleted
+   * } else {
+   *   // the acl entry was not found
+   * }
+   * }
+ * + *

Example of deleting the ACL entry for a specific user on a requester_pays bucket with a + * user_project option. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * BucketSourceOption userProject = BucketSourceOption.userProject("myProject");
+   * boolean deleted = storage.deleteAcl(bucketName, User.ofAllAuthenticatedUsers(), userProject);
+   * }
+ * + * @param bucket name of the bucket to delete an ACL from + * @param entity ACL entity to delete + * @param options extra parameters to apply to this operation + * @return {@code true} if the ACL was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + boolean deleteAcl(String bucket, Entity entity, BucketSourceOption... options); + + /** + * @see #deleteAcl(String, Entity, BucketSourceOption...) + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + boolean deleteAcl(String bucket, Entity entity); + + /** + * Creates a new ACL entry on the specified bucket. + * + *

Example of creating a new ACL entry on a bucket. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * Acl acl = storage.createAcl(bucketName, Acl.of(User.ofAllAuthenticatedUsers(), Role.READER));
+   * }
+ * + *

Example of creating a new ACL entry on a requester_pays bucket with a user_project option. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * Acl acl = storage.createAcl(bucketName, Acl.of(User.ofAllAuthenticatedUsers(), Role.READER),
+   *     BucketSourceOption.userProject("myProject"));
+   * }
+ * + * @param bucket name of the bucket for which an ACL should be created + * @param acl ACL to create + * @param options extra parameters to apply to this operation + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Acl createAcl(String bucket, Acl acl, BucketSourceOption... options); + + /** + * @see #createAcl(String, Acl, BucketSourceOption...) + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Acl createAcl(String bucket, Acl acl); + + /** + * Updates an ACL entry on the specified bucket. + * + *

Example of updating a new ACL entry on a bucket. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * Acl acl = storage.updateAcl(bucketName, Acl.of(User.ofAllAuthenticatedUsers(), Role.OWNER));
+   * }
+ * + *

Example of updating a new ACL entry on a requester_pays bucket with a user_project option. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * Acl acl = storage.updateAcl(bucketName, Acl.of(User.ofAllAuthenticatedUsers(), Role.OWNER),
+   *     BucketSourceOption.userProject("myProject"));
+   * }
+ * + * @param bucket name of the bucket where the updateAcl operation takes place + * @param acl ACL to update + * @param options extra parameters to apply to this operation + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Acl updateAcl(String bucket, Acl acl, BucketSourceOption... options); + + /** + * @see #updateAcl(String, Acl, BucketSourceOption...) + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Acl updateAcl(String bucket, Acl acl); + + /** + * Lists the ACL entries for the provided bucket. + * + *

Example of listing the ACL entries for a blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * List acls = storage.listAcls(bucketName);
+   * for (Acl acl : acls) {
+   *   // do something with ACL entry
+   * }
+   * }
+ * + *

Example of listing the ACL entries for a blob in a requester_pays bucket with a user_project + * option. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * List acls = storage.listAcls(bucketName, BucketSourceOption.userProject("myProject"));
+   * for (Acl acl : acls) {
+   *   // do something with ACL entry
+   * }
+   * }
+ * + *

Behavioral Differences between HTTP and gRPC

+ * + *
    + *
1. Calling this method for a Bucket which has Uniform + * bucket-level access enabled exhibits different behavior depending on which {@link + * Transport} is used. For JSON, an HTTP 400 Bad Request error will be thrown, whereas for + * gRPC, an empty list will be returned. + *
+ * + * @param bucket the name of the bucket to list ACLs for + * @param options any number of BucketSourceOptions to apply to this operation + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + List listAcls(String bucket, BucketSourceOption... options); + + /** + * @see #listAcls(String, BucketSourceOption...) + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + List listAcls(String bucket); + + /** + * Returns the default object ACL entry for the specified entity on the specified bucket or {@code + * null} if not found. + * + *

Default ACLs are applied to a new blob within the bucket when no ACL was provided for that + * blob. + * + *

Example of getting the default ACL entry for an entity on a bucket. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * Acl acl = storage.getDefaultAcl(bucketName, User.ofAllAuthenticatedUsers());
+   * }
+ * + *

Behavioral Differences between HTTP and gRPC

+ * + *
    + *
1. Calling this method for a Bucket which has Uniform + * bucket-level access enabled exhibits different behavior depending on which {@link + * Transport} is used. For JSON, an HTTP 400 Bad Request error will be thrown, whereas for + * gRPC, an empty list will be returned. + *
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Acl getDefaultAcl(String bucket, Entity entity); + + /** + * Deletes the default object ACL entry for the specified entity on the specified bucket. + * + *

Default ACLs are applied to a new blob within the bucket when no ACL was provided for that + * blob. + * + *

Example of deleting the default ACL entry for an entity on a bucket. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * boolean deleted = storage.deleteDefaultAcl(bucketName, User.ofAllAuthenticatedUsers());
+   * if (deleted) {
+   *   // the acl entry was deleted
+   * } else {
+   *   // the acl entry was not found
+   * }
+   * }
+ * + * @return {@code true} if the ACL was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + boolean deleteDefaultAcl(String bucket, Entity entity); + + /** + * Creates a new default blob ACL entry on the specified bucket. + * + *

Default ACLs are applied to a new blob within the bucket when no ACL was provided for that + * blob. + * + *

Example of creating a new default ACL entry on a bucket. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * Acl acl =
+   *     storage.createDefaultAcl(bucketName, Acl.of(User.ofAllAuthenticatedUsers(), Role.READER));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Acl createDefaultAcl(String bucket, Acl acl); + + /** + * Updates a default blob ACL entry on the specified bucket. + * + *

Default ACLs are applied to a new blob within the bucket when no ACL was provided for that + * blob. + * + *

Example of updating a new default ACL entry on a bucket. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * Acl acl =
+   *     storage.updateDefaultAcl(bucketName, Acl.of(User.ofAllAuthenticatedUsers(), Role.OWNER));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Acl updateDefaultAcl(String bucket, Acl acl); + + /** + * Lists the default blob ACL entries for the provided bucket. + * + *

Default ACLs are applied to a new blob within the bucket when no ACL was provided for that + * blob. + * + *

Example of listing the default ACL entries for a blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * List acls = storage.listDefaultAcls(bucketName);
+   * for (Acl acl : acls) {
+   *   // do something with ACL entry
+   * }
+   * }
+ * + *

Behavioral Differences between HTTP and gRPC

+ * + *
    + *
1. Calling this method for a Bucket which has Uniform + * bucket-level access enabled exhibits different behavior depending on which {@link + * Transport} is used. For JSON, an HTTP 400 Bad Request error will be thrown, whereas for + * gRPC, an empty list will be returned. + *
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + List listDefaultAcls(String bucket); + + /** + * Returns the ACL entry for the specified entity on the specified blob or {@code null} if not + * found. + * + *

Example of getting the ACL entry for an entity on a blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long blobGeneration = 42;
+   * BlobId blobId = BlobId.of(bucketName, blobName, blobGeneration);
+   * Acl acl = storage.getAcl(blobId, User.ofAllAuthenticatedUsers());
+   * }
+ * + *

Example of getting the ACL entry for a specific user on a blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * String userEmail = "google-cloud-java-tests@java-docs-samples-tests.iam.gserviceaccount.com";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * Acl acl = storage.getAcl(blobId, new User(userEmail));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Acl getAcl(BlobId blob, Entity entity); + + /** + * Deletes the ACL entry for the specified entity on the specified blob. + * + *

Example of deleting the ACL entry for an entity on a blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long blobGeneration = 42;
+   * BlobId blobId = BlobId.of(bucketName, blobName, blobGeneration);
+   * boolean deleted = storage.deleteAcl(blobId, User.ofAllAuthenticatedUsers());
+   * if (deleted) {
+   *   // the acl entry was deleted
+   * } else {
+   *   // the acl entry was not found
+   * }
+   * }
+ * + * @return {@code true} if the ACL was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + boolean deleteAcl(BlobId blob, Entity entity); + + /** + * Creates a new ACL entry on the specified blob. + * + *

Example of creating a new ACL entry on a blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long blobGeneration = 42;
+   * BlobId blobId = BlobId.of(bucketName, blobName, blobGeneration);
+   * Acl acl = storage.createAcl(blobId, Acl.of(User.ofAllAuthenticatedUsers(), Role.READER));
+   * }
+ * + *

Example of updating a blob to be public-read. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long blobGeneration = 42;
+   * BlobId blobId = BlobId.of(bucketName, blobName, blobGeneration);
+   * Acl acl = storage.createAcl(blobId, Acl.of(User.ofAllUsers(), Role.READER));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Acl createAcl(BlobId blob, Acl acl); + + /** + * Updates an ACL entry on the specified blob. + * + *

Example of updating a new ACL entry on a blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long blobGeneration = 42;
+   * BlobId blobId = BlobId.of(bucketName, blobName, blobGeneration);
+   * Acl acl = storage.updateAcl(blobId, Acl.of(User.ofAllAuthenticatedUsers(), Role.OWNER));
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Acl updateAcl(BlobId blob, Acl acl); + + /** + * Lists the ACL entries for the provided blob. + * + *

Example of listing the ACL entries for a blob. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * long blobGeneration = 42;
+   * BlobId blobId = BlobId.of(bucketName, blobName, blobGeneration);
+   * List acls = storage.listAcls(blobId);
+   * for (Acl acl : acls) {
+   *   // do something with ACL entry
+   * }
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + List listAcls(BlobId blob); + + /** + * Creates a new HMAC Key for the provided service account, including the secret key. Note that + * the secret key is only returned upon creation via this method. + * + *

Example of creating a new HMAC Key. + * + *

{@code
+   * ServiceAccount serviceAccount = ServiceAccount.of("my-service-account@google.com");
+   *
+   * HmacKey hmacKey = storage.createHmacKey(serviceAccount);
+   *
+   * String secretKey = hmacKey.getSecretKey();
+   * HmacKey.HmacKeyMetadata metadata = hmacKey.getMetadata();
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + HmacKey createHmacKey(ServiceAccount serviceAccount, CreateHmacKeyOption... options); + + /** + * Lists HMAC keys for a given service account. Note this returns {@code HmacKeyMetadata} objects, + * which do not contain secret keys. + * + *

Example of listing HMAC keys, specifying project id. + * + *

{@code
+   * Page metadataPage = storage.listHmacKeys(
+   *     Storage.ListHmacKeysOption.projectId("my-project-id"));
+   * for (HmacKey.HmacKeyMetadata hmacKeyMetadata : metadataPage.getValues()) {
+   *     //do something with the metadata
+   * }
+   * }
+ * + *

Example of listing HMAC keys, specifying max results and showDeletedKeys. Since projectId is + * not specified, the same project ID as the storage client instance will be used + * + *

{@code
+   * ServiceAccount serviceAccount = ServiceAccount.of("my-service-account@google.com");
+   *
+   * Page metadataPage = storage.listHmacKeys(
+   *     Storage.ListHmacKeysOption.serviceAccount(serviceAccount),
+   *     Storage.ListHmacKeysOption.maxResults(10L),
+   *     Storage.ListHmacKeysOption.showDeletedKeys(true));
+   * for (HmacKey.HmacKeyMetadata hmacKeyMetadata : metadataPage.getValues()) {
+   *     //do something with the metadata
+   * }
+   * }
+ * + * @param options the options to apply to this operation + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + Page listHmacKeys(ListHmacKeysOption... options); + + /** + * Gets an HMAC key given its access id. Note that this returns a {@code HmacKeyMetadata} object, + * which does not contain the secret key. + * + *

Example of getting an HMAC key. Since projectId isn't specified, the same project ID as the + * storage client instance will be used. + * + *

{@code
+   * String hmacKeyAccessId = "my-access-id";
+   * HmacKey.HmacKeyMetadata hmacKeyMetadata = storage.getHmacKey(hmacKeyAccessId);
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + HmacKeyMetadata getHmacKey(String accessId, GetHmacKeyOption... options); + + /** + * Deletes an HMAC key. Note that only an {@code INACTIVE} key can be deleted. Attempting to + * delete a key whose {@code HmacKey.HmacKeyState} is anything other than {@code INACTIVE} will + * fail. + * + *

Example of updating an HMAC key's state to INACTIVE and then deleting it. + * + *

{@code
+   * String hmacKeyAccessId = "my-access-id";
+   * HmacKey.HmacKeyMetadata hmacKeyMetadata = storage.getHmacKey(hmacKeyAccessId);
+   *
+   * storage.updateHmacKeyState(hmacKeyMetadata, HmacKey.HmacKeyState.INACTIVE);
+   * storage.deleteHmacKey(hmacKeyMetadata);
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + void deleteHmacKey(HmacKeyMetadata hmacKeyMetadata, DeleteHmacKeyOption... options); + + /** + * Updates the state of an HMAC key and returns the updated metadata. + * + *

Example of updating the state of an HMAC key. + * + *

{@code
+   * String hmacKeyAccessId = "my-access-id";
+   * HmacKey.HmacKeyMetadata hmacKeyMetadata = storage.getHmacKey(hmacKeyAccessId);
+   *
+   * storage.updateHmacKeyState(hmacKeyMetadata, HmacKey.HmacKeyState.INACTIVE);
+   * }
+ * + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + HmacKeyMetadata updateHmacKeyState( + final HmacKeyMetadata hmacKeyMetadata, + final HmacKey.HmacKeyState state, + UpdateHmacKeyOption... options); + + /** + * Gets the IAM policy for the provided bucket. + * + *

It's possible for bindings to be empty and instead have permissions inherited through + * Project or Organization IAM Policies. To prevent corrupting policies when you update an IAM + * policy with {@code Storage.setIamPolicy}, the ETAG value is used to perform optimistic + * concurrency. + * + *

Example of getting the IAM policy for a bucket. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * Policy policy = storage.getIamPolicy(bucketName);
+   * }
+ * + * @param bucket name of the bucket where the getIamPolicy operation takes place + * @param options extra parameters to apply to this operation + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Policy getIamPolicy(String bucket, BucketSourceOption... options); + + /** + * Updates the IAM policy on the specified bucket. + * + *

To prevent corrupting policies when you update an IAM policy with {@code + * Storage.setIamPolicy}, the ETAG value is used to perform optimistic concurrency. + * + *

Example of updating the IAM policy on a bucket. + * + *

{@code
+   * // We want to make all objects in our bucket publicly readable.
+   * String bucketName = "my-unique-bucket";
+   * Policy currentPolicy = storage.getIamPolicy(bucketName);
+   * Policy updatedPolicy =
+   *     storage.setIamPolicy(
+   *         bucketName,
+   *         currentPolicy.toBuilder()
+   *             .addIdentity(StorageRoles.objectViewer(), Identity.allUsers())
+   *             .build());
+   * }
+ * + * @param bucket name of the bucket where the setIamPolicy operation takes place + * @param policy policy to be set on the specified bucket + * @param options extra parameters to apply to this operation + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Policy setIamPolicy(String bucket, Policy policy, BucketSourceOption... options); + + /** + * Tests whether the caller holds the permissions on the specified bucket. Returns a list of + * booleans in the same placement and order in which the permissions were specified. + * + *

Example of testing permissions on a bucket. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * List response =
+   *     storage.testIamPermissions(
+   *         bucketName,
+   *         ImmutableList.of("storage.buckets.get", "storage.buckets.getIamPolicy"));
+   * for (boolean hasPermission : response) {
+   *   // Do something with permission test response
+   * }
+   * }
+ * + * @param bucket name of the bucket where the testIamPermissions operation takes place + * @param permissions list of permissions to test on the bucket + * @param options extra parameters to apply to this operation + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + List testIamPermissions( + String bucket, List permissions, BucketSourceOption... options); + + /** + * Returns the service account associated with the given project. + * + *

Example of getting a service account. + * + *

{@code
+   * String projectId = "my-project-id";
+   * ServiceAccount account = storage.getServiceAccount(projectId);
+   * }
+ * + * @param projectId the ID of the project for which the service account should be fetched. + * @return the service account associated with this project + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + ServiceAccount getServiceAccount(String projectId); + + /** + * Creates the notification for a given bucket. + * + *

Example of creating a notification: + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String topic = "projects/myProject/topics/myTopic"
+   * NotificationInfo notificationInfo = NotificationInfo.newBuilder(topic)
+   *  .setCustomAttributes(ImmutableMap.of("label1", "value1"))
+   *  .setEventTypes(NotificationInfo.EventType.OBJECT_FINALIZE)
+   *  .setPayloadFormat(NotificationInfo.PayloadFormat.JSON_API_V1)
+   *  .build();
+   * Notification notification = storage.createNotification(bucketName, notificationInfo);
+   * }
+ * + * @param bucket name of the bucket + * @param notificationInfo notification to create + * @return the created notification + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + Notification createNotification(String bucket, NotificationInfo notificationInfo); + + /** + * Gets the notification with the specified id. + * + *

Example of getting the notification: + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String notificationId = "my-unique-notification-id";
+   * Notification notification = storage.getNotification(bucketName, notificationId);
+   * }
+ * + * @param bucket name of the bucket + * @param notificationId notification ID + * @return the {@code Notification} object with the given id or {@code null} if not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + Notification getNotification(String bucket, String notificationId); + + /** + * Retrieves the list of notifications associated with the bucket. + * + *

Example of listing the bucket notifications: + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * List notifications = storage.listNotifications(bucketName);
+   * }
+ * + * @param bucket name of the bucket + * @return a list of {@link Notification} objects added to the bucket. + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + List listNotifications(String bucket); + + /** + * Deletes the notification with the specified id. + * + *

Example of deleting the notification: + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String notificationId = "my-unique-notification-id";
+   * boolean deleted = storage.deleteNotification(bucketName, notificationId);
+   * if (deleted) {
+   *   // the notification was deleted
+   * } else {
+   *   // the notification was not found
+   * }
+   * }
+ * + * @param bucket name of the bucket + * @param notificationId ID of the notification to delete + * @return {@code true} if the notification has been deleted, {@code false} if not found + * @throws StorageException upon failure + */ + @TransportCompatibility({Transport.HTTP}) + boolean deleteNotification(String bucket, String notificationId); + + /** + * @throws InterruptedException thrown if interrupted while awaiting termination of underlying + * resources + */ + @Override + default void close() throws Exception {} + + /** + * Create a new {@link BlobWriteSession} for the specified {@code blobInfo} and {@code options}. + * + *

The returned {@code BlobWriteSession} can be used to write an individual version, a new + * session must be created each time you want to create a new version. + * + *

By default, any MD5 value in the provided {@code blobInfo} is ignored unless the option + * {@link BlobWriteOption#md5Match()} is included in {@code options}. + * + *

By default, any CRC32c value in the provided {@code blobInfo} is ignored unless the option + * {@link BlobWriteOption#crc32cMatch()} is included in {@code options}. + * + *

Example of creating an object using {@code BlobWriteSession}:

+ * + *
{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build();
+   * ReadableByteChannel readableByteChannel = ...;
+   * BlobWriteSession blobWriteSession = storage.blobWriteSession(blobInfo, BlobWriteOption.doesNotExist());
+   *
+   * // open the channel for writing
+   * try (WritableByteChannel writableByteChannel = blobWriteSession.open()) {
+   *   // copy all bytes
+   *   ByteStreams.copy(readableByteChannel, writableByteChannel);
+   * } catch (IOException e) {
+   *   // handle IOException
+   * }
+   *
+   * // get the resulting object metadata
+   * ApiFuture resultFuture = blobWriteSession.getResult();
+   * BlobInfo gen1 = resultFuture.get();
+   * }
+ * + * @param blobInfo blob to create + * @param options blob write options + * @since 2.26.0 This new api is in preview and is subject to breaking changes. + * @see BlobWriteSessionConfig + * @see BlobWriteSessionConfigs + * @see GrpcStorageOptions.Builder#setBlobWriteSessionConfig(BlobWriteSessionConfig) + */ + @BetaApi + @TransportCompatibility({Transport.GRPC, Transport.HTTP}) + default BlobWriteSession blobWriteSession(BlobInfo blobInfo, BlobWriteOption... options) { + return throwGrpcOnly(fmtMethodName("blobWriteSession", BlobInfo.class, BlobWriteOption.class)); + } + + /** + * Atomically move an object from one name to another. + * + *

This new method is an atomic equivalent of the existing {@link Storage#copy(CopyRequest)} + + * {@link Storage#delete(BlobId)}, however without the ability to change metadata fields for the + * target object. + * + * @since 2.48.0 + */ + @TransportCompatibility({Transport.HTTP, Transport.GRPC}) + Blob moveBlob(MoveBlobRequest request); + + /** + * Asynchronously set up a new {@link BlobReadSession} for the specified {@link BlobId} and {@code + * options}. + * + *

The resulting {@code BlobReadSession} can be used to read multiple times from a single + * object generation. A new session must be created for each object generation. + * + *

Example of using {@code BlobReadSession} to read up to 20 bytes from the object:

+ * + *
{@code
+   * ApiFuture futureBlobReadSession = storage.blobReadSession(blobId);
+   *
+   * try (BlobReadSession blobReadSession = futureBlobReadSession.get(10, TimeUnit.SECONDS)) {
+   *
+   *   ByteBuffer buf = ByteBuffer.allocate(30);
+   *   RangeSpec rangeSpec = RangeSpec.of(
+   *     10, // begin
+   *     20  // maxLength
+   *   );
+   *   ReadAsChannel readAsChannelConfig = ReadProjectionConfigs.asChannel()
+   *       .withRangeSpec(rangeSpec);
+   *   try (ScatteringByteChannel channel = blobReadSession.readAs(readAsChannelConfig)) {
+   *     channel.read(buf);
+   *   }
+   *
+   *   buf.flip();
+   *   System.out.printf(
+   *       Locale.US,
+   *       "Read %d bytes from range %s of object %s%n",
+   *       buf.remaining(),
+   *       rangeSpec,
+   *       blobReadSession.getBlobInfo().getBlobId().toGsUtilUriWithGeneration()
+   *   );
+   * }
+   * }
+ * + * @param id the blob to read from + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @TransportCompatibility({Transport.GRPC}) + default ApiFuture blobReadSession(BlobId id, BlobSourceOption... options) { + return throwGrpcOnly(fmtMethodName("blobReadSession", BlobId.class, BlobSourceOption.class)); + } + + /** + * Create a new {@link BlobAppendableUpload} for the specified {@code blobInfo} and {@code + * options}. + * + *

The returned {@code BlobWriteSession} can be used to write an individual version, a new + * session must be created each time you want to create a new version. + * + *

If your object exists, but is still in an appendable state ensure you provide the generation + * of the object in the provided {@code blobInfo} ({@link BlobInfo#getBlobId() + * blobInfo.getBlobId()}{@link BlobId#getGeneration() .getGeneration()}) to enable takeover. + * + *

Example of creating an object using {@code BlobAppendableUpload}:

+ * + *
{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blobInfo-name";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build();
+   * ReadableByteChannel readableByteChannel = ...;
+   *
+   * BlobAppendableUpload uploadSession = storage.blobAppendableUpload(
+   *     blobInfo,
+   *     BlobAppendableUploadConfig.of()
+   * );
+   * try (AppendableUploadWriteableByteChannel channel = uploadSession.open()) {
+   *   // copy all bytes
+   *   ByteStreams.copy(readableByteChannel, channel);
+   *   channel.finalizeAndClose();
+   * } catch (IOException ex) {
+   *   // handle IOException
+   * }
+   *
+   * // get the resulting object metadata
+   * ApiFuture resultFuture = uploadSession.getResult();
+   * BlobInfo gen1 = resultFuture.get();
+   * }
+ * + * @param blobInfo blobInfo to create + * @param uploadConfig the configuration parameters for the channel + * @param options blobInfo write options + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + * @see StorageOptions#grpc() + */ + @BetaApi + @TransportCompatibility({Transport.GRPC}) + default BlobAppendableUpload blobAppendableUpload( + BlobInfo blobInfo, BlobAppendableUploadConfig uploadConfig, BlobWriteOption... options) { + return throwGrpcOnly( + fmtMethodName("appendableBlobUpload", BlobId.class, BlobWriteOption.class)); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageBatch.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageBatch.java new file mode 100644 index 000000000000..9a1f125df534 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageBatch.java @@ -0,0 +1,217 @@ +/* + * Copyright 2016 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; + +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.Storage.BlobGetOption; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.cloud.storage.spi.v1.RpcBatch; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.annotations.VisibleForTesting; +import java.util.Map; + +/** + * A batch of operations to be submitted to Google Cloud Storage using a single RPC request. + * + *

Example of using a batch request to delete, update and get a blob: + * + *

{@code
+ * StorageBatch batch = storage.batch();
+ * BlobId firstBlob = BlobId.of("bucket", "blob1");
+ * BlobId secondBlob = BlobId.of("bucket", "blob2");
+ * batch.delete(firstBlob).notify(new BatchResult.Callback() {
+ *   public void success(Boolean result) {
+ *     // deleted successfully
+ *   }
+ *
+ *   public void error(StorageException exception) {
+ *     // delete failed
+ *   }
+ * });
+ * batch.update(BlobInfo.builder(secondBlob).contentType("text/plain").build());
+ * StorageBatchResult result = batch.get(secondBlob);
+ * batch.submit();
+ * Blob blob = result.get(); // returns get result or throws StorageException
+ * }
+ */ +@TransportCompatibility(Transport.HTTP) +public class StorageBatch { + + private final RpcBatch batch; + private final StorageRpc storageRpc; + private final StorageOptions options; + + StorageBatch(HttpStorageOptions options) { + this.options = options; + this.storageRpc = options.getStorageRpcV1(); + this.batch = storageRpc.createBatch(); + } + + @VisibleForTesting + Object getBatch() { + return batch; + } + + @VisibleForTesting + StorageRpc getStorageRpc() { + return storageRpc; + } + + @VisibleForTesting + StorageOptions getOptions() { + return options; + } + + /** + * Adds a request representing the "delete blob" operation to this batch. Calling {@link + * StorageBatchResult#get()} on the return value yields {@code true} upon successful deletion, + * {@code false} if the blob was not found, or throws a {@link StorageException} if the operation + * failed. + */ + @TransportCompatibility(Transport.HTTP) + public StorageBatchResult delete( + String bucket, String blob, BlobSourceOption... options) { + return delete(BlobId.of(bucket, blob), options); + } + + /** + * Adds a request representing the "delete blob" operation to this batch. Calling {@link + * StorageBatchResult#get()} on the return value yields {@code true} upon successful deletion, + * {@code false} if the blob was not found, or throws a {@link StorageException} if the operation + * failed. + */ + @TransportCompatibility(Transport.HTTP) + public StorageBatchResult delete(BlobId blob, BlobSourceOption... options) { + StorageBatchResult result = new StorageBatchResult<>(); + RpcBatch.Callback callback = createDeleteCallback(result); + Map optionsMap = Opts.unwrap(options).resolveFrom(blob).getRpcOptions(); + batch.addDelete(Conversions.json().blobId().encode(blob), callback, optionsMap); + return result; + } + + /** + * Adds a request representing the "update blob" operation to this batch. 
The {@code options} can + * be used in the same way as for {@link Storage#update(BlobInfo, BlobTargetOption...)}. Calling + * {@link StorageBatchResult#get()} on the return value yields the updated {@link Blob} if + * successful, or throws a {@link StorageException} if the operation failed. + */ + @TransportCompatibility(Transport.HTTP) + public StorageBatchResult update(BlobInfo blobInfo, BlobTargetOption... options) { + StorageBatchResult result = new StorageBatchResult<>(); + RpcBatch.Callback callback = createUpdateCallback(this.options, result); + Map optionMap = + Opts.unwrap(options).resolveFrom(blobInfo).getRpcOptions(); + batch.addPatch(Conversions.json().blobInfo().encode(blobInfo), callback, optionMap); + return result; + } + + /** + * Adds a request representing the "get blob" operation to this batch. The {@code options} can be + * used in the same way as for {@link Storage#get(BlobId, BlobGetOption...)}. Calling {@link + * StorageBatchResult#get()} on the return value yields the requested {@link Blob} if successful, + * {@code null} if no such blob exists, or throws a {@link StorageException} if the operation + * failed. + */ + @TransportCompatibility(Transport.HTTP) + public StorageBatchResult get(String bucket, String blob, BlobGetOption... options) { + return get(BlobId.of(bucket, blob), options); + } + + /** + * Adds a request representing the "get blob" operation to this batch. The {@code options} can be + * used in the same way as for {@link Storage#get(BlobId, BlobGetOption...)}. Calling {@link + * StorageBatchResult#get()} on the return value yields the requested {@link Blob} if successful, + * {@code null} if no such blob exists, or throws a {@link StorageException} if the operation + * failed. + */ + @TransportCompatibility(Transport.HTTP) + public StorageBatchResult get(BlobId blob, BlobGetOption... 
options) { + StorageBatchResult result = new StorageBatchResult<>(); + RpcBatch.Callback callback = createGetCallback(this.options, result); + Map optionsMap = Opts.unwrap(options).resolveFrom(blob).getRpcOptions(); + batch.addGet(Conversions.json().blobId().encode(blob), callback, optionsMap); + return result; + } + + /** Submits this batch for processing using a single RPC request. */ + @TransportCompatibility(Transport.HTTP) + public void submit() { + batch.submit(); + } + + private RpcBatch.Callback createDeleteCallback(final StorageBatchResult result) { + return new RpcBatch.Callback() { + @Override + public void onSuccess(Void response) { + result.success(true); + } + + @Override + public void onFailure(GoogleJsonError googleJsonError) { + StorageException serviceException = new StorageException(googleJsonError); + if (serviceException.getCode() == HTTP_NOT_FOUND) { + result.success(false); + } else { + result.error(serviceException); + } + } + }; + } + + private RpcBatch.Callback createGetCallback( + final StorageOptions serviceOptions, final StorageBatchResult result) { + return new RpcBatch.Callback() { + @Override + public void onSuccess(StorageObject response) { + BlobInfo info = Conversions.json().blobInfo().decode(response); + result.success(response == null ? null : info.asBlob(serviceOptions.getService())); + } + + @Override + public void onFailure(GoogleJsonError googleJsonError) { + StorageException serviceException = new StorageException(googleJsonError); + if (serviceException.getCode() == HTTP_NOT_FOUND) { + result.success(null); + } else { + result.error(serviceException); + } + } + }; + } + + private RpcBatch.Callback createUpdateCallback( + final StorageOptions serviceOptions, final StorageBatchResult result) { + return new RpcBatch.Callback() { + @Override + public void onSuccess(StorageObject response) { + BlobInfo info = Conversions.json().blobInfo().decode(response); + result.success(response == null ? 
null : info.asBlob(serviceOptions.getService())); + } + + @Override + public void onFailure(GoogleJsonError googleJsonError) { + result.error(new StorageException(googleJsonError)); + } + }; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageBatchResult.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageBatchResult.java new file mode 100644 index 000000000000..009b1965b587 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageBatchResult.java @@ -0,0 +1,35 @@ +/* + * Copyright 2016 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.BatchResult; + +/** This class holds a single result of a batch call to Cloud Storage. 
*/ +public class StorageBatchResult extends BatchResult { + + StorageBatchResult() {} + + @Override + protected void error(StorageException error) { + super.error(error); + } + + @Override + protected void success(T result) { + super.success(result); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageByteChannels.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageByteChannels.java new file mode 100644 index 000000000000..2e938e5e882a --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageByteChannels.java @@ -0,0 +1,474 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.BufferedReadableByteChannelSession.BufferedReadableByteChannel; +import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.ScatteringByteChannel; +import java.nio.channels.SeekableByteChannel; +import java.util.concurrent.locks.ReentrantLock; + +final class StorageByteChannels { + + static Readable readable() { + return Readable.INSTANCE; + } + + static Writable writable() { + return Writable.INSTANCE; + } + + public static SeekableByteChannel seekable(SeekableByteChannel delegate) { + return new SynchronizedSeekableByteChannel(delegate); + } + + static final class Readable { + private static final Readable INSTANCE = new Readable(); + + private Readable() {} + + public BufferedReadableByteChannel createSynchronized(BufferedReadableByteChannel delegate) { + return new SynchronizedBufferedReadableByteChannel(delegate); + } + + public UnbufferedReadableByteChannel createSynchronized( + UnbufferedReadableByteChannel delegate) { + return new SynchronizedUnbufferedReadableByteChannel(delegate); + } + + public ScatteringByteChannel asScatteringByteChannel(ReadableByteChannel c) { + return new ScatteringByteChannelFacade(c); + } + } + + static final class Writable { + private static final Writable INSTANCE = new Writable(); + + private Writable() {} + + public BufferedWritableByteChannel createSynchronized(BufferedWritableByteChannel delegate) { + return new SynchronizedBufferedWritableByteChannel(delegate); + } + + public UnbufferedWritableByteChannel createSynchronized( + 
UnbufferedWritableByteChannel delegate) { + return new SynchronizedUnbufferedWritableByteChannel(delegate); + } + } + + private static final class SynchronizedBufferedReadableByteChannel + implements BufferedReadableByteChannel { + + private final BufferedReadableByteChannel delegate; + private final ReentrantLock lock; + + public SynchronizedBufferedReadableByteChannel(BufferedReadableByteChannel delegate) { + this.delegate = delegate; + this.lock = new ReentrantLock(); + } + + @Override + public int read(ByteBuffer dst) throws IOException { + lock.lock(); + try { + return delegate.read(dst); + } finally { + lock.unlock(); + } + } + + @Override + public boolean isOpen() { + lock.lock(); + try { + return delegate.isOpen(); + } finally { + lock.unlock(); + } + } + + @Override + public void close() throws IOException { + lock.lock(); + try { + delegate.close(); + } finally { + lock.unlock(); + } + } + } + + private static final class SynchronizedBufferedWritableByteChannel + implements BufferedWritableByteChannel { + + private final BufferedWritableByteChannel delegate; + private final ReentrantLock lock; + + public SynchronizedBufferedWritableByteChannel(BufferedWritableByteChannel delegate) { + this.delegate = delegate; + this.lock = new ReentrantLock(); + } + + @Override + public int write(ByteBuffer src) throws IOException { + lock.lock(); + try { + return delegate.write(src); + } finally { + lock.unlock(); + } + } + + @Override + public boolean isOpen() { + lock.lock(); + try { + return delegate.isOpen(); + } finally { + lock.unlock(); + } + } + + @Override + public void close() throws IOException { + lock.lock(); + try { + delegate.close(); + } finally { + lock.unlock(); + } + } + + @Override + public void flush() throws IOException { + lock.lock(); + try { + delegate.flush(); + } finally { + lock.unlock(); + } + } + } + + private static final class SynchronizedUnbufferedReadableByteChannel + implements UnbufferedReadableByteChannel { + + private final 
UnbufferedReadableByteChannel delegate; + private final ReentrantLock lock; + + private SynchronizedUnbufferedReadableByteChannel(UnbufferedReadableByteChannel delegate) { + this.delegate = delegate; + this.lock = new ReentrantLock(); + } + + @Override + public int read(ByteBuffer src) throws IOException { + lock.lock(); + try { + return delegate.read(src); + } finally { + lock.unlock(); + } + } + + @Override + public long read(ByteBuffer[] dsts) throws IOException { + lock.lock(); + try { + return delegate.read(dsts); + } finally { + lock.unlock(); + } + } + + @Override + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + lock.lock(); + try { + return delegate.read(dsts, offset, length); + } finally { + lock.unlock(); + } + } + + @Override + public boolean isOpen() { + lock.lock(); + try { + return delegate.isOpen(); + } finally { + lock.unlock(); + } + } + + @Override + public void close() throws IOException { + lock.lock(); + try { + delegate.close(); + } finally { + lock.unlock(); + } + } + } + + private static final class SynchronizedUnbufferedWritableByteChannel + implements UnbufferedWritableByteChannel { + + private final UnbufferedWritableByteChannel delegate; + private final ReentrantLock lock; + + private SynchronizedUnbufferedWritableByteChannel(UnbufferedWritableByteChannel delegate) { + this.delegate = delegate; + this.lock = new ReentrantLock(); + } + + @Override + public int write(ByteBuffer src) throws IOException { + lock.lock(); + try { + return delegate.write(src); + } finally { + lock.unlock(); + } + } + + @Override + public long write(ByteBuffer[] srcs) throws IOException { + lock.lock(); + try { + return delegate.write(srcs); + } finally { + lock.unlock(); + } + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + lock.lock(); + try { + return delegate.write(srcs, offset, length); + } finally { + lock.unlock(); + } + } + + @Override + public int 
writeAndClose(ByteBuffer src) throws IOException { + lock.lock(); + try { + return delegate.writeAndClose(src); + } finally { + lock.unlock(); + } + } + + @Override + public long writeAndClose(ByteBuffer[] srcs) throws IOException { + lock.lock(); + try { + return delegate.writeAndClose(srcs); + } finally { + lock.unlock(); + } + } + + @Override + public long writeAndClose(ByteBuffer[] srcs, int offset, int length) throws IOException { + lock.lock(); + try { + return delegate.writeAndClose(srcs, offset, length); + } finally { + lock.unlock(); + } + } + + @Override + public boolean isOpen() { + lock.lock(); + try { + return delegate.isOpen(); + } finally { + lock.unlock(); + } + } + + @Override + public void close() throws IOException { + lock.lock(); + try { + delegate.close(); + } finally { + lock.unlock(); + } + } + } + + private static final class ScatteringByteChannelFacade implements ScatteringByteChannel { + private final ReadableByteChannel c; + + private ScatteringByteChannelFacade(final ReadableByteChannel c) { + this.c = c; + } + + @Override + public int read(ByteBuffer dst) throws IOException { + return Math.toIntExact(read(new ByteBuffer[] {dst}, 0, 1)); + } + + @Override + public long read(ByteBuffer[] dsts) throws IOException { + return read(dsts, 0, dsts.length); + } + + @Override + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + if (!c.isOpen()) { + throw new ClosedChannelException(); + } + + long totalBytesRead = 0; + for (int i = offset; i < length; i++) { + ByteBuffer dst = dsts[i]; + int goal = dst.remaining(); + if (dst.hasRemaining()) { + int read = c.read(dst); + if (read == -1) { + if (totalBytesRead == 0) { + c.close(); + return -1; + } else { + break; + } + } else if (read != goal) { + // if we weren't able to fill up the current buffer with this last read, return so we + // don't block and wait for another read call. 
+ return totalBytesRead + read; + } + totalBytesRead += read; + } + } + return totalBytesRead; + } + + @Override + public boolean isOpen() { + return c.isOpen(); + } + + @Override + public void close() throws IOException { + c.close(); + } + } + + private static final class SynchronizedSeekableByteChannel implements SeekableByteChannel { + private final SeekableByteChannel delegate; + private final ReentrantLock lock; + + private SynchronizedSeekableByteChannel(SeekableByteChannel delegate) { + this.delegate = delegate; + this.lock = new ReentrantLock(); + } + + @Override + public int read(ByteBuffer dst) throws IOException { + lock.lock(); + try { + return delegate.read(dst); + } finally { + lock.unlock(); + } + } + + @Override + public int write(ByteBuffer src) throws IOException { + lock.lock(); + try { + return delegate.write(src); + } finally { + lock.unlock(); + } + } + + @Override + public long position() throws IOException { + lock.lock(); + try { + return delegate.position(); + } finally { + lock.unlock(); + } + } + + @Override + public SeekableByteChannel position(long newPosition) throws IOException { + lock.lock(); + try { + return delegate.position(newPosition); + } finally { + lock.unlock(); + } + } + + @Override + public long size() throws IOException { + lock.lock(); + try { + return delegate.size(); + } finally { + lock.unlock(); + } + } + + @Override + public SeekableByteChannel truncate(long size) throws IOException { + lock.lock(); + try { + return delegate.truncate(size); + } finally { + lock.unlock(); + } + } + + @Override + public boolean isOpen() { + lock.lock(); + try { + return delegate.isOpen(); + } finally { + lock.unlock(); + } + } + + @Override + public void close() throws IOException { + lock.lock(); + try { + delegate.close(); + } finally { + lock.unlock(); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageChannelUtils.java 
/**
 * Utility methods for working with the non-blocking channels returned by this library.
 *
 * @since 2.56.0
 */
public final class StorageChannelUtils {

  private StorageChannelUtils() {}

  /**
   * Reads from {@code c} into {@code buf} until the buffer is full or end-of-stream is reached,
   * blocking the calling thread as necessary in order to do so.
   *
   * <p>This method never closes {@code c}.
   *
   * @return the number of bytes read, possibly zero, or {@code -1} if the channel was already at
   *     end-of-stream
   * @throws IOException any IOException raised by {@link ReadableByteChannel#read(ByteBuffer)}
   * @since 2.56.0
   */
  public static int blockingFillFrom(ByteBuffer buf, ReadableByteChannel c) throws IOException {
    int filled = 0;
    while (buf.hasRemaining()) {
      int n = c.read(buf);
      if (n == -1) {
        // -1 only when EOF was hit before any bytes transferred; otherwise report the
        // partial count accumulated so far.
        return filled == 0 ? -1 : filled;
      }
      filled += n;
    }
    return filled;
  }

  /**
   * Writes the remaining contents of {@code buf} to {@code c}, blocking the calling thread as
   * necessary until the buffer has been fully drained.
   *
   * <p>This method never closes {@code c}.
   *
   * @return the number of bytes written, possibly zero
   * @throws IOException any IOException raised by {@link WritableByteChannel#write(ByteBuffer)}
   * @since 2.56.0
   */
  public static int blockingEmptyTo(ByteBuffer buf, WritableByteChannel c) throws IOException {
    int drained = 0;
    while (buf.hasRemaining()) {
      // A write may legally transfer zero bytes; keep retrying until the buffer is exhausted.
      drained += c.write(buf);
    }
    return drained;
  }
}
+ */ +public final class StorageClass extends StringEnumValue { + private static final long serialVersionUID = -1077862391496082625L; + + private StorageClass(String constant) { + super(constant); + } + + private static final ApiFunction CONSTRUCTOR = + new ApiFunction() { + @Override + public StorageClass apply(String constant) { + return new StorageClass(constant); + } + }; + + private static final StringEnumType type = + new StringEnumType(StorageClass.class, CONSTRUCTOR); + + /** + * Standard storage class. + * + * @see https://cloud.google.com/storage/docs/storage-classes#standard + */ + public static final StorageClass STANDARD = type.createAndRegister("STANDARD"); + + /** + * Nearline storage class. + * + * @see https://cloud.google.com/storage/docs/storage-classes#nearline + */ + public static final StorageClass NEARLINE = type.createAndRegister("NEARLINE"); + + /** + * Coldline storage class. + * + * @see https://cloud.google.com/storage/docs/storage-classes#coldline + */ + public static final StorageClass COLDLINE = type.createAndRegister("COLDLINE"); + + /** + * Archive storage class. + * + * @see https://cloud.google.com/storage/docs/storage-classes#archive + */ + public static final StorageClass ARCHIVE = type.createAndRegister("ARCHIVE"); + + /** + * Legacy Regional storage class, use {@link #STANDARD} instead. This class will be deprecated in + * the future. + * + * @see https://cloud.google.com/storage/docs/storage-classes#legacy + */ + public static final StorageClass REGIONAL = type.createAndRegister("REGIONAL"); + + /** + * Legacy Multi-regional storage class, use {@link #STANDARD} instead. This class will be + * deprecated in the future. + * + * @see https://cloud.google.com/storage/docs/storage-classes#legacy + */ + public static final StorageClass MULTI_REGIONAL = type.createAndRegister("MULTI_REGIONAL"); + + /** + * Legacy Durable Reduced Availability storage class, use {@link #STANDARD} instead. 
This class + * will be deprecated in the future. + * + * @see https://cloud.google.com/storage/docs/storage-classes#legacy + */ + public static final StorageClass DURABLE_REDUCED_AVAILABILITY = + type.createAndRegister("DURABLE_REDUCED_AVAILABILITY"); + + /** + * Get the StorageClass for the given String constant, and throw an exception if the constant is + * not recognized. + */ + public static StorageClass valueOfStrict(String constant) { + return type.valueOfStrict(constant); + } + + /** Get the StorageClass for the given String constant, and allow unrecognized values. */ + public static StorageClass valueOf(String constant) { + return type.valueOf(constant); + } + + /** Return the known values for StorageClass. */ + public static StorageClass[] values() { + return type.values(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageDataClient.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageDataClient.java new file mode 100644 index 000000000000..43fd503a3679 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageDataClient.java @@ -0,0 +1,199 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.storage;

import static com.google.common.base.Preconditions.checkArgument;

import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.api.core.InternalApi;
import com.google.api.gax.grpc.GrpcCallContext;
import com.google.cloud.storage.GrpcUtils.ZeroCopyBidiStreamingCallable;
import com.google.cloud.storage.ObjectReadSessionState.OpenArguments;
import com.google.cloud.storage.ReadProjectionConfig.ProjectionType;
import com.google.cloud.storage.RetryContext.RetryContextProvider;
import com.google.storage.v2.BidiReadObjectRequest;
import com.google.storage.v2.BidiReadObjectResponse;
import java.io.IOException;
import java.time.Duration;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// NOTE(review): generic type arguments appear to have been stripped from this view (e.g.
// ZeroCopyBidiStreamingCallable<BidiReadObjectRequest, BidiReadObjectResponse>, the projection
// parameter on FastOpenObjectReadSession). The code below reproduces the text as seen --
// confirm against the original source.
/**
 * Internal data-plane client that owns the bidi read-object callable, the executor used for stream
 * callbacks, and the retry-context factory used to create per-stream/per-read retry state.
 */
@InternalApi
final class StorageDataClient implements AutoCloseable {

  final ScheduledExecutorService executor;
  // Upper bound close() waits for executor termination after shutdownNow().
  private final Duration terminationAwaitDuration;
  private final ZeroCopyBidiStreamingCallable bidiReadObject;
  final RetryContextProvider retryContextProvider;
  // Additional resource released when this client closes; supplied by the creator.
  private final IOAutoCloseable onClose;

  private StorageDataClient(
      ScheduledExecutorService executor,
      Duration terminationAwaitDuration,
      ZeroCopyBidiStreamingCallable bidiReadObject,
      RetryContextProvider retryContextProvider,
      IOAutoCloseable onClose) {
    this.executor = executor;
    this.terminationAwaitDuration = terminationAwaitDuration;
    this.bidiReadObject = bidiReadObject;
    this.retryContextProvider = retryContextProvider;
    this.onClose = onClose;
  }

  /**
   * Opens a new read session for {@code req}.
   *
   * <p>The initial request must not carry read ranges. The returned future resolves to an
   * {@code ObjectReadSessionImpl} once the underlying bidi stream reports open.
   */
  ApiFuture readSession(BidiReadObjectRequest req, GrpcCallContext ctx) {
    checkArgument(
        req.getReadRangesList().isEmpty(),
        "ranges included in the initial request are not supported");
    ObjectReadSessionState state = new ObjectReadSessionState(ctx, req);

    ZeroCopyBidiStreamingCallable callable = getCallable();
    ObjectReadSessionStream stream =
        ObjectReadSessionStream.create(executor, callable, state, retryContextProvider.create());

    ApiFuture objectReadSessionFuture =
        ApiFutures.transform(
            stream,
            nowOpen ->
                new ObjectReadSessionImpl(executor, callable, stream, state, retryContextProvider),
            executor);
    // NOTE(review): the open request is sent only after the transform is registered; ordering
    // appears intentional -- confirm before reordering.
    stream.send(req);
    return objectReadSessionFuture;
  }

  /**
   * Opens a session and registers a single read projection up front so its range rides along with
   * the open request ("fast open"). Only STREAM_READ projection configs are supported here.
   */
  ApiFuture> fastOpenReadSession(
      BidiReadObjectRequest openRequest,
      GrpcCallContext ctx,
      ReadProjectionConfig config) {
    checkArgument(
        openRequest.getReadRangesList().isEmpty(),
        "ranges included in the initial request are not supported");
    checkArgument(
        config.getType() == ProjectionType.STREAM_READ,
        "unsupported ReadProjectionConfig: %s",
        config.getClass().getName());
    ObjectReadSessionState state = new ObjectReadSessionState(ctx, openRequest);

    ZeroCopyBidiStreamingCallable callable = getCallable();
    ObjectReadSessionStream stream =
        ObjectReadSessionStream.create(executor, callable, state, retryContextProvider.create());

    // Register the read before opening so the session state includes it in the open arguments.
    long readId = state.newReadId();
    ObjectReadSessionStreamRead read =
        config.cast().newRead(readId, retryContextProvider.create());
    state.putOutstandingRead(readId, read);

    ApiFuture> objectReadSessionFuture =
        ApiFutures.transform(
            stream,
            nowOpen ->
                new FastOpenObjectReadSession<>(
                    new ObjectReadSessionImpl(
                        executor, callable, stream, state, retryContextProvider),
                    read,
                    stream),
            executor);
    OpenArguments openArguments = state.getOpenArguments();
    BidiReadObjectRequest req = openArguments.getReq();
    stream.send(req);
    read.setOnCloseCallback(stream);
    return objectReadSessionFuture;
  }

  @SuppressWarnings("ResultOfMethodCallIgnored")
  @Override
  public void close() throws Exception {
    try (IOAutoCloseable ignore = onClose) {
      // today, we own the executor service. If StorageDataClient is ever standalone, this code will
      // need to be re-evaluated. Especially if a customer is able to provide the executor.
      executor.shutdownNow();
      executor.awaitTermination(terminationAwaitDuration.toNanos(), TimeUnit.NANOSECONDS);
    }
  }

  // Binds a fresh default call context so per-invocation retry state is not shared.
  private ZeroCopyBidiStreamingCallable
      getCallable() {
    return bidiReadObject.withDefaultCallContext(Retrying.newCallContext());
  }

  /** Factory; all collaborators are injected so tests can substitute them. */
  static StorageDataClient create(
      ScheduledExecutorService executor,
      Duration terminationAwaitDuration,
      ZeroCopyBidiStreamingCallable read,
      RetryContextProvider retryContextProvider,
      IOAutoCloseable onClose) {
    return new StorageDataClient(
        executor, terminationAwaitDuration, read, retryContextProvider, onClose);
  }

  /** Callback used to take an additional lease ("borrow") on the underlying stream. */
  @FunctionalInterface
  interface Borrowable {
    void borrow();
  }

  /**
   * Pairs a freshly opened session with its pre-registered read. The session is only leased to the
   * caller (via {@link Borrowable#borrow()}) the first time {@link #getSession()} is invoked.
   */
  static final class FastOpenObjectReadSession implements IOAutoCloseable {
    private final ObjectReadSession session;
    private final ObjectReadSessionStreamRead read;
    private final Borrowable borrowable;
    // Whether getSession() has already borrowed the stream. Not synchronized --
    // NOTE(review): confirm callers guarantee single-threaded access.
    private boolean sessionLeased;

    private FastOpenObjectReadSession(
        ObjectReadSession session,
        ObjectReadSessionStreamRead read,
        Borrowable borrowable) {
      this.session = session;
      this.read = read;
      this.borrowable = borrowable;
      this.sessionLeased = false;
    }

    // First call takes a lease on the underlying stream; subsequent calls are no-ops.
    ObjectReadSession getSession() {
      if (!sessionLeased) {
        sessionLeased = true;
        borrowable.borrow();
      }
      return session;
    }

    ObjectReadSessionStreamRead getRead() {
      return read;
    }

    Projection getProjection() {
      return read.project();
    }

    @Override
    public void close() throws IOException {
      //noinspection EmptyTryBlock
      try (IOAutoCloseable ignore1 = session;
          IOAutoCloseable ignore2 = read) {
        // use try-with to ensure full cleanup
      }
    }

    public static FastOpenObjectReadSession of(
        ObjectReadSession session,
        ObjectReadSessionStreamRead read,
        Borrowable borrowable) {
      return new FastOpenObjectReadSession<>(session, read, borrowable);
    }
  }
}
b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageException.java new file mode 100644 index 000000000000..6bd3bfc60068 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageException.java @@ -0,0 +1,292 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.InternalApi; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptions; +import com.google.api.gax.rpc.ErrorDetails; +import com.google.api.gax.rpc.StatusCode; +import com.google.cloud.BaseServiceException; +import com.google.cloud.RetryHelper.RetryHelperException; +import com.google.cloud.http.BaseHttpServiceException; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.TextFormat; +import io.grpc.StatusException; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Stream; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * Storage service exception. 
+ * + * @see Google Cloud + * Storage error codes + */ +@InternalApi +public final class StorageException extends BaseHttpServiceException { + private static final String INTERNAL_ERROR = "internalError"; + private static final String CONNECTION_CLOSED_PREMATURELY = "connectionClosedPrematurely"; + + // see: https://cloud.google.com/storage/docs/resumable-uploads-xml#practices + static final Set RETRYABLE_ERRORS = + ImmutableSet.of( + new Error(504, null), + new Error(503, null), + new Error(502, null), + new Error(500, null), + new Error(429, null), + new Error(408, null), + new Error(null, INTERNAL_ERROR), + new Error(null, CONNECTION_CLOSED_PREMATURELY)); + + private static final long serialVersionUID = 757915549325467990L; + + final ApiException apiExceptionCause; + + public StorageException(int code, String message) { + this(code, message, null); + } + + public StorageException(int code, String message, Throwable cause) { + super(code, message, null, true, RETRYABLE_ERRORS, cause); + this.apiExceptionCause = asApiExceptionOrNull(cause); + } + + public StorageException(int code, String message, String reason, Throwable cause) { + super(code, message, reason, true, RETRYABLE_ERRORS, cause); + this.apiExceptionCause = asApiExceptionOrNull(cause); + } + + public StorageException(IOException exception) { + super(exception, true, RETRYABLE_ERRORS); + this.apiExceptionCause = null; + } + + public StorageException(GoogleJsonError error) { + super(error, true, RETRYABLE_ERRORS); + this.apiExceptionCause = null; + } + + /** + * Translate RetryHelperException to the StorageException that caused the error. This method will + * always throw an exception. 
+ * + * @throws StorageException when {@code ex} was caused by a {@code StorageException} + */ + public static StorageException translateAndThrow(RetryHelperException ex) { + BaseServiceException.translate(ex); + throw getStorageException(ex); + } + + private static StorageException getStorageException(Throwable t) { + // unwrap a RetryHelperException if that is what is being translated + if (t instanceof RetryHelperException) { + Throwable cause = t.getCause(); + return new StorageException(UNKNOWN_CODE, cause != null ? cause.getMessage() : "", cause); + } + return new StorageException(UNKNOWN_CODE, t.getMessage(), t); + } + + /** + * Attempt to find an Exception which is a {@link BaseServiceException} If neither {@code t} or + * {@code t.getCause()} are a {@code BaseServiceException} a {@link StorageException} will be + * created with an unknown status code. + */ + static BaseServiceException coalesce(Throwable t) { + if (t instanceof BaseServiceException) { + return (BaseServiceException) t; + } + if (t.getCause() instanceof BaseServiceException) { + return (BaseServiceException) t.getCause(); + } + if (t instanceof ApiException) { + return asStorageException((ApiException) t); + } + if (t.getCause() instanceof ApiException) { + return asStorageException((ApiException) t.getCause()); + } + return getStorageException(t); + } + + static ApiFuture coalesceAsync(ApiFuture originalFuture) { + return ApiFutures.catchingAsync( + originalFuture, + Throwable.class, + throwable -> ApiFutures.immediateFailedFuture(coalesce(throwable)), + MoreExecutors.directExecutor()); + } + + static StorageException asStorageException(ApiException apiEx) { + // https://cloud.google.com/storage/docs/json_api/v1/status-codes + // https://cloud.google.com/apis/design/errors#http_mapping + int httpStatusCode = 0; + StatusCode statusCode = apiEx.getStatusCode(); + if (statusCode instanceof GrpcStatusCode) { + GrpcStatusCode gsc = (GrpcStatusCode) statusCode; + httpStatusCode = + 
GrpcToHttpStatusCodeTranslation.grpcCodeToHttpStatusCode(gsc.getTransportCode()); + } + // If there is a gRPC exception in our cause change pull it's error message up to be our + // message otherwise, create a generic error message with the status code. + String message = null; + if (apiEx.getCause() != null) { + Throwable cause = apiEx.getCause(); + if (cause instanceof StatusRuntimeException || cause instanceof StatusException) { + message = cause.getMessage(); + } + // if not a grpc exception fall through to the default handling + } + if (message == null && apiEx.getMessage() != null) { + message = apiEx.getMessage(); + } + if (message == null) { + String statusCodeName = statusCode.getCode().name(); + message = "Error: " + statusCodeName; + } + + // https://cloud.google.com/apis/design/errors#error_payloads + attachErrorDetails(apiEx); + + // It'd be better to use ExceptionData and BaseServiceException#(ExceptionData) but, + // BaseHttpServiceException does not pass that through so we're stuck using this for now. 
+ // TODO: When we can break the coupling to BaseHttpServiceException replace this + return new StorageException(httpStatusCode, message, apiEx.getReason(), apiEx); + } + + private static void attachErrorDetails(ApiException ae) { + if (ae != null && ae.getErrorDetails() != null && !errorDetailsAttached(ae)) { + StringBuilder sb = new StringBuilder(); + ErrorDetails ed = ae.getErrorDetails(); + sb.append("ErrorDetails {\n"); + Stream.of( + ed.getErrorInfo(), + ed.getDebugInfo(), + ed.getQuotaFailure(), + ed.getPreconditionFailure(), + ed.getBadRequest(), + ed.getHelp()) + .filter(Objects::nonNull) + .forEach( + msg -> + sb.append("\t\t") + .append(msg.getClass().getSimpleName()) + .append(": { ") + .append(TextFormat.printer().shortDebugString(msg)) + .append(" }\n")); + sb.append("\t}"); + + ae.addSuppressed(new ApiExceptionErrorDetailsComment(sb.toString())); + } + } + + private static boolean errorDetailsAttached(ApiException ae) { + Throwable[] suppressed = ae.getSuppressed(); + for (Throwable throwable : suppressed) { + if (throwable instanceof ApiExceptionErrorDetailsComment) { + return true; + } + } + return false; + } + + /** + * Translate IOException to a StorageException representing the cause of the error. This method + * defaults to idempotent always being {@code true}. Additionally, this method translates + * transient issues Connection Closed Prematurely as a retryable error. 
+ * + * @return {@code StorageException} + */ + public static StorageException translate(IOException exception) { + String message = exception.getMessage(); + if (message != null + && (message.contains("Connection closed prematurely") + || message.contains("Premature EOF"))) { + return new StorageException(0, message, CONNECTION_CLOSED_PREMATURELY, exception); + } else { + // default + return new StorageException(exception); + } + } + + static T wrapIOException(IOExceptionCallable c) { + try { + return c.call(); + } catch (IOException e) { + throw StorageException.coalesce(e); + } + } + + static void wrapIOException(IOExceptionRunnable r) { + try { + r.run(); + } catch (IOException e) { + throw StorageException.coalesce(e); + } + } + + static T wrapFutureGet(ApiFuture f) { + try { + return ApiExceptions.callAndTranslateApiException(f); + } catch (Exception e) { + throw StorageException.coalesce(e); + } + } + + @Nullable + private static ApiException asApiExceptionOrNull(Throwable cause) { + if (cause instanceof ApiException) { + return (ApiException) cause; + } else { + return null; + } + } + + @FunctionalInterface + interface IOExceptionCallable { + T call() throws IOException; + } + + @FunctionalInterface + interface IOExceptionRunnable { + void run() throws IOException; + } + + static Runnable liftToRunnable(IOExceptionRunnable ioer) { + return () -> { + try { + ioer.run(); + } catch (IOException e) { + throw StorageException.coalesce(e); + } + }; + } + + private static final class ApiExceptionErrorDetailsComment extends Throwable { + private ApiExceptionErrorDetailsComment(String message) { + super(message, null, true, false); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageFactory.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageFactory.java new file mode 100644 index 000000000000..7bc5414ec2f1 --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageFactory.java @@ -0,0 +1,22 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.ServiceFactory; + +/** An interface for Storage factories. */ +public interface StorageFactory extends ServiceFactory {} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageImpl.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageImpl.java new file mode 100644 index 000000000000..ebc4cbe5d736 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageImpl.java @@ -0,0 +1,1830 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.SignedUrlEncodingHelper.Rfc3986UriEncode; +import static com.google.common.base.MoreObjects.firstNonNull; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkState; +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.concurrent.Executors.callable; + +import com.google.api.client.util.Data; +import com.google.api.core.ApiFuture; +import com.google.api.gax.paging.Page; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.services.storage.model.BucketAccessControl; +import com.google.api.services.storage.model.ObjectAccessControl; +import com.google.api.services.storage.model.StorageObject; +import com.google.auth.ServiceAccountSigner; +import com.google.cloud.BaseService; +import com.google.cloud.BatchResult; +import com.google.cloud.PageImpl; +import com.google.cloud.PageImpl.NextPageFetcher; +import com.google.cloud.Policy; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.BlobReadChannelV2.BlobReadChannelContext; +import com.google.cloud.storage.BlobWriteSessionConfig.WriterFactory; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.HmacKey.HmacKeyMetadata; +import com.google.cloud.storage.PostPolicyV4.ConditionV4Type; +import com.google.cloud.storage.PostPolicyV4.PostConditionsV4; +import com.google.cloud.storage.PostPolicyV4.PostFieldsV4; +import com.google.cloud.storage.PostPolicyV4.PostPolicyV4Document; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.cloud.storage.UnifiedOpts.NamedField; +import com.google.cloud.storage.UnifiedOpts.NestedNamedField; +import com.google.cloud.storage.UnifiedOpts.ObjectSourceOpt; +import 
com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.cloud.storage.spi.v1.StorageRpc.RewriteRequest; +import com.google.common.base.CharMatcher; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.io.BaseEncoding; +import com.google.common.io.CountingOutputStream; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.UnsupportedEncodingException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.net.URLEncoder; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.text.SimpleDateFormat; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumMap; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TimeZone; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Supplier; +import java.util.stream.Stream; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class StorageImpl extends BaseService implements Storage, StorageInternal { + + private static final byte[] EMPTY_BYTE_ARRAY = {}; + private static final String EMPTY_BYTE_ARRAY_MD5 = "1B2M2Y8AsgTpgAmY7PhCfg=="; + private static final String 
  private static final String PATH_DELIMITER = "/";

  // Scheme/host for XML-API style URIs; resolved once in the constructor from the configured
  // apiary host.
  private final String STORAGE_XML_URI_SCHEME;
  private final String STORAGE_XML_URI_HOST_NAME;

  // 15 MiB default chunk for createFrom streaming uploads; 256 KiB is the minimum.
  private static final int DEFAULT_BUFFER_SIZE = 15 * 1024 * 1024;
  private static final int MIN_BUFFER_SIZE = 256 * 1024;

  private static final JsonConversions codecs = Conversions.json();

  final HttpRetryAlgorithmManager retryAlgorithmManager;
  final StorageRpc storageRpc;
  final WriterFactory writerFactory;
  final Retrier retrier;

  // NOTE(review): several generic type arguments (e.g. on Map/Opts/ResultRetryAlgorithm below)
  // appear stripped in this view; text reproduced as seen -- confirm against the original source.
  /**
   * HTTP/JSON-backed Storage implementation. The XML URI scheme/host are resolved eagerly so
   * signing code does not re-parse the apiary host per call.
   */
  StorageImpl(HttpStorageOptions options, WriterFactory writerFactory, Retrier retrier) {
    super(options);
    this.retryAlgorithmManager = options.getRetryAlgorithmManager();
    this.storageRpc = options.getStorageRpcV1();
    this.writerFactory = writerFactory;
    try {
      String resolvedApiaryHost = options.getResolvedApiaryHost("storage");
      URI uri = new URI(resolvedApiaryHost);
      STORAGE_XML_URI_HOST_NAME = uri.getHost();
      // Default to https when the configured host omits a scheme.
      STORAGE_XML_URI_SCHEME = firstNonNull(uri.getScheme(), "https");
    } catch (URISyntaxException e) {
      throw StorageException.coalesce(e);
    }
    this.retrier = retrier;
  }

  /**
   * Creates a bucket: encodes it, resolves RPC options, and runs the create under the retry
   * algorithm configured for bucket creation.
   */
  @Override
  public Bucket create(BucketInfo bucketInfo, BucketTargetOption... options) {
    final com.google.api.services.storage.model.Bucket bucketPb =
        codecs.bucketInfo().encode(bucketInfo);
    final Map optionsMap =
        Opts.unwrap(options).resolveFrom(bucketInfo).getRpcOptions();
    ResultRetryAlgorithm algorithm =
        retryAlgorithmManager.getForBucketsCreate(bucketPb, optionsMap);
    return run(
        algorithm,
        () -> storageRpc.create(bucketPb, optionsMap),
        (b) -> Conversions.json().bucketInfo().decode(b).asBucket(this));
  }

  /** Creates an empty (zero-byte) object via a direct upload. */
  @Override
  public Blob create(BlobInfo blobInfo, BlobTargetOption... options) {
    Opts opts = Opts.unwrap(options).resolveFrom(blobInfo);
    return internalDirectUpload(blobInfo, opts, Buffers.allocate(0)).asBlob(this);
  }

  /** Creates an object from the full byte array; a null {@code content} means empty. */
  @Override
  public Blob create(BlobInfo blobInfo, byte[] content, BlobTargetOption... options) {
    content = firstNonNull(content, EMPTY_BYTE_ARRAY);
    Opts opts = Opts.unwrap(options).resolveFrom(blobInfo);
    return internalDirectUpload(blobInfo, opts, ByteBuffer.wrap(content)).asBlob(this);
  }

  /** Creates an object from the {@code [offset, offset + length)} slice of {@code content}. */
  @Override
  public Blob create(
      BlobInfo blobInfo, byte[] content, int offset, int length, BlobTargetOption... options) {
    Opts opts = Opts.unwrap(options).resolveFrom(blobInfo);
    return internalDirectUpload(blobInfo, opts, ByteBuffer.wrap(content, offset, length))
        .asBlob(this);
  }

  /**
   * Creates an object by consuming {@code content}. md5/crc32c on the provided BlobInfo are
   * cleared before upload.
   */
  @Override
  @Deprecated
  public Blob create(BlobInfo blobInfo, InputStream content, BlobWriteOption... options) {
    Opts opts = Opts.unwrap(options).resolveFrom(blobInfo);
    Map optionsMap = opts.getRpcOptions();
    BlobInfo.Builder builder = blobInfo.toBuilder().setMd5(null).setCrc32c(null);
    BlobInfo updated = opts.blobInfoMapper().apply(builder).build();
    StorageObject blobPb = codecs.blobInfo().encode(updated);
    InputStream inputStreamParam =
        firstNonNull(content, new ByteArrayInputStream(EMPTY_BYTE_ARRAY));
    // retries are not safe when the input is an InputStream, so we can't retry.
    BlobInfo info =
        Conversions.json()
            .blobInfo()
            .decode(storageRpc.create(blobPb, inputStreamParam, optionsMap));
    return info.asBlob(this);
  }

  /** Uploads the file at {@code path} using the default buffer size. */
  @Override
  public Blob createFrom(BlobInfo blobInfo, Path path, BlobWriteOption... options)
      throws IOException {
    return createFrom(blobInfo, path, DEFAULT_BUFFER_SIZE, options);
  }

  /**
   * Uploads the file at {@code path} via a single resumable session.
   *
   * <p>NOTE(review): {@code bufferSize} is not referenced in this body -- the whole file is sent
   * as one resumable PUT via RewindableContent. Confirm whether that is intentional.
   *
   * @throws StorageException if {@code path} is a directory
   */
  @Override
  public Blob createFrom(BlobInfo blobInfo, Path path, int bufferSize, BlobWriteOption... options)
      throws IOException {
    if (Files.isDirectory(path)) {
      throw new StorageException(0, path + " is a directory");
    }
    long size = Files.size(path);
    if (size == 0L) {
      // Empty file: fall back to a simple empty-object create.
      return create(blobInfo, null, options);
    }
    Opts opts = Opts.unwrap(options).resolveFrom(blobInfo);
    final Map optionsMap = opts.getRpcOptions();
    // md5/crc32c are cleared before upload; presumably recomputed via opts.getHasher() --
    // NOTE(review): confirm.
    BlobInfo.Builder builder = blobInfo.toBuilder().setMd5(null).setCrc32c(null);
    BlobInfo updated = opts.blobInfoMapper().apply(builder).build();
    StorageObject encode = codecs.blobInfo().encode(updated);

    Supplier uploadIdSupplier =
        ResumableMedia.startUploadForBlobInfo(
            getOptions(),
            updated,
            optionsMap,
            retrier.withAlg(retryAlgorithmManager.getForResumableUploadSessionCreate(optionsMap)));
    JsonResumableWrite jsonResumableWrite =
        JsonResumableWrite.of(
            encode,
            optionsMap,
            uploadIdSupplier.get(),
            0,
            opts.getHasher(),
            opts.getHasher().initialValue());

    JsonResumableSession session =
        ResumableSession.json(
            HttpClientContext.from(storageRpc),
            retrier.withAlg(retryAlgorithmManager.idempotent()),
            jsonResumableWrite);
    HttpContentRange contentRange = HttpContentRange.of(ByteRangeSpec.explicit(0L, size), size);
    ResumableOperationResult put =
        session.put(RewindableContent.of(path), contentRange);
    // all exception translation is taken care of down in the JsonResumableSession
    StorageObject object = put.getObject();
    if (object == null) {
      // if by some odd chance the put didn't get the StorageObject, query for it
      ResumableOperationResult<@Nullable StorageObject> query = session.query();
      object = query.getObject();
    }
    return codecs.blobInfo().decode(object).asBlob(this);
  }

  /** Uploads from {@code content} using the default buffer size. */
  @Override
  public Blob createFrom(BlobInfo blobInfo, InputStream content, BlobWriteOption... options)
      throws IOException {
    return createFrom(blobInfo, content, DEFAULT_BUFFER_SIZE, options);
  }
options) + throws IOException { + + ApiFuture objectFuture; + try (StorageWriteChannel writer = writer(blobInfo, options)) { + objectFuture = writer.getObject(); + uploadHelper(Channels.newChannel(content), writer, bufferSize); + } + // keep these two try blocks separate for the time being + // leaving the above will cause the writer to close writing and finalizing the session and + // (hopefully, on successful finalization) resolve our future + try { + BlobInfo info = objectFuture.get(10, TimeUnit.SECONDS); + return info.asBlob(this); + } catch (ExecutionException | InterruptedException | TimeoutException e) { + throw StorageException.coalesce(e); + } + } + + /* + * Uploads the given content to the storage using specified write channel and the given buffer + * size. This method does not close any channels. + */ + private static void uploadHelper(ReadableByteChannel reader, WriteChannel writer, int bufferSize) + throws IOException { + bufferSize = Math.max(bufferSize, MIN_BUFFER_SIZE); + ByteBuffer buffer = ByteBuffer.allocate(bufferSize); + writer.setChunkSize(bufferSize); + + while (reader.read(buffer) >= 0) { + buffer.flip(); + writer.write(buffer); + buffer.clear(); + } + } + + @Override + public Bucket get(String bucket, BucketGetOption... options) { + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + return internalBucketGet(bucket, optionsMap); + } + + @Override + public Blob get(String bucket, String blob, BlobGetOption... options) { + return get(BlobId.of(bucket, blob), options); + } + + @Override + public Blob get(BlobId blob, BlobGetOption... options) { + ImmutableMap optionsMap = + Opts.unwrap(options).resolveFrom(blob).getRpcOptions(); + return internalGetBlob(blob, optionsMap); + } + + @Override + public Blob get(BlobId blob) { + return get(blob, new BlobGetOption[0]); + } + + @Override + public Blob restore(BlobId blob, BlobRestoreOption... 
options) { + ImmutableMap optionsMap = + Opts.unwrap(options).resolveFrom(blob).getRpcOptions(); + + StorageObject obj = codecs.blobId().encode(blob); + + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForObjectsRestore(obj, optionsMap); + + return run( + algorithm, + () -> storageRpc.restore(obj, optionsMap), + (x) -> { + BlobInfo info = Conversions.json().blobInfo().decode(x); + return info.asBlob(this); + }); + } + + private static class BucketPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 8534413447247364038L; + private final Map requestOptions; + private final HttpStorageOptions serviceOptions; + private final Retrier retrier; + + BucketPageFetcher( + HttpStorageOptions serviceOptions, + String cursor, + Map optionMap, + Retrier retrier) { + this.requestOptions = + PageImpl.nextRequestOptions(StorageRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + this.retrier = retrier; + } + + @Override + public Page getNextPage() { + return listBuckets(serviceOptions, requestOptions, retrier); + } + } + + private static class BlobPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -4308415167471093443L; + private final Map requestOptions; + private final HttpStorageOptions serviceOptions; + private final String bucket; + private final Retrier retrier; + + BlobPageFetcher( + String bucket, + HttpStorageOptions serviceOptions, + String cursor, + Map optionMap, + Retrier retrier) { + this.retrier = retrier; + this.requestOptions = + PageImpl.nextRequestOptions(StorageRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + this.bucket = bucket; + } + + @Override + public Page getNextPage() { + return listBlobs(bucket, serviceOptions, requestOptions, retrier); + } + } + + private static class HmacKeyMetadataPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -8637392485924772927L; + 
private final HttpStorageOptions serviceOptions; + private final HttpRetryAlgorithmManager retryAlgorithmManager; + private final Map options; + private final Retrier retrier; + + HmacKeyMetadataPageFetcher( + HttpStorageOptions serviceOptions, + HttpRetryAlgorithmManager retryAlgorithmManager, + Map options, + Retrier retrier) { + this.serviceOptions = serviceOptions; + this.retryAlgorithmManager = retryAlgorithmManager; + this.options = options; + this.retrier = retrier; + } + + @Override + public Page getNextPage() { + return listHmacKeys(serviceOptions, retryAlgorithmManager, options, retrier); + } + } + + @Override + public Page list(BucketListOption... options) { + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + return listBuckets(getOptions(), optionsMap, retrier); + } + + @Override + public Page list(final String bucket, BlobListOption... options) { + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + return listBlobs(bucket, getOptions(), optionsMap, retrier); + } + + private static Page listBuckets( + final HttpStorageOptions serviceOptions, + final Map optionsMap, + Retrier retrier) { + ResultRetryAlgorithm algorithm = + serviceOptions.getRetryAlgorithmManager().getForBucketsList(optionsMap); + return retrier.run( + algorithm, + () -> serviceOptions.getStorageRpcV1().list(optionsMap), + (result) -> { + String cursor = result.x(); + Iterable buckets = + result.y() == null + ? 
ImmutableList.of() + : Iterables.transform( + result.y(), + bucketPb -> + Conversions.json() + .bucketInfo() + .decode(bucketPb) + .asBucket(serviceOptions.getService())); + return new PageImpl<>( + new BucketPageFetcher(serviceOptions, cursor, optionsMap, retrier), cursor, buckets); + }); + } + + private static Page listBlobs( + final String bucket, + final HttpStorageOptions serviceOptions, + final Map optionsMap, + Retrier retrier) { + ResultRetryAlgorithm algorithm = + serviceOptions.getRetryAlgorithmManager().getForObjectsList(bucket, optionsMap); + return retrier.run( + algorithm, + () -> serviceOptions.getStorageRpcV1().list(bucket, optionsMap), + (result) -> { + String cursor = result.x(); + Iterable blobs = + result.y() == null + ? ImmutableList.of() + : Iterables.transform( + result.y(), + storageObject -> { + BlobInfo info = Conversions.json().blobInfo().decode(storageObject); + return info.asBlob(serviceOptions.getService()); + }); + return new PageImpl<>( + new BlobPageFetcher(bucket, serviceOptions, cursor, optionsMap, retrier), + cursor, + blobs); + }); + } + + @Override + public Bucket update(BucketInfo bucketInfo, BucketTargetOption... 
options) { + Map optionsMap = + Opts.unwrap(options).resolveFrom(bucketInfo).getRpcOptions(); + ImmutableSet modifiedFields = bucketInfo.getModifiedFields(); + if (modifiedFields.isEmpty()) { + return internalBucketGet(bucketInfo.getName(), optionsMap); + } else { + com.google.api.services.storage.model.Bucket tmp = codecs.bucketInfo().encode(bucketInfo); + com.google.api.services.storage.model.Bucket bucketPb = + new com.google.api.services.storage.model.Bucket(); + Stream.concat(modifiedFields.stream(), BucketField.REQUIRED_FIELDS.stream()) + .map( + f -> { + if (f instanceof NestedNamedField) { + return ((NestedNamedField) f).getParent(); + } else { + return f; + } + }) + .forEach( + field -> { + String jsonName = field.getApiaryName(); + if (tmp.containsKey(jsonName)) { + bucketPb.put(jsonName, tmp.get(jsonName)); + } else { + BucketField lookup = BucketField.lookup(field); + if (lookup != null) { + bucketPb.put(jsonName, Data.nullOf(lookup.getJsonClass())); + } + } + }); + + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForBucketsUpdate(bucketPb, optionsMap); + return run( + algorithm, + () -> storageRpc.patch(bucketPb, optionsMap), + (x) -> Conversions.json().bucketInfo().decode(x).asBucket(this)); + } + } + + @Override + public Blob update(BlobInfo blobInfo, BlobTargetOption... options) { + Opts opts = Opts.unwrap(options).resolveFrom(blobInfo); + Map optionsMap = opts.getRpcOptions(); + ImmutableSet modifiedFields = blobInfo.getModifiedFields(); + boolean unmodifiedBeforeOpts = modifiedFields.isEmpty(); + BlobInfo.Builder builder = blobInfo.toBuilder(); + + // This is a workaround until everything is in prod for both json and grpc. + // We need to make sure that the retention field is only included in the + // request if it was modified, so that we don't send a null object in a + // grpc or json request. 
+ // todo: b/308194853 + if (modifiedFields.contains(BlobField.RETENTION)) { + builder.setRetention(blobInfo.getRetention()); + } + BlobInfo updated = opts.blobInfoMapper().apply(builder).build(); + boolean unmodifiedAfterOpts = updated.getModifiedFields().isEmpty(); + if (unmodifiedBeforeOpts && unmodifiedAfterOpts) { + return internalGetBlob(blobInfo.getBlobId(), optionsMap); + } else { + StorageObject tmp = codecs.blobInfo().encode(updated); + StorageObject pb = new StorageObject(); + ImmutableSet fields = + Stream.of( + modifiedFields.stream(), + BlobField.REQUIRED_FIELDS.stream(), + Stream.of(BlobField.GENERATION)) + .flatMap(s -> s) + .collect(ImmutableSet.toImmutableSet()); + + Map> fieldsByRoot = new HashMap<>(); + { + for (NamedField f : fields) { + Set fieldSet = + fieldsByRoot.computeIfAbsent(NamedField.root(f), v -> new HashSet<>()); + fieldSet.add(f); + } + } + + fieldsByRoot.forEach( + (topLevelField, subFields) -> { + // only do the deep diffing for select fields, most fields simply use their top level + // name and don't have to worry about nesting. + // The following ifs are the same shape, but, they can not be collapsed. The iteration + // is per top-level field, and if you attempt to do the other at the same time you will + // potentially override its values. 
+ if (topLevelField == BlobField.OBJECT_CONTEXTS) { + // our field names are from the root of the storage object, create a temporary + // instance that only contains the contexts + StorageObject storageObject = new StorageObject(); + storageObject.setContexts(tmp.getContexts()); + StorageObject outputJson = + JsonUtils.getOutputJsonWithSelectedFields(storageObject, subFields); + pb.setContexts(outputJson.getContexts()); + } else if (topLevelField == BlobField.METADATA) { + // our field names are from the root of the storage object, create a temporary + // instance that only contains the metadata + StorageObject storageObject = new StorageObject(); + storageObject.setMetadata(tmp.getMetadata()); + StorageObject outputJson = + JsonUtils.getOutputJsonWithSelectedFields(storageObject, subFields); + pb.setMetadata(outputJson.getMetadata()); + } else { + checkState(subFields.size() <= 1, "unexpected nested field(s) %s", subFields); + String jsonName = topLevelField.getApiaryName(); + if (tmp.containsKey(jsonName)) { + pb.put(jsonName, tmp.get(jsonName)); + } else { + BlobField lookup = BlobField.lookup(topLevelField); + if (lookup != null) { + pb.put(jsonName, Data.nullOf(lookup.getJsonClass())); + } + } + } + }); + + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForObjectsUpdate(pb, optionsMap); + return run( + algorithm, + () -> storageRpc.patch(pb, optionsMap), + (x) -> { + BlobInfo info = Conversions.json().blobInfo().decode(x); + return info.asBlob(this); + }); + } + } + + @Override + public Blob update(BlobInfo blobInfo) { + return update(blobInfo, new BlobTargetOption[0]); + } + + @Override + public boolean delete(String bucket, BucketSourceOption... 
options) { + final com.google.api.services.storage.model.Bucket bucketPb = + codecs.bucketInfo().encode(BucketInfo.of(bucket)); + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForBucketsDelete(bucketPb, optionsMap); + return run(algorithm, () -> storageRpc.delete(bucketPb, optionsMap), Decoder.identity()); + } + + @Override + public boolean delete(String bucket, String blob, BlobSourceOption... options) { + return delete(BlobId.of(bucket, blob), options); + } + + @Override + public boolean delete(BlobId blob, BlobSourceOption... options) { + final StorageObject storageObject = codecs.blobId().encode(blob); + ImmutableMap optionsMap = + Opts.unwrap(options).resolveFrom(blob).getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForObjectsDelete(storageObject, optionsMap); + return run(algorithm, () -> storageRpc.delete(storageObject, optionsMap), Decoder.identity()); + } + + @Override + public boolean delete(BlobId blob) { + return delete(blob, new BlobSourceOption[0]); + } + + @Override + public Blob compose(final ComposeRequest composeRequest) { + final List sources = + Lists.newArrayListWithCapacity(composeRequest.getSourceBlobs().size()); + BlobInfo target = composeRequest.getTarget(); + for (ComposeRequest.SourceBlob sourceBlob : composeRequest.getSourceBlobs()) { + sources.add( + codecs + .blobInfo() + .encode( + BlobInfo.newBuilder( + BlobId.of( + target.getBucket(), sourceBlob.getName(), sourceBlob.getGeneration())) + .build())); + } + Opts targetOpts = composeRequest.getTargetOpts(); + StorageObject targetPb = codecs.blobInfo().encode(composeRequest.getTarget()); + Map targetOptions = targetOpts.getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForObjectsCompose(sources, targetPb, targetOptions); + return run( + algorithm, + () -> storageRpc.compose(sources, targetPb, targetOptions), + (x) -> { + BlobInfo info = 
Conversions.json().blobInfo().decode(x); + return info.asBlob(this); + }); + } + + @Override + public CopyWriter copy(final CopyRequest copyRequest) { + BlobId source = copyRequest.getSource(); + BlobInfo target = copyRequest.getTarget(); + Opts sourceOpts = + Opts.unwrap(copyRequest.getSourceOptions()).resolveFrom(source).projectAsSource(); + Opts targetOpts = + Opts.unwrap(copyRequest.getTargetOptions()).resolveFrom(target); + + StorageObject sourcePb = codecs.blobId().encode(source); + StorageObject targetPb = codecs.blobInfo().encode(target); + ImmutableMap sourceOptions = sourceOpts.getRpcOptions(); + ImmutableMap targetOptions = targetOpts.getRpcOptions(); + RewriteRequest rewriteRequest = + new RewriteRequest( + sourcePb, + sourceOptions, + copyRequest.overrideInfo(), + targetPb, + targetOptions, + copyRequest.getMegabytesCopiedPerChunk()); + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForObjectsRewrite(rewriteRequest); + return run( + algorithm, + () -> storageRpc.openRewrite(rewriteRequest), + (r) -> new HttpCopyWriter(getOptions(), r, retrier)); + } + + @Override + public byte[] readAllBytes(String bucket, String blob, BlobSourceOption... options) { + return readAllBytes(BlobId.of(bucket, blob), options); + } + + @Override + public byte[] readAllBytes(BlobId blob, BlobSourceOption... options) { + final StorageObject storageObject = codecs.blobId().encode(blob); + Opts unwrap = Opts.unwrap(options); + Opts resolve = unwrap.resolveFrom(blob); + ImmutableMap optionsMap = resolve.getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForObjectsGet(storageObject, optionsMap); + return run(algorithm, () -> storageRpc.load(storageObject, optionsMap), Decoder.identity()); + } + + @Override + public StorageBatch batch() { + return new StorageBatch(this.getOptions()); + } + + @Override + public StorageReadChannel reader(String bucket, String blob, BlobSourceOption... 
options) { + return reader(BlobId.of(bucket, blob), options); + } + + @Override + public StorageReadChannel reader(BlobId blob, BlobSourceOption... options) { + Opts opts = Opts.unwrap(options).resolveFrom(blob); + StorageObject storageObject = Conversions.json().blobId().encode(blob); + ImmutableMap optionsMap = opts.getRpcOptions(); + return new BlobReadChannelV2(storageObject, optionsMap, BlobReadChannelContext.from(this)); + } + + @Override + public void downloadTo(BlobId blob, Path path, BlobSourceOption... options) { + try (OutputStream outputStream = Files.newOutputStream(path)) { + downloadTo(blob, outputStream, options); + } catch (IOException e) { + throw new StorageException(e); + } + } + + @Override + public void downloadTo(BlobId blob, OutputStream outputStream, BlobSourceOption... options) { + final CountingOutputStream countingOutputStream = new CountingOutputStream(outputStream); + final StorageObject pb = codecs.blobId().encode(blob); + ImmutableMap optionsMap = + Opts.unwrap(options).resolveFrom(blob).getRpcOptions(); + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForObjectsGet(pb, optionsMap); + run( + algorithm, + callable( + () -> { + storageRpc.read( + pb, optionsMap, countingOutputStream.getCount(), countingOutputStream); + }), + Decoder.identity()); + } + + @Override + public StorageWriteChannel writer(BlobInfo blobInfo, BlobWriteOption... 
options) { + Opts opts = Opts.unwrap(options).resolveFrom(blobInfo); + final Map optionsMap = opts.getRpcOptions(); + BlobInfo.Builder builder = blobInfo.toBuilder().setMd5(null).setCrc32c(null); + BlobInfo updated = opts.blobInfoMapper().apply(builder).build(); + + StorageObject encode = codecs.blobInfo().encode(updated); + // open the resumable session outside the write channel + // the exception behavior of open is different from #write(ByteBuffer) + Supplier uploadIdSupplier = + ResumableMedia.startUploadForBlobInfo( + getOptions(), + updated, + optionsMap, + retrier.withAlg(retryAlgorithmManager.getForResumableUploadSessionCreate(optionsMap))); + JsonResumableWrite jsonResumableWrite = + JsonResumableWrite.of( + encode, + optionsMap, + uploadIdSupplier.get(), + 0, + opts.getHasher(), + opts.getHasher().initialValue()); + return new BlobWriteChannelV2(BlobReadChannelContext.from(this), jsonResumableWrite); + } + + @Override + public StorageWriteChannel writer(URL signedURL) { + // TODO: is it possible to know if a signed url is configured to have a constraint which makes + // it idempotent? + ResultRetryAlgorithm forResumableUploadSessionCreate = + retryAlgorithmManager.getForResumableUploadSessionCreate(Collections.emptyMap()); + // open the resumable session outside the write channel + // the exception behavior of open is different from #write(ByteBuffer) + String signedUrlString = signedURL.toString(); + Supplier uploadIdSupplier = + ResumableMedia.startUploadForSignedUrl( + getOptions(), signedURL, retrier.withAlg(forResumableUploadSessionCreate)); + JsonResumableWrite jsonResumableWrite = + JsonResumableWrite.of(signedUrlString, uploadIdSupplier.get(), 0); + return new BlobWriteChannelV2(BlobReadChannelContext.from(this), jsonResumableWrite); + } + + @Override + public URL signUrl(BlobInfo blobInfo, long duration, TimeUnit unit, SignUrlOption... 
options) { + EnumMap optionMap = Maps.newEnumMap(SignUrlOption.Option.class); + for (SignUrlOption option : options) { + optionMap.put(option.getOption(), option.getValue()); + } + + boolean isV2 = + getPreferredSignatureVersion(optionMap).equals(SignUrlOption.SignatureVersion.V2); + boolean isV4 = + getPreferredSignatureVersion(optionMap).equals(SignUrlOption.SignatureVersion.V4); + + ServiceAccountSigner credentials = + (ServiceAccountSigner) optionMap.get(SignUrlOption.Option.SERVICE_ACCOUNT_CRED); + if (credentials == null) { + checkState( + this.getOptions().getCredentials() instanceof ServiceAccountSigner, + "Signing key was not provided and could not be derived"); + credentials = (ServiceAccountSigner) this.getOptions().getCredentials(); + } + + long expiration = + isV4 + ? TimeUnit.SECONDS.convert(unit.toMillis(duration), TimeUnit.MILLISECONDS) + : TimeUnit.SECONDS.convert( + getOptions().getClock().millisTime() + unit.toMillis(duration), + TimeUnit.MILLISECONDS); + + checkArgument( + !(optionMap.containsKey(SignUrlOption.Option.VIRTUAL_HOSTED_STYLE) + && optionMap.containsKey(SignUrlOption.Option.PATH_STYLE) + && optionMap.containsKey(SignUrlOption.Option.BUCKET_BOUND_HOST_NAME)), + "Only one of VIRTUAL_HOSTED_STYLE, PATH_STYLE, or BUCKET_BOUND_HOST_NAME SignUrlOptions can" + + " be specified."); + + String bucketName = slashlessBucketNameFromBlobInfo(blobInfo); + String escapedBlobName = ""; + if (!Strings.isNullOrEmpty(blobInfo.getName())) { + escapedBlobName = Rfc3986UriEncode(blobInfo.getName(), false); + } + + boolean usePathStyle = shouldUsePathStyleForSignedUrl(optionMap); + + String storageXmlHostName = + usePathStyle + ? STORAGE_XML_URI_SCHEME + "://" + getBaseStorageHostName(optionMap) + : STORAGE_XML_URI_SCHEME + "://" + bucketName + "." 
+ getBaseStorageHostName(optionMap); + + if (optionMap.containsKey(SignUrlOption.Option.BUCKET_BOUND_HOST_NAME)) { + storageXmlHostName = (String) optionMap.get(SignUrlOption.Option.BUCKET_BOUND_HOST_NAME); + } + + String stPath = + usePathStyle + ? constructResourceUriPath(bucketName, escapedBlobName, optionMap) + : constructResourceUriPath("", escapedBlobName, optionMap); + + URI path = URI.create(stPath); + // For V2 signing, even if we don't specify the bucket in the URI path, we still need the + // canonical resource string that we'll sign to include the bucket. + URI pathForSigning = + isV2 ? URI.create(constructResourceUriPath(bucketName, escapedBlobName, optionMap)) : path; + + try { + SignatureInfo signatureInfo = + buildSignatureInfo( + optionMap, blobInfo, expiration, pathForSigning, credentials.getAccount()); + String unsignedPayload = signatureInfo.constructUnsignedPayload(); + byte[] signatureBytes = credentials.sign(unsignedPayload.getBytes(UTF_8)); + StringBuilder stBuilder = new StringBuilder(); + stBuilder.append(storageXmlHostName).append(path); + + if (isV4) { + BaseEncoding encoding = BaseEncoding.base16().lowerCase(); + String signature = URLEncoder.encode(encoding.encode(signatureBytes), UTF_8.name()); + String v4QueryString = signatureInfo.constructV4QueryString(); + + stBuilder.append('?'); + if (!Strings.isNullOrEmpty(v4QueryString)) { + stBuilder.append(v4QueryString).append('&'); + } + stBuilder.append("X-Goog-Signature=").append(signature); + } else { + BaseEncoding encoding = BaseEncoding.base64(); + String signature = URLEncoder.encode(encoding.encode(signatureBytes), UTF_8.name()); + String v2QueryString = signatureInfo.constructV2QueryString(); + + stBuilder.append('?'); + if (!Strings.isNullOrEmpty(v2QueryString)) { + stBuilder.append(v2QueryString).append('&'); + } + stBuilder.append("GoogleAccessId=").append(credentials.getAccount()); + stBuilder.append("&Expires=").append(expiration); + 
stBuilder.append("&Signature=").append(signature); + } + + return new URL(stBuilder.toString()); + + } catch (MalformedURLException | UnsupportedEncodingException ex) { + throw new IllegalStateException(ex); + } + } + + @Override + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostFieldsV4 fields, + PostConditionsV4 conditions, + PostPolicyV4Option... options) { + EnumMap optionMap = Maps.newEnumMap(SignUrlOption.Option.class); + // Convert to a map of SignUrlOptions so we can re-use some utility methods + for (PostPolicyV4Option option : options) { + optionMap.put(SignUrlOption.Option.valueOf(option.getOption().name()), option.getValue()); + } + + optionMap.put(SignUrlOption.Option.SIGNATURE_VERSION, SignUrlOption.SignatureVersion.V4); + + ServiceAccountSigner credentials = + (ServiceAccountSigner) optionMap.get(SignUrlOption.Option.SERVICE_ACCOUNT_CRED); + if (credentials == null) { + checkState( + this.getOptions().getCredentials() instanceof ServiceAccountSigner, + "Signing key was not provided and could not be derived"); + credentials = (ServiceAccountSigner) this.getOptions().getCredentials(); + } + + checkArgument( + !(optionMap.containsKey(SignUrlOption.Option.VIRTUAL_HOSTED_STYLE) + && optionMap.containsKey(SignUrlOption.Option.PATH_STYLE) + && optionMap.containsKey(SignUrlOption.Option.BUCKET_BOUND_HOST_NAME)), + "Only one of VIRTUAL_HOSTED_STYLE, PATH_STYLE, or BUCKET_BOUND_HOST_NAME SignUrlOptions can" + + " be specified."); + + String bucketName = slashlessBucketNameFromBlobInfo(blobInfo); + + boolean usePathStyle = shouldUsePathStyleForSignedUrl(optionMap); + + String url; + + if (usePathStyle) { + url = STORAGE_XML_URI_SCHEME + "://" + STORAGE_XML_URI_HOST_NAME + "/" + bucketName + "/"; + } else { + url = STORAGE_XML_URI_SCHEME + "://" + bucketName + "." 
+ STORAGE_XML_URI_HOST_NAME + "/"; + } + + if (optionMap.containsKey(SignUrlOption.Option.BUCKET_BOUND_HOST_NAME)) { + url = optionMap.get(SignUrlOption.Option.BUCKET_BOUND_HOST_NAME) + "/"; + } + + SimpleDateFormat googDateFormat = new SimpleDateFormat("yyyyMMdd'T'HHmmss'Z'"); + SimpleDateFormat yearMonthDayFormat = new SimpleDateFormat("yyyyMMdd"); + SimpleDateFormat expirationFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); + googDateFormat.setTimeZone(TimeZone.getTimeZone("UTC")); + yearMonthDayFormat.setTimeZone(TimeZone.getTimeZone("UTC")); + expirationFormat.setTimeZone(TimeZone.getTimeZone("UTC")); + + long timestamp = getOptions().getClock().millisTime(); + String date = googDateFormat.format(timestamp); + String signingCredential = + credentials.getAccount() + + "/" + + yearMonthDayFormat.format(timestamp) + + "/auto/storage/goog4_request"; + + Map policyFields = new HashMap<>(); + + PostConditionsV4.Builder conditionsBuilder = conditions.toBuilder(); + + for (Map.Entry entry : fields.getFieldsMap().entrySet()) { + // Every field needs a corresponding policy condition, so add them if they're missing + conditionsBuilder.addCustomCondition( + ConditionV4Type.MATCHES, entry.getKey(), entry.getValue()); + + policyFields.put(entry.getKey(), entry.getValue()); + } + + PostConditionsV4 v4Conditions = + conditionsBuilder + .addBucketCondition(ConditionV4Type.MATCHES, blobInfo.getBucket()) + .addKeyCondition(ConditionV4Type.MATCHES, blobInfo.getName()) + .addCustomCondition(ConditionV4Type.MATCHES, "x-goog-date", date) + .addCustomCondition(ConditionV4Type.MATCHES, "x-goog-credential", signingCredential) + .addCustomCondition(ConditionV4Type.MATCHES, "x-goog-algorithm", "GOOG4-RSA-SHA256") + .build(); + PostPolicyV4Document document = + PostPolicyV4Document.of( + expirationFormat.format(timestamp + unit.toMillis(duration)), v4Conditions); + String policy = BaseEncoding.base64().encode(document.toJson().getBytes()); + String signature = + 
BaseEncoding.base16().encode(credentials.sign(policy.getBytes())).toLowerCase(); + + for (PostPolicyV4.ConditionV4 condition : v4Conditions.getConditions()) { + if (condition.type == ConditionV4Type.MATCHES) { + policyFields.put(condition.operand1, condition.operand2); + } + } + policyFields.put("key", blobInfo.getName()); + policyFields.put("x-goog-credential", signingCredential); + policyFields.put("x-goog-algorithm", "GOOG4-RSA-SHA256"); + policyFields.put("x-goog-date", date); + policyFields.put("x-goog-signature", signature); + policyFields.put("policy", policy); + + policyFields.remove("bucket"); + + return PostPolicyV4.of(url, policyFields); + } + + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostFieldsV4 fields, + PostPolicyV4Option... options) { + return generateSignedPostPolicyV4( + blobInfo, duration, unit, fields, PostConditionsV4.newBuilder().build(), options); + } + + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostConditionsV4 conditions, + PostPolicyV4Option... options) { + return generateSignedPostPolicyV4( + blobInfo, duration, unit, PostFieldsV4.newBuilder().build(), conditions, options); + } + + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, long duration, TimeUnit unit, PostPolicyV4Option... 
options) { + return generateSignedPostPolicyV4( + blobInfo, duration, unit, PostFieldsV4.newBuilder().build(), options); + } + + private String constructResourceUriPath( + String slashlessBucketName, + String escapedBlobName, + EnumMap optionMap) { + if (Strings.isNullOrEmpty(slashlessBucketName)) { + if (Strings.isNullOrEmpty(escapedBlobName)) { + return PATH_DELIMITER; + } + if (escapedBlobName.startsWith(PATH_DELIMITER)) { + return escapedBlobName; + } + return PATH_DELIMITER + escapedBlobName; + } + + StringBuilder pathBuilder = new StringBuilder(); + pathBuilder.append(PATH_DELIMITER).append(slashlessBucketName); + if (Strings.isNullOrEmpty(escapedBlobName)) { + boolean isV2 = + getPreferredSignatureVersion(optionMap).equals(SignUrlOption.SignatureVersion.V2); + // If using virtual-hosted style URLs with V2 signing, the path string for a bucket resource + // must end with a forward slash. + if (optionMap.containsKey(SignUrlOption.Option.VIRTUAL_HOSTED_STYLE) && isV2) { + pathBuilder.append(PATH_DELIMITER); + } + return pathBuilder.toString(); + } + pathBuilder.append(PATH_DELIMITER); + pathBuilder.append(escapedBlobName); + return pathBuilder.toString(); + } + + private SignUrlOption.SignatureVersion getPreferredSignatureVersion( + EnumMap optionMap) { + // Check for an explicitly specified version in the map. + for (SignUrlOption.SignatureVersion version : SignUrlOption.SignatureVersion.values()) { + if (version.equals(optionMap.get(SignUrlOption.Option.SIGNATURE_VERSION))) { + return version; + } + } + // TODO(#6362): V2 is the default, and thus can be specified either explicitly or implicitly + // Change this to V4 once we make it the default. + return SignUrlOption.SignatureVersion.V2; + } + + private boolean shouldUsePathStyleForSignedUrl(EnumMap optionMap) { + // TODO(#6362): If we decide to change the default style used to generate URLs, switch this + // logic to return false unless PATH_STYLE was explicitly specified. 
+ if (optionMap.containsKey(SignUrlOption.Option.VIRTUAL_HOSTED_STYLE) + || optionMap.containsKey(SignUrlOption.Option.BUCKET_BOUND_HOST_NAME)) { + return false; + } + return true; + } + + /** + * Builds signature info. + * + * @param optionMap the option map + * @param blobInfo the blob info + * @param expiration the expiration in seconds + * @param path the resource URI + * @param accountEmail the account email + * @return signature info + */ + private SignatureInfo buildSignatureInfo( + Map optionMap, + BlobInfo blobInfo, + long expiration, + URI path, + String accountEmail) { + + HttpMethod httpVerb = + optionMap.containsKey(SignUrlOption.Option.HTTP_METHOD) + ? (HttpMethod) optionMap.get(SignUrlOption.Option.HTTP_METHOD) + : HttpMethod.GET; + + SignatureInfo.Builder signatureInfoBuilder = + new SignatureInfo.Builder(httpVerb, expiration, path); + + if (firstNonNull((Boolean) optionMap.get(SignUrlOption.Option.MD5), false)) { + checkArgument(blobInfo.getMd5() != null, "Blob is missing a value for md5"); + signatureInfoBuilder.setContentMd5(blobInfo.getMd5()); + } + + if (firstNonNull((Boolean) optionMap.get(SignUrlOption.Option.CONTENT_TYPE), false)) { + checkArgument(blobInfo.getContentType() != null, "Blob is missing a value for content-type"); + signatureInfoBuilder.setContentType(blobInfo.getContentType()); + } + + signatureInfoBuilder.setSignatureVersion( + (SignUrlOption.SignatureVersion) optionMap.get(SignUrlOption.Option.SIGNATURE_VERSION)); + + signatureInfoBuilder.setAccountEmail(accountEmail); + + signatureInfoBuilder.setTimestamp(getOptions().getClock().millisTime()); + + ImmutableMap.Builder extHeadersBuilder = new ImmutableMap.Builder<>(); + + boolean isV4 = + SignUrlOption.SignatureVersion.V4.equals( + optionMap.get(SignUrlOption.Option.SIGNATURE_VERSION)); + if (isV4) { // We don't sign the host header for V2 signed URLs; only do this for V4. + // Add the host here first, allowing it to be overridden in the EXT_HEADERS option below. 
+ if (optionMap.containsKey(SignUrlOption.Option.VIRTUAL_HOSTED_STYLE)) { + extHeadersBuilder.put( + "host", + slashlessBucketNameFromBlobInfo(blobInfo) + "." + getBaseStorageHostName(optionMap)); + } else if (optionMap.containsKey(SignUrlOption.Option.HOST_NAME) + || optionMap.containsKey(SignUrlOption.Option.BUCKET_BOUND_HOST_NAME) + || getOptions().getUniverseDomain() != null) { + extHeadersBuilder.put("host", getBaseStorageHostName(optionMap)); + } + } + + if (optionMap.containsKey(SignUrlOption.Option.EXT_HEADERS)) { + extHeadersBuilder.putAll( + (Map) optionMap.get(SignUrlOption.Option.EXT_HEADERS)); + } + + ImmutableMap.Builder queryParamsBuilder = new ImmutableMap.Builder<>(); + if (optionMap.containsKey(SignUrlOption.Option.QUERY_PARAMS)) { + queryParamsBuilder.putAll( + (Map) optionMap.get(SignUrlOption.Option.QUERY_PARAMS)); + } + + return signatureInfoBuilder + .setCanonicalizedExtensionHeaders(extHeadersBuilder.build()) + .setCanonicalizedQueryParams(queryParamsBuilder.build()) + .build(); + } + + private String slashlessBucketNameFromBlobInfo(BlobInfo blobInfo) { + // The bucket name itself should never contain a forward slash. However, parts already existed + // in the code to check for this, so we remove the forward slashes to be safe here. + return CharMatcher.anyOf(PATH_DELIMITER).trimFrom(blobInfo.getBucket()); + } + + /** Returns the hostname used to send requests to Cloud Storage, e.g. "storage.googleapis.com". 
*/ + private String getBaseStorageHostName(Map optionMap) { + String specifiedBaseHostName = (String) optionMap.get(SignUrlOption.Option.HOST_NAME); + String bucketBoundHostName = + (String) optionMap.get(SignUrlOption.Option.BUCKET_BOUND_HOST_NAME); + if (!Strings.isNullOrEmpty(specifiedBaseHostName)) { + return specifiedBaseHostName.replaceFirst("http(s)?://", ""); + } + if (!Strings.isNullOrEmpty(bucketBoundHostName)) { + return bucketBoundHostName.replaceFirst("http(s)?://", ""); + } + return STORAGE_XML_URI_HOST_NAME; + } + + @Override + public List get(BlobId... blobIds) { + return get(Arrays.asList(blobIds)); + } + + @Override + public List get(Iterable blobIds) { + StorageBatch batch = batch(); + final List results = Lists.newArrayList(); + for (BlobId blob : blobIds) { + batch + .get(blob) + .notify( + new BatchResult.Callback() { + @Override + public void success(Blob result) { + results.add(result); + } + + @Override + public void error(StorageException exception) { + results.add(null); + } + }); + } + batch.submit(); + return Collections.unmodifiableList(results); + } + + @Override + public List update(BlobInfo... blobInfos) { + return update(Arrays.asList(blobInfos)); + } + + @Override + public List update(Iterable blobInfos) { + StorageBatch batch = batch(); + final List results = Lists.newArrayList(); + for (BlobInfo blobInfo : blobInfos) { + batch + .update(blobInfo) + .notify( + new BatchResult.Callback() { + @Override + public void success(Blob result) { + results.add(result); + } + + @Override + public void error(StorageException exception) { + results.add(null); + } + }); + } + batch.submit(); + return Collections.unmodifiableList(results); + } + + @Override + public List delete(BlobId... 
blobIds) { + return delete(Arrays.asList(blobIds)); + } + + @Override + public List delete(Iterable blobIds) { + StorageBatch batch = batch(); + final List results = Lists.newArrayList(); + for (BlobId blob : blobIds) { + batch + .delete(blob) + .notify( + new BatchResult.Callback() { + @Override + public void success(Boolean result) { + results.add(result); + } + + @Override + public void error(StorageException exception) { + results.add(Boolean.FALSE); + } + }); + } + batch.submit(); + return Collections.unmodifiableList(results); + } + + @Override + public Acl getAcl(final String bucket, final Entity entity, BucketSourceOption... options) { + String pb = codecs.entity().encode(entity); + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForBucketAclGet(pb, optionsMap); + return run(algorithm, () -> storageRpc.getAcl(bucket, pb, optionsMap), codecs.bucketAcl()); + } + + @Override + public Acl getAcl(final String bucket, final Entity entity) { + return getAcl(bucket, entity, new BucketSourceOption[0]); + } + + @Override + public boolean deleteAcl( + final String bucket, final Entity entity, BucketSourceOption... options) { + final String pb = codecs.entity().encode(entity); + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForBucketAclDelete(pb, optionsMap); + return run(algorithm, () -> storageRpc.deleteAcl(bucket, pb, optionsMap), Decoder.identity()); + } + + @Override + public boolean deleteAcl(final String bucket, final Entity entity) { + return deleteAcl(bucket, entity, new BucketSourceOption[0]); + } + + @Override + public Acl createAcl(String bucket, Acl acl, BucketSourceOption... 
options) { + final BucketAccessControl aclPb = codecs.bucketAcl().encode(acl).setBucket(bucket); + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForBucketAclCreate(aclPb, optionsMap); + return run(algorithm, () -> storageRpc.createAcl(aclPb, optionsMap), codecs.bucketAcl()); + } + + @Override + public Acl createAcl(String bucket, Acl acl) { + return createAcl(bucket, acl, new BucketSourceOption[0]); + } + + @Override + public Acl updateAcl(String bucket, Acl acl, BucketSourceOption... options) { + final BucketAccessControl aclPb = codecs.bucketAcl().encode(acl).setBucket(bucket); + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForBucketAclUpdate(aclPb, optionsMap); + return run(algorithm, () -> storageRpc.patchAcl(aclPb, optionsMap), codecs.bucketAcl()); + } + + @Override + public Acl updateAcl(String bucket, Acl acl) { + return updateAcl(bucket, acl, new BucketSourceOption[0]); + } + + @Override + public List listAcls(final String bucket, BucketSourceOption... 
options) { + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForBucketAclList(bucket, optionsMap); + return run( + algorithm, + () -> storageRpc.listAcls(bucket, optionsMap), + (answer) -> + answer.stream() + .map( + (com.google.common.base.Function) + codecs.bucketAcl()::decode) + .collect(ImmutableList.toImmutableList())); + } + + @Override + public List listAcls(final String bucket) { + return listAcls(bucket, new BucketSourceOption[0]); + } + + @Override + public Acl getDefaultAcl(final String bucket, final Entity entity) { + String pb = codecs.entity().encode(entity); + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForDefaultObjectAclGet(pb); + return run(algorithm, () -> storageRpc.getDefaultAcl(bucket, pb), codecs.objectAcl()); + } + + @Override + public boolean deleteDefaultAcl(final String bucket, final Entity entity) { + String pb = codecs.entity().encode(entity); + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForDefaultObjectAclDelete(pb); + return run(algorithm, () -> storageRpc.deleteDefaultAcl(bucket, pb), Decoder.identity()); + } + + @Override + public Acl createDefaultAcl(String bucket, Acl acl) { + final ObjectAccessControl aclPb = codecs.objectAcl().encode(acl).setBucket(bucket); + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForDefaultObjectAclCreate(aclPb); + return run(algorithm, () -> storageRpc.createDefaultAcl(aclPb), codecs.objectAcl()); + } + + @Override + public Acl updateDefaultAcl(String bucket, Acl acl) { + final ObjectAccessControl aclPb = codecs.objectAcl().encode(acl).setBucket(bucket); + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForDefaultObjectAclUpdate(aclPb); + return run(algorithm, () -> storageRpc.patchDefaultAcl(aclPb), codecs.objectAcl()); + } + + @Override + public List listDefaultAcls(final String bucket) { + ResultRetryAlgorithm algorithm = 
retryAlgorithmManager.getForDefaultObjectAclList(bucket); + return run( + algorithm, + () -> storageRpc.listDefaultAcls(bucket), + (answer) -> + answer.stream() + .map( + (com.google.common.base.Function) + codecs.objectAcl()::decode) + .collect(ImmutableList.toImmutableList())); + } + + @Override + public Acl getAcl(final BlobId blob, final Entity entity) { + String bucket = blob.getBucket(); + String name = blob.getName(); + Long generation = blob.getGeneration(); + String pb = codecs.entity().encode(entity); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForObjectAclGet(bucket, name, generation, pb); + return run( + algorithm, () -> storageRpc.getAcl(bucket, name, generation, pb), codecs.objectAcl()); + } + + @Override + public boolean deleteAcl(final BlobId blob, final Entity entity) { + String bucket = blob.getBucket(); + String name = blob.getName(); + Long generation = blob.getGeneration(); + String pb = codecs.entity().encode(entity); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForObjectAclDelete(bucket, name, generation, pb); + return run( + algorithm, () -> storageRpc.deleteAcl(bucket, name, generation, pb), Decoder.identity()); + } + + @Override + public Acl createAcl(final BlobId blob, final Acl acl) { + final ObjectAccessControl aclPb = + codecs + .objectAcl() + .encode(acl) + .setBucket(blob.getBucket()) + .setObject(blob.getName()) + .setGeneration(blob.getGeneration()); + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForObjectAclCreate(aclPb); + return run(algorithm, () -> storageRpc.createAcl(aclPb), codecs.objectAcl()); + } + + @Override + public Acl updateAcl(BlobId blob, Acl acl) { + final ObjectAccessControl aclPb = + codecs + .objectAcl() + .encode(acl) + .setBucket(blob.getBucket()) + .setObject(blob.getName()) + .setGeneration(blob.getGeneration()); + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForObjectAclUpdate(aclPb); + return run(algorithm, () -> storageRpc.patchAcl(aclPb), 
codecs.objectAcl()); + } + + @Override + public List listAcls(final BlobId blob) { + String bucket = blob.getBucket(); + String name = blob.getName(); + Long generation = blob.getGeneration(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForObjectAclList(bucket, name, generation); + return run( + algorithm, + () -> storageRpc.listAcls(bucket, name, generation), + (answer) -> + answer.stream() + .map( + (com.google.common.base.Function) + codecs.objectAcl()::decode) + .collect(ImmutableList.toImmutableList())); + } + + @Override + public HmacKey createHmacKey( + final ServiceAccount serviceAccount, final CreateHmacKeyOption... options) { + String pb = serviceAccount.getEmail(); + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForHmacKeyCreate(pb, optionsMap); + return run(algorithm, () -> storageRpc.createHmacKey(pb, optionsMap), codecs.hmacKey()); + } + + @Override + public Page listHmacKeys(ListHmacKeysOption... options) { + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + return listHmacKeys(getOptions(), retryAlgorithmManager, optionsMap, retrier); + } + + @Override + public HmacKeyMetadata getHmacKey(final String accessId, final GetHmacKeyOption... options) { + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForHmacKeyGet(accessId, optionsMap); + return run( + algorithm, () -> storageRpc.getHmacKey(accessId, optionsMap), codecs.hmacKeyMetadata()); + } + + private HmacKeyMetadata updateHmacKey( + final HmacKeyMetadata hmacKeyMetadata, final UpdateHmacKeyOption... 
options) { + com.google.api.services.storage.model.HmacKeyMetadata pb = + codecs.hmacKeyMetadata().encode(hmacKeyMetadata); + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForHmacKeyUpdate(pb, optionsMap); + return run(algorithm, () -> storageRpc.updateHmacKey(pb, optionsMap), codecs.hmacKeyMetadata()); + } + + @Override + public HmacKeyMetadata updateHmacKeyState( + final HmacKeyMetadata hmacKeyMetadata, + final HmacKey.HmacKeyState state, + final UpdateHmacKeyOption... options) { + HmacKeyMetadata updatedMetadata = + HmacKeyMetadata.newBuilder(hmacKeyMetadata.getServiceAccount()) + .setProjectId(hmacKeyMetadata.getProjectId()) + .setAccessId(hmacKeyMetadata.getAccessId()) + .setState(state) + .build(); + return updateHmacKey(updatedMetadata, options); + } + + @Override + public void deleteHmacKey(final HmacKeyMetadata metadata, final DeleteHmacKeyOption... options) { + com.google.api.services.storage.model.HmacKeyMetadata pb = + codecs.hmacKeyMetadata().encode(metadata); + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForHmacKeyDelete(pb, optionsMap); + run( + algorithm, + (Callable) + () -> { + storageRpc.deleteHmacKey(pb, optionsMap); + return null; + }, + Decoder.identity()); + } + + private static Page listHmacKeys( + final HttpStorageOptions serviceOptions, + final HttpRetryAlgorithmManager retryAlgorithmManager, + final Map options, + Retrier retrier) { + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForHmacKeyList(options); + return retrier.run( + algorithm, + () -> serviceOptions.getStorageRpcV1().listHmacKeys(options), + (result) -> { + String cursor = result.x(); + final Iterable metadata = + result.y() == null + ? 
ImmutableList.of() + : Iterables.transform(result.y(), codecs.hmacKeyMetadata()::decode); + return new PageImpl<>( + new HmacKeyMetadataPageFetcher( + serviceOptions, retryAlgorithmManager, options, retrier), + cursor, + metadata); + }); + } + + @Override + public Policy getIamPolicy(final String bucket, BucketSourceOption... options) { + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForBucketsGetIamPolicy(bucket, optionsMap); + return run( + algorithm, + () -> storageRpc.getIamPolicy(bucket, optionsMap), + apiPolicy -> Conversions.json().policyCodec().decode(apiPolicy)); + } + + @Override + public Policy setIamPolicy( + final String bucket, final Policy policy, BucketSourceOption... options) { + com.google.api.services.storage.model.Policy pb = + Conversions.json().policyCodec().encode(policy); + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForBucketsSetIamPolicy(bucket, pb, optionsMap); + return run( + algorithm, + () -> storageRpc.setIamPolicy(bucket, pb, optionsMap), + apiPolicy -> Conversions.json().policyCodec().decode(apiPolicy)); + } + + @Override + public List testIamPermissions( + final String bucket, final List permissions, BucketSourceOption... options) { + ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForBucketsTestIamPermissions(bucket, permissions, optionsMap); + return run( + algorithm, + () -> storageRpc.testIamPermissions(bucket, permissions, optionsMap), + (response) -> { + final Set heldPermissions = + response.getPermissions() != null + ? 
ImmutableSet.copyOf(response.getPermissions()) + : ImmutableSet.of(); + return permissions.stream() + .map(heldPermissions::contains) + .collect(ImmutableList.toImmutableList()); + }); + } + + @Override + public Bucket lockRetentionPolicy(BucketInfo bucketInfo, BucketTargetOption... options) { + final com.google.api.services.storage.model.Bucket bucketPb = + codecs.bucketInfo().encode(bucketInfo); + final Map optionsMap = + Opts.unwrap(options).resolveFrom(bucketInfo).getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForBucketsLockRetentionPolicy(bucketPb, optionsMap); + return run( + algorithm, + () -> storageRpc.lockRetentionPolicy(bucketPb, optionsMap), + (x) -> Conversions.json().bucketInfo().decode(x).asBucket(this)); + } + + @Override + public ServiceAccount getServiceAccount(final String projectId) { + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForServiceAccountGet(projectId); + return run(algorithm, () -> storageRpc.getServiceAccount(projectId), codecs.serviceAccount()); + } + + private U run(ResultRetryAlgorithm algorithm, Callable c, Decoder f) { + return retrier.run(algorithm, c, f); + } + + @Override + public Notification createNotification( + final String bucket, final NotificationInfo notificationInfo) { + final com.google.api.services.storage.model.Notification notificationPb = + codecs.notificationInfo().encode(notificationInfo); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForNotificationCreate(bucket, notificationPb); + return run( + algorithm, + () -> storageRpc.createNotification(bucket, notificationPb), + n -> codecs.notificationInfo().decode(n).asNotification(this)); + } + + @Override + public Notification getNotification(final String bucket, final String notificationId) { + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForNotificationGet(bucket, notificationId); + return run( + algorithm, + () -> storageRpc.getNotification(bucket, notificationId), + n -> 
codecs.notificationInfo().decode(n).asNotification(this)); + } + + @Override + public List listNotifications(final String bucket) { + ResultRetryAlgorithm algorithm = retryAlgorithmManager.getForNotificationList(bucket); + List result = + run( + algorithm, + () -> storageRpc.listNotifications(bucket), + (answer) -> + answer.stream() + .map(n -> codecs.notificationInfo().decode(n).asNotification(this)) + .collect(ImmutableList.toImmutableList())); + return result == null ? ImmutableList.of() : result; + } + + @Override + public boolean deleteNotification(final String bucket, final String notificationId) { + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForNotificationDelete(bucket, notificationId); + return run( + algorithm, () -> storageRpc.deleteNotification(bucket, notificationId), Decoder.identity()); + } + + @Override + public HttpStorageOptions getOptions() { + return (HttpStorageOptions) super.getOptions(); + } + + private Blob internalGetBlob(BlobId blob, Map optionsMap) { + StorageObject storedObject = codecs.blobId().encode(blob); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForObjectsGet(storedObject, optionsMap); + return run( + algorithm, + () -> storageRpc.get(storedObject, optionsMap), + (x) -> { + BlobInfo info = Conversions.json().blobInfo().decode(x); + return info.asBlob(this); + }); + } + + private Bucket internalBucketGet(String bucket, Map optionsMap) { + com.google.api.services.storage.model.Bucket bucketPb = + codecs.bucketInfo().encode(BucketInfo.of(bucket)); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForBucketsGet(bucketPb, optionsMap); + return run( + algorithm, + () -> storageRpc.get(bucketPb, optionsMap), + (b) -> Conversions.json().bucketInfo().decode(b).asBucket(this)); + } + + @Override + public BlobWriteSession blobWriteSession(BlobInfo blobInfo, BlobWriteOption... 
options) { + Opts opts = Opts.unwrap(options).resolveFrom(blobInfo); + + WritableByteChannelSession writableByteChannelSession = + writerFactory.writeSession(this, blobInfo, opts); + return BlobWriteSessions.of(writableByteChannelSession); + } + + @Override + public Blob moveBlob(MoveBlobRequest request) { + Opts srcOpts = + Opts.unwrap(request.getSourceOptions()).resolveFrom(request.getSource()).projectAsSource(); + Opts dstOpts = + Opts.unwrap(request.getTargetOptions()).resolveFrom(request.getTarget()); + ImmutableMap sourceOptions = srcOpts.getRpcOptions(); + ImmutableMap targetOptions = dstOpts.getRpcOptions(); + + return run( + retryAlgorithmManager.getForObjectsMove(sourceOptions, targetOptions), + () -> + storageRpc.moveObject( + request.getSource().getBucket(), + request.getSource().getName(), + request.getTarget().getName(), + sourceOptions, + targetOptions), + o -> codecs.blobInfo().decode(o).asBlob(this)); + } + + @Override + public BlobInfo internalCreateFrom(Path path, BlobInfo info, Opts opts) + throws IOException { + if (Files.isDirectory(path)) { + throw new StorageException(0, path + " is a directory"); + } + long size = Files.size(path); + if (size == 0L) { + return internalDirectUpload(info, opts, ByteBuffer.allocate(0)); + } + final Map optionsMap = opts.getRpcOptions(); + BlobInfo.Builder builder = info.toBuilder().setMd5(null).setCrc32c(null); + BlobInfo updated = opts.blobInfoMapper().apply(builder).build(); + StorageObject encode = codecs.blobInfo().encode(updated); + + Supplier uploadIdSupplier = + ResumableMedia.startUploadForBlobInfo( + getOptions(), + updated, + optionsMap, + retrier.withAlg(retryAlgorithmManager.getForResumableUploadSessionCreate(optionsMap))); + JsonResumableWrite jsonResumableWrite = + JsonResumableWrite.of( + encode, + optionsMap, + uploadIdSupplier.get(), + 0, + opts.getHasher(), + opts.getHasher().initialValue()); + + JsonResumableSession session = + ResumableSession.json( + HttpClientContext.from(storageRpc), + 
retrier.withAlg(retryAlgorithmManager.idempotent()), + jsonResumableWrite); + HttpContentRange contentRange = HttpContentRange.of(ByteRangeSpec.explicit(0L, size), size); + ResumableOperationResult put = + session.put(RewindableContent.of(path), contentRange); + // all exception translation is taken care of down in the JsonResumableSession + StorageObject object = put.getObject(); + if (object == null) { + // if by some odd chance the put didn't get the StorageObject, query for it + ResumableOperationResult query = session.query(); + object = query.getObject(); + } + return codecs.blobInfo().decode(object); + } + + @Override + public BlobInfo internalDirectUpload(BlobInfo info, Opts opts, ByteBuffer buf) { + BlobInfo.Builder builder = + opts.blobInfoMapper().apply(info.toBuilder().clearMd5().clearCrc32c()); + @Nullable Crc32cLengthKnown hash = opts.getHasher().hash(buf.duplicate()); + if (hash != null) { + builder.setCrc32c(Utils.crc32cCodec.encode(hash.getValue())); + } + final Map optionsMap = opts.getRpcOptions(); + + BlobInfo updated = builder.build(); + final StorageObject encoded = codecs.blobInfo().encode(updated); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForObjectsCreate(encoded, optionsMap); + RewindableContent content = RewindableContent.of(buf); + return run( + algorithm, + () -> { + content.rewindTo(0); + return storageRpc.create(encoded, new RewindableContentInputStream(content), optionsMap); + }, + Conversions.json().blobInfo()); + } + + /** + * Behavioral difference compared to {@link #delete(BlobId, BlobSourceOption...)} instead of + * returning false when an object does not exist, we throw an exception. 
+ */ + @Override + public Void internalObjectDelete(BlobId id, Opts opts) { + final StorageObject storageObject = codecs.blobId().encode(id); + ImmutableMap optionsMap = opts.getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForObjectsDelete(storageObject, optionsMap); + return run( + algorithm, + () -> { + boolean deleted = storageRpc.delete(storageObject, optionsMap); + // HttpStorageRpc turns a 404 into false, our code needs to know 404 + if (!deleted) { + throw new StorageException(404, "NOT_FOUND", null, null); + } + return null; + }, + Decoder.identity()); + } + + @Override + public BlobInfo internalObjectGet(BlobId blobId, Opts opts) { + StorageObject storedObject = codecs.blobId().encode(blobId); + ImmutableMap optionsMap = opts.getRpcOptions(); + ResultRetryAlgorithm algorithm = + retryAlgorithmManager.getForObjectsGet(storedObject, optionsMap); + return run( + algorithm, + () -> { + StorageObject storageObject = storageRpc.get(storedObject, optionsMap); + // HttpStorageRpc turns a 404 into null, our code needs to know 404 + if (storageObject == null) { + throw new StorageException(404, "NOT_FOUND", null, null); + } + return storageObject; + }, + codecs.blobInfo()); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageInternal.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageInternal.java new file mode 100644 index 000000000000..0d700c46df24 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageInternal.java @@ -0,0 +1,51 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.Storage.ComposeRequest; +import com.google.cloud.storage.UnifiedOpts.ObjectSourceOpt; +import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Path; + +interface StorageInternal { + + default BlobInfo internalCreateFrom(Path path, BlobInfo info, Opts opts) + throws IOException { + throw new UnsupportedOperationException("not implemented"); + } + + default BlobInfo internalDirectUpload(BlobInfo info, Opts opts, ByteBuffer buf) { + throw new UnsupportedOperationException("not implemented"); + } + + // Void to allow easier mapping/use within streams and other mapping contexts + @SuppressWarnings("UnusedReturnValue") + default Void internalObjectDelete(BlobId id, Opts opts) { + throw new UnsupportedOperationException("not implemented"); + } + + default BlobInfo compose(ComposeRequest composeRequest) { + throw new UnsupportedOperationException("not implemented"); + } + + default BlobInfo internalObjectGet(BlobId blobId, Opts opts) { + throw new UnsupportedOperationException("not implemented"); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageOptions.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageOptions.java new file mode 100644 index 000000000000..723a11dc34ce --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageOptions.java @@ -0,0 +1,241 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.BetaApi; +import com.google.api.core.InternalApi; +import com.google.cloud.NoCredentials; +import com.google.cloud.ServiceDefaults; +import com.google.cloud.ServiceOptions; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.storage.GrpcStorageOptions.GrpcStorageDefaults; +import com.google.cloud.storage.HttpStorageOptions.HttpStorageDefaults; +import com.google.cloud.storage.HttpStorageOptions.HttpStorageFactory; +import com.google.cloud.storage.HttpStorageOptions.HttpStorageRpcFactory; +import com.google.cloud.storage.Retrying.DefaultRetrier; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.cloud.storage.Retrying.RetryingDependencies; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.spi.StorageRpcFactory; +import io.opentelemetry.api.OpenTelemetry; +import java.io.IOException; +import java.io.InputStream; +import java.util.Locale; +import java.util.Properties; +import org.checkerframework.checker.nullness.qual.NonNull; + +public abstract class StorageOptions extends ServiceOptions { + + private static final long serialVersionUID = 
-7295846567928013233L; + private static final String VERSION; + + static { + String tmp = "unresolved"; + final Properties props = new Properties(); + try { + String resourcePath = + String.format( + Locale.US, + "/META-INF/maven/%s/%s/pom.properties", + "com.google.cloud", + "google-cloud-storage"); + InputStream resourceAsStream = StorageOptions.class.getResourceAsStream(resourcePath); + if (resourceAsStream == null) { + // some classloaders don't like a leading slash + resourceAsStream = StorageOptions.class.getResourceAsStream(resourcePath.substring(1)); + } + if (resourceAsStream != null) { + props.load(resourceAsStream); + resourceAsStream.close(); + + tmp = props.getProperty("version", "unknown-version"); + } + } catch (IOException ignore) { + // ignored + } + VERSION = tmp; + } + + Retrier createRetrier() { + return new DefaultRetrier( + OtelStorageDecorator.retryContextDecorator(getOpenTelemetry()), + RetryingDependencies.simple(getClock(), getRetrySettings())); + } + + /** + * @deprecated Use {@link HttpStorageFactory} + */ + @Deprecated + public static class DefaultStorageFactory extends HttpStorageFactory { + private static final long serialVersionUID = -7856840922014956661L; + + /** + * @deprecated Use {@link HttpStorageDefaults#getDefaultServiceFactory()} + */ + @Deprecated + public DefaultStorageFactory() { + super(); + } + } + + /** + * @deprecated Use {@link HttpStorageRpcFactory} + */ + @Deprecated + public static class DefaultStorageRpcFactory extends HttpStorageRpcFactory { + private static final long serialVersionUID = -7856840922014956661L; + + /** + * @deprecated Use {@link HttpStorageDefaults#getDefaultRpcFactory()} + */ + @Deprecated + public DefaultStorageRpcFactory() { + super(); + } + } + + public abstract static class Builder + extends ServiceOptions.Builder { + + Builder() {} + + Builder(StorageOptions options) { + super(options); + } + + public abstract Builder setStorageRetryStrategy(StorageRetryStrategy storageRetryStrategy); + + 
/** + * @see BlobWriteSessionConfig + * @see BlobWriteSessionConfigs + * @see Storage#blobWriteSession(BlobInfo, BlobWriteOption...) + * @see HttpStorageDefaults#getDefaultStorageWriterConfig() + * @see GrpcStorageDefaults#getDefaultStorageWriterConfig() + * @since 2.37.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public abstract StorageOptions.Builder setBlobWriteSessionConfig( + @NonNull BlobWriteSessionConfig blobWriteSessionConfig); + + /** + * Enable OpenTelemetry Tracing and provide an instance for the client to use. + * + * @param openTelemetry User defined instance of OpenTelemetry to be used by the library + * @since 2.47.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + public abstract StorageOptions.Builder setOpenTelemetry(OpenTelemetry openTelemetry); + + @Override + public abstract StorageOptions build(); + } + + StorageOptions(Builder builder, StorageDefaults serviceDefaults) { + super(StorageFactory.class, StorageRpcFactory.class, builder, serviceDefaults); + } + + abstract static class StorageDefaults implements ServiceDefaults {} + + /** + * @deprecated Use {@link HttpStorageDefaults#getDefaultTransportOptions()} + */ + @Deprecated + public static HttpTransportOptions getDefaultHttpTransportOptions() { + return HttpStorageOptions.defaults().getDefaultTransportOptions(); + } + + // Project ID is only required for creating buckets, so we don't require it for creating the + // service. + @Override + protected boolean projectIdRequired() { + return false; + } + + @Override + public String getLibraryVersion() { + return VERSION; + } + + /* This can break at any time, the value produce is intended to be informative not authoritative */ + @InternalApi + public static String version() { + return VERSION; + } + + /** + * @since 2.47.0 This new api is in preview and is subject to breaking changes. 
+ */ + @BetaApi + public abstract OpenTelemetry getOpenTelemetry(); + + @SuppressWarnings("unchecked") + @Override + public abstract StorageOptions.Builder toBuilder(); + + @Override + public abstract int hashCode(); + + @Override + public abstract boolean equals(Object obj); + + /** + * Returns a default {@code StorageOptions} instance. The default instance will use JSON over HTTP + * for its transport. + */ + @TransportCompatibility(Transport.HTTP) + public static StorageOptions getDefaultInstance() { + return HttpStorageOptions.newBuilder().build(); + } + + /** + * Returns a unauthenticated {@code StorageOptions} instance. The returned instance will use JSON + * over HTTP for its transport. + */ + @TransportCompatibility(Transport.HTTP) + public static StorageOptions getUnauthenticatedInstance() { + return HttpStorageOptions.newBuilder().setCredentials(NoCredentials.getInstance()).build(); + } + + /** The returned instance will use JSON over HTTP for its transport. */ + @TransportCompatibility(Transport.HTTP) + public static StorageOptions.Builder newBuilder() { + return http(); + } + + /** + * Builder factory method which will create a JSON over HTTP specific instance of storage options. + * + * @since 2.14.0 + */ + @TransportCompatibility(Transport.HTTP) + public static HttpStorageOptions.Builder http() { + return HttpStorageOptions.newBuilder(); + } + + /** + * Builder factory method which will create a gRPC specific instance of storage options. + * + *

Google Cloud Storage is in Private Preview for a gRPC centric transport. + * + * @since 2.14.0 + */ + @TransportCompatibility(Transport.GRPC) + public static GrpcStorageOptions.Builder grpc() { + return GrpcStorageOptions.newBuilder(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageReadChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageReadChannel.java new file mode 100644 index 000000000000..4e01f1db7321 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageReadChannel.java @@ -0,0 +1,78 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.api.core.ApiFuture; +import com.google.cloud.ReadChannel; +import java.io.IOException; + +interface StorageReadChannel extends ReadChannel { + + StorageReadChannel setByteRangeSpec(ByteRangeSpec byteRangeSpec); + + /** + * Return a Future which resolves to the sparse object metadata included in the response headers + * when opening the read. 
+ */ + ApiFuture getObject(); + + default ByteRangeSpec getByteRangeSpec() { + return ByteRangeSpec.nullRange(); + } + + /** + * @deprecated Use {@link #setByteRangeSpec(ByteRangeSpec)} + */ + @Deprecated + @SuppressWarnings("resource") + @Override + default void seek(long position) throws IOException { + try { + setByteRangeSpec(getByteRangeSpec().withNewBeginOffset(position)); + } catch (StorageException e) { + Throwable cause = e.getCause(); + if (cause instanceof IOException) { + throw (IOException) cause; + } else { + throw e; + } + } + } + + /** + * @deprecated Use {@link #setByteRangeSpec(ByteRangeSpec)} + */ + @SuppressWarnings("resource") + @Deprecated + @Override + default ReadChannel limit(long limit) { + checkArgument(limit >= 0, "limit must be >= 0"); + setByteRangeSpec(getByteRangeSpec().withNewEndOffset(limit)); + return this; + } + + /** + * @deprecated Use {@link #getByteRangeSpec()} + */ + @Deprecated + @Override + default long limit() { + return getByteRangeSpec().endOffset(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageRetryStrategy.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageRetryStrategy.java new file mode 100644 index 000000000000..d0a340a1dc22 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageRetryStrategy.java @@ -0,0 +1,122 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.cloud.BaseService; +import java.io.Serializable; + +/** + * A factory class which is used to provide access to {@link ResultRetryAlgorithm} for idempotent + * and non-idempotent calls made via {@link Storage}. Before {@link Storage} performs an operation + * it will determine if the operation is idempotent and select the appropriate {@link + * ResultRetryAlgorithm} to use for that invocation. + * + * @see #getDefaultStorageRetryStrategy() + * @see #getUniformStorageRetryStrategy() + */ +public interface StorageRetryStrategy extends Serializable { + + /** + * Factory method to provide a {@link ResultRetryAlgorithm} which will be used to evaluate whether + * a retry can happen for an operation which has been deemed idempotent. + * + * @return + */ + ResultRetryAlgorithm getIdempotentHandler(); + + ResultRetryAlgorithm getNonidempotentHandler(); + + /** + * Factory method to get an instance of the default implementation of {@link + * StorageRetryStrategy}. The returned instance is provides handler which are appropriate for + * calls which are known to be idempotent vs non-idempotent. + * + *

All non-idempotent calls will not be retried + * + *

The set of retryable cases handled by this strategy is more comprehensive than that of the + * legacy strategy and should always be preferred. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Retried HTTP status codes for idempotent calls
CodeName
408Request Timeout
429Too Many Requests
500Internal Server Error
502Bad Gateway
503Service Unavailable
504Gateway Timeout
+ * + * @see StorageOptions.Builder#setStorageRetryStrategy(StorageRetryStrategy) + * @see #getUniformStorageRetryStrategy() + */ + static StorageRetryStrategy getDefaultStorageRetryStrategy() { + return DefaultStorageRetryStrategy.INSTANCE; + } + + /** + * Factory method to get an instance of {@link StorageRetryStrategy} which will uniformly retry + * all calls as if they were idempotent. + * + *

NOTE:This strategy is unsafe and will result in retying some non-idempotent + * calls. Care should be taken to ensure calls which would not normally be considered idempotent + * are made idempotent by some other means in your program. + * + * @see StorageOptions.Builder#setStorageRetryStrategy(StorageRetryStrategy) + * @see #getDefaultStorageRetryStrategy() + */ + static StorageRetryStrategy getUniformStorageRetryStrategy() { + return new UniformStorageRetryStrategy(getDefaultStorageRetryStrategy().getIdempotentHandler()); + } + + /** + * Factory method to get an instance of {@link StorageRetryStrategy} with the behavior which was + * used prior to version 2.1.8. This strategy is unsafe and will result in retying some + * non-idempotent calls. + * + * @deprecated please migrate to using {@link #getDefaultStorageRetryStrategy()} which is capable + * of providing handlers which are appropriate for idempotent and non-idempotent calls. + * @see StorageOptions.Builder#setStorageRetryStrategy(StorageRetryStrategy) + * @see #getDefaultStorageRetryStrategy() + */ + @Deprecated + static StorageRetryStrategy getLegacyStorageRetryStrategy() { + return new UniformStorageRetryStrategy(BaseService.EXCEPTION_HANDLER); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageRoles.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageRoles.java new file mode 100644 index 000000000000..790039c7412a --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageRoles.java @@ -0,0 +1,146 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.Role; + +/** + * IAM roles specific to Storage. An overview of the permissions available to Storage and the + * capabilities they grant can be found in the Google Cloud Storage IAM + * documentation. + */ +public class StorageRoles { + + /** + * Grants the following permissions: + * + *

    + *
  • storage.buckets.* + *
  • storage.objects.* + *
+ */ + public static Role admin() { + return Role.of("roles/storage.admin"); + } + + /** + * Grants the following permissions: + * + *
    + *
  • storage.objects.list + *
  • storage.objects.get + *
+ */ + public static Role objectViewer() { + return Role.of("roles/storage.objectViewer"); + } + + /** + * Grants the following permissions: + * + *
    + *
  • storage.objects.create + *
+ */ + public static Role objectCreator() { + return Role.of("roles/storage.objectCreator"); + } + + /** + * Grants the following permissions: + * + *
    + *
  • storage.objects.* + *
+ */ + public static Role objectAdmin() { + return Role.of("roles/storage.objectAdmin"); + } + + /** + * Grants the following permissions: + * + *
    + *
  • storage.buckets.get + *
  • storage.buckets.update + *
  • storage.buckets.setIamPolicy + *
  • storage.buckets.getIamPolicy + *
  • storage.objects.list + *
  • storage.objects.create + *
  • storage.objects.delete + *
+ */ + public static Role legacyBucketOwner() { + return Role.of("roles/storage.legacyBucketOwner"); + } + + /** + * Grants the following permissions: + * + *
    + *
  • storage.buckets.get + *
  • storage.objects.list + *
  • storage.objects.create + *
  • storage.objects.delete + *
+ */ + public static Role legacyBucketWriter() { + return Role.of("roles/storage.legacyBucketWriter"); + } + + /** + * Grants the following permissions: + * + *
    + *
  • storage.buckets.get + *
  • storage.objects.list + *
+ */ + public static Role legacyBucketReader() { + return Role.of("roles/storage.legacyBucketReader"); + } + + /** + * Grants the following permissions: + * + *
    + *
  • storage.objects.get + *
  • storage.objects.update + *
  • storage.objects.getIamPolicy + *
  • storage.objects.setIamPolicy + *
+ */ + public static Role legacyObjectOwner() { + return Role.of("roles/storage.legacyObjectOwner"); + } + + /** + * Grants the following permissions: + * + *
    + *
  • storage.objects.get + *
+ */ + public static Role legacyObjectReader() { + return Role.of("roles/storage.legacyObjectReader"); + } + + private StorageRoles() { + // Intentionally left blank. + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageV2ProtoUtils.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageV2ProtoUtils.java new file mode 100644 index 000000000000..bffa932b13d7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageV2ProtoUtils.java @@ -0,0 +1,193 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.protobuf.ByteString; +import com.google.protobuf.MessageOrBuilder; +import com.google.protobuf.TextFormat; +import com.google.storage.v2.BidiReadObjectResponse; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BucketAccessControl; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.ObjectAccessControl; +import com.google.storage.v2.ObjectRangeData; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; +import com.google.storage.v2.WriteObjectRequest; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.function.Function; +import java.util.function.Predicate; +import org.checkerframework.checker.nullness.qual.NonNull; + +final class StorageV2ProtoUtils { + + private static final String VALIDATION_TEMPLATE = + "offset >= 0 && limit >= 0 (%s >= 0 && %s >= 0)"; + + private StorageV2ProtoUtils() {} + + // TODO: can we eliminate this method all together? 
+ @NonNull + static ReadObjectRequest seekReadObjectRequest( + @NonNull ReadObjectRequest request, @NonNull ByteRangeSpec byteRangeSpec) { + + long offset = byteRangeSpec.beginOffset(); + long limit = byteRangeSpec.length(); + ReadObjectRequest req = request; + + boolean setOffset = (offset > 0 && offset != req.getReadOffset()); + boolean setLimit = (limit < ByteRangeSpec.EFFECTIVE_INFINITY && limit != req.getReadLimit()); + if (setOffset || setLimit) { + req = byteRangeSpec.seekReadObjectRequest(req.toBuilder()).build(); + } + return req; + } + + @FunctionalInterface + interface MsgFmt extends Function {} + + @NonNull + public static String fmtProto(@NonNull Object obj) { + return fmtProtoWithFmt(obj, TextFormat.printer()::shortDebugString); + } + + @NonNull + public static String fmtProtoWithFmt(@NonNull Object obj, MsgFmt fmt) { + if (obj instanceof WriteObjectRequest) { + return fmtProtoWithFmt((WriteObjectRequest) obj, fmt); + } else if (obj instanceof BidiWriteObjectRequest) { + return fmtProtoWithFmt((BidiWriteObjectRequest) obj, fmt); + } else if (obj instanceof ReadObjectResponse) { + return fmtProtoWithFmt((ReadObjectResponse) obj, fmt); + } else if (obj instanceof BidiReadObjectResponse) { + return fmtProtoWithFmt((BidiReadObjectResponse) obj, fmt); + } else if (obj instanceof ChecksummedData) { + return fmtProtoWithFmt((ChecksummedData) obj, fmt); + } else if (obj instanceof MessageOrBuilder) { + return fmt.apply((MessageOrBuilder) obj); + } else { + return obj.toString(); + } + } + + @NonNull + private static String fmtProtoWithFmt(ChecksummedData data, MsgFmt fmt) { + ByteString content = data.getContent(); + if (content.size() > 20) { + ChecksummedData.Builder b = data.toBuilder(); + ByteString trim = snipBytes(content); + b.setContent(trim); + + return fmt.apply(b.build()); + } + return fmt.apply(data); + } + + @NonNull + private static String fmtProtoWithFmt(@NonNull WriteObjectRequest msg, MsgFmt fmt) { + if (msg.hasChecksummedData()) { + 
ByteString content = msg.getChecksummedData().getContent(); + if (content.size() > 20) { + WriteObjectRequest.Builder b = msg.toBuilder(); + ByteString trim = snipBytes(content); + b.getChecksummedDataBuilder().setContent(trim); + + return fmt.apply(b.build()); + } + } + return fmt.apply(msg); + } + + @NonNull + private static String fmtProtoWithFmt(@NonNull BidiWriteObjectRequest msg, MsgFmt fmt) { + if (msg.hasChecksummedData()) { + ByteString content = msg.getChecksummedData().getContent(); + if (content.size() > 20) { + BidiWriteObjectRequest.Builder b = msg.toBuilder(); + ByteString trim = snipBytes(content); + b.getChecksummedDataBuilder().setContent(trim); + + return fmt.apply(b.build()); + } + } + return fmt.apply(msg); + } + + @NonNull + private static String fmtProtoWithFmt(@NonNull ReadObjectResponse msg, MsgFmt fmt) { + if (msg.hasChecksummedData()) { + ByteString content = msg.getChecksummedData().getContent(); + if (content.size() > 20) { + ReadObjectResponse.Builder b = msg.toBuilder(); + ByteString trim = snipBytes(content); + b.getChecksummedDataBuilder().setContent(trim); + + return fmt.apply(b.build()); + } + } + return fmt.apply(msg); + } + + @NonNull + private static String fmtProtoWithFmt(@NonNull BidiReadObjectResponse msg, MsgFmt fmt) { + List rangeData = msg.getObjectDataRangesList(); + if (!rangeData.isEmpty()) { + List snips = new ArrayList<>(); + for (ObjectRangeData rd : rangeData) { + if (rd.hasChecksummedData()) { + ByteString content = rd.getChecksummedData().getContent(); + if (content.size() > 20) { + ObjectRangeData.Builder b = rd.toBuilder(); + ByteString trim = snipBytes(content); + b.getChecksummedDataBuilder().setContent(trim); + snips.add(b.build()); + } else { + snips.add(rd); + } + } + } + BidiReadObjectResponse snipped = + msg.toBuilder().clearObjectDataRanges().addAllObjectDataRanges(snips).build(); + return fmt.apply(snipped); + } + return fmt.apply(msg); + } + + private static ByteString snipBytes(ByteString content) { 
+ ByteString snip = + ByteString.copyFromUtf8(java.lang.String.format(Locale.US, "", content.size())); + return content.substring(0, 20).concat(snip); + } + + /** + * When evaluating an {@link ObjectAccessControl} entity, look at both {@code entity} (generally + * project number format) and {@code entity_alt} (generally project id format). + */ + static Predicate objectAclEntityOrAltEq(String s) { + return oAcl -> oAcl.getEntity().equals(s) || oAcl.getEntityAlt().equals(s); + } + + /** + * When evaluating an {@link BucketAccessControl} entity, look at both {@code entity} (generally + * project number format) and {@code entity_alt} (generally project id format). + */ + static Predicate bucketAclEntityOrAltEq(String s) { + return oAcl -> oAcl.getEntity().equals(s) || oAcl.getEntityAlt().equals(s); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageWriteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageWriteChannel.java new file mode 100644 index 000000000000..d1badc0b1772 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageWriteChannel.java @@ -0,0 +1,24 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.cloud.WriteChannel; + +interface StorageWriteChannel extends WriteChannel { + ApiFuture getObject(); +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/SyncAndUploadUnbufferedWritableByteChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/SyncAndUploadUnbufferedWritableByteChannel.java new file mode 100644 index 000000000000..7c9451f82e88 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/SyncAndUploadUnbufferedWritableByteChannel.java @@ -0,0 +1,483 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.GrpcUtils.contextWithBucketName; + +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.retrying.TimedAttemptSettings; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.ApiStreamObserver; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.storage.ChunkSegmenter.ChunkSegment; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.QueryWriteStatusRequest; +import com.google.storage.v2.QueryWriteStatusResponse; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import io.grpc.Status.Code; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.GatheringByteChannel; +import java.nio.channels.SeekableByteChannel; +import java.util.Arrays; +import java.util.concurrent.CancellationException; +import java.util.concurrent.atomic.AtomicBoolean; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class SyncAndUploadUnbufferedWritableByteChannel implements 
UnbufferedWritableByteChannel { + + private final ClientStreamingCallable write; + private final UnaryCallable query; + private final SettableApiFuture resultFuture; + private final ChunkSegmenter chunkSegmenter; + private final WriteCtx writeCtx; + private final Retrier retrier; + private final ResultRetryAlgorithm alg; + private final RecoveryFile rf; + + private final String uploadId; + private final BufferHandle copyBuffer; + + /* --- running state --- */ + private final RequestStream stream; + + private boolean open; + private @Nullable GatheringByteChannel sync; + private boolean first; + private boolean finished; + + SyncAndUploadUnbufferedWritableByteChannel( + ClientStreamingCallable write, + UnaryCallable query, + SettableApiFuture resultFuture, + ChunkSegmenter chunkSegmenter, + Retrier retrier, + ResultRetryAlgorithm alg, + WriteCtx writeCtx, + RecoveryFile rf, + BufferHandle copyBuffer) { + this.write = + write.withDefaultCallContext( + contextWithBucketName( + writeCtx.getRequestFactory().bucketName(), GrpcCallContext.createDefault())); + this.query = query; + this.resultFuture = resultFuture; + this.chunkSegmenter = chunkSegmenter; + this.writeCtx = writeCtx; + this.retrier = retrier; + this.alg = new Alg(alg, resultFuture); + this.rf = rf; + this.uploadId = writeCtx.newRequestBuilder().getUploadId(); + this.copyBuffer = copyBuffer; + this.stream = new RequestStream(this.write, resultFuture); + this.open = true; + this.first = true; + this.finished = false; + } + + @SuppressWarnings("resource") + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + if (!open) { + throw new ClosedChannelException(); + } + ByteBuffer[] duplicates = + Arrays.stream(srcs, offset, offset + length) + .map(ByteBuffer::duplicate) + .toArray(ByteBuffer[]::new); + long prevWritten = writeCtx.getTotalSentBytes().get(); + long syncWritten = openSync().write(duplicates); + long goalSize = Math.addExact(prevWritten, syncWritten); + 
ChunkSegment[] segments = chunkSegmenter.segmentBuffers(srcs, offset, length); + doUpload(false, segments, goalSize); + return syncWritten; + } + + @Override + public boolean isOpen() { + return open; + } + + @Override + public void close() throws IOException { + if (!open) { + return; + } + try { + doUpload(true, new ChunkSegment[0], writeCtx.getTotalSentBytes().get()); + rf.close(); + } finally { + open = false; + } + } + + private GatheringByteChannel openSync() throws IOException { + if (sync == null) { + sync = rf.syncingChannel(); + } + return sync; + } + + private WriteObjectRequest processSegment(ChunkSegment segment, boolean updateCumulativeCrc32c) { + WriteObjectRequest.Builder builder = writeCtx.newRequestBuilder(); + if (!first) { + builder.clearUploadId().clearWriteObjectSpec().clearObjectChecksums(); + } else { + first = false; + } + + Crc32cLengthKnown crc32c = segment.getCrc32c(); + ByteString b = segment.getB(); + int contentSize = b.size(); + + // update ctx state that tracks overall progress + if (updateCumulativeCrc32c) { + writeCtx + .getCumulativeCrc32c() + .accumulateAndGet(crc32c, chunkSegmenter.getHasher()::nullSafeConcat); + } + // resolve current offset and set next + long offset = writeCtx.getTotalSentBytes().getAndAdd(contentSize); + + ChecksummedData.Builder checksummedData = ChecksummedData.newBuilder().setContent(b); + if (crc32c != null) { + checksummedData.setCrc32C(crc32c.getValue()); + } + builder.setWriteOffset(offset).setChecksummedData(checksummedData.build()); + + if (!segment.isOnlyFullBlocks()) { + finishMessage(builder); + finished = true; + } + + WriteObjectRequest build = builder.build(); + return build; + } + + @NonNull + private WriteObjectRequest finishMessage() { + long offset = writeCtx.getTotalSentBytes().get(); + + WriteObjectRequest.Builder b = writeCtx.newRequestBuilder().setWriteOffset(offset); + + WriteObjectRequest message = finishMessage(b).build(); + return message; + } + + private 
WriteObjectRequest.Builder finishMessage(WriteObjectRequest.Builder b) { + Crc32cLengthKnown crc32cValue = writeCtx.getCumulativeCrc32c().get(); + b.setFinishWrite(true); + if (crc32cValue != null) { + b.setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(crc32cValue.getValue()).build()); + } + return b; + } + + @SuppressWarnings("ConstantValue") + private void doUpload(boolean closing, ChunkSegment[] segments, long goalSize) { + AtomicBoolean recover = new AtomicBoolean(false); + retrier.run( + alg, + () -> { + if (closing && sync != null) { + sync.close(); + } + boolean shouldRecover = recover.getAndSet(true); + // each ChunkSegment will always have its checksum computed, but if a retry happens, and + // we need to rewind and build a new ChunkSegment, we don't want to add it to the + // cumulativeCrc32c value because that will make it appear as the bytes are duplicated. + // If we send "ABCD", get an error and find only "AB" to have been persisted, we don't + // want to add "CD" to the cumulative crc32c as that would be equivalent to "ABCDCD". 
+ boolean updateCumulativeCrc32c = !shouldRecover; + if (!shouldRecover) { + for (ChunkSegment segment : segments) { + WriteObjectRequest writeObjectRequest = + processSegment(segment, updateCumulativeCrc32c); + stream.onNext(writeObjectRequest); + } + + if (closing && !finished) { + WriteObjectRequest message = finishMessage(); + stream.onNext(message); + finished = true; + } + + if (closing) { + stream.onCompleted(); + } + } else { + if (sync != null) { + sync.close(); + sync = null; + } + stream.reset(); + + QueryWriteStatusRequest req = + QueryWriteStatusRequest.newBuilder().setUploadId(uploadId).build(); + QueryWriteStatusResponse resp = query.call(req); + // if the response has a resource the session completed, no need to re-upload + if (!resp.hasResource()) { + long persistedSize = resp.getPersistedSize(); + + if (persistedSize != goalSize) { + + // rewind our context + finished = false; + first = true; + writeCtx.getTotalSentBytes().set(persistedSize); + writeCtx.getConfirmedBytes().set(persistedSize); + // intentionally do not modify the cumulativeCrc32c value + // this will stay in the state in sync with what has been written to disk + // when we recover, checksum the individual message but not the cumulative + + try (SeekableByteChannel reader = rf.reader()) { + reader.position(persistedSize); + ByteBuffer buf = copyBuffer.get(); + // clear before read, in case an error was thrown before + buf.clear(); + while (Buffers.fillFrom(buf, reader) != -1) { + buf.flip(); + while (buf.hasRemaining()) { + ChunkSegment[] recoverySegments = chunkSegmenter.segmentBuffer(buf); + for (ChunkSegment segment : recoverySegments) { + WriteObjectRequest writeObjectRequest = + processSegment(segment, updateCumulativeCrc32c); + stream.onNext(writeObjectRequest); + } + } + buf.clear(); + } + } + if (closing && !finished) { + WriteObjectRequest message = finishMessage(); + stream.onNext(message); + finished = true; + } + if (closing || finished) { + stream.onCompleted(); + } + 
recover.compareAndSet(true, false); + } + } else { + Object resource = resp.getResource(); + resultFuture.set(WriteObjectResponse.newBuilder().setResource(resource).build()); + } + } + long newWritten = writeCtx.getTotalSentBytes().get(); + Preconditions.checkState( + newWritten == goalSize, "newWritten == goalSize (%s == %s)", newWritten, goalSize); + return null; + }, + Decoder.identity()); + } + + @VisibleForTesting + static final class RequestStream implements ApiStreamObserver { + private static final ApiException CLIENT_RESET_ERROR = + ApiExceptionFactory.createException(null, GrpcStatusCode.of(Code.ABORTED), false); + + private final ClientStreamingCallable write; + private final SettableApiFuture resultFuture; + + private volatile StreamPair streamPair; + + private RequestStream( + ClientStreamingCallable write, + SettableApiFuture resultFuture) { + this.write = write; + this.resultFuture = resultFuture; + } + + @Override + public void onNext(WriteObjectRequest value) { + StreamPair pair = ensureOpen(); + Throwable err = pair.getResponseStream().error; + if (err != null) { + reset(); + throw StorageException.coalesce(err); + } + + halfClosedToUnavailable(() -> pair.getRequestStream().onNext(value)); + } + + @Override + public void onError(Throwable t) { + try { + halfClosedToUnavailable(() -> ensureOpen().getRequestStream().onError(t)); + } finally { + streamPair = null; + } + } + + @Override + public void onCompleted() { + StreamPair pair = ensureOpen(); + Throwable err = pair.getResponseStream().error; + if (err != null) { + reset(); + throw StorageException.coalesce(err); + } + + halfClosedToUnavailable(pair.getRequestStream()::onCompleted); + pair.getResponseStream().await(); + } + + private StreamPair ensureOpen() { + if (streamPair == null) { + ResponseStream responseStream = new ResponseStream(resultFuture); + ApiStreamObserver requestStream = + write.clientStreamingCall(responseStream); + streamPair = new StreamPair(requestStream, responseStream); + 
} + return streamPair; + } + + private void reset() { + if (streamPair != null && streamPair.getRequestStream() != null) { + streamPair.getRequestStream().onError(CLIENT_RESET_ERROR); + } + streamPair = null; + } + + /** + * If the stream is in the process of closing (usually due to error) and we call a method on it + * we will receive an IllegalStateException. A stream being half closed is not a terminal state + * for our upload operation. Attempt to detect and translate it into an UNAVAILABLE error we can + * retry. + */ + static void halfClosedToUnavailable(Runnable r) { + try { + r.run(); + } catch (IllegalStateException ise) { + String message = ise.getMessage(); + if (message != null && message.contains("half-closed")) { + throw ApiExceptionFactory.createException(ise, GrpcStatusCode.of(Code.UNAVAILABLE), true); + } else { + throw ise; + } + } + } + } + + @VisibleForTesting + static final class ResponseStream implements ApiStreamObserver { + private final SettableApiFuture invocationHandle; + private final SettableApiFuture resultFuture; + + private volatile WriteObjectResponse last; + private volatile Throwable error; + + @VisibleForTesting + ResponseStream(SettableApiFuture resultFuture) { + this.resultFuture = resultFuture; + this.invocationHandle = SettableApiFuture.create(); + } + + @Override + public void onNext(WriteObjectResponse value) { + last = value; + error = null; + } + + @Override + public void onError(Throwable t) { + error = t; + invocationHandle.setException(t); + } + + @Override + public void onCompleted() { + if (last != null && last.hasResource()) { + resultFuture.set(last); + } + invocationHandle.set(null); + } + + void await() { + // ApiExceptions.callAndTranslateApiException(invocationHandle); + ApiFutureUtils.await(invocationHandle); + } + } + + @VisibleForTesting + static final class Alg implements ResultRetryAlgorithm { + + private final ResultRetryAlgorithm delegate; + private final SettableApiFuture resultFuture; + + 
@VisibleForTesting + @SuppressWarnings("unchecked") + Alg(ResultRetryAlgorithm delegate, SettableApiFuture resultFuture) { + this.delegate = (ResultRetryAlgorithm) delegate; + this.resultFuture = resultFuture; + } + + @Override + public TimedAttemptSettings createNextAttempt( + Throwable prevThrowable, + WriteObjectResponse prevResponse, + TimedAttemptSettings prevSettings) { + return delegate.createNextAttempt(prevThrowable, prevResponse, prevSettings); + } + + @Override + public boolean shouldRetry(Throwable prevThrowable, WriteObjectResponse prevResponse) + throws CancellationException { + boolean shouldRetry = delegate.shouldRetry(prevThrowable, prevResponse); + if (!shouldRetry && prevThrowable != null) { + resultFuture.setException(prevThrowable); + } + return shouldRetry; + } + } + + private static final class StreamPair { + private final ApiStreamObserver requestStream; + private final ResponseStream responseStream; + + private StreamPair( + ApiStreamObserver requestStream, ResponseStream responseStream) { + this.requestStream = requestStream; + this.responseStream = responseStream; + } + + public ApiStreamObserver getRequestStream() { + return requestStream; + } + + public ResponseStream getResponseStream() { + return responseStream; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/SyncingFileChannel.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/SyncingFileChannel.java new file mode 100644 index 000000000000..756374593a29 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/SyncingFileChannel.java @@ -0,0 +1,51 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; + +/** A FileChannel decorator that will fsync after every {@link #write(ByteBuffer)} */ +final class SyncingFileChannel implements UnbufferedWritableByteChannel { + + private final FileChannel fc; + + SyncingFileChannel(FileChannel fc) { + this.fc = fc; + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + long written = fc.write(srcs, offset, length); + // metadata in this case are things like mtime, atime etc. Those are not important to our needs + // simply force the file contents to by synced. + fc.force(/* includeMetaData= */ false); + return written; + } + + @Override + public boolean isOpen() { + return fc.isOpen(); + } + + @Override + public void close() throws IOException { + fc.close(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Throughput.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Throughput.java new file mode 100644 index 000000000000..f0f54140d151 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Throughput.java @@ -0,0 +1,94 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.common.base.MoreObjects; +import java.time.Duration; +import java.util.Objects; + +/** + * Convenience class to encapsulate the concept of a throughput value. + * + *

Given a number of bytes and a duration compute the number of bytes per second. + */ +final class Throughput { + + private static final double NANOS_PER_SECOND = 1_000_000_000d; + private final long numBytes; + private final Duration duration; + + // TODO: is there a efficient way we can limit precision without having to use BigDecimal? + // Realistically, we don't need precision smaller than 1 byte per microsecond, leading to + // 6 digits past the decimal of needed precision. + private final double bytesPerSecond; + + private Throughput(long numBytes, Duration duration) { + this.numBytes = numBytes; + this.duration = duration; + this.bytesPerSecond = numBytes / (duration.toNanos() / NANOS_PER_SECOND); + } + + public long getNumBytes() { + return numBytes; + } + + public Duration getDuration() { + return duration; + } + + public double toBps() { + return bytesPerSecond; + } + + public Throughput plus(Throughput other) { + return new Throughput(this.numBytes + other.numBytes, this.duration.plus(other.duration)); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Throughput)) { + return false; + } + Throughput that = (Throughput) o; + return Double.compare(that.bytesPerSecond, bytesPerSecond) == 0; + } + + @Override + public int hashCode() { + return Objects.hash(bytesPerSecond); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("bytesPerSecond", bytesPerSecond).toString(); + } + + public static Throughput zero() { + return new Throughput(0, Duration.ZERO); + } + + public static Throughput of(long numBytes, Duration duration) { + return new Throughput(numBytes, duration); + } + + public static Throughput bytesPerSecond(long numBytes) { + return new Throughput(numBytes, Duration.ofSeconds(1)); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ThroughputMovingWindow.java 
b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ThroughputMovingWindow.java new file mode 100644 index 000000000000..6afae99172e7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ThroughputMovingWindow.java @@ -0,0 +1,98 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.common.base.MoreObjects; +import java.time.Duration; +import java.time.Instant; +import java.util.Comparator; +import java.util.PriorityQueue; + +/** + * A simple moving window implementation which will keep a {@code window}s worth of Throughput + * values and allow querying for the aggregate avg over that time window. 
+ */ +final class ThroughputMovingWindow { + + private final Duration window; + + private final PriorityQueue values; + + private ThroughputMovingWindow(Duration window) { + this.window = window; + this.values = new PriorityQueue<>(Entry.COMP); + } + + void add(Instant now, Throughput value) { + removeExpiredEntries(now); + values.add(new Entry(now, value)); + } + + Throughput avg(Instant now) { + removeExpiredEntries(now); + return values.stream() + .map(Entry::getValue) + .reduce( + Throughput.zero(), + (tp1, tp2) -> Throughput.of(tp1.getNumBytes() + tp2.getNumBytes(), window)); + } + + private void removeExpiredEntries(Instant now) { + Instant newMin = now.minus(window); + values.removeIf(e -> lteq(e.getAt(), newMin)); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("window", window) + .add("values.size()", values.size()) + .toString(); + } + + static ThroughputMovingWindow of(Duration window) { + return new ThroughputMovingWindow(window); + } + + private static boolean lteq(Instant a, Instant b) { + return a.equals(b) || a.isBefore(b); + } + + private static final class Entry { + private static final Comparator COMP = Comparator.comparing(e -> e.at); + private final Instant at; + private final Throughput value; + + private Entry(Instant at, Throughput value) { + this.at = at; + this.value = value; + } + + public Instant getAt() { + return at; + } + + public Throughput getValue() { + return value; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("at", at).add("value", value).toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ThroughputSink.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ThroughputSink.java new file mode 100644 index 000000000000..e38153c1082c --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ThroughputSink.java @@ -0,0 
+1,404 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.common.base.MoreObjects; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.GatheringByteChannel; +import java.nio.channels.WritableByteChannel; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.util.Locale; +import java.util.Objects; +import java.util.concurrent.locks.ReentrantLock; +import java.util.logging.Logger; + +/** + * Interface to mark a location in which throughput of byte movements can be recorded, and which can + * provide a decorated underlying channel. 
+ */ +interface ThroughputSink { + + void recordThroughput(Record r); + + WritableByteChannel decorate(WritableByteChannel wbc); + + GatheringByteChannel decorate(GatheringByteChannel gbc); + + static void computeThroughput(Clock clock, ThroughputSink sink, long numBytes, IO io) + throws IOException { + boolean exception = false; + Instant begin = clock.instant(); + try { + io.apply(); + } catch (IOException e) { + exception = true; + throw e; + } finally { + Instant end = clock.instant(); + Record record = Record.of(numBytes, begin, end, exception); + sink.recordThroughput(record); + } + } + + @FunctionalInterface + interface IO { + void apply() throws IOException; + } + + static ThroughputSink logged(String prefix, Clock clock) { + return new LoggedThroughputSink(prefix, clock); + } + + static ThroughputSink windowed(ThroughputMovingWindow w, Clock clock) { + return new ThroughputMovingWindowThroughputSink(w, clock); + } + + static ThroughputSink tee(ThroughputSink a, ThroughputSink b) { + return new TeeThroughputSink(a, b); + } + + static ThroughputSink nullSink() { + return NullThroughputSink.INSTANCE; + } + + final class Record { + private final long numBytes; + private final Instant begin; + private final Instant end; + private final boolean exception; + + private Record(long numBytes, Instant begin, Instant end, boolean exception) { + this.numBytes = numBytes; + this.begin = begin; + this.end = end; + this.exception = exception; + } + + public long getNumBytes() { + return numBytes; + } + + public Instant getBegin() { + return begin; + } + + public Instant getEnd() { + return end; + } + + public Duration getDuration() { + return Duration.between(begin, end); + } + + public boolean isException() { + return exception; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Record)) { + return false; + } + Record record = (Record) o; + return numBytes == record.numBytes + && exception == record.exception 
+ && Objects.equals(begin, record.begin) + && Objects.equals(end, record.end); + } + + @Override + public int hashCode() { + return Objects.hash(numBytes, begin, end, exception); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("numBytes", numBytes) + .add("begin", begin) + .add("end", end) + .add("exception", exception) + .toString(); + } + + public static Record of(long numBytes, Instant begin, Instant end, boolean exception) { + return new Record(numBytes, begin, end, exception); + } + } + + final class LoggedThroughputSink implements ThroughputSink { + private static final Logger LOGGER = Logger.getLogger(ThroughputSink.class.getName()); + + private final String prefix; + private final Clock clock; + + private LoggedThroughputSink(String prefix, Clock clock) { + this.prefix = prefix; + this.clock = clock; + } + + private static final double MiB = 1d / (1024 * 1024); + + @Override + public void recordThroughput(Record r) { + LOGGER.info( + () -> + String.format( + Locale.US, + "{%s} (%01.03f MiB/s) %s", + prefix, + ((r.numBytes * MiB) + / (Duration.between(r.getBegin(), r.getEnd()).toMillis() / 1000d)), + r)); + } + + @Override + public WritableByteChannel decorate(WritableByteChannel wbc) { + return new ThroughputRecordingWritableByteChannel(wbc, this, clock); + } + + @Override + public GatheringByteChannel decorate(GatheringByteChannel gbc) { + return new ThroughputRecordingGatheringByteChannel(gbc, this, clock); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("prefix", prefix).add("clock", clock).toString(); + } + } + + final class ThroughputRecordingWritableByteChannel implements WritableByteChannel { + private final WritableByteChannel delegate; + private final ThroughputSink sink; + private final Clock clock; + + private ThroughputRecordingWritableByteChannel( + WritableByteChannel delegate, ThroughputSink sink, Clock clock) { + this.delegate = delegate; + this.sink 
= sink; + this.clock = clock; + } + + @Override + public int write(ByteBuffer src) throws IOException { + return ThroughputRecordingWritableByteChannel.write(src, clock, delegate, sink); + } + + @Override + public boolean isOpen() { + return delegate.isOpen(); + } + + @Override + public void close() throws IOException { + delegate.close(); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("delegate", delegate) + .add("sink", sink) + .add("clock", clock) + .toString(); + } + + static int write(ByteBuffer src, Clock clock, WritableByteChannel delegate, ThroughputSink sink) + throws IOException { + boolean exception = false; + int remaining = src.remaining(); + Instant begin = clock.instant(); + try { + return delegate.write(src); + } catch (IOException e) { + exception = true; + throw e; + } finally { + Instant end = clock.instant(); + Record record = Record.of(remaining - src.remaining(), begin, end, exception); + sink.recordThroughput(record); + } + } + } + + final class ThroughputRecordingGatheringByteChannel implements GatheringByteChannel { + private final GatheringByteChannel delegate; + private final ThroughputSink sink; + private final Clock clock; + + private ThroughputRecordingGatheringByteChannel( + GatheringByteChannel delegate, ThroughputSink sink, Clock clock) { + this.delegate = delegate; + this.sink = sink; + this.clock = clock; + } + + @Override + public int write(ByteBuffer src) throws IOException { + return ThroughputRecordingWritableByteChannel.write(src, clock, delegate, sink); + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + boolean exception = false; + long available = Buffers.totalRemaining(srcs, offset, length); + Instant begin = clock.instant(); + try { + return delegate.write(srcs, offset, length); + } catch (IOException e) { + exception = true; + throw e; + } finally { + Instant end = clock.instant(); + long remaining = 
Buffers.totalRemaining(srcs, offset, length); + Record record = Record.of(available - remaining, begin, end, exception); + sink.recordThroughput(record); + } + } + + @Override + public long write(ByteBuffer[] srcs) throws IOException { + boolean exception = false; + long available = Buffers.totalRemaining(srcs, 0, srcs.length); + Instant begin = clock.instant(); + try { + return delegate.write(srcs); + } catch (IOException e) { + exception = true; + throw e; + } finally { + Instant end = clock.instant(); + long remaining = Buffers.totalRemaining(srcs, 0, srcs.length); + Record record = Record.of(available - remaining, begin, end, exception); + sink.recordThroughput(record); + } + } + + @Override + public boolean isOpen() { + return delegate.isOpen(); + } + + @Override + public void close() throws IOException { + delegate.close(); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("delegate", delegate) + .add("sink", sink) + .add("clock", clock) + .toString(); + } + } + + final class TeeThroughputSink implements ThroughputSink { + private final ThroughputSink a; + private final ThroughputSink b; + + private TeeThroughputSink(ThroughputSink a, ThroughputSink b) { + this.a = a; + this.b = b; + } + + @Override + public void recordThroughput(Record r) { + a.recordThroughput(r); + b.recordThroughput(r); + } + + @Override + public WritableByteChannel decorate(WritableByteChannel wbc) { + return b.decorate(a.decorate(wbc)); + } + + @Override + public GatheringByteChannel decorate(GatheringByteChannel gbc) { + return b.decorate(a.decorate(gbc)); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("a", a).add("b", b).toString(); + } + } + + final class ThroughputMovingWindowThroughputSink implements ThroughputSink { + private final ThroughputMovingWindow w; + private final Clock clock; + private final ReentrantLock lock; + + private 
ThroughputMovingWindowThroughputSink(ThroughputMovingWindow w, Clock clock) { + this.w = w; + this.clock = clock; + this.lock = new ReentrantLock(); + } + + @Override + public void recordThroughput(Record r) { + lock.lock(); + try { + w.add(r.end, Throughput.of(r.getNumBytes(), r.getDuration())); + } finally { + lock.unlock(); + } + } + + @Override + public WritableByteChannel decorate(WritableByteChannel wbc) { + return new ThroughputRecordingWritableByteChannel(wbc, this, clock); + } + + @Override + public GatheringByteChannel decorate(GatheringByteChannel gbc) { + return new ThroughputRecordingGatheringByteChannel(gbc, this, clock); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("w", w).add("clock", clock).toString(); + } + } + + final class NullThroughputSink implements ThroughputSink { + private static final NullThroughputSink INSTANCE = new NullThroughputSink(); + + private NullThroughputSink() {} + + @Override + public void recordThroughput(Record r) {} + + @Override + public WritableByteChannel decorate(WritableByteChannel wbc) { + return wbc; + } + + @Override + public GatheringByteChannel decorate(GatheringByteChannel gbc) { + return gbc; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/TransportCompatibility.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/TransportCompatibility.java new file mode 100644 index 000000000000..63a95b40166c --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/TransportCompatibility.java @@ -0,0 +1,62 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotation which is used to convey which Cloud Storage API a class or method has compatibility + * with. + * + *

Not all operations are compatible with all transports. + */ +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.TYPE, ElementType.METHOD, ElementType.FIELD}) +@Documented +@Inherited +public @interface TransportCompatibility { + + Transport[] value(); + + /** + * Enum representing the transports {@code com.google.cloud.storage} classes have implementations + * for. + */ + enum Transport { + /** + * Value indicating use of the Cloud Storage JSON API + * + * @see StorageOptions#http() + */ + HTTP, + + /** + * Value indicating usa of the Cloud + * Storage v2 gRPC API TODO: link to public docs when published. + * + * @see StorageOptions#grpc() + */ + GRPC + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedReadableByteChannelSession.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedReadableByteChannelSession.java new file mode 100644 index 000000000000..6248b1d5b0cf --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedReadableByteChannelSession.java @@ -0,0 +1,39 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.ScatteringByteChannel; + +interface UnbufferedReadableByteChannelSession + extends ReadableByteChannelSession { + + interface UnbufferedReadableByteChannel extends ReadableByteChannel, ScatteringByteChannel { + @Override + default int read(ByteBuffer dst) throws IOException { + return Math.toIntExact(read(new ByteBuffer[] {dst}, 0, 1)); + } + + @Override + default long read(ByteBuffer[] dsts) throws IOException { + return read(dsts, 0, dsts.length); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedWritableByteChannelSession.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedWritableByteChannelSession.java new file mode 100644 index 000000000000..2210822fa0cc --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedWritableByteChannelSession.java @@ -0,0 +1,59 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.GatheringByteChannel; +import java.nio.channels.WritableByteChannel; + +interface UnbufferedWritableByteChannelSession + extends WritableByteChannelSession { + + interface UnbufferedWritableByteChannel extends WritableByteChannel, GatheringByteChannel { + + /** Default assumed to be blocking, non-blocking allowed but must be documented. */ + @Override + default int write(ByteBuffer src) throws IOException { + return Math.toIntExact(write(new ByteBuffer[] {src}, 0, 1)); + } + + /** Default assumed to be blocking, non-blocking allowed but must be documented. */ + @Override + default long write(ByteBuffer[] srcs) throws IOException { + return write(srcs, 0, srcs.length); + } + + /** This method must block until terminal state is reached. */ + default int writeAndClose(ByteBuffer src) throws IOException { + return Math.toIntExact(writeAndClose(new ByteBuffer[] {src}, 0, 1)); + } + + /** This method must block until terminal state is reached. */ + default long writeAndClose(ByteBuffer[] srcs) throws IOException { + return writeAndClose(srcs, 0, srcs.length); + } + + /** This method must block until terminal state is reached. 
*/ + default long writeAndClose(ByteBuffer[] srcs, int offset, int length) throws IOException { + long write = write(srcs, offset, length); + close(); + return write; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UnifiedOpts.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UnifiedOpts.java new file mode 100644 index 000000000000..65c6ab13cb9b --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UnifiedOpts.java @@ -0,0 +1,3237 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.Utils.projectNameCodec; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Predicates.not; +import static java.util.Objects.requireNonNull; + +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Storage.BlobField; +import com.google.cloud.storage.Storage.BucketField; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.hash.HashCode; +import com.google.common.hash.Hashing; +import com.google.common.io.BaseEncoding; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.protobuf.ByteString; +import com.google.protobuf.FieldMask; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.CommonObjectRequestParams; +import com.google.storage.v2.ComposeObjectRequest; +import com.google.storage.v2.CreateBucketRequest; +import com.google.storage.v2.DeleteBucketRequest; +import com.google.storage.v2.DeleteObjectRequest; +import com.google.storage.v2.GetBucketRequest; +import com.google.storage.v2.GetObjectRequest; +import com.google.storage.v2.ListBucketsRequest; +import com.google.storage.v2.ListObjectsRequest; +import com.google.storage.v2.LockBucketRetentionPolicyRequest; +import com.google.storage.v2.MoveObjectRequest; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.RestoreObjectRequest; +import com.google.storage.v2.RewriteObjectRequest; +import com.google.storage.v2.StartResumableWriteRequest; +import com.google.storage.v2.UpdateBucketRequest; +import 
com.google.storage.v2.UpdateObjectRequest; +import com.google.storage.v2.WriteObjectRequest; +import java.io.Serializable; +import java.net.FileNameMap; +import java.net.URLConnection; +import java.security.Key; +import java.util.Arrays; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import javax.crypto.spec.SecretKeySpec; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.RequiresNonNull; + +/** + * The set of all "Options" we currently support for per-call parameters. + * + *

Most often, each of the respective types correspond to one of the parameters from HTTP headers + * and common query string parameters for JSON. In the case of gRPC, sometimes the parameters + * are in the specific request message or in grpc metadata. + */ +@SuppressWarnings({"deprecation", "DeprecatedIsStillUsed"}) +final class UnifiedOpts { + + /** Base interface type for each of the new options we're supporting. */ + interface Opt extends Serializable {} + + /** + * A specialization of {@link java.util.function.UnaryOperator} which maintains its lower type for + * {@link #identity()} and {@link #andThen(Mapper)}. + */ + @FunctionalInterface + interface Mapper { + T apply(T t); + + default Mapper andThen(Mapper then) { + return t -> then.apply(apply(t)); + } + + static Mapper identity() { + return t -> t; + } + } + + /** Base, marker interface of those Opts which represent a get/read/origin type relationship. */ + private interface SourceOpt extends Opt {} + + /** + * Base, marker interface of those Opts which represent a set/write/destination type relationship. + */ + private interface TargetOpt extends Opt {} + + /** Base, marker interface of those Opts which apply to listing operations. */ + private interface ListOpt extends Opt {} + + /** Marker interface of those Opts which are applicable to Bucket operations. */ + private interface ApplicableBucket {} + + /** Marker interface of those Opts which are applicable to Object/Blob operations. */ + private interface ApplicableObject {} + + /** Marker interface of those Opts which are applicable to HmacKey operations. 
*/ + private interface ApplicableHmacKey {} + + /** Base interface for those Opts which may expose their values via gRPC Metadata */ + private interface GrpcMetadataMapper extends Opt { + default Mapper getGrpcMetadataMapper() { + return Mapper.identity(); + } + } + + /** Base interface for those Opts which are applicable to Object List operations */ + interface ObjectListOpt extends GrpcMetadataMapper, ListOpt, ApplicableObject { + default Mapper listObjects() { + return Mapper.identity(); + } + } + + /** + * Base interface for those Opts which are applicable to Object Source (get/read/origin + * relationship) operations + */ + interface ObjectSourceOpt extends GrpcMetadataMapper, SourceOpt, ApplicableObject { + default Mapper readObject() { + return Mapper.identity(); + } + + default Mapper bidiReadObject() { + return Mapper.identity(); + } + + default Mapper getObject() { + return Mapper.identity(); + } + + default Mapper rewriteObject() { + return Mapper.identity(); + } + + default Mapper moveObject() { + return Mapper.identity(); + } + + default Mapper restoreObject() { + return Mapper.identity(); + } + } + + /** + * Base interface for those Opts which are applicable to Object Target (set/write/destination + * relationship) operations + */ + interface ObjectTargetOpt extends GrpcMetadataMapper, TargetOpt, ApplicableObject { + default Mapper blobInfo() { + return Mapper.identity(); + } + + default Mapper writeObject() { + return Mapper.identity(); + } + + default Mapper bidiWriteObject() { + return Mapper.identity(); + } + + default Mapper updateObject() { + return Mapper.identity(); + } + + default Mapper deleteObject() { + return Mapper.identity(); + } + + default Mapper composeObject() { + return Mapper.identity(); + } + + default Mapper rewriteObject() { + return Mapper.identity(); + } + + default Mapper moveObject() { + return Mapper.identity(); + } + + default Mapper startResumableWrite() { + return Mapper.identity(); + } + } + + /** Base interface for 
those Opts which are applicable to Bucket List operations */ + interface BucketListOpt extends GrpcMetadataMapper, ListOpt, ApplicableBucket { + default Mapper listBuckets() { + return Mapper.identity(); + } + } + + /** + * Base interface for those Opts which are applicable to Bucket Source (get/read/origin + * relationship) operations + */ + interface BucketSourceOpt extends GrpcMetadataMapper, SourceOpt, ApplicableBucket { + default Mapper getBucket() { + return Mapper.identity(); + } + + default Mapper getIamPolicy() { + return Mapper.identity(); + } + } + + /** + * Base interface for those Opts which are applicable to Bucket Target (set/write/destination + * relationship) operations + */ + interface BucketTargetOpt extends GrpcMetadataMapper, TargetOpt, ApplicableBucket { + default Mapper createBucket() { + return Mapper.identity(); + } + + default Mapper updateBucket() { + return Mapper.identity(); + } + + default Mapper deleteBucket() { + return Mapper.identity(); + } + + default Mapper lockBucketRetentionPolicy() { + return Mapper.identity(); + } + } + + /** Base interface for those Opts which are applicable to HmacKey List operations */ + interface HmacKeyListOpt extends GrpcMetadataMapper, ListOpt, ApplicableHmacKey {} + + /** + * Base interface for those Opts which are applicable to HmacKey Source (get/read/origin + * relationship) operations + */ + interface HmacKeySourceOpt extends GrpcMetadataMapper, SourceOpt, ApplicableHmacKey {} + + /** + * Base interface for those Opts which are applicable to HmacKey Target (set/write/destination + * relationship) operations + */ + interface HmacKeyTargetOpt extends GrpcMetadataMapper, TargetOpt, ApplicableHmacKey {} + + /** + * Some Options have a corresponding "SOURCE" version, this interface provide a construct for + * accessing an projecting those Opts which can be turned into a "SOURCE" version. 
+ */ + interface ProjectAsSource { + O asSource(); + } + + interface HasherSelector extends BucketObjectHmacKeyAllOpt { + Hasher getHasher(); + } + + /** + * This class extends off {@link ObjectSourceOpt} and {@link ObjectTargetOpt} in order to satisfy + * some the shimming constraints of the subclasses of {@link OptionShim}. + * + *

All the methods from these parent interfaces will NEVER be called, and are stubbed simply to + * satisfy the need for them to be declared. They are stubbed to use identity methods so that if + * they somehow do ever leak through and are called they won't cause issue for customers. + * + *

If/when we're able to remove all the {@link Option} classes, this interface should be + * refactored to remove the inheritance, instead providing an explicit pre-processing phase to opt + * resolution. + */ + interface ObjectOptExtractor extends Opt, ObjectSourceOpt, ObjectTargetOpt { + O extractFromBlobInfo(BlobInfo info); + + O extractFromBlobId(BlobId id); + + @Override + default Mapper getGrpcMetadataMapper() { + return Mapper.identity(); + } + + @Override + default Mapper getObject() { + return Mapper.identity(); + } + + @Override + default Mapper rewriteObject() { + return Mapper.identity(); + } + + @Override + default Mapper moveObject() { + return Mapper.identity(); + } + } + + /** + * This class extends off {@link ObjectSourceOpt} and {@link ObjectTargetOpt} in order to satisfy + * some the shimming constraints of the subclasses of {@link OptionShim}. + * + *

All the methods from these parent interfaces will NEVER be called, and are stubbed simply to + * satisfy the need for them to be declared. They are stubbed to use identity methods so that if + * they somehow do ever leak through and are called they won't cause issue for customers. + * + *

If/when we're able to remove all the {@link Option} classes, this interface should be + * refactored to remove the inheritance, instead providing an explicit pre-processing phase to opt + * resolution. + */ + interface BucketOptExtractor extends Opt, BucketSourceOpt, BucketTargetOpt { + O extractFromBucketInfo(BucketInfo info); + + @Override + default Mapper getGrpcMetadataMapper() { + return Mapper.identity(); + } + } + + /* -- + Factory methods for each of the supported Opts, along with some of their requisite + compatibility overloads + -- */ + + static Crc32cMatch crc32cMatch(int i) { + return new Crc32cMatch(i); + } + + static Crc32cMatch crc32cMatch(@NonNull String crc32c) { + requireNonNull(crc32c, "crc32c must be non null"); + return new Crc32cMatch(Utils.crc32cCodec.decode(crc32c)); + } + + static Delimiter currentDirectory() { + return new Delimiter("/"); + } + + static DecryptionKey decryptionKey(@NonNull String decryptionKey) { + requireNonNull(decryptionKey, "decryptionKey must be non null"); + return new DecryptionKey( + new SecretKeySpec(BaseEncoding.base64().decode(decryptionKey), "AES256")); + } + + @RequiresNonNull({"decryptionKey", "#1.getEncoded()", "#1.getAlgorithm()"}) + static DecryptionKey decryptionKey(@NonNull Key decryptionKey) { + requireNonNull(decryptionKey, "decryptionKey must be non null"); + requireNonNull(decryptionKey.getEncoded(), "decryptionKey.getEncoded() must be non null"); + requireNonNull(decryptionKey.getAlgorithm(), "decryptionKey.getAlgorithm() must be non null"); + return new DecryptionKey(decryptionKey); + } + + static Delimiter delimiter(@NonNull String delimiter) { + requireNonNull(delimiter, "delimiter must be non null"); + return new Delimiter(delimiter); + } + + static IncludeFoldersAsPrefixes includeFoldersAsPrefixes(boolean includeFoldersAsPrefixes) { + return new IncludeFoldersAsPrefixes(includeFoldersAsPrefixes); + } + + static IncludeTrailingDelimiter includeTrailingDelimiter() { + return new 
IncludeTrailingDelimiter(true); + } + + @Deprecated + static DetectContentType detectContentType() { + return DetectContentType.INSTANCE; + } + + static DisableGzipContent disableGzipContent() { + return new DisableGzipContent(true); + } + + static GenerationMatch doesNotExist() { + return new GenerationMatch(0); + } + + static EncryptionKey encryptionKey(@NonNull String encryptionKey) { + requireNonNull(encryptionKey, "encryptionKey must be non null"); + return new EncryptionKey( + new SecretKeySpec(BaseEncoding.base64().decode(encryptionKey), "AES256")); + } + + static EncryptionKey encryptionKey(@NonNull Key encryptionKey) { + requireNonNull(encryptionKey, "encryptionKey must be non null"); + return new EncryptionKey(encryptionKey); + } + + static EndOffset endOffset(@NonNull String endOffset) { + requireNonNull(endOffset, "endOffset must be non null"); + return new EndOffset(endOffset); + } + + static Fields fields(@NonNull ImmutableSet fields) { + requireNonNull(fields, "fields must be non null"); + return new Fields(fields); + } + + static GenerationMatch generationMatch(long l) { + return new GenerationMatch(l); + } + + static GenerationNotMatch generationNotMatch(long l) { + return new GenerationNotMatch(l); + } + + static KmsKeyName kmsKeyName(@NonNull String kmsKeyName) { + requireNonNull(kmsKeyName, "kmsKeyName must be non null"); + return new KmsKeyName(kmsKeyName); + } + + static MatchGlob matchGlob(@NonNull String glob) { + requireNonNull(glob, "glob must be non null"); + return new MatchGlob(glob); + } + + static Md5Match md5Match(@NonNull String md5) { + requireNonNull(md5, "md5 must be non null"); + return new Md5Match(md5); + } + + static MetagenerationMatch metagenerationMatch(long l) { + return new MetagenerationMatch(l); + } + + static MetagenerationNotMatch metagenerationNotMatch(long l) { + return new MetagenerationNotMatch(l); + } + + static PageSize pageSize(long l) { + return new PageSize(l); + } + + static PageToken pageToken(@NonNull 
String pageToken) { + requireNonNull(pageToken, "pageToken must be non null"); + return new PageToken(pageToken); + } + + static ReturnPartialSuccess returnPartialSuccess(boolean returnPartialSuccess) { + return new ReturnPartialSuccess(returnPartialSuccess); + } + + static PredefinedAcl predefinedAcl(Storage.@NonNull PredefinedAcl predefinedAcl) { + requireNonNull(predefinedAcl, "predefinedAcl must be non null"); + return new PredefinedAcl(predefinedAcl.getEntry()); + } + + static PredefinedDefaultObjectAcl predefinedDefaultObjectAcl( + Storage.@NonNull PredefinedAcl predefinedAcl) { + requireNonNull(predefinedAcl, "predefinedAcl must be non null"); + return new PredefinedDefaultObjectAcl(predefinedAcl.getEntry()); + } + + static EnableObjectRetention enableObjectRetention(boolean enable) { + return new EnableObjectRetention(enable); + } + + static OverrideUnlockedRetention overrideUnlockedRetention(boolean overrideUnlockedRetention) { + return new OverrideUnlockedRetention(overrideUnlockedRetention); + } + + static Prefix prefix(@NonNull String prefix) { + requireNonNull(prefix, "prefix must be non null"); + return new Prefix(prefix); + } + + static ProjectId projectId(@NonNull String projectId) { + requireNonNull(projectId, "projectId must be non null"); + return new ProjectId(projectId); + } + + static Projection projection(@NonNull String projection) { + requireNonNull(projection, "projection must be non null"); + return new Projection(projection); + } + + static ResumableUploadExpectedObjectSize resumableUploadExpectedObjectSize( + long expectedObjectSize) { + checkArgument(expectedObjectSize >= 0, "expectedObjectSize >= 0 (%s >= 0)", expectedObjectSize); + return new ResumableUploadExpectedObjectSize(expectedObjectSize); + } + + static SoftDeleted softDeleted(boolean softDeleted) { + return new SoftDeleted(softDeleted); + } + + static CopySourceAcl copySourceAcl(boolean copySourceAcl) { + return new CopySourceAcl(copySourceAcl); + } + + static 
RequestedPolicyVersion requestedPolicyVersion(long l) { + return new RequestedPolicyVersion(l); + } + + static ReturnRawInputStream returnRawInputStream(boolean b) { + return new ReturnRawInputStream(b); + } + + @RequiresNonNull({"serviceAccount", "#1.getEmail()"}) + static ServiceAccount serviceAccount( + com.google.cloud.storage.@NonNull ServiceAccount serviceAccount) { + requireNonNull(serviceAccount, "serviceAccount must be non null"); + requireNonNull(serviceAccount.getEmail(), "serviceAccount.getEmail() must be non null"); + return new ServiceAccount(serviceAccount.getEmail()); + } + + @VisibleForTesting + static SetContentType setContentType(@NonNull String s) { + requireNonNull(s, "s must be non null"); + return new SetContentType(s); + } + + static ShowDeletedKeys showDeletedKeys(boolean b) { + return new ShowDeletedKeys(b); + } + + static StartOffset startOffset(@NonNull String startOffset) { + requireNonNull(startOffset, "startOffset must be non null"); + return new StartOffset(startOffset); + } + + static UserProject userProject(@NonNull String userProject) { + requireNonNull(userProject, "userProject must be non null"); + return new UserProject(userProject); + } + + static VersionsFilter versionsFilter(boolean b) { + return new VersionsFilter(b); + } + + @Deprecated + static GenerationMatchExtractor generationMatchExtractor() { + return GenerationMatchExtractor.INSTANCE; + } + + @Deprecated + static GenerationNotMatchExtractor generationNotMatchExtractor() { + return GenerationNotMatchExtractor.INSTANCE; + } + + @Deprecated + static MetagenerationMatchExtractor metagenerationMatchExtractor() { + return MetagenerationMatchExtractor.INSTANCE; + } + + @Deprecated + static MetagenerationNotMatchExtractor metagenerationNotMatchExtractor() { + return MetagenerationNotMatchExtractor.INSTANCE; + } + + @Deprecated + static Crc32cMatchExtractor crc32cMatchExtractor() { + return Crc32cMatchExtractor.INSTANCE; + } + + @Deprecated + static Md5MatchExtractor 
md5MatchExtractor() { + return Md5MatchExtractor.INSTANCE; + } + + static ObjectFilter objectFilter(String filter) { + return new ObjectFilter(filter); + } + + static Headers extraHeaders(ImmutableMap extraHeaders) { + requireNonNull(extraHeaders, "extraHeaders must be non null"); + String blockedHeaders = + extraHeaders.keySet().stream() + .map(Utils::headerNameToLowerCase) + .filter(Headers.BLOCKLIST) + .sorted(Comparator.naturalOrder()) + .collect(Collectors.joining(", ", "[", "]")); + checkArgument("[]".equals(blockedHeaders), "Disallowed headers: %s", blockedHeaders); + return new Headers(extraHeaders); + } + + static DefaultHasherSelector defaultHasherSelector() { + return DefaultHasherSelector.INSTANCE; + } + + static final class Crc32cMatch implements ObjectTargetOpt, HasherSelector { + private static final long serialVersionUID = 8172282701777561769L; + private final int val; + + private Crc32cMatch(int val) { + this.val = val; + } + + @Override + public Mapper blobInfo() { + return b -> b.setCrc32c(Utils.crc32cCodec.encode(val)); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Crc32cMatch)) { + return false; + } + Crc32cMatch that = (Crc32cMatch) o; + return Objects.equals(val, that.val); + } + + @Override + public Mapper writeObject() { + return b -> { + b.getObjectChecksumsBuilder().setCrc32C(val); + return b; + }; + } + + @Override + public Mapper bidiWriteObject() { + return b -> { + b.getObjectChecksumsBuilder().setCrc32C(val); + return b; + }; + } + + @Override + public Hasher getHasher() { + return Hasher.noop(); + } + + @Override + public int hashCode() { + return Objects.hash(val); + } + + @Override + public String toString() { + return "Crc32cMatch{val='" + val + "'}"; + } + } + + /** + * @see EncryptionKey + */ + static final class DecryptionKey extends RpcOptVal implements ObjectSourceOpt { + private static final long serialVersionUID = -2198422155991275316L; + + private 
DecryptionKey(Key val) { + super(StorageRpc.Option.CUSTOMER_SUPPLIED_KEY, val); + } + + @Override + public Mapper> mapper() { + return b -> + b.put( + StorageRpc.Option.CUSTOMER_SUPPLIED_KEY, + BaseEncoding.base64().encode(val.getEncoded())); + } + + @Override + public Mapper readObject() { + return b -> { + customerSuppliedKey(b.getCommonObjectRequestParamsBuilder(), val); + return b; + }; + } + + @Override + public Mapper bidiReadObject() { + return b -> { + customerSuppliedKey( + b.getReadObjectSpecBuilder().getCommonObjectRequestParamsBuilder(), val); + return b; + }; + } + + @Override + public Mapper getObject() { + return b -> { + customerSuppliedKey(b.getCommonObjectRequestParamsBuilder(), val); + return b; + }; + } + + @Override + public Mapper rewriteObject() { + return b -> { + CommonObjectRequestParams.Builder builder = + customerSuppliedKey(CommonObjectRequestParams.newBuilder(), val); + return b.setCopySourceEncryptionAlgorithm(builder.getEncryptionAlgorithm()) + .setCopySourceEncryptionKeyBytes(builder.getEncryptionKeyBytes()) + .setCopySourceEncryptionKeySha256Bytes(builder.getEncryptionKeySha256Bytes()); + }; + } + } + + static final class IncludeFoldersAsPrefixes extends RpcOptVal implements ObjectListOpt { + + private static final long serialVersionUID = 321916692864878282L; + + private IncludeFoldersAsPrefixes(boolean val) { + super(StorageRpc.Option.INCLUDE_FOLDERS_AS_PREFIXES, val); + } + + @Override + public Mapper listObjects() { + return b -> b.setIncludeFoldersAsPrefixes(val); + } + } + + static final class IncludeTrailingDelimiter extends RpcOptVal implements ObjectListOpt { + + private static final long serialVersionUID = 321916692864878282L; + + private IncludeTrailingDelimiter(boolean val) { + super(StorageRpc.Option.INCLUDE_TRAILING_DELIMITER, val); + } + + @Override + public Mapper listObjects() { + return b -> b.setIncludeTrailingDelimiter(val); + } + } + + static final class Delimiter extends RpcOptVal implements ObjectListOpt { + 
private static final long serialVersionUID = -3789556789947615714L; + + private Delimiter(String val) { + super(StorageRpc.Option.DELIMITER, val); + } + + @Override + public Mapper listObjects() { + return b -> b.setDelimiter(val); + } + } + + static final class SoftDeleted extends RpcOptVal + implements ObjectListOpt, ObjectSourceOpt { + + private static final long serialVersionUID = -8526951678111463350L; + + private SoftDeleted(boolean val) { + super(StorageRpc.Option.SOFT_DELETED, val); + } + + @Override + public Mapper listObjects() { + return b -> b.setSoftDeleted(val); + } + + @Override + public Mapper getObject() { + return b -> b.setSoftDeleted(val); + } + } + + static final class CopySourceAcl extends RpcOptVal implements ObjectSourceOpt { + + private static final long serialVersionUID = 2033755749149128119L; + + private CopySourceAcl(boolean val) { + super(StorageRpc.Option.COPY_SOURCE_ACL, val); + } + + @Override + public Mapper restoreObject() { + return b -> b.setCopySourceAcl(val); + } + } + + static final class DisableGzipContent extends RpcOptVal<@NonNull Boolean> + implements ObjectTargetOpt { + private static final long serialVersionUID = 7445066765944965549L; + + private DisableGzipContent(boolean val) { + super(StorageRpc.Option.IF_DISABLE_GZIP_CONTENT, val); + } + } + + /** + * @see DecryptionKey + */ + static final class EncryptionKey extends RpcOptVal + implements ObjectTargetOpt, ProjectAsSource { + private static final long serialVersionUID = -7335988656032764620L; + + private EncryptionKey(Key val) { + super(StorageRpc.Option.CUSTOMER_SUPPLIED_KEY, val); + } + + @Override + public DecryptionKey asSource() { + return new DecryptionKey(val); + } + + @Override + public Mapper> mapper() { + return b -> + b.put( + StorageRpc.Option.CUSTOMER_SUPPLIED_KEY, + BaseEncoding.base64().encode(val.getEncoded())); + } + + @Override + public Mapper writeObject() { + return b -> { + customerSuppliedKey(b.getCommonObjectRequestParamsBuilder(), val); + 
return b; + }; + } + + @Override + public Mapper bidiWriteObject() { + return b -> { + customerSuppliedKey(b.getCommonObjectRequestParamsBuilder(), val); + return b; + }; + } + + @Override + public Mapper updateObject() { + return b -> { + customerSuppliedKey(b.getCommonObjectRequestParamsBuilder(), val); + return b; + }; + } + + @Override + public Mapper deleteObject() { + return b -> { + customerSuppliedKey(b.getCommonObjectRequestParamsBuilder(), val); + return b; + }; + } + + @Override + public Mapper composeObject() { + return b -> { + customerSuppliedKey(b.getCommonObjectRequestParamsBuilder(), val); + return b; + }; + } + + @Override + public Mapper rewriteObject() { + return b -> { + customerSuppliedKey(b.getCommonObjectRequestParamsBuilder(), val); + return b; + }; + } + } + + /** + * @see StartOffset + */ + static final class EndOffset extends RpcOptVal implements ObjectListOpt { + private static final long serialVersionUID = 7446382028145458833L; + + private EndOffset(String val) { + super(StorageRpc.Option.END_OFF_SET, val); + } + + @Override + public Mapper listObjects() { + return b -> b.setLexicographicEnd(val); + } + } + + static final class Fields extends RpcOptVal> + implements ObjectSourceOpt, + ObjectListOpt, + ObjectTargetOpt, + BucketSourceOpt, + BucketTargetOpt, + BucketListOpt { + + /** + * Apiary and gRPC have differing handling of where the field selector is evaluated relative to + * the request. For Apiary, it's from the root response document; for gRPC it's from the + * collection of results. + * + *

In our current case, this means we exclude some fields when we know it is being consumed + * for a gRPC message. Unfortunately, we don't know if the constructed Fields instance is for + * use with gRPC when it is instantiated so we must define here. + */ + private static final ImmutableSet grpcExcludedFields = + ImmutableSet.of("nextPageToken", "prefixes", "selfLink", "mediaLink", "kind", "id"); + + private static final long serialVersionUID = 3286889410148272195L; + + private Fields(ImmutableSet val) { + super(StorageRpc.Option.FIELDS, val); + } + + @Override + public Mapper> mapper() { + return b -> { + String collect = + val.stream().map(NamedField::getApiaryName).collect(Collectors.joining(",")); + return b.put(StorageRpc.Option.FIELDS, collect); + }; + } + + @Override + public Mapper getBucket() { + return b -> b.setReadMask(FieldMask.newBuilder().addAllPaths(getPaths()).build()); + } + + @Override + public Mapper listBuckets() { + return b -> b.setReadMask(FieldMask.newBuilder().addAllPaths(getPaths()).build()); + } + + @Override + public Mapper updateBucket() { + return b -> b.setUpdateMask(FieldMask.newBuilder().addAllPaths(getPaths()).build()); + } + + @Override + public Mapper getObject() { + return b -> b.setReadMask(FieldMask.newBuilder().addAllPaths(getPaths()).build()); + } + + @Override + public Mapper listObjects() { + return b -> b.setReadMask(FieldMask.newBuilder().addAllPaths(getPaths()).build()); + } + + @Override + public Mapper readObject() { + return b -> b.setReadMask(FieldMask.newBuilder().addAllPaths(getPaths()).build()); + } + + @Override + public Mapper bidiReadObject() { + return b -> { + b.getReadObjectSpecBuilder() + .setReadMask(FieldMask.newBuilder().addAllPaths(getPaths()).build()); + return b; + }; + } + + @Override + public Mapper updateObject() { + return b -> b.setUpdateMask(FieldMask.newBuilder().addAllPaths(getPaths()).build()); + } + + @Override + public Mapper rewriteObject() { + return Mapper.identity(); + } + + @Override 
+ public Mapper moveObject() { + return Mapper.identity(); + } + + /** + * Define a decoder which can clear out any fields which may have not been selected. + * + *

This approach, isn't ideal at the backside after decoding has already taken place. + * However, refactoring the whole model pipeline for both json and grpc is going to be a large + * change. + */ + Decoder clearUnselectedBlobFields() { + return b -> { + if (val.isEmpty()) { + return b; + } else { + Set names = getPaths(); + BlobInfo.Builder bldr = b.toBuilder(); + blobInfoFieldClearers.entrySet().stream() + .filter(e -> !names.contains(e.getKey())) + .map(Entry::getValue) + .forEach(m -> m.apply(bldr)); + return bldr.build(); + } + }; + } + + /** + * Define a decoder which can clear out any fields which may have not been selected. + * + *

This approach, isn't ideal at the backside after decoding has already taken place. + * However, refactoring the whole model pipeline for both json and grpc is going to be a large + * change. + */ + Decoder clearUnselectedBucketFields() { + return b -> { + if (val.isEmpty()) { + return b; + } else { + Set names = getPaths(); + Bucket.Builder bldr = b.toBuilder(); + bucketInfoFieldClearers.entrySet().stream() + .filter(e -> !names.contains(e.getKey())) + .map(Entry::getValue) + .forEach(m -> m.apply(bldr)); + return bldr.build(); + } + }; + } + + private Set getPaths() { + //noinspection Guava + return val.stream() + .map(NamedField::stripPrefix) + .map(NamedField::getGrpcName) + .filter(not(grpcExcludedFields::contains)) + .collect(Collectors.toSet()); + } + + // It'd be preferable to define these clearing mappers in the fields themselves, however today + // the fields are enums and require interfaces in order to extend anything which in turn makes + // things public. + // + // To avoid putting more things on the public api that will hopefully take a different form + // in the medium term, we define them here. 
+ private static final ImmutableMap> blobInfoFieldClearers = + ImmutableMap.>builder() + .put(BlobField.ACL.getGrpcName(), BlobInfo.Builder::clearAcl) + .put(BlobField.CACHE_CONTROL.getGrpcName(), BlobInfo.Builder::clearCacheControl) + .put(BlobField.COMPONENT_COUNT.getGrpcName(), BlobInfo.Builder::clearComponentCount) + .put( + BlobField.CONTENT_DISPOSITION.getGrpcName(), + BlobInfo.Builder::clearContentDisposition) + .put(BlobField.CONTENT_ENCODING.getGrpcName(), BlobInfo.Builder::clearContentEncoding) + .put(BlobField.CONTENT_LANGUAGE.getGrpcName(), BlobInfo.Builder::clearContentLanguage) + .put(BlobField.CONTENT_TYPE.getGrpcName(), BlobInfo.Builder::clearContentType) + .put(BlobField.CRC32C.getGrpcName(), BlobInfo.Builder::clearCrc32c) + .put( + BlobField.CUSTOMER_ENCRYPTION.getGrpcName(), + BlobInfo.Builder::clearCustomerEncryption) + .put(BlobField.CUSTOM_TIME.getGrpcName(), BlobInfo.Builder::clearCustomTime) + .put(BlobField.ETAG.getGrpcName(), BlobInfo.Builder::clearEtag) + .put(BlobField.EVENT_BASED_HOLD.getGrpcName(), BlobInfo.Builder::clearEventBasedHold) + .put( + BlobField.GENERATION.getGrpcName(), + b -> { + BlobId current = b.getBlobId(); + return b.setBlobId(BlobId.of(current.getBucket(), current.getName())); + }) + .put(BlobField.ID.getGrpcName(), BlobInfo.Builder::clearGeneratedId) + .put(BlobField.KMS_KEY_NAME.getGrpcName(), BlobInfo.Builder::clearKmsKeyName) + .put(BlobField.MD5HASH.getGrpcName(), BlobInfo.Builder::clearMd5) + .put(BlobField.MEDIA_LINK.getGrpcName(), BlobInfo.Builder::clearMediaLink) + .put(BlobField.METADATA.getGrpcName(), BlobInfo.Builder::clearMetadata) + .put(BlobField.METAGENERATION.getGrpcName(), BlobInfo.Builder::clearMetageneration) + .put(BlobField.OWNER.getGrpcName(), BlobInfo.Builder::clearOwner) + .put( + BlobField.RETENTION_EXPIRATION_TIME.getGrpcName(), + BlobInfo.Builder::clearRetentionExpirationTime) + .put(BlobField.SELF_LINK.getGrpcName(), BlobInfo.Builder::clearSelfLink) + .put(BlobField.SIZE.getGrpcName(), 
BlobInfo.Builder::clearSize) + .put(BlobField.STORAGE_CLASS.getGrpcName(), BlobInfo.Builder::clearStorageClass) + .put(BlobField.TEMPORARY_HOLD.getGrpcName(), BlobInfo.Builder::clearTemporaryHold) + .put(BlobField.TIME_CREATED.getGrpcName(), BlobInfo.Builder::clearCreateTime) + .put(BlobField.TIME_DELETED.getGrpcName(), BlobInfo.Builder::clearDeleteTime) + .put( + BlobField.TIME_STORAGE_CLASS_UPDATED.getGrpcName(), + BlobInfo.Builder::clearTimeStorageClassUpdated) + .put(BlobField.UPDATED.getGrpcName(), BlobInfo.Builder::clearUpdateTime) + .build(); + + private static final ImmutableMap> bucketInfoFieldClearers = + ImmutableMap.>builder() + .put(BucketField.ACL.getGrpcName(), BucketInfo.Builder::clearAcl) + // .put(BucketField.AUTOCLASS.getGrpcName(), b -> b.clearAutoclass()) + .put(BucketField.BILLING.getGrpcName(), BucketInfo.Builder::clearRequesterPays) + .put(BucketField.CORS.getGrpcName(), BucketInfo.Builder::clearCors) + .put( + BucketField.CUSTOM_PLACEMENT_CONFIG.getGrpcName(), + BucketInfo.Builder::clearCustomPlacementConfig) + .put( + BucketField.DEFAULT_EVENT_BASED_HOLD.getGrpcName(), + BucketInfo.Builder::clearDefaultEventBasedHold) + .put(BucketField.DEFAULT_OBJECT_ACL.getGrpcName(), BucketInfo.Builder::clearDefaultAcl) + .put( + BucketField.ENCRYPTION.getGrpcName(), + builder -> + builder + .clearDefaultKmsKeyName() + .clearGoogleManagedEncryptionEnforcementConfig() + .clearCustomerManagedEncryptionEnforcementConfig() + .clearCustomerSuppliedEncryptionEnforcementConfig()) + .put(BucketField.ETAG.getGrpcName(), BucketInfo.Builder::clearEtag) + .put( + BucketField.IAMCONFIGURATION.getGrpcName(), + BucketInfo.Builder::clearIamConfiguration) + .put(BucketField.ID.getGrpcName(), BucketInfo.Builder::clearGeneratedId) + .put(BucketField.IP_FILTER.getGrpcName(), BucketInfo.Builder::clearIpFilter) + .put(BucketField.LABELS.getGrpcName(), BucketInfo.Builder::clearLabels) + .put(BucketField.LIFECYCLE.getGrpcName(), BucketInfo.Builder::clearLifecycleRules) + 
.put(BucketField.LOCATION.getGrpcName(), BucketInfo.Builder::clearLocation) + .put(BucketField.LOCATION_TYPE.getGrpcName(), BucketInfo.Builder::clearLocationType) + .put(BucketField.LOGGING.getGrpcName(), BucketInfo.Builder::clearLogging) + .put(BucketField.METAGENERATION.getGrpcName(), BucketInfo.Builder::clearMetageneration) + .put(BucketField.NAME.getGrpcName(), BucketInfo.Builder::clearName) + .put(BucketField.OWNER.getGrpcName(), BucketInfo.Builder::clearOwner) + .put( + BucketField.RETENTION_POLICY.getGrpcName(), + b -> + b.clearRetentionEffectiveTime() + .clearRetentionPolicyIsLocked() + .clearRetentionPeriod()) + .put(BucketField.RPO.getGrpcName(), BucketInfo.Builder::clearRpo) + .put(BucketField.STORAGE_CLASS.getGrpcName(), BucketInfo.Builder::clearStorageClass) + .put(BucketField.TIME_CREATED.getGrpcName(), BucketInfo.Builder::clearCreateTime) + .put(BucketField.UPDATED.getGrpcName(), BucketInfo.Builder::clearUpdateTime) + .put(BucketField.VERSIONING.getGrpcName(), BucketInfo.Builder::clearVersioningEnabled) + .put(BucketField.WEBSITE.getGrpcName(), b -> b.clearIndexPage().clearNotFoundPage()) + .put("project", BucketInfo.Builder::clearProject) + .build(); + } + + /** + * @see GenerationNotMatch + * @see SourceGenerationMatch + */ + static final class GenerationMatch extends RpcOptVal<@NonNull Long> + implements ObjectSourceOpt, ObjectTargetOpt, ProjectAsSource { + private static final long serialVersionUID = 2645517179434741007L; + + private GenerationMatch(long val) { + super(StorageRpc.Option.IF_GENERATION_MATCH, val); + } + + @Override + public Mapper writeObject() { + return b -> { + b.getWriteObjectSpecBuilder().setIfGenerationMatch(val); + return b; + }; + } + + @Override + public Mapper bidiWriteObject() { + return b -> { + b.getWriteObjectSpecBuilder().setIfGenerationMatch(val); + return b; + }; + } + + @Override + public Mapper readObject() { + return b -> b.setIfGenerationMatch(val); + } + + @Override + public Mapper bidiReadObject() { + return 
b -> { + b.getReadObjectSpecBuilder().setIfGenerationMatch(val); + return b; + }; + } + + @Override + public Mapper getObject() { + return b -> b.setIfGenerationMatch(val); + } + + @Override + public Mapper restoreObject() { + return b -> b.setIfGenerationMatch(val); + } + + @Override + public Mapper updateObject() { + return b -> b.setIfGenerationMatch(val); + } + + @Override + public Mapper deleteObject() { + return b -> b.setIfGenerationMatch(val); + } + + @Override + public Mapper composeObject() { + return b -> b.setIfGenerationMatch(val); + } + + @Override + public Mapper rewriteObject() { + return b -> b.setIfGenerationMatch(val); + } + + @Override + public Mapper moveObject() { + return b -> b.setIfGenerationMatch(val); + } + + @Override + public SourceGenerationMatch asSource() { + return new SourceGenerationMatch(val); + } + } + + /** + * @see GenerationMatch + * @see SourceGenerationNotMatch + */ + static final class GenerationNotMatch extends RpcOptVal<@NonNull Long> + implements ObjectSourceOpt, ObjectTargetOpt, ProjectAsSource { + private static final long serialVersionUID = 156505623580743531L; + + private GenerationNotMatch(long val) { + super(StorageRpc.Option.IF_GENERATION_NOT_MATCH, val); + } + + @Override + public Mapper writeObject() { + return b -> { + b.getWriteObjectSpecBuilder().setIfGenerationNotMatch(val); + return b; + }; + } + + @Override + public Mapper bidiWriteObject() { + return b -> { + b.getWriteObjectSpecBuilder().setIfGenerationNotMatch(val); + return b; + }; + } + + @Override + public Mapper readObject() { + return b -> b.setIfGenerationNotMatch(val); + } + + @Override + public Mapper bidiReadObject() { + return b -> { + b.getReadObjectSpecBuilder().setIfGenerationNotMatch(val); + return b; + }; + } + + @Override + public Mapper getObject() { + return b -> b.setIfGenerationNotMatch(val); + } + + @Override + public Mapper restoreObject() { + return b -> b.setIfGenerationNotMatch(val); + } + + @Override + public Mapper 
updateObject() { + return b -> b.setIfGenerationNotMatch(val); + } + + @Override + public Mapper deleteObject() { + return b -> b.setIfGenerationNotMatch(val); + } + + @Override + public Mapper rewriteObject() { + return b -> b.setIfGenerationNotMatch(val); + } + + @Override + public Mapper moveObject() { + return b -> b.setIfGenerationNotMatch(val); + } + + @Override + public SourceGenerationNotMatch asSource() { + return new SourceGenerationNotMatch(val); + } + } + + static final class KmsKeyName extends RpcOptVal implements ObjectTargetOpt { + private static final long serialVersionUID = -3053839109272566113L; + + private KmsKeyName(String val) { + super(StorageRpc.Option.KMS_KEY_NAME, val); + } + + @Override + public Mapper writeObject() { + return b -> { + b.getWriteObjectSpecBuilder().getResourceBuilder().setKmsKey(val); + return b; + }; + } + + @Override + public Mapper bidiWriteObject() { + return b -> { + b.getWriteObjectSpecBuilder().getResourceBuilder().setKmsKey(val); + return b; + }; + } + + @Override + public Mapper composeObject() { + return b -> b.setKmsKey(val); + } + + @Override + public Mapper rewriteObject() { + return b -> b.setDestinationKmsKey(val); + } + } + + static final class MatchGlob extends RpcOptVal implements ObjectListOpt { + private static final long serialVersionUID = 8819855597395473178L; + + private MatchGlob(String val) { + super(StorageRpc.Option.MATCH_GLOB, val); + } + + @Override + public Mapper listObjects() { + return b -> b.setMatchGlobBytes(ByteString.copyFromUtf8(val)); + } + } + + @Deprecated + static final class Md5Match implements ObjectTargetOpt, HasherSelector { + private static final long serialVersionUID = 5237207911268363887L; + private final String val; + + private Md5Match(String val) { + this.val = val; + } + + @Override + public Mapper blobInfo() { + return b -> b.setMd5(val); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Md5Match)) { + 
return false; + } + Md5Match md5Match = (Md5Match) o; + return Objects.equals(val, md5Match.val); + } + + @Override + public Mapper writeObject() { + return b -> { + b.getObjectChecksumsBuilder() + .setMd5Hash(ByteString.copyFrom(BaseEncoding.base64().decode(val))); + return b; + }; + } + + @Override + public Mapper bidiWriteObject() { + return b -> { + b.getObjectChecksumsBuilder() + .setMd5Hash(ByteString.copyFrom(BaseEncoding.base64().decode(val))); + return b; + }; + } + + @Override + public Hasher getHasher() { + return Hasher.noop(); + } + + @Override + public int hashCode() { + return Objects.hash(val); + } + + @Override + public String toString() { + return "Md5Match{val='" + val + "'}"; + } + } + + /** + * @see MetagenerationNotMatch + * @see SourceMetagenerationMatch + */ + static final class MetagenerationMatch extends RpcOptVal<@NonNull Long> + implements BucketSourceOpt, + BucketTargetOpt, + ObjectSourceOpt, + ObjectTargetOpt, + ProjectAsSource { + private static final long serialVersionUID = 49086960234390739L; + + private MetagenerationMatch(long val) { + super(StorageRpc.Option.IF_METAGENERATION_MATCH, val); + } + + @Override + public Mapper writeObject() { + return b -> { + b.getWriteObjectSpecBuilder().setIfMetagenerationMatch(val); + return b; + }; + } + + @Override + public Mapper bidiWriteObject() { + return b -> { + b.getWriteObjectSpecBuilder().setIfMetagenerationMatch(val); + return b; + }; + } + + @Override + public Mapper readObject() { + return b -> b.setIfMetagenerationMatch(val); + } + + @Override + public Mapper bidiReadObject() { + return b -> { + b.getReadObjectSpecBuilder().setIfMetagenerationMatch(val); + return b; + }; + } + + @Override + public Mapper getObject() { + return b -> b.setIfMetagenerationMatch(val); + } + + @Override + public Mapper restoreObject() { + return b -> b.setIfMetagenerationMatch(val); + } + + @Override + public Mapper updateObject() { + return b -> b.setIfMetagenerationMatch(val); + } + + @Override + 
public Mapper deleteObject() {
      return b -> b.setIfMetagenerationMatch(val);
    }

    @Override
    public Mapper composeObject() {
      return b -> b.setIfMetagenerationMatch(val);
    }

    @Override
    public Mapper rewriteObject() {
      return b -> b.setIfMetagenerationMatch(val);
    }

    @Override
    public Mapper moveObject() {
      return b -> b.setIfMetagenerationMatch(val);
    }

    @Override
    public Mapper updateBucket() {
      return b -> b.setIfMetagenerationMatch(val);
    }

    @Override
    public Mapper deleteBucket() {
      return b -> b.setIfMetagenerationMatch(val);
    }

    @Override
    public Mapper getBucket() {
      return b -> b.setIfMetagenerationMatch(val);
    }

    @Override
    public Mapper lockBucketRetentionPolicy() {
      return b -> b.setIfMetagenerationMatch(val);
    }

    // Re-express this precondition as one applying to the source of a copy/rewrite.
    @Override
    public SourceMetagenerationMatch asSource() {
      return new SourceMetagenerationMatch(val);
    }
  }

  /**
   * Precondition: the resource's current metageneration must differ from the supplied value.
   *
   * @see MetagenerationMatch
   * @see SourceMetagenerationNotMatch
   */
  static final class MetagenerationNotMatch extends RpcOptVal<@NonNull Long>
      implements BucketSourceOpt,
          BucketTargetOpt,
          ObjectSourceOpt,
          ObjectTargetOpt,
          ProjectAsSource {
    private static final long serialVersionUID = -1795350187419586248L;

    private MetagenerationNotMatch(long val) {
      super(StorageRpc.Option.IF_METAGENERATION_NOT_MATCH, val);
    }

    @Override
    public Mapper writeObject() {
      return b -> {
        b.getWriteObjectSpecBuilder().setIfMetagenerationNotMatch(val);
        return b;
      };
    }

    @Override
    public Mapper bidiWriteObject() {
      return b -> {
        b.getWriteObjectSpecBuilder().setIfMetagenerationNotMatch(val);
        return b;
      };
    }

    @Override
    public Mapper readObject() {
      return b -> b.setIfMetagenerationNotMatch(val);
    }

    @Override
    public Mapper bidiReadObject() {
      return b -> {
        b.getReadObjectSpecBuilder().setIfMetagenerationNotMatch(val);
        return b;
      };
    }

    @Override
    public Mapper getObject() {
      return b -> b.setIfMetagenerationNotMatch(val);
    }

@Override
    public Mapper restoreObject() {
      return b -> b.setIfMetagenerationNotMatch(val);
    }

    @Override
    public Mapper updateObject() {
      return b -> b.setIfMetagenerationNotMatch(val);
    }

    @Override
    public Mapper deleteObject() {
      return b -> b.setIfMetagenerationNotMatch(val);
    }

    @Override
    public Mapper rewriteObject() {
      return b -> b.setIfMetagenerationNotMatch(val);
    }

    @Override
    public Mapper moveObject() {
      return b -> b.setIfMetagenerationNotMatch(val);
    }

    @Override
    public Mapper updateBucket() {
      return b -> b.setIfMetagenerationNotMatch(val);
    }

    @Override
    public Mapper deleteBucket() {
      return b -> b.setIfMetagenerationNotMatch(val);
    }

    @Override
    public Mapper getBucket() {
      return b -> b.setIfMetagenerationNotMatch(val);
    }

    // Re-express this precondition as one applying to the source of a copy/rewrite.
    @Override
    public SourceMetagenerationNotMatch asSource() {
      return new SourceMetagenerationNotMatch(val);
    }
  }

  /** Caps the number of results returned per page of a list call. */
  static final class PageSize extends RpcOptVal<@NonNull Long>
      implements BucketListOpt, ObjectListOpt, HmacKeyListOpt {
    private static final long serialVersionUID = -8184518840397826601L;

    private PageSize(long val) {
      super(StorageRpc.Option.MAX_RESULTS, val);
    }

    @Override
    public Mapper listBuckets() {
      // Proto field is an int32; Math.toIntExact throws if val overflows an int.
      return b -> b.setPageSize(Math.toIntExact(val));
    }

    @Override
    public Mapper listObjects() {
      return b -> b.setPageSize(Math.toIntExact(val));
    }
  }

  /** Resumes a list call from a previously returned page token. */
  static final class PageToken extends RpcOptVal
      implements BucketListOpt, ObjectListOpt, HmacKeyListOpt {
    private static final long serialVersionUID = -1370658416509499177L;

    private PageToken(String val) {
      super(StorageRpc.Option.PAGE_TOKEN, val);
    }

    @Override
    public Mapper listBuckets() {
      return b -> b.setPageToken(val);
    }

    @Override
    public Mapper listObjects() {
      return b -> b.setPageToken(val);
    }
  }

  /** Toggles the return-partial-success flag on bucket list calls. */
  static final class ReturnPartialSuccess extends RpcOptVal implements BucketListOpt {
    private static final long serialVersionUID = -1370658416509499277L;

    private ReturnPartialSuccess(boolean val) {
      super(StorageRpc.Option.RETURN_PARTIAL_SUCCESS, val);
    }

    @Override
    public Mapper listBuckets() {
      return b -> b.setReturnPartialSuccess(val);
    }
  }

  /** Applies a predefined (canned) ACL to the target bucket or object. */
  static final class PredefinedAcl extends RpcOptVal
      implements BucketTargetOpt, ObjectTargetOpt {
    private static final long serialVersionUID = -1743736785228368741L;

    private PredefinedAcl(String val) {
      super(StorageRpc.Option.PREDEFINED_ACL, val);
    }

    @Override
    public Mapper writeObject() {
      return b -> {
        b.getWriteObjectSpecBuilder().setPredefinedAcl(val);
        return b;
      };
    }

    @Override
    public Mapper bidiWriteObject() {
      return b -> {
        b.getWriteObjectSpecBuilder().setPredefinedAcl(val);
        return b;
      };
    }

    @Override
    public Mapper updateObject() {
      return b -> b.setPredefinedAcl(val);
    }

    @Override
    public Mapper composeObject() {
      // Compose/rewrite requests name the field "destination" because they also reference sources.
      return b -> b.setDestinationPredefinedAcl(val);
    }

    @Override
    public Mapper updateBucket() {
      return b -> b.setPredefinedAcl(val);
    }

    @Override
    public Mapper createBucket() {
      return b -> b.setPredefinedAcl(val);
    }

    @Override
    public Mapper rewriteObject() {
      return b -> b.setDestinationPredefinedAcl(val);
    }
  }

  /** Applies a predefined default object ACL to a created or updated bucket. */
  static final class PredefinedDefaultObjectAcl extends RpcOptVal
      implements BucketTargetOpt {
    private static final long serialVersionUID = -1771832790114963130L;

    private PredefinedDefaultObjectAcl(String val) {
      super(StorageRpc.Option.PREDEFINED_DEFAULT_OBJECT_ACL, val);
    }

    @Override
    public Mapper createBucket() {
      return b -> b.setPredefinedDefaultObjectAcl(val);
    }

    @Override
    public Mapper updateBucket() {
      return b -> b.setPredefinedDefaultObjectAcl(val);
    }
  }

  /** Bucket-creation flag for object retention; see updateBucket() for transport support. */
  static final class EnableObjectRetention extends RpcOptVal implements BucketTargetOpt {
    private static final long serialVersionUID = -2581147719605551578L;

    private EnableObjectRetention(boolean val) {
super(StorageRpc.Option.ENABLE_OBJECT_RETENTION, val);
    }

    @Override
    public Mapper updateBucket() {
      // Signals that this option is only supported over the HTTP/JSON transport.
      return CrossTransportUtils.throwHttpJsonOnly(
          Storage.BucketTargetOption.class, "enableObjectRetention(boolean)");
    }
  }

  /** Restricts a list call to resources whose names begin with the given prefix. */
  static final class Prefix extends RpcOptVal implements BucketListOpt, ObjectListOpt {
    private static final long serialVersionUID = -3973478772547687371L;

    private Prefix(String val) {
      super(StorageRpc.Option.PREFIX, val);
    }

    @Override
    public Mapper listBuckets() {
      return b -> b.setPrefix(val);
    }

    @Override
    public Mapper listObjects() {
      return b -> b.setPrefix(val);
    }
  }

  /**
   * This is a required property of hmac related operations. Preferably, we'd be able to push the
   * defaulting to the creation of a new instance of one of the model objects
   */
  @Deprecated
  static final class ProjectId extends RpcOptVal
      implements HmacKeySourceOpt, HmacKeyTargetOpt, HmacKeyListOpt, BucketListOpt {
    private static final long serialVersionUID = 6273807286378420321L;

    private ProjectId(String val) {
      super(StorageRpc.Option.PROJECT_ID, val);
    }

    @Override
    public Mapper listBuckets() {
      // The gRPC request takes the project as a formatted resource name in the parent field.
      return b -> b.setParent(projectNameCodec.encode(val));
    }
  }

  /** Carries the legacy "projection" parameter; defines no gRPC request mapping here. */
  static final class Projection extends RpcOptVal implements BucketTargetOpt {
    private static final long serialVersionUID = -7394684784418942133L;

    private Projection(String val) {
      super(StorageRpc.Option.PROJECTION, val);
    }
  }

  /**
   * Precondition: the source object's generation must equal the supplied value.
   *
   * @see GenerationMatch
   * @see SourceGenerationNotMatch
   */
  static final class SourceGenerationMatch extends RpcOptVal<@NonNull Long>
      implements ObjectSourceOpt, ObjectTargetOpt {
    private static final long serialVersionUID = -4074703368515265616L;

    private SourceGenerationMatch(@NonNull Long val) {
      super(StorageRpc.Option.IF_SOURCE_GENERATION_MATCH, val);
    }

    @Override
    public Mapper rewriteObject() {
      return b -> b.setIfSourceGenerationMatch(val);
    }

    @Override
    public Mapper moveObject() {
      return b -> b.setIfSourceGenerationMatch(val);
    }
  }

  /**
   * Precondition: the source object's generation must differ from the supplied value.
   *
   * @see GenerationNotMatch
   * @see SourceGenerationMatch
   */
  static final class SourceGenerationNotMatch extends RpcOptVal<@NonNull Long>
      implements ObjectSourceOpt, ObjectTargetOpt {
    private static final long serialVersionUID = -5232032184462880657L;

    private SourceGenerationNotMatch(@NonNull Long val) {
      super(StorageRpc.Option.IF_SOURCE_GENERATION_NOT_MATCH, val);
    }

    @Override
    public Mapper rewriteObject() {
      return b -> b.setIfSourceGenerationNotMatch(val);
    }

    @Override
    public Mapper moveObject() {
      return b -> b.setIfSourceGenerationNotMatch(val);
    }
  }

  /**
   * Precondition: the source resource's metageneration must equal the supplied value.
   *
   * @see MetagenerationMatch
   * @see SourceMetagenerationNotMatch
   */
  static final class SourceMetagenerationMatch extends RpcOptVal<@NonNull Long>
      implements BucketSourceOpt, BucketTargetOpt, ObjectSourceOpt, ObjectTargetOpt {
    private static final long serialVersionUID = 5223360761780436495L;

    private SourceMetagenerationMatch(@NonNull Long val) {
      super(StorageRpc.Option.IF_SOURCE_METAGENERATION_MATCH, val);
    }

    @Override
    public Mapper rewriteObject() {
      return b -> b.setIfSourceMetagenerationMatch(val);
    }

    @Override
    public Mapper moveObject() {
      return b -> b.setIfSourceMetagenerationMatch(val);
    }
  }

  /**
   * Precondition: the source resource's metageneration must differ from the supplied value.
   *
   * @see MetagenerationNotMatch
   * @see SourceMetagenerationMatch
   */
  static final class SourceMetagenerationNotMatch extends RpcOptVal<@NonNull Long>
      implements BucketSourceOpt, BucketTargetOpt, ObjectSourceOpt, ObjectTargetOpt {
    private static final long serialVersionUID = 2679308305890468285L;

    private SourceMetagenerationNotMatch(@NonNull Long val) {
      super(StorageRpc.Option.IF_SOURCE_METAGENERATION_NOT_MATCH, val);
    }

    @Override
    public Mapper rewriteObject() {
      return b -> b.setIfSourceMetagenerationNotMatch(val);
    }

    @Override
    public Mapper moveObject() {
      return b -> b.setIfSourceMetagenerationNotMatch(val);
    }
  }

static final class RequestedPolicyVersion extends RpcOptVal<@NonNull Long>
      implements BucketSourceOpt {
    private static final long serialVersionUID = -3606062322328656218L;

    private RequestedPolicyVersion(Long val) {
      super(StorageRpc.Option.REQUESTED_POLICY_VERSION, val);
    }

    @Override
    public Mapper getIamPolicy() {
      return b -> {
        // Proto field is an int32; Math.toIntExact throws if val overflows an int.
        b.getOptionsBuilder().setRequestedPolicyVersion(Math.toIntExact(val));
        return b;
      };
    }
  }

  /** Legacy flag carried only as an RPC option; defines no request mapping here. */
  @Deprecated
  static final class ReturnRawInputStream extends RpcOptVal<@NonNull Boolean>
      implements ObjectSourceOpt {
    private static final long serialVersionUID = -5741791424843430584L;

    private ReturnRawInputStream(boolean val) {
      super(StorageRpc.Option.RETURN_RAW_INPUT_STREAM, val);
    }
  }

  /** Carries a service account email for HMAC key list calls. */
  static final class ServiceAccount extends RpcOptVal implements HmacKeyListOpt {
    private static final long serialVersionUID = 5617709092359745482L;

    private ServiceAccount(String val) {
      super(StorageRpc.Option.SERVICE_ACCOUNT_EMAIL, val);
    }
  }

  /** Sets the content type on the target object across the various write/update paths. */
  static final class SetContentType implements ObjectTargetOpt {
    private static final long serialVersionUID = -5715260463246857009L;
    private final String val;

    private SetContentType(String val) {
      this.val = val;
    }

    @Override
    public Mapper blobInfo() {
      return b -> b.setContentType(val);
    }

    @Override
    public Mapper writeObject() {
      return b -> {
        // Content type lives on the object resource nested inside the write spec.
        b.getWriteObjectSpecBuilder().getResourceBuilder().setContentType(val);
        return b;
      };
    }

    @Override
    public Mapper bidiWriteObject() {
      return b -> {
        b.getWriteObjectSpecBuilder().getResourceBuilder().setContentType(val);
        return b;
      };
    }

    @Override
    public Mapper updateObject() {
      return b -> {
        b.getObjectBuilder().setContentType(val);
        return b;
      };
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (!(o instanceof SetContentType)) {
        return false;
      }
      SetContentType that = (SetContentType) o;
      return Objects.equals(val, that.val);
    }

    @Override
    public int hashCode() {
      return Objects.hash(val);
    }

    @Override
    public String toString() {
      return "SetContentType{val='" + val + "'}";
    }
  }

  /** Object retention override flag; see updateObject() for transport support. */
  static final class OverrideUnlockedRetention extends RpcOptVal
      implements ObjectTargetOpt {

    private static final long serialVersionUID = -7764590745622588287L;

    private OverrideUnlockedRetention(boolean val) {
      super(StorageRpc.Option.OVERRIDE_UNLOCKED_RETENTION, val);
    }

    @Override
    public Mapper updateObject() {
      // Signals that this option is only supported over the HTTP/JSON transport.
      return CrossTransportUtils.throwHttpJsonOnly(
          Storage.BlobTargetOption.class, "overrideUnlockedRetention(boolean)");
    }
  }

  /** Declares the expected total object size when starting a resumable upload. */
  static final class ResumableUploadExpectedObjectSize extends RpcOptVal<@NonNull Long>
      implements ObjectTargetOpt {
    private static final long serialVersionUID = 3640126281492196357L;

    private ResumableUploadExpectedObjectSize(@NonNull Long val) {
      super(StorageRpc.Option.X_UPLOAD_CONTENT_LENGTH, val);
    }

    @Override
    public Mapper startResumableWrite() {
      return b -> {
        // Only positive sizes are propagated; zero/negative leaves the spec untouched.
        if (val > 0) {
          b.getWriteObjectSpecBuilder().setObjectSize(val);
        }
        return b;
      };
    }
  }

  /** Flag for HMAC key list calls; presumably includes deleted keys — carried as an RPC option only. */
  static final class ShowDeletedKeys extends RpcOptVal<@NonNull Boolean> implements HmacKeyListOpt {
    private static final long serialVersionUID = -6604176744362903487L;

    private ShowDeletedKeys(boolean val) {
      super(StorageRpc.Option.SHOW_DELETED_KEYS, val);
    }
  }

  /**
   * Lexicographic lower bound for object listing.
   *
   * @see EndOffset
   */
  static final class StartOffset extends RpcOptVal implements ObjectListOpt {
    private static final long serialVersionUID = -1459727336598737833L;

    private StartOffset(String val) {
      super(StorageRpc.Option.START_OFF_SET, val);
    }

    @Override
    public Mapper listObjects() {
      return b -> b.setLexicographicStart(val);
    }
  }

  /**
   * Umbrella interface for options applicable to every bucket, object, and HMAC-key operation
   * category.
   */
  interface BucketObjectHmacKeyAllOpt
      extends BucketSourceOpt,
          BucketTargetOpt,
          BucketListOpt,
          ObjectSourceOpt,
          ObjectTargetOpt,
          ObjectListOpt,
          HmacKeySourceOpt,
          HmacKeyTargetOpt,
          HmacKeyListOpt {
@Override + default Mapper rewriteObject() { + return Mapper.identity(); + } + + @Override + default Mapper moveObject() { + return Mapper.identity(); + } + } + + static final class UserProject extends RpcOptVal implements BucketObjectHmacKeyAllOpt { + private static final long serialVersionUID = 3962499996741180460L; + + private UserProject(String val) { + super(StorageRpc.Option.USER_PROJECT, val); + } + + @Override + public Mapper getGrpcMetadataMapper() { + return ctx -> + ctx.withExtraHeaders(ImmutableMap.of("X-Goog-User-Project", ImmutableList.of(val))); + } + } + + static final class Headers extends RpcOptVal> + implements BucketObjectHmacKeyAllOpt { + + /** + * The set of header names which are blocked from being able to be provided for an instance of + * this class. + * + *

Most values here are from the json api + * parameters list or general http headers our client otherwise sets during the course of + * normal operation. + */ + private static final Predicate BLOCKLIST; + + static { + ImmutableSet fullHeaderNames = + Stream.of( + "Accept-Encoding", + "Cache-Control", + "Connection", + "Content-ID", + "Content-Length", + "Content-Range", + "Content-Transfer-Encoding", + "Content-Type", + "Date", + "ETag", + "If-Match", + "If-None-Match", + "Keep-Alive", + "Range", + "TE", + "Trailer", + "Transfer-Encoding", + "User-Agent", + "X-Goog-Api-Client", + "X-Goog-Content-Length-Range", + "X-Goog-Copy-Source-Encryption-Algorithm", + "X-Goog-Copy-Source-Encryption-Key", + "X-Goog-Copy-Source-Encryption-Key-Sha256", + "X-Goog-Encryption-Algorithm", + "X-Goog-Encryption-Key", + "X-Goog-Encryption-Key-Sha256", + "X-Goog-Gcs-Idempotency-Token", + "X-Goog-Request-Params", + "X-Goog-User-Project", + "X-HTTP-Method-Override", + "X-Upload-Content-Length", + "X-Upload-Content-Type") + .map(Utils::headerNameToLowerCase) + .collect(ImmutableSet.toImmutableSet()); + + ImmutableSet prefixes = + Stream.of("X-Goog-Meta-") + .map(Utils::headerNameToLowerCase) + .collect(ImmutableSet.toImmutableSet()); + + BLOCKLIST = + name -> { + if (fullHeaderNames.contains(name)) { + return true; + } + + for (String prefix : prefixes) { + if (name.startsWith(prefix)) { + return true; + } + } + return false; + }; + } + + private Headers(ImmutableMap val) { + super(StorageRpc.Option.EXTRA_HEADERS, val); + } + + @Override + public Mapper getGrpcMetadataMapper() { + return ctx -> { + if (val.isEmpty()) { + return ctx; + } + Set existingHeaderNames = + ctx.getExtraHeaders().keySet().stream() + .map(Utils::headerNameToLowerCase) + .collect(Collectors.toSet()); + Map> wrapped = new HashMap<>(); + for (Entry e : val.entrySet()) { + String key = Utils.headerNameToLowerCase(e.getKey()); + if (existingHeaderNames.contains(key)) { + continue; + } + wrapped.put(key, 
ImmutableList.of(e.getValue())); + } + return ctx.withExtraHeaders(wrapped); + }; + } + + @SuppressWarnings("unchecked") + @Override + public Mapper> mapper() { + return optionBuilder -> { + if (val.isEmpty()) { + return optionBuilder; + } + // not ideal, but ImmutableMap.Builder doesn't have any read methods so we can detect + // collision before build time. + ImmutableMap builtOptions = Utils.mapBuild(optionBuilder); + ImmutableMap tmp = + (ImmutableMap) builtOptions.get(StorageRpc.Option.EXTRA_HEADERS); + if (tmp == null) { + ImmutableMap.Builder b = ImmutableMap.builder(); + for (Entry e : val.entrySet()) { + String key = Utils.headerNameToLowerCase(e.getKey()); + b.put(key, e.getValue()); + } + optionBuilder.put(key, Utils.mapBuild(b)); + return optionBuilder; + } else { + ImmutableMap.Builder newOptionBuilder = ImmutableMap.builder(); + for (Entry e : builtOptions.entrySet()) { + if (e.getKey() != key) { + newOptionBuilder.put(e.getKey(), e.getValue()); + } + } + + ImmutableMap.Builder extraHeadersBuilder = ImmutableMap.builder(); + copyEntries(tmp, extraHeadersBuilder); + newOptionBuilder.put(key, Utils.mapBuild(extraHeadersBuilder)); + return newOptionBuilder; + } + }; + } + + public Mapper> extraHeadersMapper() { + return map -> { + if (val.isEmpty()) { + return map; + } + ImmutableMap.Builder b = ImmutableMap.builder(); + copyEntries(map, b); + return Utils.mapBuild(b); + }; + } + + private void copyEntries( + ImmutableMap map, ImmutableMap.Builder b) { + Set existingHeaderNames = + map.keySet().stream().map(Utils::headerNameToLowerCase).collect(Collectors.toSet()); + b.putAll(map); + for (Entry e : val.entrySet()) { + String key = Utils.headerNameToLowerCase(e.getKey()); + if (!existingHeaderNames.contains(key)) { + b.put(key, e.getValue()); + } + } + } + } + + static final class VersionsFilter extends RpcOptVal<@NonNull Boolean> implements ObjectListOpt { + private VersionsFilter(boolean val) { + super(StorageRpc.Option.VERSIONS, val); + } + + @Override 
+ public Mapper listObjects() { + return b -> b.setVersions(val); + } + } + + /** + * Attempt to extract a crc32c value from an Object. If no crc32c value is extracted the produced + * Opt will be an effective no-op. + * + * @see Crc32cMatch + * @deprecated Use {@link BlobInfo.Builder#setCrc32c(String)} + */ + @Deprecated + static final class Crc32cMatchExtractor implements ObjectOptExtractor { + private static final Crc32cMatchExtractor INSTANCE = new Crc32cMatchExtractor(); + private static final long serialVersionUID = 7045998436157555676L; + + @Deprecated + private Crc32cMatchExtractor() {} + + @Override + public ObjectTargetOpt extractFromBlobInfo(BlobInfo info) { + String crc32c = info.getCrc32c(); + if (crc32c != null) { + return crc32cMatch(crc32c); + } else { + return NoOpObjectTargetOpt.INSTANCE; + } + } + + @Override + public ObjectTargetOpt extractFromBlobId(BlobId id) { + return NoOpObjectTargetOpt.INSTANCE; + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + } + + /** + * Attempt to determine the content type of an Object based on it's {@link BlobInfo#getName()}. 
If + * no name value is extracted, or the value is not a known extension the content type will be + * {@code application/octet-stream} + * + * @see SetContentType + * @see URLConnection#getFileNameMap() + * @see FileNameMap + * @deprecated Use {@link BlobInfo.Builder#setContentType(String)} + */ + @Deprecated + static final class DetectContentType implements ObjectOptExtractor { + @Deprecated private static final DetectContentType INSTANCE = new DetectContentType(); + private static final FileNameMap FILE_NAME_MAP = URLConnection.getFileNameMap(); + private static final long serialVersionUID = -1089120180148634090L; + + @Deprecated + private DetectContentType() {} + + @Override + public ObjectTargetOpt extractFromBlobInfo(BlobInfo info) { + String contentType = info.getContentType(); + if (contentType != null && !contentType.isEmpty()) { + return NoOpObjectTargetOpt.INSTANCE; + } + + return detectForName(info.getName()); + } + + @Override + public ObjectTargetOpt extractFromBlobId(BlobId id) { + return detectForName(id.getName()); + } + + private ObjectTargetOpt detectForName(String name) { + if (name != null) { + String nameLower = Utils.headerNameToLowerCase(name); + String contentTypeFor = FILE_NAME_MAP.getContentTypeFor(nameLower); + if (contentTypeFor != null) { + return new SetContentType(contentTypeFor); + } + } + return new SetContentType("application/octet-stream"); + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + } + + /** + * Attempt to extract a generation value from an Object. If no generation value is extracted an + * {@link IllegalArgumentException} will be thrown. 
+ * + * @see GenerationMatch + * @deprecated Use {@link #generationMatch(long)} + */ + @Deprecated + static final class GenerationMatchExtractor implements ObjectOptExtractor { + private static final GenerationMatchExtractor INSTANCE = new GenerationMatchExtractor(); + private static final long serialVersionUID = -7211192249703566097L; + + @Deprecated + private GenerationMatchExtractor() {} + + @Override + public GenerationMatch extractFromBlobInfo(BlobInfo info) { + Long generation = info.getGeneration(); + checkArgument(generation != null, "Option ifGenerationMatch is missing a value"); + return generationMatch(generation); + } + + @Override + public GenerationMatch extractFromBlobId(BlobId id) { + Long generation = id.getGeneration(); + checkArgument(generation != null, "Option ifGenerationMatch is missing a value"); + return generationMatch(generation); + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + } + + /** + * Attempt to extract a generation value from an Object. If no generation value is extracted an + * {@link IllegalArgumentException} will be thrown. 
+ * + * @see GenerationNotMatch + * @deprecated Use {@link #generationNotMatch(long)} + */ + @Deprecated + static final class GenerationNotMatchExtractor implements ObjectOptExtractor { + private static final GenerationNotMatchExtractor INSTANCE = new GenerationNotMatchExtractor(); + private static final long serialVersionUID = -107520114846569713L; + + @Deprecated + private GenerationNotMatchExtractor() {} + + @Override + public GenerationNotMatch extractFromBlobInfo(BlobInfo info) { + Long generation = info.getGeneration(); + checkArgument(generation != null, "Option ifGenerationNotMatch is missing a value"); + return generationNotMatch(generation); + } + + @Override + public GenerationNotMatch extractFromBlobId(BlobId id) { + Long generation = id.getGeneration(); + checkArgument(generation != null, "Option ifGenerationNotMatch is missing a value"); + return generationNotMatch(generation); + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + } + + /** + * Attempt to extract an md5 value from an Object. If no md5 value is extracted the produced Opt + * will be an effective no-op. 
+ * + * @see Md5Match + * @deprecated Use {@link BlobInfo.Builder#setMd5(String)} + */ + @Deprecated + static final class Md5MatchExtractor implements ObjectOptExtractor { + private static final Md5MatchExtractor INSTANCE = new Md5MatchExtractor(); + private static final long serialVersionUID = 8375506989224962531L; + + @Deprecated + private Md5MatchExtractor() {} + + @Override + public ObjectTargetOpt extractFromBlobInfo(BlobInfo info) { + String md5 = info.getMd5(); + if (md5 != null) { + return md5Match(md5); + } else { + return NoOpObjectTargetOpt.INSTANCE; + } + } + + @Override + public ObjectTargetOpt extractFromBlobId(BlobId id) { + return NoOpObjectTargetOpt.INSTANCE; + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + } + + /** + * Attempt to extract a metageneration value from a Bucket or Object. If no metageneration value + * is extracted an {@link IllegalArgumentException} will be thrown. + * + * @see MetagenerationMatch + * @deprecated Use {@link #metagenerationMatch(long)} + */ + @Deprecated + static final class MetagenerationMatchExtractor + implements ObjectOptExtractor, BucketOptExtractor { + private static final MetagenerationMatchExtractor INSTANCE = new MetagenerationMatchExtractor(); + private static final long serialVersionUID = -4165372534008844973L; + + @Deprecated + private MetagenerationMatchExtractor() {} + + @Override + public MetagenerationMatch extractFromBlobInfo(BlobInfo info) { + Long metageneration = info.getMetageneration(); + checkArgument(metageneration != null, "Option ifMetagenerationMatch is missing a value"); + return metagenerationMatch(metageneration); + } + + @Override + public ObjectTargetOpt extractFromBlobId(BlobId id) { + return NoOpObjectTargetOpt.INSTANCE; + } + + @Override + public MetagenerationMatch extractFromBucketInfo(BucketInfo info) { + Long metageneration = info.getMetageneration(); + checkArgument(metageneration != null, "Option 
ifMetagenerationMatch is missing a value"); + return metagenerationMatch(metageneration); + } + + // Both parent interfaces define this method, we need to declare a dis-ambiguous one + @Override + public Mapper getGrpcMetadataMapper() { + return Mapper.identity(); + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + } + + /** + * Attempt to extract a metageneration value from a Bucket or Object. If no metageneration value + * is extracted an {@link IllegalArgumentException} will be thrown. + * + * @see MetagenerationNotMatch + * @deprecated Use {@link #metagenerationNotMatch(long)} + */ + @Deprecated + static final class MetagenerationNotMatchExtractor + implements ObjectOptExtractor, BucketOptExtractor { + private static final MetagenerationNotMatchExtractor INSTANCE = + new MetagenerationNotMatchExtractor(); + private static final long serialVersionUID = 6544628474151482319L; + + @Deprecated + private MetagenerationNotMatchExtractor() {} + + @Override + public MetagenerationNotMatch extractFromBlobInfo(BlobInfo info) { + Long metageneration = info.getMetageneration(); + checkArgument(metageneration != null, "Option ifMetagenerationNotMatch is missing a value"); + return metagenerationNotMatch(metageneration); + } + + @Override + public ObjectTargetOpt extractFromBlobId(BlobId id) { + return NoOpObjectTargetOpt.INSTANCE; + } + + @Override + public MetagenerationNotMatch extractFromBucketInfo(BucketInfo info) { + Long metageneration = info.getMetageneration(); + checkArgument(metageneration != null, "Option ifMetagenerationNotMatch is missing a value"); + return metagenerationNotMatch(metageneration); + } + + // Both parent interfaces define this method, we need to declare a dis-ambiguous one + @Override + public Mapper getGrpcMetadataMapper() { + return Mapper.identity(); + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + 
} + } + + static final class ObjectFilter extends RpcOptVal implements ObjectListOpt { + private static final long serialVersionUID = -892748218491324843L; + + private ObjectFilter(String val) { + super(StorageRpc.Option.OBJECT_FILTER, val); + } + + @Override + public Mapper listObjects() { + return b -> b.setFilter(val); + } + } + + /** + * Internal only implementation of {@link ObjectTargetOpt} which is a No-op. + * + *

The instance of this class can be returned when a no-op is necessary. + */ + @VisibleForTesting + static final class NoOpObjectTargetOpt implements ObjectTargetOpt { + @VisibleForTesting static final NoOpObjectTargetOpt INSTANCE = new NoOpObjectTargetOpt(); + private static final long serialVersionUID = -5356245440686012545L; + + private NoOpObjectTargetOpt() {} + + @Override + public Mapper getGrpcMetadataMapper() { + return Mapper.identity(); + } + + @Override + public Mapper blobInfo() { + return Mapper.identity(); + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + } + + /** + * A shim class used by {@link Option} to allow a common parent which isn't part of the public + * api. + * + *

{@link Option} itself and all it's subclasses are now obsolete, and should be removed when + * we're able to remove them from the public api. + */ + @Deprecated + abstract static class OptionShim implements Serializable { + private static final long serialVersionUID = 3410752214075057852L; + private final O opt; + + OptionShim(O opt) { + this.opt = opt; + } + + O getOpt() { + return opt; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof OptionShim)) { + return false; + } + OptionShim that = (OptionShim) o; + return Objects.equals(opt, that.opt); + } + + @Override + public int hashCode() { + return Objects.hash(opt); + } + + @Override + public String toString() { + return this.getClass().getSimpleName() + "{opt=" + opt + '}'; + } + } + + /** + * Base class for those {@link Opt}s which correspond to one or more {@link StorageRpc.Option} + * keys. + * + * @param + */ + private abstract static class RpcOptVal implements Opt { + private static final long serialVersionUID = 9170283346051824148L; + protected final StorageRpc.Option key; + protected final T val; + + private RpcOptVal(StorageRpc.Option key, T val) { + this.key = requireNonNull(key, "key must be non null"); + this.val = requireNonNull(val, "val must be non null"); + } + + public Mapper> mapper() { + return b -> b.put(key, val); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof RpcOptVal)) { + return false; + } + RpcOptVal rpcOptVal = (RpcOptVal) o; + return Objects.equals(key, rpcOptVal.key) && Objects.equals(val, rpcOptVal.val); + } + + @Override + public int hashCode() { + return Objects.hash(key, val); + } + + @Override + public String toString() { + return this.getClass().getSimpleName() + "{key=" + key + ", val=" + val + '}'; + } + } + + static final class DefaultHasherSelector implements HasherSelector, Opt { + private static final DefaultHasherSelector INSTANCE = new 
DefaultHasherSelector(); + + private DefaultHasherSelector() {} + + @Override + public Hasher getHasher() { + return Hasher.defaultHasher(); + } + } + + /** + * Internal "collection" class to represent a set of {@link Opt}s, and to provide useful + * transformations to individual mappers or to resolve any extractors providing a new instance + * without extractors. + */ + @SuppressWarnings("unchecked") + static final class Opts { + + private final ImmutableList opts; + + private Opts(ImmutableList opts) { + this.opts = opts; + } + + /** + * Resolve any extractors relative to the provided {@link BlobInfo} and return a new instance. + */ + Opts resolveFrom(BlobInfo info) { + ImmutableList resolvedOpts = + opts.stream() + .map( + o -> { + if (o instanceof ObjectOptExtractor) { + ObjectOptExtractor ex = (ObjectOptExtractor) o; + return ex.extractFromBlobInfo(info); + } else { + return o; + } + }) + .collect(ImmutableList.toImmutableList()); + return new Opts<>(resolvedOpts); + } + + /** Resolve any extractors relative to the provided {@link BlobId} and return a new instance. */ + Opts resolveFrom(BlobId id) { + ImmutableList resolvedOpts = + opts.stream() + .map( + o -> { + if (o instanceof ObjectOptExtractor) { + ObjectOptExtractor ex = (ObjectOptExtractor) o; + return ex.extractFromBlobId(id); + } else { + return o; + } + }) + .collect(ImmutableList.toImmutableList()); + return new Opts<>(resolvedOpts); + } + + /** + * Resolve any extractors relative to the provided {@link BucketInfo} and return a new instance. 
+ */ + Opts resolveFrom(BucketInfo info) { + ImmutableList resolvedOpts = + opts.stream() + .map( + o -> { + if (o instanceof BucketOptExtractor) { + BucketOptExtractor ex = (BucketOptExtractor) o; + return ex.extractFromBucketInfo(info); + } else { + return o; + } + }) + .collect(ImmutableList.toImmutableList()); + return new Opts<>(resolvedOpts); + } + + Opts projectAsSource() { + ImmutableList projectedOpts = + opts.stream() + .map( + o -> { + if (o instanceof ProjectAsSource) { + ProjectAsSource p = (ProjectAsSource) o; + return p.asSource(); + } else { + return o; + } + }) + .collect(ImmutableList.toImmutableList()); + return new Opts<>(projectedOpts); + } + + /** + * Attempt to construct a {@link StorageRpc} compatible map of {@link StorageRpc.Option}. + * + *

Validation ensures an absence of duplicate keys, and mutually exclusive keys. + */ + ImmutableMap getRpcOptions() { + ImmutableMap.Builder builder = + rpcOptionMapper().apply(ImmutableMap.builder()); + return Utils.mapBuild(builder); + } + + @VisibleForTesting + HasherSelector getHasherSelector() { + HasherSelector search = defaultHasherSelector(); + Predicate p = isInstanceOf(HasherSelector.class); + for (T opt : opts) { + if (p.test(opt)) { + search = (HasherSelector) opt; + } + } + return search; + } + + Hasher getHasher() { + return getHasherSelector().getHasher(); + } + + Mapper grpcMetadataMapper() { + return fuseMappers(GrpcMetadataMapper.class, GrpcMetadataMapper::getGrpcMetadataMapper); + } + + Mapper createBucketsRequest() { + return fuseMappers(BucketTargetOpt.class, BucketTargetOpt::createBucket); + } + + Mapper getBucketsRequest() { + return fuseMappers(BucketSourceOpt.class, BucketSourceOpt::getBucket); + } + + Mapper listBucketsRequest() { + return fuseMappers(BucketListOpt.class, BucketListOpt::listBuckets); + } + + Mapper updateBucketsRequest() { + return fuseMappers(BucketTargetOpt.class, BucketTargetOpt::updateBucket); + } + + Mapper deleteBucketsRequest() { + return fuseMappers(BucketTargetOpt.class, BucketTargetOpt::deleteBucket); + } + + Mapper lockBucketRetentionPolicyRequest() { + return fuseMappers(BucketTargetOpt.class, BucketTargetOpt::lockBucketRetentionPolicy); + } + + Mapper writeObjectRequest() { + return fuseMappers(ObjectTargetOpt.class, ObjectTargetOpt::writeObject); + } + + Mapper bidiWriteObjectRequest() { + return fuseMappers(ObjectTargetOpt.class, ObjectTargetOpt::bidiWriteObject); + } + + Mapper startResumableWriteRequest() { + return fuseMappers(ObjectTargetOpt.class, ObjectTargetOpt::startResumableWrite); + } + + Mapper getObjectsRequest() { + return fuseMappers(ObjectSourceOpt.class, ObjectSourceOpt::getObject); + } + + Mapper restoreObjectRequest() { + return fuseMappers(ObjectSourceOpt.class, 
ObjectSourceOpt::restoreObject); + } + + Mapper readObjectRequest() { + return fuseMappers(ObjectSourceOpt.class, ObjectSourceOpt::readObject); + } + + Mapper bidiReadObjectRequest() { + return fuseMappers(ObjectSourceOpt.class, ObjectSourceOpt::bidiReadObject); + } + + Mapper listObjectsRequest() { + return fuseMappers(ObjectListOpt.class, ObjectListOpt::listObjects); + } + + Mapper updateObjectsRequest() { + return fuseMappers(ObjectTargetOpt.class, ObjectTargetOpt::updateObject); + } + + Mapper deleteObjectsRequest() { + return fuseMappers(ObjectTargetOpt.class, ObjectTargetOpt::deleteObject); + } + + Mapper composeObjectsRequest() { + return fuseMappers(ObjectTargetOpt.class, ObjectTargetOpt::composeObject); + } + + Mapper rewriteObjectsRequest() { + return opts.stream() + .filter(isInstanceOf(ObjectTargetOpt.class).or(isInstanceOf(ObjectSourceOpt.class))) + .map( + o -> { + // TODO: Do we need to formalize this type of dual relationship with it's own + // interface? + if (o instanceof ObjectTargetOpt) { + ObjectTargetOpt oto = (ObjectTargetOpt) o; + return oto.rewriteObject(); + } else if (o instanceof ObjectSourceOpt) { + ObjectSourceOpt oso = (ObjectSourceOpt) o; + return oso.rewriteObject(); + } else { + // in practice this shouldn't happen because of the filter guard upstream + throw new IllegalStateException("Unexpected type: %s" + o.getClass()); + } + }) + .reduce(Mapper.identity(), Mapper::andThen); + } + + Mapper moveObjectsRequest() { + return opts.stream() + .filter(isInstanceOf(ObjectTargetOpt.class).or(isInstanceOf(ObjectSourceOpt.class))) + .map( + o -> { + // TODO: Do we need to formalize this type of dual relationship with it's own + // interface? 
+ if (o instanceof ObjectTargetOpt) { + ObjectTargetOpt oto = (ObjectTargetOpt) o; + return oto.moveObject(); + } else if (o instanceof ObjectSourceOpt) { + ObjectSourceOpt oso = (ObjectSourceOpt) o; + return oso.moveObject(); + } else { + // in practice this shouldn't happen because of the filter guard upstream + throw new IllegalStateException("Unexpected type: %s" + o.getClass()); + } + }) + .reduce(Mapper.identity(), Mapper::andThen); + } + + Mapper getIamPolicyRequest() { + return fuseMappers(BucketSourceOpt.class, BucketSourceOpt::getIamPolicy); + } + + Mapper blobInfoMapper() { + return fuseMappers(ObjectTargetOpt.class, ObjectTargetOpt::blobInfo); + } + + /** + * Here for compatibility. This should NOT be an "Opt" instead an attribute of the channel + * builder. When {@link ReturnRawInputStream} is removed, this method should be removed as well. + * + * @see + * GapicDownloadSessionBuilder.ReadableByteChannelSessionBuilder#setAutoGzipDecompression(boolean) + */ + @Deprecated + boolean autoGzipDecompression() { + return filterTo(ReturnRawInputStream.class).findFirst().map(r -> r.val).orElse(true); + } + + Decoder clearBlobFields() { + return filterTo(Fields.class) + .findFirst() + .map(Fields::clearUnselectedBlobFields) + .orElse(Decoder.identity()); + } + + Decoder clearBucketFields() { + return filterTo(Fields.class) + .findFirst() + .map(Fields::clearUnselectedBucketFields) + .orElse(Decoder.identity()); + } + + /** + * Create a new instance of Opts where {@code toPrepend} and {@code this}. If an {@link Opt} + * type ({@code Class}) is present in both {@code toPrepend} and {@code this}, the {@link + * Opt} from {@code this} will take priority when applied via one of the produced mappers. 
+ */ + Opts prepend(Opts toPrepend) { + // inventory the Opt types already present in this + Set> existingOptTypes = + this.opts.stream().map(Opt::getClass).collect(Collectors.toSet()); + + ImmutableList list = + Stream.of( + toPrepend.opts.stream() + // exclude those opt types which are already present in this + .filter(o -> !existingOptTypes.contains(o.getClass())), + this.opts.stream()) + .flatMap(x -> x) + .collect(ImmutableList.toImmutableList()); + return new Opts<>(list); + } + + /** + * Create a new instance of {@code Opts} consisting of those {@code Opt}s which are also an + * {@code R}. + * + *

i.e. Given {@code Opts} produce {@code Opts} + */ + Opts constrainTo(Class c) { + return new Opts<>(filterTo(c).collect(ImmutableList.toImmutableList())); + } + + Opts filter(Predicate p) { + return new Opts<>(opts.stream().filter(p).collect(ImmutableList.toImmutableList())); + } + + Opts transformTo(Class c) { + return new Opts<>( + opts.stream() + .map( + o -> { + if (o instanceof ProjectAsSource) { + ProjectAsSource projectAsSource = (ProjectAsSource) o; + Opt asSource = projectAsSource.asSource(); + if (c.isAssignableFrom(asSource.getClass())) { + return c.cast(asSource); + } + } + if (c.isAssignableFrom(o.getClass())) { + return c.cast(o); + } else { + return null; + } + }) + .filter(Objects::nonNull) + .collect(ImmutableList.toImmutableList())); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Opts)) { + return false; + } + Opts opts1 = (Opts) o; + return Objects.equals(opts, opts1.opts); + } + + @Override + public int hashCode() { + return Objects.hash(opts); + } + + private Mapper> rpcOptionMapper() { + return fuseMappers(RpcOptVal.class, RpcOptVal::mapper); + } + + private Mapper fuseMappers(Class c, Function> f) { + return filterTo(c).map(f).reduce(Mapper.identity(), Mapper::andThen); + } + + @SuppressWarnings("unchecked") + private Stream filterTo(Class c) { + // TODO: figure out if there is need for an "isApplicableTo" predicate + return opts.stream().filter(isInstanceOf(c)).map(x -> (R) x); + } + + static Opts from(T t) { + return new Opts<>(ImmutableList.of(t)); + } + + static Opts from(T... 
ts) { + return new Opts<>(ImmutableList.copyOf(ts)); + } + + static Opts empty() { + return new Opts<>(ImmutableList.of()); + } + + /** + * Given an array of OptionShim, extract the opt from each of them to construct a new instance + * of {@link Opts} + */ + static > Opts unwrap(T[] ts) { + ImmutableList collect = + Arrays.stream(ts).map(OptionShim::getOpt).collect(ImmutableList.toImmutableList()); + return new Opts<>(collect); + } + + /** + * Given a collection of OptionShim, extract the opt from each of them to construct a new + * instance of {@link Opts} + */ + static > Opts unwrap(Collection ts) { + ImmutableList collect = + ts.stream().map(OptionShim::getOpt).collect(ImmutableList.toImmutableList()); + return new Opts<>(collect); + } + + /** Create a predicate which is able to effectively perform an {@code instanceof} check */ + private static Predicate isInstanceOf(Class c) { + return t -> c.isAssignableFrom(t.getClass()); + } + } + + /** + * Interface which represents a field of some resource which is present in the storage api, and + * which can be used for a {@link com.google.cloud.FieldSelector read_mask}. 
+ */ + interface NamedField extends Serializable { + String getApiaryName(); + + String getGrpcName(); + + default NamedField stripPrefix() { + if (this instanceof PrefixedNamedField) { + PrefixedNamedField pnf = (PrefixedNamedField) this; + return pnf.delegate; + } else { + return this; + } + } + + static NamedField prefixed(String prefix, NamedField delegate) { + return new PrefixedNamedField(prefix, delegate); + } + + static NamedField literal(String name) { + return new LiteralNamedField(name); + } + + static NamedField nested(NamedField parent, NamedField child) { + return new NestedNamedField(parent, child); + } + + static NamedField root(NamedField f) { + if (f instanceof NestedNamedField) { + NestedNamedField nested = (NestedNamedField) f; + return root(nested.getParent()); + } else { + return f; + } + } + } + + private static CommonObjectRequestParams.Builder customerSuppliedKey( + CommonObjectRequestParams.Builder b, Key key) { + HashCode keySha256 = Hashing.sha256().hashBytes(key.getEncoded()); + return b.setEncryptionAlgorithm(key.getAlgorithm()) + .setEncryptionKeyBytes(ByteString.copyFrom(key.getEncoded())) + .setEncryptionKeySha256Bytes(ByteString.copyFrom(keySha256.asBytes())); + } + + private static final class PrefixedNamedField implements NamedField { + private static long serialVersionUID = -4899304145424680141L; + + private final String prefix; + private final NamedField delegate; + + private PrefixedNamedField(String prefix, NamedField delegate) { + this.prefix = prefix; + this.delegate = delegate; + } + + @Override + public String getApiaryName() { + return prefix + delegate.getApiaryName(); + } + + @Override + public String getGrpcName() { + return prefix + delegate.getGrpcName(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof PrefixedNamedField)) { + return false; + } + PrefixedNamedField that = (PrefixedNamedField) o; + return Objects.equals(prefix, that.prefix) && 
Objects.equals(delegate, that.delegate); + } + + @Override + public int hashCode() { + return Objects.hash(prefix, delegate); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("prefix", prefix) + .add("delegate", delegate) + .toString(); + } + } + + private static final class LiteralNamedField implements NamedField { + private static long serialVersionUID = 1422947423774466409L; + + private final String name; + + private LiteralNamedField(String name) { + this.name = name; + } + + @Override + public String getApiaryName() { + return name; + } + + @Override + public String getGrpcName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof LiteralNamedField)) { + return false; + } + LiteralNamedField that = (LiteralNamedField) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("name", name).toString(); + } + } + + static final class NestedNamedField implements NamedField { + private static long serialVersionUID = -7623005572810688221L; + private final NamedField parent; + private final NamedField child; + + private NestedNamedField(NamedField parent, NamedField child) { + this.parent = parent; + this.child = child; + } + + @Override + public String getApiaryName() { + return parent.getApiaryName() + "." + child.getApiaryName(); + } + + @Override + public String getGrpcName() { + return parent.getGrpcName() + "." 
+ child.getGrpcName(); + } + + NamedField getParent() { + return parent; + } + + NamedField getChild() { + return child; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof NestedNamedField)) { + return false; + } + NestedNamedField that = (NestedNamedField) o; + return Objects.equals(parent, that.parent) && Objects.equals(child, that.child); + } + + @Override + public int hashCode() { + return Objects.hash(parent, child); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("parent", parent).add("child", child).toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UniformStorageRetryStrategy.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UniformStorageRetryStrategy.java new file mode 100644 index 000000000000..db46a8517f87 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UniformStorageRetryStrategy.java @@ -0,0 +1,43 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.gax.retrying.ResultRetryAlgorithm; + +/** + * An implementation of {@link StorageRetryStrategy} where a single algorithm will be returned for + * both idempotent and non-idempotent cases. 
+ */ +final class UniformStorageRetryStrategy implements StorageRetryStrategy { + + private static final long serialVersionUID = -8606685654893234472L; + private final ResultRetryAlgorithm algorithm; + + public UniformStorageRetryStrategy(ResultRetryAlgorithm algorithm) { + this.algorithm = algorithm; + } + + @Override + public ResultRetryAlgorithm getIdempotentHandler() { + return algorithm; + } + + @Override + public ResultRetryAlgorithm getNonidempotentHandler() { + return algorithm; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UploadFailureScenario.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UploadFailureScenario.java new file mode 100644 index 000000000000..5335ce19cf8b --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/UploadFailureScenario.java @@ -0,0 +1,468 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.Utils.ifNonNull; + +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpResponse; +import com.google.api.client.http.HttpResponseException; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.ApiException; +import com.google.cloud.BaseServiceException; +import com.google.cloud.storage.StorageException.IOExceptionCallable; +import com.google.common.io.CharStreams; +import com.google.protobuf.Message; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.WriteObjectRequest; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Predicate; +import javax.annotation.ParametersAreNonnullByDefault; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +@ParametersAreNonnullByDefault +enum UploadFailureScenario { + // TODO: send more bytes than are in the Content-Range header + SCENARIO_0(BaseServiceException.UNKNOWN_CODE, null, "Unknown Error"), + SCENARIO_0_1(BaseServiceException.UNKNOWN_CODE, null, "Response not application/json."), + SCENARIO_1( + BaseServiceException.UNKNOWN_CODE, + "invalid", + "Attempt to append to already finalized upload session."), + SCENARIO_2( + BaseServiceException.UNKNOWN_CODE, + "invalid", + "Attempt to finalize upload session with fewer bytes than the backend has received."), + SCENARIO_3( + BaseServiceException.UNKNOWN_CODE, + "dataLoss", + "Attempt to finalize upload session with more bytes than the backend has received."), + SCENARIO_4(200, "ok", "Attempt to finalize an already finalized session with same object size"), + 
SCENARIO_4_1( + BaseServiceException.UNKNOWN_CODE, + "dataLoss", + "Finalized upload, but object size less than expected."), + SCENARIO_4_2( + BaseServiceException.UNKNOWN_CODE, + "dataLoss", + "Finalized upload, but object size greater than expected."), + SCENARIO_5( + BaseServiceException.UNKNOWN_CODE, + "dataLoss", + "Client side data loss detected. Attempt to append to a upload session with an offset" + + " higher than the backend has"), + SCENARIO_7( + BaseServiceException.UNKNOWN_CODE, + "dataLoss", + "Client side data loss detected. Bytes acked is more than client sent."), + + SCENARIO_9(503, "backendNotConnected", "Ack less than bytes sent"); + + private static final String PREFIX_I = "\t|< "; + private static final String PREFIX_O = "\t|> "; + private static final String PREFIX_X = "\t| "; + + private static final Predicate includedHeaders = + matches("Content-Length") + .or(matches("Content-Encoding")) + .or(matches("Content-Range")) + .or(matches("Content-Type")) + .or(matches("Range")) + .or(startsWith("X-Goog-Stored-")) + .or(matches("X-Goog-GCS-Idempotency-Token")) + .or(matches("X-Goog-Hash")) + .or(matches("X-Goog-request-params")) + .or(matches("X-GUploader-UploadID")); + + private static final Predicate> includeHeader = + e -> includedHeaders.test(e.getKey()); + + private final int code; + @Nullable private final String reason; + private final String message; + + UploadFailureScenario(int code, @Nullable String reason, String message) { + this.code = code; + this.reason = reason; + this.message = message; + } + + String getMessage() { + return message; + } + + StorageException toStorageException(String uploadId, HttpResponse resp) { + return toStorageException( + uploadId, resp, null, () -> CharStreams.toString(new InputStreamReader(resp.getContent()))); + } + + StorageException toStorageException( + String uploadId, @Nullable HttpResponse resp, @Nullable Throwable cause) { + if (resp != null) { + // an exception caused this, do not try to read 
the content from the response. + return toStorageException(uploadId, resp, cause, () -> null); + } else { + return new StorageException(code, message, reason, cause); + } + } + + StorageException toStorageException( + String uploadId, + HttpResponse resp, + @Nullable Throwable cause, + IOExceptionCallable<@Nullable String> contentCallable) { + return toStorageException(code, message, reason, uploadId, resp, cause, contentCallable); + } + + StorageException toStorageException( + @NonNull List reqs, + @Nullable Message resp, + @NonNull GrpcCallContext context, + @Nullable Throwable cause) { + return toStorageException(code, message, reason, reqs, resp, context, cause); + } + + static StorageException toStorageException( + HttpResponse response, HttpResponseException cause, String uploadId) { + String statusMessage = cause.getStatusMessage(); + StorageException se = + UploadFailureScenario.toStorageException( + cause.getStatusCode(), + String.format( + Locale.US, + "%d %s", + cause.getStatusCode(), + statusMessage == null ? 
"" : statusMessage), + "", + uploadId, + response, + cause, + () -> null); + return se; + } + + static StorageException toStorageException( + int code, + String message, + @Nullable String reason, + @NonNull List reqs, + @Nullable Message resp, + @NonNull GrpcCallContext context, + @Nullable Throwable cause) { + final StringBuilder sb = new StringBuilder(); + sb.append(message); + // request context + Map> extraHeaders = context.getExtraHeaders(); + recordHeadersTo(extraHeaders, PREFIX_O, sb); + int length = reqs.size(); + if (length == 0) { + sb.append("\n").append(PREFIX_O).append("[]"); + } + for (int i = 0; i < length; i++) { + if (i == 0) { + sb.append("\n").append(PREFIX_O).append("["); + } else { + sb.append(","); + } + Message req = reqs.get(i); + fmt(req, PREFIX_O, Indentation.T1, sb); + sb.append("\n").append(PREFIX_O).append("\t}"); + if (i == length - 1) { + sb.append("\n").append(PREFIX_O).append("]"); + } + } + + sb.append("\n").append(PREFIX_X); + + // response context + if (resp != null) { + sb.append("\n").append(PREFIX_I).append(resp.getClass().getName()).append("{"); + fmt(resp, PREFIX_I, Indentation.T1, sb); + sb.append("\n").append(PREFIX_I).append("}"); + sb.append("\n").append(PREFIX_X); + } + + if (cause != null) { + if (cause instanceof ApiException) { + ApiException apiException = (ApiException) cause; + Throwable cause1 = apiException.getCause(); + if (cause1 instanceof StatusRuntimeException) { + StatusRuntimeException statusRuntimeException = (StatusRuntimeException) cause1; + sb.append("\n").append(PREFIX_I).append(statusRuntimeException.getStatus()); + ifNonNull( + statusRuntimeException.getTrailers(), + t -> sb.append("\n").append(PREFIX_I).append(t)); + } else { + sb.append("\n") + .append(PREFIX_I) + .append("code: ") + .append(apiException.getStatusCode().toString()); + ifNonNull( + apiException.getReason(), + r -> sb.append("\n").append(PREFIX_I).append("reason: ").append(r)); + ifNonNull( + apiException.getDomain(), + d -> 
sb.append("\n").append(PREFIX_I).append("domain: ").append(d)); + ifNonNull( + apiException.getErrorDetails(), + e -> sb.append("\n").append(PREFIX_I).append("errorDetails: ").append(e)); + } + sb.append("\n").append(PREFIX_X); + } + } + StorageException se = new StorageException(code, sb.toString(), reason, cause); + return se; + } + + static StorageException toStorageException( + int overrideCode, + String message, + @Nullable String reason, + String uploadId, + HttpResponse resp, + @Nullable Throwable cause, + IOExceptionCallable<@Nullable String> contentCallable) { + Throwable suppress = null; + StringBuilder sb = new StringBuilder(); + sb.append(message); + // add request context + sb.append("\n").append(PREFIX_O).append("PUT ").append(uploadId); + recordHeaderTo(resp.getRequest().getHeaders(), PREFIX_O, sb); + + sb.append("\n").append(PREFIX_X); + // add response context + { + int code = resp.getStatusCode(); + sb.append("\n").append(PREFIX_I).append("HTTP/1.1 ").append(code); + if (resp.getStatusMessage() != null) { + sb.append(" ").append(resp.getStatusMessage()); + } + + recordHeaderTo(resp.getHeaders(), PREFIX_I, sb); + // try to include any body that we can handle + if (isOk(code) || code == 503 || code == 400) { + try { + String content = contentCallable.call(); + if (content != null) { + sb.append("\n").append(PREFIX_I); + if (content.contains("\n") || content.contains("\r\n")) { + sb.append("\n").append(PREFIX_I).append(content.replaceAll("\r?\n", "\n" + PREFIX_I)); + } else { + sb.append("\n").append(PREFIX_I).append(content); + } + } + } catch (IOException e) { + // com.google.api.client.http.HttpResponseException.Builder.Builder + // prints an exception which might occur while attempting to resolve the content + // this can lose the context about the request it was for, instead we register it + // as a suppressed exception + suppress = new StorageException(0, "Error reading response content for diagnostics.", e); + } + } + + 
sb.append("\n").append(PREFIX_X); + } + StorageException storageException = + new StorageException(overrideCode, sb.toString(), reason, cause); + if (suppress != null) { + storageException.addSuppressed(suppress); + } + return storageException; + } + + static boolean isOk(int code) { + return code == 200 || code == 201; + } + + static boolean isContinue(int code) { + return code == 308; + } + + // The header names from HttpHeaders are lower cased, define some utility methods to create + // predicates where we can specify values ignoring case + private static Predicate matches(String expected) { + String lower = Utils.headerNameToLowerCase(expected); + return lower::equals; + } + + private static Predicate startsWith(String prefix) { + String lower = Utils.headerNameToLowerCase(prefix); + return s -> s.startsWith(lower); + } + + private static void recordHeaderTo(HttpHeaders h, String prefix, StringBuilder sb) { + h.entrySet().stream().filter(includeHeader).forEach(writeHeaderValue(prefix, sb)); + } + + private static void recordHeadersTo( + Map> headers, String prefix, StringBuilder sb) { + headers.entrySet().stream().filter(includeHeader).forEach(writeHeaderValue(prefix, sb)); + } + + private static Consumer> writeHeaderValue( + String prefix, StringBuilder sb) { + return e -> { + String key = e.getKey(); + String value = headerValueToString(e.getValue()); + sb.append("\n").append(prefix).append(key).append(": ").append(value); + }; + } + + private static String headerValueToString(Object o) { + if (o instanceof List) { + List l = (List) o; + if (l.size() == 1) { + return l.get(0).toString(); + } + } + + return o.toString(); + } + + private static void fmt( + Message msg, + @SuppressWarnings("SameParameterValue") String prefix, + Indentation indentation, + StringBuilder sb) { + if (msg instanceof WriteObjectRequest) { + WriteObjectRequest req = (WriteObjectRequest) msg; + fmtWriteObjectRequest(req, prefix, indentation, sb); + } else if (msg instanceof 
BidiWriteObjectRequest) { + BidiWriteObjectRequest req = (BidiWriteObjectRequest) msg; + fmtBidiWriteObjectRequest(req, prefix, indentation, sb); + } else { + String string = msg.toString(); + // drop the final new line before prefixing + string = string.replaceAll("\n$", ""); + sb.append("\n") + .append(prefix) + .append(indentation) + .append(string.replaceAll("\r?\n", "\n" + prefix + indentation.indentation)); + } + } + + private static void fmtWriteObjectRequest( + WriteObjectRequest req, String prefix, Indentation t1, StringBuilder sb) { + Indentation t2 = t1.indent(); + Indentation t3 = t2.indent(); + sb.append("\n").append(prefix).append(t1).append(req.getClass().getName()).append("{"); + if (req.hasUploadId()) { + sb.append("\n").append(prefix).append(t2).append("upload_id: ").append(req.getUploadId()); + } + long writeOffset = req.getWriteOffset(); + if (req.hasChecksummedData()) { + ChecksummedData checksummedData = req.getChecksummedData(); + sb.append("\n").append(prefix).append(t2); + sb.append( + String.format( + Locale.US, + "checksummed_data: {range: [%d:%d]", + writeOffset, + writeOffset + checksummedData.getContent().size())); + if (checksummedData.hasCrc32C()) { + sb.append(", crc32c: ").append(Integer.toUnsignedString(checksummedData.getCrc32C())); + } + sb.append("}"); + } else { + sb.append("\n").append(prefix).append(t2).append("write_offset: ").append(writeOffset); + } + if (req.getFinishWrite()) { + sb.append("\n").append(prefix).append(t2).append("finish_write: true"); + } + if (req.hasObjectChecksums()) { + ObjectChecksums objectChecksums = req.getObjectChecksums(); + sb.append("\n").append(prefix).append(t2).append("object_checksums: ").append("{"); + fmt(objectChecksums, prefix, t3, sb); + sb.append("\n").append(prefix).append(t2).append("}"); + } + } + + private static void fmtBidiWriteObjectRequest( + BidiWriteObjectRequest req, String prefix, Indentation t1, StringBuilder sb) { + Indentation t2 = t1.indent(); + Indentation t3 = 
t2.indent(); + sb.append("\n").append(prefix).append(t1).append(req.getClass().getName()).append("{"); + if (req.hasUploadId()) { + sb.append("\n").append(prefix).append(t2).append("upload_id: ").append(req.getUploadId()); + } + long writeOffset = req.getWriteOffset(); + if (req.hasChecksummedData()) { + ChecksummedData checksummedData = req.getChecksummedData(); + sb.append("\n").append(prefix).append(t2); + sb.append( + String.format( + Locale.US, + "checksummed_data: {range: [%d:%d]", + writeOffset, + writeOffset + checksummedData.getContent().size())); + if (checksummedData.hasCrc32C()) { + sb.append(", crc32c: ").append(Integer.toUnsignedString(checksummedData.getCrc32C())); + } + sb.append("}"); + } else { + sb.append("\n").append(prefix).append(t2).append("write_offset: ").append(writeOffset); + } + if (req.getFlush()) { + sb.append("\n").append(prefix).append(t2).append("flush: true"); + } + if (req.getStateLookup()) { + sb.append("\n").append(prefix).append(t2).append("state_lookup: true"); + } + if (req.getFinishWrite()) { + sb.append("\n").append(prefix).append(t2).append("finish_write: true"); + } + if (req.hasObjectChecksums()) { + ObjectChecksums objectChecksums = req.getObjectChecksums(); + sb.append("\n").append(prefix).append(t2).append("object_checksums: ").append("{"); + fmt(objectChecksums, prefix, t3, sb); + sb.append("\n").append(prefix).append(t2).append("}"); + } + } + + enum Indentation { + T1("\t"), + T2("\t\t"), + T3("\t\t\t"), + T4("\t\t\t\t"), + ; + + private final String indentation; + + Indentation(String indentation) { + this.indentation = indentation; + } + + Indentation indent() { + int ordinal = ordinal(); + return values()[ordinal + 1]; + } + + @Override + public String toString() { + return indentation; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Utils.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Utils.java new file mode 100644 index 
000000000000..90d6122c9aea --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/Utils.java @@ -0,0 +1,365 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static java.util.Objects.requireNonNull; + +import com.google.api.client.util.DateTime; +import com.google.api.core.InternalApi; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.cloud.storage.Conversions.Codec; +import com.google.cloud.storage.UnifiedOpts.NamedField; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.MapDifference; +import com.google.common.collect.Maps; +import com.google.common.io.BaseEncoding; +import com.google.common.primitives.Ints; +import com.google.storage.v2.BucketName; +import com.google.storage.v2.ProjectName; +import java.math.BigInteger; +import java.time.Duration; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.temporal.ChronoUnit; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import 
java.util.concurrent.TimeUnit; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Stream; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * A collection of general utility functions providing convenience facilities. + * + *

Nothing in here should be Storage specific. Anything Storage specific should go in an + * appropriately named and scoped class. + */ +@InternalApi +final class Utils { + private static final Function, ImmutableMap> mapBuild; + + static { + Function, ImmutableMap> tmp; + // buildOrThrow was added in guava 31.0 + // if it fails, fallback to the older build() method instead. + // The behavior was the same, but the new name makes the behavior clear + try { + ImmutableMap.builder().buildOrThrow(); + tmp = ImmutableMap.Builder::buildOrThrow; + } catch (NoSuchMethodError e) { + tmp = ImmutableMap.Builder::build; + } + mapBuild = tmp; + } + + static final DateTimeFormatter RFC_3339_DATE_TIME_FORMATTER = + DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); + static final Codec durationSecondsCodec = + Codec.of(Duration::getSeconds, Duration::ofSeconds); + + static final Codec offsetDateTimeRfc3339Codec = + Codec.of( + RFC_3339_DATE_TIME_FORMATTER::format, + s -> OffsetDateTime.parse(s, RFC_3339_DATE_TIME_FORMATTER)); + + @VisibleForTesting + static final Codec dateTimeCodec = + Codec.of( + odt -> { + ZoneOffset offset = odt.getOffset(); + int i = Math.toIntExact(TimeUnit.SECONDS.toMinutes(offset.getTotalSeconds())); + return new DateTime(odt.toInstant().toEpochMilli(), i); + }, + dt -> { + long milli = dt.getValue(); + int timeZoneShiftMinutes = dt.getTimeZoneShift(); + + Duration timeZoneShift = Duration.of(timeZoneShiftMinutes, ChronoUnit.MINUTES); + + int hours = Math.toIntExact(timeZoneShift.toHours()); + int minutes = + Math.toIntExact( + timeZoneShift.minusHours(timeZoneShift.toHours()).getSeconds() / 60); + ZoneOffset offset = ZoneOffset.ofHoursMinutes(hours, minutes); + + return Instant.ofEpochMilli(milli).atOffset(offset); + }); + + static final Codec nullableDateTimeCodec = dateTimeCodec.nullable(); + + /** + * Define a Codec which encapsulates the logic necessary to handle encoding and decoding bucket + * names. + * + *

The "Model Type" in this case is the raw bucket name as would be read from {@link + * BucketInfo#getName()}. The "Proto Type" in this case is the OnePlatform formatted + * representation of the bucket name. + * + *

As of the time of writing this, project scoped buckets are not implemented by the backend + * service. While we need to be cognisant that they are on the horizon, we do not need to track + * any data related to this future feature. As such, this method attempts to make it easier to + * work with bucket names that require the OnePlatform format while still preventing any subtle + * bugs happening to customers if they happen to attempt to use project scoped bucket features in + * this library once the service does support it. + * + *

TODO: this will need to change once the project scoped buckets first class feature work is + * done. + */ + static final Codec bucketNameCodec = + Codec.of( + bucket -> { + requireNonNull(bucket, "bucket must be non null"); + if (bucket.startsWith("projects/")) { + if (bucket.startsWith("projects/_")) { + return bucket; + } else { + throw new IllegalArgumentException( + "Project scoped buckets are not supported by this version of the library." + + " (bucket = " + + bucket + + ")"); + } + } else { + return "projects/_/buckets/" + bucket; + } + }, + resourceName -> { + requireNonNull(resourceName, "resourceName must be non null"); + if (BucketName.isParsableFrom(resourceName)) { + BucketName parse = BucketName.parse(resourceName); + return parse.getBucket(); + } else { + return resourceName; + } + }); + + /** + * Define a Codec which encapsulates the logic necessary to handle encoding and decoding project + * names. + */ + static final Codec projectNameCodec = + Codec.of( + project -> { + requireNonNull(project, "project must be non null"); + if (project.startsWith("projects/")) { + return project; + } else { + return "projects/" + project; + } + }, + resourceName -> { + requireNonNull(resourceName, "resourceName must be non null"); + if (ProjectName.isParsableFrom(resourceName)) { + ProjectName parse = ProjectName.parse(resourceName); + return parse.getProject(); + } else { + return resourceName; + } + }); + + /** + * Define a Codec which encapsulates the logic necessary to handle encoding and decoding project + * numbers. 
+ */ + static final Codec<@NonNull BigInteger, @NonNull String> projectNumberResourceCodec = + Codec.of( + projectNumber -> { + requireNonNull(projectNumber, "projectNumber must be non null"); + return ProjectName.format(projectNumber.toString()); + }, + projectNumberResource -> { + requireNonNull(projectNumberResource, "projectNumberResource must be non null"); + Preconditions.checkArgument( + ProjectName.isParsableFrom(projectNumberResource), + "projectNumberResource '%s' is not parsable as a %s", + projectNumberResource, + ProjectName.class.getName()); + ProjectName parse = ProjectName.parse(projectNumberResource); + return new BigInteger(parse.getProject()); + }); + + static final Codec crc32cCodec = + Codec.of(Utils::crc32cEncode, Utils::crc32cDecode); + + private Utils() {} + + /** + * If the value provided as {@code t} is non-null, consume it via {@code c}. + * + *

Helper method to allow for more terse expression of: + * + *

{@code
+   * if (t != null) {
+   *   x.setT(t);
+   * }
+   * }
+ */ + @InternalApi + static void ifNonNull(@Nullable T t, Consumer c) { + if (t != null) { + c.accept(t); + } + } + + /** + * If the value provided as {@code t} is non-null, transform it using {@code map} and consume it + * via {@code c}. + * + *

Helper method to allow for more terse expression of: + * + *

{@code
+   * if (t != null) {
+   *   x.setT(map.apply(t));
+   * }
+   * }
+ */ + @InternalApi + static void ifNonNull(@Nullable T1 t, Function map, Consumer c) { + if (t != null) { + T2 apply = map.apply(t); + if (apply != null) { + c.accept(apply); + } + } + } + + /** + * Convenience method to "lift" a method reference to a {@link Function}. + * + *

While a method reference can be pass as an argument to a method which expects a {@code + * Function} it does not then allow calling {@link Function#andThen(Function) #andThen(Function)}. + * This method forces the method reference to be a {@code Function} thereby allowing {@code + * #andThen} composition. + */ + @InternalApi + static Function lift(Function f) { + return f; + } + + /** + * Convenience method to resolve the first non-null {@code T} from an array of suppliers. + * + *

Each supplier will have {@link Supplier#get()} called, and if non-null the value will be + * returned. + */ + @NonNull + @SafeVarargs + static T firstNonNull(Supplier<@Nullable T>... ss) { + for (Supplier s : ss) { + T t = s.get(); + if (t != null) { + return t; + } + } + throw new IllegalStateException("Unable to resolve non-null value"); + } + + /** + * Diff two maps, and append each differing key to {@code sink} with the parent of {{@code parent} + */ + static void diffMaps( + NamedField parent, Map left, Map right, Consumer sink) { + final Stream keys; + if (left != null && right == null) { + keys = left.keySet().stream(); + } else if (left == null && right != null) { + keys = right.keySet().stream(); + } else if (left != null && right != null) { + MapDifference difference = Maps.difference(left, right); + keys = + Stream.of( + // keys with modified values + difference.entriesDiffering().keySet().stream(), + // Only include keys to remove if ALL keys were removed + right.isEmpty() + ? 
difference.entriesOnlyOnLeft().keySet().stream() + : Stream.empty(), + // new keys + difference.entriesOnlyOnRight().keySet().stream()) + .flatMap(x -> x); + } else { + keys = Stream.empty(); + } + keys.map(NamedField::literal).map(k -> NamedField.nested(parent, k)).forEach(sink); + } + + static T[] subArray(T[] ts, int offset, int length) { + if (offset == 0 && length == ts.length) { + return ts; + } else { + return Arrays.copyOfRange(ts, offset, length); + } + } + + private static int crc32cDecode(String from) { + byte[] decodeCrc32c = BaseEncoding.base64().decode(from); + return Ints.fromByteArray(decodeCrc32c); + } + + private static String crc32cEncode(int from) { + return BaseEncoding.base64().encode(Ints.toByteArray(from)); + } + + /** + * Type preserving method for {@link GrpcCallContext#merge(ApiCallContext)} + * + * @see GrpcCallContext#merge(ApiCallContext) + */ + @NonNull + static GrpcCallContext merge(@NonNull GrpcCallContext l, @NonNull GrpcCallContext r) { + return (GrpcCallContext) l.merge(r); + } + + static ImmutableList nullSafeList(@Nullable T t) { + if (t == null) { + return ImmutableList.of(); + } else { + return ImmutableList.of(t); + } + } + + static ImmutableMap mapBuild(ImmutableMap.Builder b) { + return (ImmutableMap) mapBuild.apply(b); + } + + static String headerNameToLowerCase(String headerName) { + return headerName.toLowerCase(Locale.US); + } + + static Map<@NonNull K, @Nullable V> setToMap( + Set<@NonNull K> s, Function<@NonNull K, @Nullable V> valueFunction) { + // use hashmap so we can have null values + HashMap<@NonNull K, @Nullable V> m = new HashMap<>(); + for (@NonNull K k : s) { + m.put(k, valueFunction.apply(k)); + } + return Collections.unmodifiableMap(m); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/WritableByteChannelSession.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/WritableByteChannelSession.java new file mode 100644 index 
000000000000..7a09463385fd --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/WritableByteChannelSession.java @@ -0,0 +1,32 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.rpc.ApiExceptions; +import java.nio.channels.WritableByteChannel; + +interface WritableByteChannelSession { + + default WBC open() { + return ApiExceptions.callAndTranslateApiException(openAsync()); + } + + ApiFuture openAsync(); + + ApiFuture getResult(); +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/WriteCtx.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/WriteCtx.java new file mode 100644 index 000000000000..5539c04ba9a7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/WriteCtx.java @@ -0,0 +1,124 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto; + +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.WriteCtx.WriteObjectRequestBuilderFactory; +import com.google.storage.v2.WriteObjectRequest; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class WriteCtx { + + private final RequestFactoryT requestFactory; + + private final AtomicLong totalSentBytes; + private final AtomicLong confirmedBytes; + private final AtomicReference<@Nullable Crc32cLengthKnown> cumulativeCrc32c; + + private WriteCtx(RequestFactoryT requestFactory, @Nullable Crc32cLengthKnown initialValue) { + this.requestFactory = requestFactory; + this.totalSentBytes = new AtomicLong(0); + this.confirmedBytes = new AtomicLong(0); + this.cumulativeCrc32c = new AtomicReference<>(initialValue); + } + + static WriteCtx of( + RFT rft, @NonNull Hasher hasher) { + return new WriteCtx<>(rft, hasher.initialValue()); + } + + public RequestFactoryT getRequestFactory() { + return requestFactory; + } + + public WriteObjectRequest.Builder newRequestBuilder() { + return requestFactory.newBuilder(); + } + + public AtomicLong getTotalSentBytes() { + return totalSentBytes; + } + + public AtomicLong getConfirmedBytes() { + return confirmedBytes; + } + + public AtomicReference<@Nullable Crc32cLengthKnown> getCumulativeCrc32c() { + return cumulativeCrc32c; + } + + // TODO: flush this out more + boolean isDirty() { + return confirmedBytes.get() == totalSentBytes.get(); + } + + @Override + public String toString() { + return "ServerState{" + + "requestFactory=" + + requestFactory + + ", totalSentBytes=" + + totalSentBytes + + ", 
confirmedBytes=" + + confirmedBytes + + ", totalSentCrc32c=" + + cumulativeCrc32c + + '}'; + } + + interface WriteObjectRequestBuilderFactory { + WriteObjectRequest.Builder newBuilder(); + + @Nullable String bucketName(); + + static SimpleWriteObjectRequestBuilderFactory simple(WriteObjectRequest req) { + return new SimpleWriteObjectRequestBuilderFactory(req); + } + } + + static final class SimpleWriteObjectRequestBuilderFactory + implements WriteObjectRequestBuilderFactory { + private final WriteObjectRequest req; + + private SimpleWriteObjectRequestBuilderFactory(WriteObjectRequest req) { + this.req = req; + } + + @Override + public WriteObjectRequest.Builder newBuilder() { + return req.toBuilder(); + } + + @Override + public @Nullable String bucketName() { + if (req.hasWriteObjectSpec() && req.getWriteObjectSpec().hasResource()) { + return req.getWriteObjectSpec().getResource().getBucket(); + } + return null; + } + + @Override + public String toString() { + return "SimpleWriteObjectRequestBuilderFactory{" + "req=" + fmtProto(req) + '}'; + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/XGoogApiClientHeaderProvider.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/XGoogApiClientHeaderProvider.java new file mode 100644 index 000000000000..310c2b12ed11 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/XGoogApiClientHeaderProvider.java @@ -0,0 +1,104 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.gax.rpc.HeaderProvider; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.MapDifference; +import com.google.common.collect.MapDifference.ValueDifference; +import com.google.common.collect.Maps; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.stream.Collector; +import java.util.stream.Collectors; + +final class XGoogApiClientHeaderProvider implements HeaderProvider { + + /** Separate entries with a space */ + private static final Collector COMBINER = Collectors.joining(" "); + + private final Map headers; + + private XGoogApiClientHeaderProvider(Map headers) { + this.headers = headers; + } + + @Override + public Map getHeaders() { + return headers; + } + + static XGoogApiClientHeaderProvider of( + HeaderProvider baseValue, ImmutableList additionalEntries) { + if (additionalEntries.isEmpty()) { + return new XGoogApiClientHeaderProvider(baseValue.getHeaders()); + } else { + ImmutableMap right = + ImmutableMap.of("x-goog-api-client", additionalEntries.stream().collect(COMBINER)); + ImmutableMap union = union(baseValue.getHeaders(), right); + return new XGoogApiClientHeaderProvider(union); + } + } + + /** + * Union two maps, ignoring case of their keys. + * + *

Any key present in both {@code left} and {@code right} will be combined to produce a new + * single value. + */ + @VisibleForTesting + static ImmutableMap union(Map left, Map right) { + if (left.equals(right)) { + return ImmutableMap.copyOf(left); + } + Map l = lowerKeys(left); + Map r = lowerKeys(right); + if (l.equals(r)) { + return ImmutableMap.copyOf(l); + } + + Map tmp = new HashMap<>(); + + MapDifference diff = Maps.difference(l, r); + + tmp.putAll(diff.entriesOnlyOnLeft()); + tmp.putAll(diff.entriesOnlyOnRight()); + tmp.putAll(diff.entriesInCommon()); + + for (Entry> e : diff.entriesDiffering().entrySet()) { + String k = e.getKey(); + ValueDifference v = e.getValue(); + + tmp.put(k, ImmutableList.of(v.leftValue(), v.rightValue()).stream().collect(COMBINER)); + } + + return ImmutableMap.copyOf(tmp); + } + + private static Map lowerKeys(Map orig) { + Map tmp = new HashMap<>(); + for (Entry e : orig.entrySet()) { + String k = e.getKey(); + String v = e.getValue(); + tmp.put(Utils.headerNameToLowerCase(k), v); + } + return tmp; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/XmlObjectParser.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/XmlObjectParser.java new file mode 100644 index 000000000000..c138138aae8c --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/XmlObjectParser.java @@ -0,0 +1,149 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.storage; + +import com.fasterxml.jackson.core.JacksonException; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.Version; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.Module; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.deser.std.StdDeserializer; +import com.fasterxml.jackson.databind.module.SimpleDeserializers; +import com.fasterxml.jackson.databind.module.SimpleSerializers; +import com.fasterxml.jackson.databind.ser.std.StdSerializer; +import com.fasterxml.jackson.dataformat.xml.XmlMapper; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; +import com.google.api.client.util.ObjectParser; +import com.google.cloud.StringEnumValue; +import com.google.cloud.storage.multipartupload.model.ObjectLockMode; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.Reader; +import java.lang.reflect.Type; +import java.nio.charset.Charset; +import java.util.function.Function; + +final class XmlObjectParser implements ObjectParser { + private final XmlMapper xmlMapper; + + @VisibleForTesting + public XmlObjectParser(XmlMapper xmlMapper) { + this.xmlMapper = xmlMapper; + this.xmlMapper.registerModule(new JavaTimeModule()); + // ensure parsing does not fail if any new field is ever added in the future + this.xmlMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + this.xmlMapper.registerModule( + new Module() { + @Override + public String getModuleName() { 
+ return this.getClass().getPackage().getName(); + } + + @Override + public Version version() { + return Version.unknownVersion(); + } + + @Override + public void setupModule(SetupContext context) { + context.addSerializers( + new SimpleSerializers( + ImmutableList.of( + new StringEnumValueSerializer<>(StorageClass.class), + new StringEnumValueSerializer<>(ObjectLockMode.class)))); + context.addDeserializers( + new SimpleDeserializers( + ImmutableMap.of( + StorageClass.class, + new StringEnumValueDeserializer<>( + StorageClass.class, StorageClass::valueOf), + ObjectLockMode.class, + new StringEnumValueDeserializer<>( + ObjectLockMode.class, ObjectLockMode::valueOf)))); + } + }); + } + + @Override + public T parseAndClose(InputStream in, Charset charset, Class dataClass) + throws IOException { + return parseAndClose(new InputStreamReader(in, charset), dataClass); + } + + @Override + public Object parseAndClose(InputStream in, Charset charset, Type dataType) throws IOException { + throw new UnsupportedOperationException( + "XmlObjectParse#" + + CrossTransportUtils.fmtMethodName( + "parseAndClose", InputStream.class, Charset.class, Type.class)); + } + + @Override + public T parseAndClose(Reader reader, Class dataClass) throws IOException { + try (Reader r = reader) { + return xmlMapper.readValue(r, dataClass); + } + } + + @Override + public Object parseAndClose(Reader reader, Type dataType) throws IOException { + throw new UnsupportedOperationException( + "XmlObjectParse#" + + CrossTransportUtils.fmtMethodName("parseAndClose", Reader.class, Type.class)); + } + + private static final class StringEnumValueDeserializer + extends StdDeserializer { + + private final Function constructor; + + private StringEnumValueDeserializer(Class cl, Function constructor) { + super(cl); + this.constructor = constructor; + } + + @Override + public E deserialize(JsonParser p, DeserializationContext ctxt) + throws IOException, JacksonException { + String s = p.readValueAs(String.class); + 
if (s == null || s.trim().isEmpty()) { + return null; + } + return constructor.apply(s); + } + } + + private static final class StringEnumValueSerializer + extends StdSerializer { + + private StringEnumValueSerializer(Class cl) { + super(cl); + } + + @Override + public void serialize(E value, JsonGenerator gen, SerializerProvider provider) + throws IOException { + gen.writeString(value.name()); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ZeroCopySupport.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ZeroCopySupport.java new file mode 100644 index 000000000000..8ac0ed61e18d --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/ZeroCopySupport.java @@ -0,0 +1,61 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.BetaApi; +import com.google.api.core.InternalExtensionOnly; +import com.google.protobuf.ByteString; +import java.io.Closeable; +import java.io.IOException; + +/** + * Public components which exist to support zero-copy data access + * + * @since 2.51.0 This new api is in preview and is subject to breaking changes. 
+ */ +@BetaApi +public abstract class ZeroCopySupport { + + private ZeroCopySupport() {} + + /** + * Represents an object that can be accessed as a {@link ByteString}, but has a lifecycle that + * requires being explicitly closed in order to free up resources. + * + *

Instances of this class should be used in a try-with-resources to ensure they are released. + * + *

{@code
+   * try (DisposableByteString disposableByteString = ...) {
+   *   System.out.println(disposableByteString.byteString().size());
+   * }
+   * }
+ * + * @see ReadProjectionConfigs#asFutureByteString() + * @since 2.51.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + @InternalExtensionOnly + public interface DisposableByteString extends AutoCloseable, Closeable { + + /** Get the ByteString representation of the underlying resources */ + ByteString byteString(); + + /** Signal the underlying resources that they can be released. */ + @Override + void close() throws IOException; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/AbortMultipartUploadRequest.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/AbortMultipartUploadRequest.java new file mode 100644 index 000000000000..8e98c4f86599 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/AbortMultipartUploadRequest.java @@ -0,0 +1,162 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.storage.multipartupload.model; + +/** + * Represents a request to abort a multipart upload. This request is used to stop an in-progress + * multipart upload, deleting any previously uploaded parts. 
/**
 * Represents a request to abort a multipart upload. This request is used to stop an in-progress
 * multipart upload, deleting any previously uploaded parts.
 *
 * @since 2.60.0
 */
public final class AbortMultipartUploadRequest {
  private final String bucket;
  private final String key;
  private final String uploadId;
  private final String userProject;

  private AbortMultipartUploadRequest(
      String bucket, String key, String uploadId, String userProject) {
    this.bucket = bucket;
    this.key = key;
    this.uploadId = uploadId;
    this.userProject = userProject;
  }

  /**
   * Returns the name of the bucket in which the multipart upload is stored.
   *
   * @return The bucket name.
   * @since 2.60.0
   */
  public String bucket() {
    return bucket;
  }

  /**
   * Returns the name of the object that is being uploaded.
   *
   * @return The object name.
   * @since 2.60.0
   */
  public String key() {
    return key;
  }

  /**
   * Returns the upload ID of the multipart upload to abort.
   *
   * @return The upload ID.
   * @since 2.60.0
   */
  public String uploadId() {
    return uploadId;
  }

  /**
   * Returns the user-project.
   *
   * @return the user-project.
   * @see <a
   *     href="https://cloud.google.com/storage/docs/xml-api/reference-headers#xgooguserproject">x-goog-user-project</a>
   * @since 2.61.0
   */
  public String userProject() {
    return userProject;
  }

  /**
   * Returns a new builder for creating {@link AbortMultipartUploadRequest} instances.
   *
   * @return A new {@link Builder}.
   * @since 2.60.0
   */
  public static Builder builder() {
    return new Builder();
  }

  /**
   * A builder for creating {@link AbortMultipartUploadRequest} instances.
   *
   * @since 2.60.0
   */
  public static class Builder {
    private String bucket;
    private String key;
    private String uploadId;
    private String userProject;

    private Builder() {}

    /**
     * Sets the name of the bucket in which the multipart upload is stored.
     *
     * @param bucket The bucket name.
     * @return This builder.
     * @since 2.60.0
     */
    public Builder bucket(String bucket) {
      this.bucket = bucket;
      return this;
    }

    /**
     * Sets the name of the object that is being uploaded.
     *
     * @param key The object name.
     * @return This builder.
     * @since 2.60.0
     */
    public Builder key(String key) {
      this.key = key;
      return this;
    }

    /**
     * Sets the upload ID of the multipart upload to abort.
     *
     * @param uploadId The upload ID.
     * @return This builder.
     * @since 2.60.0
     */
    public Builder uploadId(String uploadId) {
      this.uploadId = uploadId;
      return this;
    }

    /**
     * Sets the user-project.
     *
     * @param userProject The user-project.
     * @return This builder.
     * @see <a
     *     href="https://cloud.google.com/storage/docs/xml-api/reference-headers#xgooguserproject">x-goog-user-project</a>
     * @since 2.61.0
     */
    public Builder userProject(String userProject) {
      this.userProject = userProject;
      return this;
    }

    /**
     * Builds a new {@link AbortMultipartUploadRequest} instance.
     *
     * @return A new {@link AbortMultipartUploadRequest}.
     * @since 2.60.0
     */
    public AbortMultipartUploadRequest build() {
      return new AbortMultipartUploadRequest(bucket, key, uploadId, userProject);
    }
  }
}
+ */ +package com.google.cloud.storage.multipartupload.model; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * Represents a response to an abort multipart upload request. This class is currently empty as the + * abort operation does not return any specific data in its response body. + * + * @since 2.60.0 + */ +@JacksonXmlRootElement(localName = "AbortMultipartUploadResponse") +public final class AbortMultipartUploadResponse {} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CompleteMultipartUploadRequest.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CompleteMultipartUploadRequest.java new file mode 100644 index 000000000000..6dafbcacba01 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CompleteMultipartUploadRequest.java @@ -0,0 +1,223 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.multipartupload.model; + +import com.google.common.base.MoreObjects; +import java.util.Objects; + +/** + * Represents a request to complete a multipart upload. 
+ * + * @since 2.60.0 + */ +public final class CompleteMultipartUploadRequest { + + private final String bucket; + private final String key; + private final String uploadId; + private final CompletedMultipartUpload multipartUpload; + private final String userProject; + + private CompleteMultipartUploadRequest(Builder builder) { + this.bucket = builder.bucket; + this.key = builder.key; + this.uploadId = builder.uploadId; + this.multipartUpload = builder.multipartUpload; + this.userProject = builder.userProject; + } + + /** + * Returns the bucket name. + * + * @return The bucket name. + * @since 2.60.0 + */ + public String bucket() { + return bucket; + } + + /** + * Returns the object name. + * + * @return The object name. + * @since 2.60.0 + */ + public String key() { + return key; + } + + /** + * Returns the upload ID of the multipart upload. + * + * @return The upload ID. + * @since 2.60.0 + */ + public String uploadId() { + return uploadId; + } + + /** + * Returns the {@link CompletedMultipartUpload} payload for this request. + * + * @return The {@link CompletedMultipartUpload} payload. + * @since 2.60.0 + */ + public CompletedMultipartUpload multipartUpload() { + return multipartUpload; + } + + /** + * Returns the user-project. + * + * @return the user-project. 
+ * @see x-goog-user-project + * @since 2.61.0 + */ + public String userProject() { + return userProject; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof CompleteMultipartUploadRequest)) { + return false; + } + CompleteMultipartUploadRequest that = (CompleteMultipartUploadRequest) o; + return Objects.equals(bucket, that.bucket) + && Objects.equals(key, that.key) + && Objects.equals(uploadId, that.uploadId) + && Objects.equals(multipartUpload, that.multipartUpload) + && Objects.equals(userProject, that.userProject); + } + + @Override + public int hashCode() { + return Objects.hash(bucket, key, uploadId, multipartUpload, userProject); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("bucket", bucket) + .add("key", key) + .add("uploadId", uploadId) + .add("completedMultipartUpload", multipartUpload) + .add("userProject", userProject) + .toString(); + } + + /** + * Creates a new builder for {@link CompleteMultipartUploadRequest}. + * + * @return A new builder. + * @since 2.60.0 + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Builder for {@link CompleteMultipartUploadRequest}. + * + * @since 2.60.0 + */ + public static class Builder { + private String bucket; + private String key; + private String uploadId; + private CompletedMultipartUpload multipartUpload; + private String userProject; + + private Builder() {} + + /** + * Sets the bucket name. + * + * @param bucket The bucket name. + * @return This builder. + * @since 2.60.0 + */ + public Builder bucket(String bucket) { + this.bucket = bucket; + return this; + } + + /** + * Sets the object name. + * + * @param key The object name. + * @return This builder. + * @since 2.60.0 + */ + public Builder key(String key) { + this.key = key; + return this; + } + + /** + * Sets the upload ID of the multipart upload. + * + * @param uploadId The upload ID. + * @return This builder. 
+ * @since 2.60.0 + */ + public Builder uploadId(String uploadId) { + this.uploadId = uploadId; + return this; + } + + /** + * Sets the {@link CompletedMultipartUpload} payload for this request. + * + * @param completedMultipartUpload The {@link CompletedMultipartUpload} payload. + * @return This builder. + * @since 2.60.0 + */ + public Builder multipartUpload(CompletedMultipartUpload completedMultipartUpload) { + this.multipartUpload = completedMultipartUpload; + return this; + } + + /** + * Sets the user-project. + * + * @param userProject The user-project. + * @return This builder. + * @see x-goog-user-project + * @since 2.61.0 + */ + public Builder userProject(String userProject) { + this.userProject = userProject; + return this; + } + + /** + * Builds the {@link CompleteMultipartUploadRequest} object. + * + * @return The new {@link CompleteMultipartUploadRequest} object. + * @since 2.60.0 + */ + public CompleteMultipartUploadRequest build() { + return new CompleteMultipartUploadRequest(this); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CompleteMultipartUploadResponse.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CompleteMultipartUploadResponse.java new file mode 100644 index 000000000000..78fb27432fd7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CompleteMultipartUploadResponse.java @@ -0,0 +1,232 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.multipartupload.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.google.common.base.MoreObjects; +import java.util.Objects; + +/** + * Represents the response from a completed multipart upload. + * + * @since 2.60.0 + */ +@JsonDeserialize(builder = CompleteMultipartUploadResponse.Builder.class) +public final class CompleteMultipartUploadResponse { + + private final String location; + private final String bucket; + private final String key; + private final String etag; + private final String crc32c; + + private CompleteMultipartUploadResponse(Builder builder) { + this.location = builder.location; + this.bucket = builder.bucket; + this.key = builder.key; + this.etag = builder.etag; + this.crc32c = builder.crc32c; + } + + /** + * Returns the URL of the completed object. + * + * @return The URL of the completed object. + * @since 2.60.0 + */ + @JsonProperty("Location") + public String location() { + return location; + } + + /** + * Returns the bucket name. + * + * @return The bucket name. + * @since 2.60.0 + */ + @JsonProperty("Bucket") + public String bucket() { + return bucket; + } + + /** + * Returns the object name. + * + * @return The object name. + * @since 2.60.0 + */ + @JsonProperty("Key") + public String key() { + return key; + } + + /** + * Returns the ETag of the completed object. + * + * @return The ETag of the completed object. 
+ * @since 2.60.0 + */ + @JsonProperty("ETag") + public String etag() { + return etag; + } + + /** + * Returns the CRC32C checksum of the completed object. + * + * @return The CRC32C checksum of the completed object. + * @since 2.60.0 + */ + public String crc32c() { + return crc32c; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof CompleteMultipartUploadResponse)) { + return false; + } + CompleteMultipartUploadResponse that = (CompleteMultipartUploadResponse) o; + return Objects.equals(location, that.location) + && Objects.equals(bucket, that.bucket) + && Objects.equals(key, that.key) + && Objects.equals(etag, that.etag) + && Objects.equals(crc32c, that.crc32c); + } + + @Override + public int hashCode() { + return Objects.hash(location, bucket, key, etag, crc32c); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("location", location) + .add("bucket", bucket) + .add("key", key) + .add("etag", etag) + .add("crc32c", crc32c) + .toString(); + } + + /** + * Creates a new builder for {@link CompleteMultipartUploadResponse}. + * + * @return A new builder. + * @since 2.60.0 + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Builder for {@link CompleteMultipartUploadResponse}. + * + * @since 2.60.0 + */ + @JsonPOJOBuilder(buildMethodName = "build") + public static class Builder { + private String location; + private String bucket; + private String key; + private String etag; + private String crc32c; + + private Builder() {} + + /** + * Sets the URL of the completed object. + * + * @param location The URL of the completed object. + * @return This builder. + * @since 2.60.0 + */ + @JsonProperty("Location") + public Builder location(String location) { + this.location = location; + return this; + } + + /** + * Sets the bucket name. + * + * @param bucket The bucket name. + * @return This builder. 
+ * @since 2.60.0 + */ + @JsonProperty("Bucket") + public Builder bucket(String bucket) { + this.bucket = bucket; + return this; + } + + /** + * Sets the object name. + * + * @param key The object name. + * @return This builder. + * @since 2.60.0 + */ + @JsonProperty("Key") + public Builder key(String key) { + this.key = key; + return this; + } + + /** + * Sets the ETag of the completed object. + * + * @param etag The ETag of the completed object. + * @return This builder. + * @since 2.60.0 + */ + @JsonProperty("ETag") + public Builder etag(String etag) { + this.etag = etag; + return this; + } + + /** + * Sets the CRC32C checksum of the completed object. + * + * @param crc32c The CRC32C checksum of the completed object. + * @return This builder. + * @since 2.60.0 + */ + public Builder crc32c(String crc32c) { + this.crc32c = crc32c; + return this; + } + + /** + * Builds the {@link CompleteMultipartUploadResponse} object. + * + * @return The new {@link CompleteMultipartUploadResponse} object. + * @since 2.60.0 + */ + public CompleteMultipartUploadResponse build() { + return new CompleteMultipartUploadResponse(this); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CompletedMultipartUpload.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CompletedMultipartUpload.java new file mode 100644 index 000000000000..872453431675 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CompletedMultipartUpload.java @@ -0,0 +1,116 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.storage.multipartupload.model; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.google.common.base.MoreObjects; +import java.util.List; +import java.util.Objects; + +/** + * Represents the XML payload for a completed multipart upload. This is used in the request body + * when completing a multipart upload. + * + * @since 2.60.0 This new api is in preview and is subject to breaking changes. + */ +@JacksonXmlRootElement(localName = "CompleteMultipartUpload") +public class CompletedMultipartUpload { + + @JacksonXmlElementWrapper(useWrapping = false) + @JacksonXmlProperty(localName = "Part") + private final List completedPartList; + + private CompletedMultipartUpload(Builder builder) { + this.completedPartList = builder.parts; + } + + /** + * Returns the list of completed parts for this multipart upload. + * + * @return The list of completed parts. 
+ * @since 2.60.0 + */ + public List parts() { + return completedPartList; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof CompletedMultipartUpload)) { + return false; + } + CompletedMultipartUpload that = (CompletedMultipartUpload) o; + return Objects.equals(completedPartList, that.completedPartList); + } + + @Override + public int hashCode() { + return Objects.hash(completedPartList); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("completedPartList", completedPartList).toString(); + } + + /** + * Creates a new builder for {@link CompletedMultipartUpload}. + * + * @return A new builder. + * @since 2.60.0 + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Builder for {@link CompletedMultipartUpload}. + * + * @since 2.60.0 + */ + public static class Builder { + private List parts; + + private Builder() {} + + /** + * Sets the list of completed parts for the multipart upload. + * + * @param completedPartList The list of completed parts. + * @return This builder. + * @since 2.60.0 + */ + public Builder parts(List completedPartList) { + this.parts = completedPartList; + return this; + } + + /** + * Builds the {@link CompletedMultipartUpload} object. + * + * @return The new {@link CompletedMultipartUpload} object. 
+ * @since 2.60.0 + */ + public CompletedMultipartUpload build() { + return new CompletedMultipartUpload(this); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CompletedPart.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CompletedPart.java new file mode 100644 index 000000000000..0db009987888 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CompletedPart.java @@ -0,0 +1,111 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.storage.multipartupload.model; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; + +/** + * Represents a completed part of a multipart upload. + * + * @since 2.60.0 + */ +public final class CompletedPart { + + @JacksonXmlProperty(localName = "PartNumber") + private final int partNumber; + + @JacksonXmlProperty(localName = "ETag") + private final String eTag; + + private CompletedPart(int partNumber, String eTag) { + this.partNumber = partNumber; + this.eTag = eTag; + } + + /** + * Creates a new builder for {@link CompletedPart}. + * + * @return A new builder. + * @since 2.60.0 + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Returns the part number of this completed part. + * + * @return The part number. 
+ * @since 2.60.0 + */ + public int partNumber() { + return partNumber; + } + + /** + * Returns the ETag of this completed part. + * + * @return The ETag. + * @since 2.60.0 + */ + public String eTag() { + return eTag; + } + + /** + * Builder for {@link CompletedPart}. + * + * @since 2.60.0 + */ + public static class Builder { + private int partNumber; + private String etag; + + /** + * Sets the part number of the completed part. + * + * @param partNumber The part number. + * @return This builder. + * @since 2.60.0 + */ + public Builder partNumber(int partNumber) { + this.partNumber = partNumber; + return this; + } + + /** + * Sets the ETag of the completed part. + * + * @param etag The ETag. + * @return This builder. + * @since 2.60.0 + */ + public Builder eTag(String etag) { + this.etag = etag; + return this; + } + + /** + * Builds the {@link CompletedPart} object. + * + * @return The new {@link CompletedPart} object. + * @since 2.60.0 + */ + public CompletedPart build() { + return new CompletedPart(partNumber, etag); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CreateMultipartUploadRequest.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CreateMultipartUploadRequest.java new file mode 100644 index 000000000000..f46b69ae235b --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CreateMultipartUploadRequest.java @@ -0,0 +1,515 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.multipartupload.model; + +import com.google.cloud.storage.Storage.PredefinedAcl; +import com.google.cloud.storage.StorageClass; +import com.google.common.base.MoreObjects; +import java.time.OffsetDateTime; +import java.util.Map; +import java.util.Objects; + +/** + * Represents a request to initiate a multipart upload. This class holds all the necessary + * information to create a new multipart upload session. + * + * @since 2.60.0 + */ +public final class CreateMultipartUploadRequest { + private final String bucket; + private final String key; + private final PredefinedAcl cannedAcl; + private final String contentType; + private final String contentDisposition; + private final String contentEncoding; + private final String contentLanguage; + private final String cacheControl; + private final Map metadata; + private final StorageClass storageClass; + private final OffsetDateTime customTime; + private final String kmsKeyName; + private final ObjectLockMode objectLockMode; + private final OffsetDateTime objectLockRetainUntilDate; + private final String userProject; + + private CreateMultipartUploadRequest(Builder builder) { + this.bucket = builder.bucket; + this.key = builder.key; + this.cannedAcl = builder.cannedAcl; + this.contentType = builder.contentType; + this.contentDisposition = builder.contentDisposition; + this.contentEncoding = builder.contentEncoding; + this.contentLanguage = builder.contentLanguage; + this.cacheControl = builder.cacheControl; + this.metadata = builder.metadata; + this.storageClass 
= builder.storageClass; + this.customTime = builder.customTime; + this.kmsKeyName = builder.kmsKeyName; + this.objectLockMode = builder.objectLockMode; + this.objectLockRetainUntilDate = builder.objectLockRetainUntilDate; + this.userProject = builder.userProject; + } + + /** + * Returns the name of the bucket to which the object is being uploaded. + * + * @return The bucket name + * @since 2.60.0 + */ + public String bucket() { + return bucket; + } + + /** + * Returns the name of the object. + * + * @see Object Naming + * @return The object name + * @since 2.60.0 + */ + public String key() { + return key; + } + + /** + * Returns a canned ACL to apply to the object. + * + * @return The canned ACL + * @since 2.60.0 + */ + public PredefinedAcl cannedAcl() { + return cannedAcl; + } + + /** + * Returns the MIME type of the data you are uploading. + * + * @return The Content-Type + * @since 2.60.0 + */ + public String contentType() { + return contentType; + } + + /** + * Returns the presentational information about how the object data is to be transmitted. + * + * @return The Content-Disposition + * @since 2.61.0 + */ + public String contentDisposition() { + return contentDisposition; + } + + /** + * Returns the compression algorithm that was used to compress the data being uploaded. + * + * @return The Content-Encoding + * @since 2.61.0 + */ + public String contentEncoding() { + return contentEncoding; + } + + /** + * Returns the language code of the content. + * + * @return The Content-Language + * @since 2.61.0 + */ + public String contentLanguage() { + return contentLanguage; + } + + /** + * Returns the conditions under which the resulting object should be cached if it is publicly + * accessible. + * + * @return The Cache-Control + * @since 2.61.0 + */ + public String cacheControl() { + return cacheControl; + } + + /** + * Returns the custom metadata of the object. 
+ * + * @return The custom metadata + * @since 2.60.0 + */ + public Map metadata() { + return metadata; + } + + /** + * Returns the storage class for the object. + * + * @return The Storage-Class + * @since 2.60.0 + */ + public StorageClass storageClass() { + return storageClass; + } + + /** + * Returns a user-specified date and time. + * + * @return The custom time + * @since 2.60.0 + */ + public OffsetDateTime customTime() { + return customTime; + } + + /** + * Returns the customer-managed encryption key to use to encrypt the object. + * + * @return The Cloud KMS key + * @since 2.60.0 + */ + public String kmsKeyName() { + return kmsKeyName; + } + + /** + * Returns the mode of the object's retention configuration. + * + * @return The object lock mode + * @since 2.60.0 + */ + public ObjectLockMode objectLockMode() { + return objectLockMode; + } + + /** + * Returns the date that determines the time until which the object is retained as immutable. + * + * @return The object lock retention until date + * @since 2.60.0 + */ + public OffsetDateTime objectLockRetainUntilDate() { + return objectLockRetainUntilDate; + } + + /** + * Returns the project to be billed for charges associated with this request. 
+ * + * @return The user project + * @since 2.61.0 + */ + public String userProject() { + return userProject; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof CreateMultipartUploadRequest)) { + return false; + } + CreateMultipartUploadRequest that = (CreateMultipartUploadRequest) o; + return Objects.equals(bucket, that.bucket) + && Objects.equals(key, that.key) + && cannedAcl == that.cannedAcl + && Objects.equals(contentType, that.contentType) + && Objects.equals(contentDisposition, that.contentDisposition) + && Objects.equals(contentEncoding, that.contentEncoding) + && Objects.equals(contentLanguage, that.contentLanguage) + && Objects.equals(cacheControl, that.cacheControl) + && Objects.equals(metadata, that.metadata) + && Objects.equals(storageClass, that.storageClass) + && Objects.equals(customTime, that.customTime) + && Objects.equals(kmsKeyName, that.kmsKeyName) + && objectLockMode == that.objectLockMode + && Objects.equals(objectLockRetainUntilDate, that.objectLockRetainUntilDate) + && Objects.equals(userProject, that.userProject); + } + + @Override + public int hashCode() { + return Objects.hash( + bucket, + key, + cannedAcl, + contentType, + contentDisposition, + contentEncoding, + contentLanguage, + cacheControl, + metadata, + storageClass, + customTime, + kmsKeyName, + objectLockMode, + objectLockRetainUntilDate, + userProject); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("bucket", bucket) + .add("key", key) + .add("cannedAcl", cannedAcl) + .add("contentType", contentType) + .add("contentDisposition", contentDisposition) + .add("contentEncoding", contentEncoding) + .add("contentLanguage", contentLanguage) + .add("cacheControl", cacheControl) + .add("metadata", metadata) + .add("storageClass", storageClass) + .add("customTime", customTime) + .add("kmsKeyName", kmsKeyName) + .add("objectLockMode", objectLockMode) + 
.add("objectLockRetainUntilDate", objectLockRetainUntilDate) + .add("userProject", userProject) + .toString(); + } + + /** + * Returns a new {@link Builder} for creating a {@link CreateMultipartUploadRequest}. + * + * @return a new builder + * @since 2.60.0 + */ + public static Builder builder() { + return new Builder(); + } + + /** + * A builder for {@link CreateMultipartUploadRequest}. + * + * @since 2.60.0 + */ + public static final class Builder { + private String bucket; + private String key; + private PredefinedAcl cannedAcl; + private String contentType; + private String contentDisposition; + private String contentEncoding; + private String contentLanguage; + private String cacheControl; + private Map metadata; + private StorageClass storageClass; + private OffsetDateTime customTime; + private String kmsKeyName; + private ObjectLockMode objectLockMode; + private OffsetDateTime objectLockRetainUntilDate; + private String userProject; + + private Builder() {} + + /** + * The bucket to which the object is being uploaded. + * + * @param bucket The bucket name + * @return this builder + * @since 2.60.0 + */ + public Builder bucket(String bucket) { + this.bucket = bucket; + return this; + } + + /** + * The name of the object. + * + * @see Object Naming + * @param key The object name + * @return this builder + * @since 2.60.0 + */ + public Builder key(String key) { + this.key = key; + return this; + } + + /** + * A canned ACL to apply to the object. + * + * @param cannedAcl The canned ACL + * @return this builder + * @since 2.60.0 + */ + public Builder cannedAcl(PredefinedAcl cannedAcl) { + this.cannedAcl = cannedAcl; + return this; + } + + /** + * The MIME type of the data you are uploading. + * + * @param contentType The Content-Type + * @return this builder + * @since 2.60.0 + */ + public Builder contentType(String contentType) { + this.contentType = contentType; + return this; + } + + /** + * Specifies presentational information about the object data. 
+ * + * @param contentDisposition The content disposition for the object. + * @return this builder + * @since 2.61.0 + */ + public Builder contentDisposition(String contentDisposition) { + this.contentDisposition = contentDisposition; + return this; + } + + /** + * Specifies the compression algorithm that was used to compress the object data. + * + * @param contentEncoding The content encoding for the object. + * @return this builder + * @since 2.61.0 + */ + public Builder contentEncoding(String contentEncoding) { + this.contentEncoding = contentEncoding; + return this; + } + + /** + * Specifies the language of the object's content. + * + * @param contentLanguage The content language for the object. + * @return this builder + * @since 2.61.0 + */ + public Builder contentLanguage(String contentLanguage) { + this.contentLanguage = contentLanguage; + return this; + } + + /** + * Specifies the caching behavior for the object when it is publicly accessible. + * + * @param cacheControl The cache control settings for the object. + * @return this builder + * @since 2.61.0 + */ + public Builder cacheControl(String cacheControl) { + this.cacheControl = cacheControl; + return this; + } + + /** + * The custom metadata of the object. + * + * @param metadata The custom metadata + * @return this builder + * @since 2.60.0 + */ + public Builder metadata(Map metadata) { + this.metadata = metadata; + return this; + } + + /** + * Gives each part of the upload and the resulting object a storage class besides the default + * storage class of the associated bucket. + * + * @param storageClass The Storage-Class + * @return this builder + * @since 2.60.0 + */ + public Builder storageClass(StorageClass storageClass) { + this.storageClass = storageClass; + return this; + } + + /** + * A user-specified date and time. 
+ * + * @param customTime The custom time + * @return this builder + * @since 2.60.0 + */ + public Builder customTime(OffsetDateTime customTime) { + this.customTime = customTime; + return this; + } + + /** + * The customer-managed encryption key to use to encrypt the object. Refer: Customer + * Managed Keys + * + * @param kmsKeyName The Cloud KMS key + * @return this builder + * @since 2.60.0 + */ + public Builder kmsKeyName(String kmsKeyName) { + this.kmsKeyName = kmsKeyName; + return this; + } + + /** + * Mode of the object's retention configuration. GOVERNANCE corresponds to unlocked mode, and + * COMPLIANCE corresponds to locked mode. + * + * @param objectLockMode The object lock mode + * @return this builder + * @since 2.60.0 + */ + public Builder objectLockMode(ObjectLockMode objectLockMode) { + this.objectLockMode = objectLockMode; + return this; + } + + /** + * Date that determines the time until which the object is retained as immutable. + * + * @param objectLockRetainUntilDate The object lock retention until date + * @return this builder + * @since 2.60.0 + */ + public Builder objectLockRetainUntilDate(OffsetDateTime objectLockRetainUntilDate) { + this.objectLockRetainUntilDate = objectLockRetainUntilDate; + return this; + } + + /** + * Specifies the project to be billed for this request. + * + * @param userProject The project ID to bill for this request. + * @return this builder + * @since 2.61.0 + */ + public Builder userProject(String userProject) { + this.userProject = userProject; + return this; + } + + /** + * Creates a new {@link CreateMultipartUploadRequest} object. 
+ * + * @return a new {@link CreateMultipartUploadRequest} object + * @since 2.60.0 + */ + public CreateMultipartUploadRequest build() { + return new CreateMultipartUploadRequest(this); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CreateMultipartUploadResponse.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CreateMultipartUploadResponse.java new file mode 100644 index 000000000000..1fddd47f62cf --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/CreateMultipartUploadResponse.java @@ -0,0 +1,177 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.multipartupload.model; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.google.common.base.MoreObjects; +import java.util.Objects; + +/** + * Represents the response from a CreateMultipartUpload request. This class encapsulates the details + * of the initiated multipart upload, including the bucket, key, and the unique upload ID. 
+ *
+ * @since 2.60.0
+ */
+@JacksonXmlRootElement(localName = "InitiateMultipartUploadResult")
+public final class CreateMultipartUploadResponse {
+
+  @JacksonXmlProperty(localName = "Bucket")
+  private String bucket;
+
+  @JacksonXmlProperty(localName = "Key")
+  private String key;
+
+  @JacksonXmlProperty(localName = "UploadId")
+  private String uploadId;
+
+  // Jackson requires a no-arg constructor for XML deserialization; fields are
+  // populated reflectively, which is why they cannot be final.
+  private CreateMultipartUploadResponse() {}
+
+  private CreateMultipartUploadResponse(Builder builder) {
+    this.bucket = builder.bucket;
+    this.key = builder.key;
+    this.uploadId = builder.uploadId;
+  }
+
+  /**
+   * Returns the name of the bucket where the multipart upload was initiated.
+   *
+   * @return The bucket name.
+   * @since 2.60.0
+   */
+  public String bucket() {
+    return bucket;
+  }
+
+  /**
+   * Returns the key (object name) for which the multipart upload was initiated.
+   *
+   * @return The object key.
+   * @since 2.60.0
+   */
+  public String key() {
+    return key;
+  }
+
+  /**
+   * Returns the unique identifier for this multipart upload. This ID must be included in all
+   * subsequent requests related to this upload (e.g., uploading parts, completing the upload).
+   *
+   * @return The upload ID.
+   * @since 2.60.0
+   */
+  public String uploadId() {
+    return uploadId;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof CreateMultipartUploadResponse)) {
+      return false;
+    }
+    CreateMultipartUploadResponse that = (CreateMultipartUploadResponse) o;
+    return Objects.equals(bucket, that.bucket)
+        && Objects.equals(key, that.key)
+        && Objects.equals(uploadId, that.uploadId);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(bucket, key, uploadId);
+  }
+
+  @Override
+  public String toString() {
+    return MoreObjects.toStringHelper(this)
+        .add("bucket", bucket)
+        .add("key", key)
+        .add("uploadId", uploadId)
+        .toString();
+  }
+
+  /**
+   * Creates a new builder for {@link CreateMultipartUploadResponse}.
+   *
+   * @return A new builder.
+   * @since 2.60.0
+   */
+  public static Builder builder() {
+    return new Builder();
+  }
+
+  /**
+   * A builder for {@link CreateMultipartUploadResponse} objects.
+   *
+   * @since 2.60.0
+   */
+  public static final class Builder {
+    private String bucket;
+    private String key;
+    private String uploadId;
+
+    private Builder() {}
+
+    /**
+     * Sets the bucket name for the multipart upload.
+     *
+     * @param bucket The bucket name.
+     * @return This builder.
+     * @since 2.60.0
+     */
+    public Builder bucket(String bucket) {
+      this.bucket = bucket;
+      return this;
+    }
+
+    /**
+     * Sets the key (object name) for the multipart upload.
+     *
+     * @param key The object key.
+     * @return This builder.
+     * @since 2.60.0
+     */
+    public Builder key(String key) {
+      this.key = key;
+      return this;
+    }
+
+    /**
+     * Sets the upload ID for the multipart upload.
+     *
+     * @param uploadId The upload ID.
+     * @return This builder.
+     * @since 2.60.0
+     */
+    public Builder uploadId(String uploadId) {
+      this.uploadId = uploadId;
+      return this;
+    }
+
+    /**
+     * Builds a new {@link CreateMultipartUploadResponse} object.
+     *
+     * @return A new {@link CreateMultipartUploadResponse} object.
+     * @since 2.60.0
+     */
+    public CreateMultipartUploadResponse build() {
+      return new CreateMultipartUploadResponse(this);
+    }
+  }
+}
diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ListMultipartUploadsRequest.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ListMultipartUploadsRequest.java
new file mode 100644
index 000000000000..ab55c2bacbc4
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ListMultipartUploadsRequest.java
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.multipartupload.model; + +import com.google.common.base.MoreObjects; +import java.util.Objects; + +/** + * A request to list all multipart uploads in a bucket. + * + * @see Listing + * multipart uploads + * @since 2.61.0 + */ +public final class ListMultipartUploadsRequest { + + private final String bucket; + private final String delimiter; + private final String encodingType; + private final String keyMarker; + private final Integer maxUploads; + private final String prefix; + private final String uploadIdMarker; + private final String userProject; + + private ListMultipartUploadsRequest( + String bucket, + String delimiter, + String encodingType, + String keyMarker, + Integer maxUploads, + String prefix, + String uploadIdMarker, + String userProject) { + this.bucket = bucket; + this.delimiter = delimiter; + this.encodingType = encodingType; + this.keyMarker = keyMarker; + this.maxUploads = maxUploads; + this.prefix = prefix; + this.uploadIdMarker = uploadIdMarker; + this.userProject = userProject; + } + + /** + * The bucket to list multipart uploads from. + * + * @return The bucket name. + * @since 2.61.0 + */ + public String bucket() { + return bucket; + } + + /** + * Character used to group keys. + * + * @return The delimiter. + * @since 2.61.0 + */ + public String delimiter() { + return delimiter; + } + + /** + * The encoding type used by Cloud Storage to encode object names in the response. + * + * @return The encoding type. 
+ * @since 2.61.0 + */ + public String encodingType() { + return encodingType; + } + + /** + * Together with {@code upload-id-marker}, specifies the multipart upload after which listing + * should begin. + * + * @return The key marker. + * @since 2.61.0 + */ + public String keyMarker() { + return keyMarker; + } + + /** + * The maximum number of multipart uploads to return. + * + * @return The maximum number of uploads. + * @since 2.61.0 + */ + public Integer maxUploads() { + return maxUploads; + } + + /** + * Filters results to multipart uploads whose keys begin with this prefix. + * + * @return The prefix. + * @since 2.61.0 + */ + public String prefix() { + return prefix; + } + + /** + * Together with {@code key-marker}, specifies the multipart upload after which listing should + * begin. + * + * @return The upload ID marker. + * @since 2.61.0 + */ + public String uploadIdMarker() { + return uploadIdMarker; + } + + /** + * Returns the user-project. + * + * @return the user-project. + * @see x-goog-user-project + * @since 2.61.0 + */ + public String userProject() { + return userProject; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ListMultipartUploadsRequest that = (ListMultipartUploadsRequest) o; + return Objects.equals(bucket, that.bucket) + && Objects.equals(delimiter, that.delimiter) + && Objects.equals(encodingType, that.encodingType) + && Objects.equals(keyMarker, that.keyMarker) + && Objects.equals(maxUploads, that.maxUploads) + && Objects.equals(prefix, that.prefix) + && Objects.equals(uploadIdMarker, that.uploadIdMarker) + && Objects.equals(userProject, that.userProject); + } + + @Override + public int hashCode() { + return Objects.hash( + bucket, + delimiter, + encodingType, + keyMarker, + maxUploads, + prefix, + uploadIdMarker, + userProject); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + 
.add("bucket", bucket) + .add("delimiter", delimiter) + .add("encodingType", encodingType) + .add("keyMarker", keyMarker) + .add("maxUploads", maxUploads) + .add("prefix", prefix) + .add("uploadIdMarker", uploadIdMarker) + .add("userProject", userProject) + .toString(); + } + + /** + * Returns a new builder for this request. + * + * @return A new builder. + * @since 2.61.0 + */ + public static Builder builder() { + return new Builder(); + } + + /** + * A builder for {@link ListMultipartUploadsRequest}. + * + * @since 2.61.0 + */ + public static final class Builder { + private String bucket; + private String delimiter; + private String encodingType; + private String keyMarker; + private Integer maxUploads; + private String prefix; + private String uploadIdMarker; + private String userProject; + + private Builder() {} + + /** + * Sets the bucket to list multipart uploads from. + * + * @param bucket The bucket name. + * @return This builder. + * @since 2.61.0 + */ + public Builder bucket(String bucket) { + this.bucket = bucket; + return this; + } + + /** + * Sets the delimiter used to group keys. + * + * @param delimiter The delimiter. + * @return This builder. + * @since 2.61.0 + */ + public Builder delimiter(String delimiter) { + this.delimiter = delimiter; + return this; + } + + /** + * Sets the encoding type used by Cloud Storage to encode object names in the response. + * + * @param encodingType The encoding type. + * @return This builder. + * @since 2.61.0 + */ + public Builder encodingType(String encodingType) { + this.encodingType = encodingType; + return this; + } + + /** + * Sets the key marker. + * + * @param keyMarker The key marker. + * @return This builder. + * @since 2.61.0 + */ + public Builder keyMarker(String keyMarker) { + this.keyMarker = keyMarker; + return this; + } + + /** + * Sets the maximum number of multipart uploads to return. + * + * @param maxUploads The maximum number of uploads. + * @return This builder. 
+ * @since 2.61.0 + */ + public Builder maxUploads(Integer maxUploads) { + this.maxUploads = maxUploads; + return this; + } + + /** + * Sets the prefix to filter results. + * + * @param prefix The prefix. + * @return This builder. + * @since 2.61.0 + */ + public Builder prefix(String prefix) { + this.prefix = prefix; + return this; + } + + /** + * Sets the upload ID marker. + * + * @param uploadIdMarker The upload ID marker. + * @return This builder. + * @since 2.61.0 + */ + public Builder uploadIdMarker(String uploadIdMarker) { + this.uploadIdMarker = uploadIdMarker; + return this; + } + + /** + * Sets the user-project. + * + * @param userProject The user-project. + * @return This builder. + * @see x-goog-user-project + * @since 2.61.0 + */ + public Builder userProject(String userProject) { + this.userProject = userProject; + return this; + } + + /** + * Builds the request. + * + * @return The built request. + * @since 2.61.0 + */ + public ListMultipartUploadsRequest build() { + return new ListMultipartUploadsRequest( + bucket, + delimiter, + encodingType, + keyMarker, + maxUploads, + prefix, + uploadIdMarker, + userProject); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ListMultipartUploadsResponse.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ListMultipartUploadsResponse.java new file mode 100644 index 000000000000..700784527121 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ListMultipartUploadsResponse.java @@ -0,0 +1,496 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.multipartupload.model; + +import com.fasterxml.jackson.annotation.JsonAlias; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +/** + * A response from listing all multipart uploads in a bucket. + * + * @see Listing + * multipart uploads + * @since 2.60.1 + */ +public final class ListMultipartUploadsResponse { + + @JacksonXmlElementWrapper(useWrapping = false) + @JacksonXmlProperty(localName = "Upload") + private List uploads; + + @JacksonXmlProperty(localName = "Bucket") + private String bucket; + + @JacksonXmlProperty(localName = "Delimiter") + private String delimiter; + + @JacksonXmlProperty(localName = "EncodingType") + private String encodingType; + + @JacksonXmlProperty(localName = "KeyMarker") + private String keyMarker; + + @JacksonXmlProperty(localName = "UploadIdMarker") + private String uploadIdMarker; + + @JacksonXmlProperty(localName = "NextKeyMarker") + private String nextKeyMarker; + + @JacksonXmlProperty(localName = "NextUploadIdMarker") + private String nextUploadIdMarker; + + @JacksonXmlProperty(localName = "MaxUploads") + private int maxUploads; + + @JacksonXmlProperty(localName = "Prefix") + private String prefix; + + @JsonAlias("truncated") + @JacksonXmlProperty(localName = "IsTruncated") + private boolean 
isTruncated; + + @JacksonXmlElementWrapper(useWrapping = false) + @JacksonXmlProperty(localName = "CommonPrefixes") + private List commonPrefixes; + + // Jackson requires a no-arg constructor + private ListMultipartUploadsResponse() {} + + private ListMultipartUploadsResponse( + List uploads, + String bucket, + String delimiter, + String encodingType, + String keyMarker, + String uploadIdMarker, + String nextKeyMarker, + String nextUploadIdMarker, + int maxUploads, + String prefix, + boolean isTruncated, + List commonPrefixes) { + this.uploads = uploads; + this.bucket = bucket; + this.delimiter = delimiter; + this.encodingType = encodingType; + this.keyMarker = keyMarker; + this.uploadIdMarker = uploadIdMarker; + this.nextKeyMarker = nextKeyMarker; + this.nextUploadIdMarker = nextUploadIdMarker; + this.maxUploads = maxUploads; + this.prefix = prefix; + this.isTruncated = isTruncated; + if (commonPrefixes != null) { + this.commonPrefixes = new ArrayList<>(); + for (String p : commonPrefixes) { + CommonPrefixHelper h = new CommonPrefixHelper(); + h.prefix = p; + this.commonPrefixes.add(h); + } + } + } + + /** + * The list of multipart uploads. + * + * @return The list of multipart uploads. + * @since 2.61.0 + */ + public ImmutableList uploads() { + return uploads == null ? ImmutableList.of() : ImmutableList.copyOf(uploads); + } + + /** + * The bucket that contains the multipart uploads. + * + * @return The bucket name. + * @since 2.61.0 + */ + public String bucket() { + return bucket; + } + + /** + * The delimiter applied to the request. + * + * @return The delimiter applied to the request. + * @since 2.61.0 + */ + public String delimiter() { + return delimiter; + } + + /** + * The encoding type used by Cloud Storage to encode object names in the response. + * + * @return The encoding type. + * @since 2.61.0 + */ + public String encodingType() { + return encodingType; + } + + /** + * The key at or after which the listing began. + * + * @return The key marker. 
+ * @since 2.61.0 + */ + public String keyMarker() { + return keyMarker; + } + + /** + * The upload ID at or after which the listing began. + * + * @return The upload ID marker. + * @since 2.61.0 + */ + public String uploadIdMarker() { + return uploadIdMarker; + } + + /** + * The key after which listing should begin. + * + * @return The key after which listing should begin. + * @since 2.61.0 + */ + public String nextKeyMarker() { + return nextKeyMarker; + } + + /** + * The upload ID after which listing should begin. + * + * @return The upload ID after which listing should begin. + * @since 2.61.0 + */ + public String nextUploadIdMarker() { + return nextUploadIdMarker; + } + + /** + * The maximum number of uploads to return. + * + * @return The maximum number of uploads. + * @since 2.61.0 + */ + public int maxUploads() { + return maxUploads; + } + + /** + * The prefix applied to the request. + * + * @return The prefix applied to the request. + * @since 2.61.0 + */ + public String prefix() { + return prefix; + } + + /** + * A flag indicating whether or not the returned results are truncated. + * + * @return A flag indicating whether or not the returned results are truncated. + * @since 2.61.0 + */ + public boolean truncated() { + return isTruncated; + } + + /** + * If you specify a delimiter in the request, this element is returned. + * + * @return The common prefixes. 
+ * @since 2.61.0 + */ + public ImmutableList commonPrefixes() { + if (commonPrefixes == null) { + return ImmutableList.of(); + } + return commonPrefixes.stream().map(h -> h.prefix).collect(ImmutableList.toImmutableList()); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ListMultipartUploadsResponse that = (ListMultipartUploadsResponse) o; + return isTruncated == that.isTruncated + && maxUploads == that.maxUploads + && Objects.equals(uploads(), that.uploads()) + && Objects.equals(bucket, that.bucket) + && Objects.equals(delimiter, that.delimiter) + && Objects.equals(encodingType, that.encodingType) + && Objects.equals(keyMarker, that.keyMarker) + && Objects.equals(uploadIdMarker, that.uploadIdMarker) + && Objects.equals(nextKeyMarker, that.nextKeyMarker) + && Objects.equals(nextUploadIdMarker, that.nextUploadIdMarker) + && Objects.equals(prefix, that.prefix) + && Objects.equals(commonPrefixes(), that.commonPrefixes()); + } + + @Override + public int hashCode() { + return Objects.hash( + uploads(), + bucket, + delimiter, + encodingType, + keyMarker, + uploadIdMarker, + nextKeyMarker, + nextUploadIdMarker, + maxUploads, + prefix, + isTruncated, + commonPrefixes()); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("uploads", uploads()) + .add("bucket", bucket) + .add("delimiter", delimiter) + .add("encodingType", encodingType) + .add("keyMarker", keyMarker) + .add("uploadIdMarker", uploadIdMarker) + .add("nextKeyMarker", nextKeyMarker) + .add("nextUploadIdMarker", nextUploadIdMarker) + .add("maxUploads", maxUploads) + .add("prefix", prefix) + .add("isTruncated", isTruncated) + .add("commonPrefixes", commonPrefixes()) + .toString(); + } + + /** + * Returns a new builder for this response. + * + * @return A new builder. 
+ * @since 2.61.0 + */ + public static Builder builder() { + return new Builder(); + } + + static class CommonPrefixHelper { + @JacksonXmlProperty(localName = "Prefix") + public String prefix; + } + + /** + * A builder for {@link ListMultipartUploadsResponse}. + * + * @since 2.61.0 + */ + public static final class Builder { + private ImmutableList uploads; + private String bucket; + private String delimiter; + private String encodingType; + private String keyMarker; + private String uploadIdMarker; + private String nextKeyMarker; + private String nextUploadIdMarker; + private int maxUploads; + private String prefix; + private boolean isTruncated; + private ImmutableList commonPrefixes; + + private Builder() {} + + /** + * Sets the list of multipart uploads. + * + * @param uploads The list of multipart uploads. + * @return This builder. + * @since 2.61.0 This new api is in preview. + */ + public Builder uploads(ImmutableList uploads) { + this.uploads = uploads; + return this; + } + + /** + * Sets the bucket that contains the multipart uploads. + * + * @param bucket The bucket name. + * @return This builder. + * @since 2.61.0 This new api is in preview. + */ + public Builder bucket(String bucket) { + this.bucket = bucket; + return this; + } + + /** + * Sets the delimiter applied to the request. + * + * @param delimiter The delimiter applied to the request. + * @return This builder. + * @since 2.61.0 This new api is in preview. + */ + public Builder delimiter(String delimiter) { + this.delimiter = delimiter; + return this; + } + + /** + * Sets the encoding type used by Cloud Storage to encode object names in the response. + * + * @param encodingType The encoding type. + * @return This builder. + * @since 2.61.0 This new api is in preview. + */ + public Builder encodingType(String encodingType) { + this.encodingType = encodingType; + return this; + } + + /** + * Sets the key at or after which the listing began. + * + * @param keyMarker The key marker. 
+ * @return This builder. + * @since 2.61.0 This new api is in preview. + */ + public Builder keyMarker(String keyMarker) { + this.keyMarker = keyMarker; + return this; + } + + /** + * Sets the upload ID at or after which the listing began. + * + * @param uploadIdMarker The upload ID marker. + * @return This builder. + * @since 2.61.0 This new api is in preview. + */ + public Builder uploadIdMarker(String uploadIdMarker) { + this.uploadIdMarker = uploadIdMarker; + return this; + } + + /** + * Sets the key after which listing should begin. + * + * @param nextKeyMarker The key after which listing should begin. + * @return This builder. + * @since 2.61.0 This new api is in preview. + */ + public Builder nextKeyMarker(String nextKeyMarker) { + this.nextKeyMarker = nextKeyMarker; + return this; + } + + /** + * Sets the upload ID after which listing should begin. + * + * @param nextUploadIdMarker The upload ID after which listing should begin. + * @return This builder. + * @since 2.61.0 This new api is in preview. + */ + public Builder nextUploadIdMarker(String nextUploadIdMarker) { + this.nextUploadIdMarker = nextUploadIdMarker; + return this; + } + + /** + * Sets the maximum number of uploads to return. + * + * @param maxUploads The maximum number of uploads. + * @return This builder. + * @since 2.61.0 This new api is in preview. + */ + public Builder maxUploads(int maxUploads) { + this.maxUploads = maxUploads; + return this; + } + + /** + * Sets the prefix applied to the request. + * + * @param prefix The prefix applied to the request. + * @return This builder. + * @since 2.61.0 This new api is in preview. + */ + public Builder prefix(String prefix) { + this.prefix = prefix; + return this; + } + + /** + * Sets the flag indicating whether or not the returned results are truncated. + * + * @param isTruncated The flag indicating whether or not the returned results are truncated. + * @return This builder. + * @since 2.61.0 This new api is in preview. 
+ */ + public Builder truncated(boolean isTruncated) { + this.isTruncated = isTruncated; + return this; + } + + /** + * If you specify a delimiter in the request, this element is returned. + * + * @param commonPrefixes The common prefixes. + * @return This builder. + * @since 2.61.0 This new api is in preview. + */ + public Builder commonPrefixes(ImmutableList commonPrefixes) { + this.commonPrefixes = commonPrefixes; + return this; + } + + /** + * Builds the response. + * + * @return The built response. + * @since 2.61.0 + */ + public ListMultipartUploadsResponse build() { + return new ListMultipartUploadsResponse( + uploads, + bucket, + delimiter, + encodingType, + keyMarker, + uploadIdMarker, + nextKeyMarker, + nextUploadIdMarker, + maxUploads, + prefix, + isTruncated, + commonPrefixes); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ListPartsRequest.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ListPartsRequest.java new file mode 100644 index 000000000000..96aa4a3cd817 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ListPartsRequest.java @@ -0,0 +1,254 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.multipartupload.model; + +import com.google.common.base.MoreObjects; +import java.util.Objects; + +/** + * Represents a request to list the parts of a multipart upload. + * + * @since 2.60.0 + */ +public final class ListPartsRequest { + private final String bucket; + + private final String key; + + private final String uploadId; + + private final Integer maxParts; + + private final Integer partNumberMarker; + + private final String userProject; + + private ListPartsRequest(Builder builder) { + this.bucket = builder.bucket; + this.key = builder.key; + this.uploadId = builder.uploadId; + this.maxParts = builder.maxParts; + this.partNumberMarker = builder.partNumberMarker; + this.userProject = builder.userProject; + } + + /** + * Returns the bucket name. + * + * @return the bucket name. + * @since 2.60.0 + */ + public String bucket() { + return bucket; + } + + /** + * Returns the object name. + * + * @return the object name. + * @since 2.60.0 + */ + public String key() { + return key; + } + + /** + * Returns the upload ID. + * + * @return the upload ID. + * @since 2.60.0 + */ + public String uploadId() { + return uploadId; + } + + /** + * Returns the maximum number of parts to return. + * + * @return the maximum number of parts to return. + * @since 2.60.0 + */ + public Integer maxParts() { + return maxParts; + } + + /** + * Returns the part number marker. + * + * @return the part number marker. + * @since 2.60.0 + */ + public Integer partNumberMarker() { + return partNumberMarker; + } + + /** + * Returns the user-project. + * + * @return the user-project. 
+ * @see x-goog-user-project + * @since 2.61.0 + */ + public String userProject() { + return userProject; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ListPartsRequest)) { + return false; + } + ListPartsRequest that = (ListPartsRequest) o; + return Objects.equals(bucket, that.bucket) + && Objects.equals(key, that.key) + && Objects.equals(uploadId, that.uploadId) + && Objects.equals(maxParts, that.maxParts) + && Objects.equals(partNumberMarker, that.partNumberMarker) + && Objects.equals(userProject, that.userProject); + } + + @Override + public int hashCode() { + return Objects.hash(bucket, key, uploadId, maxParts, partNumberMarker, userProject); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("bucket", bucket) + .add("key", key) + .add("uploadId", uploadId) + .add("maxParts", maxParts) + .add("partNumberMarker", partNumberMarker) + .add("userProject", userProject) + .toString(); + } + + /** + * Returns a new builder for this class. + * + * @return a new builder for this class. + * @since 2.60.0 + */ + public static Builder builder() { + return new Builder(); + } + + /** + * A builder for {@link ListPartsRequest}. + * + * @since 2.60.0 + */ + public static class Builder { + private String bucket; + private String key; + private String uploadId; + private Integer maxParts; + private Integer partNumberMarker; + private String userProject; + + private Builder() {} + + /** + * Sets the bucket name. + * + * @param bucket the bucket name. + * @return this builder. + * @since 2.60.0 + */ + public Builder bucket(String bucket) { + this.bucket = bucket; + return this; + } + + /** + * Sets the object name. + * + * @param key the object name. + * @return this builder. + * @since 2.60.0 + */ + public Builder key(String key) { + this.key = key; + return this; + } + + /** + * Sets the upload ID. + * + * @param uploadId the upload ID. + * @return this builder. 
+ * @since 2.60.0 + */ + public Builder uploadId(String uploadId) { + this.uploadId = uploadId; + return this; + } + + /** + * Sets the maximum number of parts to return. + * + * @param maxParts the maximum number of parts to return. + * @return this builder. + * @since 2.60.0 + */ + public Builder maxParts(Integer maxParts) { + this.maxParts = maxParts; + return this; + } + + /** + * Sets the part number marker. + * + * @param partNumberMarker the part number marker. + * @return this builder. + * @since 2.60.0 + */ + public Builder partNumberMarker(Integer partNumberMarker) { + this.partNumberMarker = partNumberMarker; + return this; + } + + /** + * Sets the user-project. + * + * @param userProject The user-project. + * @return This builder. + * @see x-goog-user-project + * @since 2.61.0 + */ + public Builder userProject(String userProject) { + this.userProject = userProject; + return this; + } + + /** + * Builds a new {@link ListPartsRequest} object. + * + * @return a new {@link ListPartsRequest} object. + * @since 2.60.0 + */ + public ListPartsRequest build() { + return new ListPartsRequest(this); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ListPartsResponse.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ListPartsResponse.java new file mode 100644 index 000000000000..4e66b0471dcf --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ListPartsResponse.java @@ -0,0 +1,362 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.multipartupload.model; + +import com.fasterxml.jackson.annotation.JsonAlias; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.google.cloud.storage.StorageClass; +import com.google.common.base.MoreObjects; +import java.util.List; +import java.util.Objects; + +/** + * Represents a response to a list parts request. + * + * @since 2.60.0 + */ +public final class ListPartsResponse { + + @JacksonXmlProperty(localName = "Bucket") + private String bucket; + + @JacksonXmlProperty(localName = "Key") + private String key; + + @JacksonXmlProperty(localName = "UploadId") + private String uploadId; + + @JacksonXmlProperty(localName = "PartNumberMarker") + private int partNumberMarker; + + @JacksonXmlProperty(localName = "NextPartNumberMarker") + private int nextPartNumberMarker; + + @JacksonXmlProperty(localName = "MaxParts") + private int maxParts; + + @JsonAlias("truncated") // S3 returns "truncated", GCS returns "IsTruncated" + @JacksonXmlProperty(localName = "IsTruncated") + private boolean isTruncated; + + @JacksonXmlProperty(localName = "StorageClass") + private StorageClass storageClass; + + @JacksonXmlElementWrapper(useWrapping = false) + @JacksonXmlProperty(localName = "Part") + private List parts; + + private ListPartsResponse() {} + + private ListPartsResponse(Builder builder) { + this.bucket = builder.bucket; + this.key = builder.key; + this.uploadId = builder.uploadId; + 
this.partNumberMarker = builder.partNumberMarker; + this.nextPartNumberMarker = builder.nextPartNumberMarker; + this.maxParts = builder.maxParts; + this.isTruncated = builder.isTruncated; + this.storageClass = builder.storageClass; + this.parts = builder.parts; + } + + /** + * Creates a new {@code Builder} for {@code ListPartsResponse} objects. + * + * @return A new {@code Builder} instance. + * @since 2.60.0 + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Returns the bucket name. + * + * @return the bucket name. + * @since 2.60.0 + */ + public String bucket() { + return bucket; + } + + /** + * Returns the object name. + * + * @return the object name. + * @since 2.60.0 + */ + public String key() { + return key; + } + + /** + * Returns the upload ID. + * + * @return the upload ID. + * @since 2.60.0 + */ + public String uploadId() { + return uploadId; + } + + /** + * Returns the part number marker. + * + * @return the part number marker. + * @since 2.60.0 + */ + public int partNumberMarker() { + return partNumberMarker; + } + + /** + * Returns the next part number marker. + * + * @return the next part number marker. + * @since 2.60.0 + */ + public int nextPartNumberMarker() { + return nextPartNumberMarker; + } + + /** + * Returns the maximum number of parts to return. + * + * @return the maximum number of parts to return. + * @since 2.60.0 + */ + public int maxParts() { + return maxParts; + } + + /** + * Returns true if the response is truncated. + * + * @return true if the response is truncated. + * @since 2.60.0 + */ + public boolean truncated() { + return isTruncated; + } + + /** + * Returns the storage class of the object. + * + * @return the storage class of the object. + * @since 2.60.0 + */ + public StorageClass storageClass() { + return storageClass; + } + + /** + * Returns the list of parts. + * + * @return the list of parts. 
+ * @since 2.60.0 + */ + public List parts() { + return parts; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ListPartsResponse)) { + return false; + } + ListPartsResponse that = (ListPartsResponse) o; + return Objects.equals(bucket, that.bucket) + && Objects.equals(key, that.key) + && Objects.equals(uploadId, that.uploadId) + && Objects.equals(partNumberMarker, that.partNumberMarker) + && Objects.equals(nextPartNumberMarker, that.nextPartNumberMarker) + && Objects.equals(maxParts, that.maxParts) + && Objects.equals(isTruncated, that.isTruncated) + && Objects.equals(storageClass, that.storageClass) + && Objects.equals(parts, that.parts); + } + + @Override + public int hashCode() { + return Objects.hash( + bucket, + key, + uploadId, + partNumberMarker, + nextPartNumberMarker, + maxParts, + isTruncated, + storageClass, + parts); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("bucket", bucket) + .add("key", key) + .add("uploadId", uploadId) + .add("partNumberMarker", partNumberMarker) + .add("nextPartNumberMarker", nextPartNumberMarker) + .add("maxParts", maxParts) + .add("isTruncated", isTruncated) + .add("storageClass", storageClass) + .add("parts", parts) + .toString(); + } + + /** + * Builder for {@code ListPartsResponse}. + * + * @since 2.60.0 + */ + public static final class Builder { + private String bucket; + private String key; + private String uploadId; + private int partNumberMarker; + private int nextPartNumberMarker; + private int maxParts; + private boolean isTruncated; + private StorageClass storageClass; + private List parts; + + private Builder() {} + + /** + * Sets the bucket name. + * + * @param bucket The bucket name. + * @return The builder instance. + * @since 2.60.0 + */ + public Builder bucket(String bucket) { + this.bucket = bucket; + return this; + } + + /** + * Sets the object name. + * + * @param key The object name. 
+ * @return The builder instance. + * @since 2.60.0 + */ + public Builder key(String key) { + this.key = key; + return this; + } + + /** + * Sets the upload ID. + * + * @param uploadId The upload ID. + * @return The builder instance. + * @since 2.60.0 + */ + public Builder uploadId(String uploadId) { + this.uploadId = uploadId; + return this; + } + + /** + * Sets the part number marker. + * + * @param partNumberMarker The part number marker. + * @return The builder instance. + * @since 2.60.0 + */ + public Builder partNumberMarker(int partNumberMarker) { + this.partNumberMarker = partNumberMarker; + return this; + } + + /** + * Sets the next part number marker. + * + * @param nextPartNumberMarker The next part number marker. + * @return The builder instance. + * @since 2.60.0 + */ + public Builder nextPartNumberMarker(int nextPartNumberMarker) { + this.nextPartNumberMarker = nextPartNumberMarker; + return this; + } + + /** + * Sets the maximum number of parts to return. + * + * @param maxParts The maximum number of parts to return. + * @return The builder instance. + * @since 2.60.0 + */ + public Builder maxParts(int maxParts) { + this.maxParts = maxParts; + return this; + } + + /** + * Sets whether the response is truncated. + * + * @param isTruncated True if the response is truncated, false otherwise. + * @return The builder instance. + * @since 2.60.0 + */ + public Builder truncated(boolean isTruncated) { + this.isTruncated = isTruncated; + return this; + } + + /** + * Sets the storage class of the object. + * + * @param storageClass The storage class of the object. + * @return The builder instance. + * @since 2.60.0 + */ + public Builder storageClass(StorageClass storageClass) { + this.storageClass = storageClass; + return this; + } + + /** + * Sets the list of parts. + * + * @param parts The list of parts. + * @return The builder instance. 
+ * @since 2.60.0 + */ + public Builder parts(List parts) { + this.parts = parts; + return this; + } + + /** + * Builds a {@code ListPartsResponse} object. + * + * @return A new {@code ListPartsResponse} instance. + * @since 2.60.0 + */ + public ListPartsResponse build() { + return new ListPartsResponse(this); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/MultipartUpload.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/MultipartUpload.java new file mode 100644 index 000000000000..0abe9cbdc90a --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/MultipartUpload.java @@ -0,0 +1,205 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.multipartupload.model; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.google.cloud.storage.StorageClass; +import com.google.common.base.MoreObjects; +import java.time.OffsetDateTime; +import java.util.Objects; + +/** + * Represents a multipart upload that is in progress. 
+ * + * @since 2.61.0 + */ +public final class MultipartUpload { + + @JacksonXmlProperty(localName = "Key") + private String key; + + @JacksonXmlProperty(localName = "UploadId") + private String uploadId; + + @JacksonXmlProperty(localName = "StorageClass") + private StorageClass storageClass; + + @JacksonXmlProperty(localName = "Initiated") + private OffsetDateTime initiated; + + private MultipartUpload() {} + + private MultipartUpload( + String key, String uploadId, StorageClass storageClass, OffsetDateTime initiated) { + this.key = key; + this.uploadId = uploadId; + this.storageClass = storageClass; + this.initiated = initiated; + } + + /** + * The object name for which the multipart upload was initiated. + * + * @return The object name. + * @since 2.61.0 + */ + public String key() { + return key; + } + + /** + * The ID of the multipart upload. + * + * @return The upload ID. + * @since 2.61.0 + */ + public String uploadId() { + return uploadId; + } + + /** + * The storage class of the object. + * + * @return The storage class. + * @since 2.61.0 + */ + public StorageClass storageClass() { + return storageClass; + } + + /** + * The date and time at which the multipart upload was initiated. + * + * @return The initiation date and time. 
+ * @since 2.61.0 + */ + public OffsetDateTime initiated() { + return initiated; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MultipartUpload that = (MultipartUpload) o; + return Objects.equals(key, that.key) + && Objects.equals(uploadId, that.uploadId) + && Objects.equals(storageClass, that.storageClass) + && Objects.equals(initiated, that.initiated); + } + + @Override + public int hashCode() { + return Objects.hash(key, uploadId, storageClass, initiated); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("key", key) + .add("uploadId", uploadId) + .add("storageClass", storageClass) + .add("initiated", initiated) + .toString(); + } + + /** + * Returns a new builder for this multipart upload. + * + * @return A new builder. + * @since 2.61.0 + */ + public static Builder newBuilder() { + return new Builder(); + } + + /** + * A builder for {@link MultipartUpload}. + * + * @since 2.61.0 + */ + public static final class Builder { + private String key; + private String uploadId; + private StorageClass storageClass; + private OffsetDateTime initiated; + + private Builder() {} + + /** + * Sets the object name for which the multipart upload was initiated. + * + * @param key The object name. + * @return This builder. + * @since 2.61.0 + */ + public Builder key(String key) { + this.key = key; + return this; + } + + /** + * Sets the ID of the multipart upload. + * + * @param uploadId The upload ID. + * @return This builder. + * @since 2.61.0 + */ + public Builder uploadId(String uploadId) { + this.uploadId = uploadId; + return this; + } + + /** + * Sets the storage class of the object. + * + * @param storageClass The storage class. + * @return This builder. 
+ * @since 2.61.0 + */ + public Builder storageClass(StorageClass storageClass) { + this.storageClass = storageClass; + return this; + } + + /** + * Sets the date and time at which the multipart upload was initiated. + * + * @param initiated The initiation date and time. + * @return This builder. + * @since 2.61.0 + */ + public Builder initiated(OffsetDateTime initiated) { + this.initiated = initiated; + return this; + } + + /** + * Builds the multipart upload. + * + * @return The built multipart upload. + * @since 2.61.0 + */ + public MultipartUpload build() { + return new MultipartUpload(key, uploadId, storageClass, initiated); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ObjectLockMode.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ObjectLockMode.java new file mode 100644 index 000000000000..54ec1da6af41 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/ObjectLockMode.java @@ -0,0 +1,86 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.storage.multipartupload.model; + +import com.google.api.core.ApiFunction; +import com.google.cloud.StringEnumType; +import com.google.cloud.StringEnumValue; + +/** + * Represents the object lock mode. 
See https://cloud.google.com/storage/docs/object-lock + * for details. + * + * @since 2.60.0 + */ +public final class ObjectLockMode extends StringEnumValue { + private static final long serialVersionUID = -1882734434792102329L; + + private ObjectLockMode(String constant) { + super(constant); + } + + private static final ApiFunction CONSTRUCTOR = ObjectLockMode::new; + + private static final StringEnumType type = + new StringEnumType<>(ObjectLockMode.class, CONSTRUCTOR); + + /** + * Governance mode. See https://cloud.google.com/storage/docs/object-lock + * for details. + * + * @since 2.60.0 + */ + public static final ObjectLockMode GOVERNANCE = type.createAndRegister("GOVERNANCE"); + + /** + * Compliance mode. See https://cloud.google.com/storage/docs/object-lock + * for details. + * + * @since 2.60.0 + */ + public static final ObjectLockMode COMPLIANCE = type.createAndRegister("COMPLIANCE"); + + /** + * Get the ObjectLockMode for the given String constant, and throw an exception if the constant is + * not recognized. + * + * @since 2.60.0 + */ + public static ObjectLockMode valueOfStrict(String constant) { + return type.valueOfStrict(constant); + } + + /** + * Get the ObjectLockMode for the given String constant, and allow unrecognized values. + * + * @since 2.60.0 + */ + public static ObjectLockMode valueOf(String constant) { + return type.valueOf(constant); + } + + /** + * Return the known values for ObjectLockMode. 
+ * + * @since 2.60.0 + */ + public static ObjectLockMode[] values() { + return type.values(); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/Part.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/Part.java new file mode 100644 index 000000000000..8daa7874480d --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/Part.java @@ -0,0 +1,204 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.multipartupload.model; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.google.common.base.MoreObjects; +import java.time.OffsetDateTime; +import java.util.Objects; + +/** + * Represents a part of a multipart upload. 
+ * + * @since 2.60.0 + */ +public final class Part { + + @JacksonXmlProperty(localName = "PartNumber") + private int partNumber; + + @JacksonXmlProperty(localName = "ETag") + private String eTag; + + @JacksonXmlProperty(localName = "Size") + private long size; + + @JacksonXmlProperty(localName = "LastModified") + private OffsetDateTime lastModified; + + // for jackson + private Part() {} + + private Part(Builder builder) { + this.partNumber = builder.partNumber; + this.eTag = builder.eTag; + this.size = builder.size; + this.lastModified = builder.lastModified; + } + + /** + * Returns the part number. + * + * @return the part number. + * @since 2.60.0 + */ + public int partNumber() { + return partNumber; + } + + /** + * Returns the ETag of the part. + * + * @return the ETag of the part. + * @since 2.60.0 + */ + public String eTag() { + return eTag; + } + + /** + * Returns the size of the part. + * + * @return the size of the part. + * @since 2.60.0 + */ + public long size() { + return size; + } + + /** + * Returns the last modified time of the part. + * + * @return the last modified time of the part. + * @since 2.60.0 + */ + public OffsetDateTime lastModified() { + return lastModified; + } + + /** + * Returns a new builder for this class. + * + * @return a new builder for this class. 
+ * @since 2.60.0 + */ + public static Builder builder() { + return new Builder(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Part)) { + return false; + } + Part that = (Part) o; + return Objects.equals(partNumber, that.partNumber) + && Objects.equals(eTag, that.eTag) + && Objects.equals(size, that.size) + && Objects.equals(lastModified, that.lastModified); + } + + @Override + public int hashCode() { + return Objects.hash(partNumber, eTag, size, lastModified); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("partNumber", partNumber) + .add("eTag", eTag) + .add("size", size) + .add("lastModified", lastModified) + .toString(); + } + + /** + * A builder for {@link Part}. + * + * @since 2.60.0 + */ + public static final class Builder { + private int partNumber; + private String eTag; + private long size; + private OffsetDateTime lastModified; + + private Builder() {} + + /** + * Sets the part number. + * + * @param partNumber the part number. + * @return this builder. + * @since 2.60.0 + */ + public Builder partNumber(int partNumber) { + this.partNumber = partNumber; + return this; + } + + /** + * Sets the ETag of the part. + * + * @param eTag the ETag of the part. + * @return this builder. + * @since 2.60.0 + */ + public Builder eTag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Sets the size of the part. + * + * @param size the size of the part. + * @return this builder. + * @since 2.60.0 + */ + public Builder size(long size) { + this.size = size; + return this; + } + + /** + * Sets the last modified time of the part. + * + * @param lastModified the last modified time of the part. + * @return this builder. + * @since 2.60.0 + */ + public Builder lastModified(OffsetDateTime lastModified) { + this.lastModified = lastModified; + return this; + } + + /** + * Builds a new {@link Part} object. + * + * @return a new {@link Part} object. 
+ * @since 2.60.0 + */ + public Part build() { + return new Part(this); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/UploadPartRequest.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/UploadPartRequest.java new file mode 100644 index 000000000000..d063511c6bd2 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/UploadPartRequest.java @@ -0,0 +1,253 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.multipartupload.model; + +import com.google.common.base.MoreObjects; +import java.util.Objects; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * An object to represent an upload part request. An upload part request is used to upload a single + * part of a multipart upload. 
+ * + * @since 2.60.0 + */ +public final class UploadPartRequest { + + private final String bucket; + private final String key; + private final int partNumber; + private final String uploadId; + @Nullable private final String crc32c; + private final String userProject; + + private UploadPartRequest(Builder builder) { + this.bucket = builder.bucket; + this.key = builder.key; + this.partNumber = builder.partNumber; + this.uploadId = builder.uploadId; + this.crc32c = builder.crc32c; + this.userProject = builder.userProject; + } + + /** + * Returns the bucket to upload the part to. + * + * @return The bucket to upload the part to. + * @since 2.60.0 + */ + public String bucket() { + return bucket; + } + + /** + * Returns the key of the object to upload the part to. + * + * @return The key of the object to upload the part to. + * @since 2.60.0 + */ + public String key() { + return key; + } + + /** + * Returns the part number of the part to upload. + * + * @return The part number of the part to upload. + * @since 2.60.0 + */ + public int partNumber() { + return partNumber; + } + + /** + * Returns the upload ID of the multipart upload. + * + * @return The upload ID of the multipart upload. + * @since 2.60.0 + */ + public String uploadId() { + return uploadId; + } + + /** + * Returns the CRC32C checksum of the part to upload. + * + * @return The CRC32C checksum of the part to upload. + * @since 2.61.0 + */ + @Nullable + public String crc32c() { + return crc32c; + } + + /** + * Returns the user-project. + * + * @return the user-project. 
+ * @see x-goog-user-project + * @since 2.61.0 + */ + public String userProject() { + return userProject; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof UploadPartRequest)) { + return false; + } + UploadPartRequest that = (UploadPartRequest) o; + return partNumber == that.partNumber + && Objects.equals(bucket, that.bucket) + && Objects.equals(key, that.key) + && Objects.equals(uploadId, that.uploadId) + && Objects.equals(crc32c, that.crc32c) + && Objects.equals(userProject, that.userProject); + } + + @Override + public int hashCode() { + return Objects.hash(bucket, key, partNumber, uploadId, crc32c, userProject); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("bucket", bucket) + .add("key", key) + .add("partNumber", partNumber) + .add("uploadId", uploadId) + .add("crc32c", crc32c) + .add("userProject", userProject) + .toString(); + } + + /** + * Returns a new builder for an {@link UploadPartRequest}. + * + * @return A new builder. + * @since 2.60.0 + */ + public static Builder builder() { + return new Builder(); + } + + /** + * A builder for {@link UploadPartRequest}. + * + * @since 2.60.0 + */ + public static class Builder { + private String bucket; + private String key; + private int partNumber; + private String uploadId; + @Nullable private String crc32c; + private String userProject; + + private Builder() {} + + /** + * Sets the bucket to upload the part to. + * + * @param bucket The bucket to upload the part to. + * @return This builder. + * @since 2.60.0 + */ + public Builder bucket(String bucket) { + this.bucket = bucket; + return this; + } + + /** + * Sets the key of the object to upload the part to. + * + * @param key The key of the object to upload the part to. + * @return This builder. + * @since 2.60.0 + */ + public Builder key(String key) { + this.key = key; + return this; + } + + /** + * Sets the part number of the part to upload. 
+ * + * @param partNumber The part number of the part to upload. + * @return This builder. + * @since 2.60.0 + */ + public Builder partNumber(int partNumber) { + this.partNumber = partNumber; + return this; + } + + /** + * Sets the upload ID of the multipart upload. + * + * @param uploadId The upload ID of the multipart upload. + * @return This builder. + * @since 2.60.0 + */ + public Builder uploadId(String uploadId) { + this.uploadId = uploadId; + return this; + } + + /** + * Sets the CRC32C checksum of the part to upload. + * + * @param crc32c The CRC32C checksum of the part to upload. + * @return This builder. + * @since 2.61.0 + */ + public Builder crc32c(@Nullable String crc32c) { + this.crc32c = crc32c; + return this; + } + + /** + * Sets the user-project. + * + * @param userProject The user-project. + * @return This builder. + * @see x-goog-user-project + * @since 2.61.0 + */ + public Builder userProject(String userProject) { + this.userProject = userProject; + return this; + } + + /** + * Builds the {@link UploadPartRequest}. + * + * @return The built {@link UploadPartRequest}. + * @since 2.60.0 + */ + public UploadPartRequest build() { + return new UploadPartRequest(this); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/UploadPartResponse.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/UploadPartResponse.java new file mode 100644 index 000000000000..97d9e4276f9e --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/multipartupload/model/UploadPartResponse.java @@ -0,0 +1,166 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.multipartupload.model; + +import com.google.common.base.MoreObjects; +import java.util.Objects; + +/** + * Represents the response from uploading a part in a multipart upload. It contains the ETag and + * checksums of the uploaded part. + * + * @since 2.60.0 + */ +public final class UploadPartResponse { + + private final String eTag; + private final String md5; + private final String crc32c; + + private UploadPartResponse(Builder builder) { + this.eTag = builder.etag; + this.md5 = builder.md5; + this.crc32c = builder.crc32c; + } + + /** + * Returns the ETag of the uploaded part. + * + * @return The ETag. + * @since 2.60.0 + */ + public String eTag() { + return eTag; + } + + /** + * Returns the MD5 hash of the uploaded part. + * + * @return The MD5 hash. + * @since 2.60.0 + */ + public String md5() { + return md5; + } + + /** + * Returns the CRC32C checksum of the uploaded part. + * + * @return The CRC32C checksum. 
+ * @since 2.61.0 + */ + public String crc32c() { + return crc32c; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof UploadPartResponse)) { + return false; + } + UploadPartResponse that = (UploadPartResponse) o; + return Objects.equals(eTag, that.eTag) + && Objects.equals(md5, that.md5) + && Objects.equals(crc32c, that.crc32c); + } + + @Override + public int hashCode() { + return Objects.hash(eTag, md5, crc32c); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("etag", eTag) + .add("md5", md5) + .add("crc32c", crc32c) + .toString(); + } + + /** + * Creates a new builder for creating an {@code UploadPartResponse}. + * + * @return A new builder. + * @since 2.60.0 + */ + public static Builder builder() { + return new Builder(); + } + + /** + * A builder for creating {@code UploadPartResponse} instances. + * + * @since 2.60.0 + */ + public static class Builder { + private String etag; + private String md5; + private String crc32c; + + private Builder() {} + + /** + * Sets the ETag for the uploaded part. + * + * @param etag The ETag. + * @return This builder. + * @since 2.60.0 + */ + public Builder eTag(String etag) { + this.etag = etag; + return this; + } + + /** + * Sets the MD5 hash for the uploaded part. + * + * @param md5 The MD5 hash. + * @return This builder. + * @since 2.60.0 + */ + public Builder md5(String md5) { + this.md5 = md5; + return this; + } + + /** + * Sets the CRC32C checksum for the uploaded part. + * + * @param crc32c The CRC32C checksum. + * @return This builder. + * @since 2.61.0 + */ + public Builder crc32c(String crc32c) { + this.crc32c = crc32c; + return this; + } + + /** + * Builds the {@code UploadPartResponse} object. + * + * @return The built {@code UploadPartResponse} object. 
+ * @since 2.60.0 + */ + public UploadPartResponse build() { + return new UploadPartResponse(this); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/package-info.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/package-info.java new file mode 100644 index 000000000000..e6d0ce4ae356 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/package-info.java @@ -0,0 +1,106 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client for Cloud Storage - Unified object storage. + * + *

Here's a simple usage example of the Java Storage client. This example shows how to create a + * Storage object. + * + * 

{@code
+ * Storage storage = StorageOptions.getDefaultInstance().getService();
+ * BlobId blobId = BlobId.of("bucket", "blob_name");
+ * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build();
+ * Blob blob = storage.create(blobInfo, "Hello, Cloud Storage!".getBytes(UTF_8));
+ * }
+ * + *

This second example shows how to update an object's content if the object exists. + * + *

{@code
+ * Storage storage = StorageOptions.getDefaultInstance().getService();
+ * BlobId blobId = BlobId.of("bucket", "blob_name");
+ * Blob blob = storage.get(blobId);
+ * if (blob != null) {
+ *   byte[] prevContent = blob.getContent();
+ *   System.out.println(new String(prevContent, UTF_8));
+ *   WritableByteChannel channel = blob.writer();
+ *   channel.write(ByteBuffer.wrap("Updated content".getBytes(UTF_8)));
+ *   channel.close();
+ * }
+ * }
+ * + *

For more detailed code examples, see the sample library. + * + *

When using google-cloud from outside of App/Compute Engine, you have to specify a project + * ID and provide + * credentials. + * + *

Operations in this library are generally thread safe, except for the use of + * BlobReadChannel and + * BlobWriteChannel. + * + *

The GCS Java client library includes support for GCS via gRPC. When using GCS from Google + * Compute Engine (GCE) this library enables higher total throughput across large workloads that run + * on hundreds or thousands of VMs. + * + * 

At present, GCS gRPC is GA with Allowlist. To access this API, kindly contact the Google Cloud + * Storage gRPC team at gcs-grpc-contact@google.com with a list of GCS buckets you would like to + * Allowlist. Please note that while the **service** is GA (with Allowlist), the client library + * features remain experimental and subject to change without notice. The methods to create, list, + * query, and delete HMAC keys and notifications are unavailable in gRPC transport. + * + *

This example shows how to enable gRPC with Direct Google Access, which is only supported on Google + * Compute Engine. + * + * 

{@code
+ * StorageOptions options = StorageOptions.grpc().setAttemptDirectPath(true).build();
+ * try (Storage storage = options.getService()) {
+ * BlobId blobId = BlobId.of("bucket", "blob_name");
+ * Blob blob = storage.get(blobId);
+ * if (blob != null) {
+ *   byte[] prevContent = blob.getContent();
+ *   System.out.println(new String(prevContent, UTF_8));
+ *   WritableByteChannel channel = blob.writer();
+ *   channel.write(ByteBuffer.wrap("Updated content".getBytes(UTF_8)));
+ *   channel.close();
+ * }
+ * }
+ * }
+ * + *

This example shows how to enable gRPC. + * + *

{@code
+ * StorageOptions options = StorageOptions.grpc().build();
+ * try (Storage storage = options.getService()) {
+ * BlobId blobId = BlobId.of("bucket", "blob_name");
+ * Blob blob = storage.get(blobId);
+ * if (blob != null) {
+ *   byte[] prevContent = blob.getContent();
+ *   System.out.println(new String(prevContent, UTF_8));
+ *   WritableByteChannel channel = blob.writer();
+ *   channel.write(ByteBuffer.wrap("Updated content".getBytes(UTF_8)));
+ *   channel.close();
+ * }
+ * }
+ * }
+ * + * @see Google Cloud Storage + */ +package com.google.cloud.storage; diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/StorageRpcFactory.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/StorageRpcFactory.java new file mode 100644 index 000000000000..e93cd55e7fe9 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/StorageRpcFactory.java @@ -0,0 +1,26 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.spi; + +import com.google.cloud.spi.ServiceRpcFactory; +import com.google.cloud.storage.StorageOptions; + +/** + * An interface for Storage RPC factory. Implementation will be loaded via {@link + * java.util.ServiceLoader}. + */ +public interface StorageRpcFactory extends ServiceRpcFactory {} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/HttpRpcContext.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/HttpRpcContext.java new file mode 100644 index 000000000000..a62e7f99d428 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/HttpRpcContext.java @@ -0,0 +1,72 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.spi.v1; + +import com.google.api.core.InternalApi; +import java.util.UUID; +import java.util.function.Supplier; +import javax.annotation.Nullable; + +@InternalApi +public final class HttpRpcContext { + + private static final Object GET_INSTANCE_LOCK = new Object(); + + private static volatile HttpRpcContext instance; + + private final ThreadLocal invocationId; + private final Supplier supplier; + + HttpRpcContext(Supplier randomUUID) { + this.invocationId = new InheritableThreadLocal<>(); + this.supplier = randomUUID; + } + + @InternalApi + @Nullable + public UUID getInvocationId() { + return invocationId.get(); + } + + @InternalApi + public UUID newInvocationId() { + invocationId.set(supplier.get()); + return getInvocationId(); + } + + @InternalApi + public void clearInvocationId() { + invocationId.remove(); + } + + @InternalApi + public static HttpRpcContext init() { + return new HttpRpcContext(UUID::randomUUID); + } + + @InternalApi + public static HttpRpcContext getInstance() { + if (instance == null) { + synchronized (GET_INSTANCE_LOCK) { + if (instance == null) { + instance = init(); + } + } + } + return instance; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/HttpStorageRpc.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/HttpStorageRpc.java new file mode 100644 index 000000000000..5f910fb7775a --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/HttpStorageRpc.java 
@@ -0,0 +1,1984 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.spi.v1; + +import static com.google.common.base.MoreObjects.firstNonNull; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; + +import com.google.api.client.googleapis.batch.BatchRequest; +import com.google.api.client.googleapis.batch.json.JsonBatchCallback; +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.client.googleapis.media.MediaHttpDownloader; +import com.google.api.client.http.ByteArrayContent; +import com.google.api.client.http.EmptyContent; +import com.google.api.client.http.GenericUrl; +import com.google.api.client.http.HttpExecuteInterceptor; +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpRequest; +import com.google.api.client.http.HttpRequestFactory; +import com.google.api.client.http.HttpRequestInitializer; +import com.google.api.client.http.HttpResponse; +import com.google.api.client.http.HttpResponseException; +import com.google.api.client.http.HttpStatusCodes; +import com.google.api.client.http.HttpTransport; +import com.google.api.client.http.InputStreamContent; +import com.google.api.client.http.json.JsonHttpContent; +import com.google.api.client.json.JsonFactory; +import 
com.google.api.client.json.jackson2.JacksonFactory; +import com.google.api.client.util.Data; +import com.google.api.services.storage.Storage; +import com.google.api.services.storage.Storage.BucketAccessControls; +import com.google.api.services.storage.Storage.Buckets; +import com.google.api.services.storage.Storage.Buckets.LockRetentionPolicy; +import com.google.api.services.storage.Storage.Buckets.SetIamPolicy; +import com.google.api.services.storage.Storage.Buckets.TestIamPermissions; +import com.google.api.services.storage.Storage.DefaultObjectAccessControls; +import com.google.api.services.storage.Storage.Notifications; +import com.google.api.services.storage.Storage.ObjectAccessControls; +import com.google.api.services.storage.Storage.Objects.Compose; +import com.google.api.services.storage.Storage.Objects.Delete; +import com.google.api.services.storage.Storage.Objects.Get; +import com.google.api.services.storage.Storage.Objects.Insert; +import com.google.api.services.storage.Storage.Objects.Move; +import com.google.api.services.storage.Storage.Objects.Patch; +import com.google.api.services.storage.Storage.Projects; +import com.google.api.services.storage.Storage.Projects.HmacKeys; +import com.google.api.services.storage.Storage.Projects.HmacKeys.Create; +import com.google.api.services.storage.Storage.Projects.HmacKeys.Update; +import com.google.api.services.storage.StorageRequest; +import com.google.api.services.storage.model.Bucket; +import com.google.api.services.storage.model.Bucket.RetentionPolicy; +import com.google.api.services.storage.model.BucketAccessControl; +import com.google.api.services.storage.model.ComposeRequest; +import com.google.api.services.storage.model.ComposeRequest.SourceObjects.ObjectPreconditions; +import com.google.api.services.storage.model.HmacKey; +import com.google.api.services.storage.model.HmacKeyMetadata; +import com.google.api.services.storage.model.HmacKeysMetadata; +import 
com.google.api.services.storage.model.Notification; +import com.google.api.services.storage.model.ObjectAccessControl; +import com.google.api.services.storage.model.Objects; +import com.google.api.services.storage.model.Policy; +import com.google.api.services.storage.model.ServiceAccount; +import com.google.api.services.storage.model.StorageObject; +import com.google.api.services.storage.model.TestIamPermissionsResponse; +import com.google.cloud.Tuple; +import com.google.cloud.http.CensusHttpModule; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import com.google.common.base.Function; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; +import com.google.common.hash.HashFunction; +import com.google.common.hash.Hashing; +import com.google.common.io.BaseEncoding; +import io.opencensus.common.Scope; +import io.opencensus.trace.AttributeValue; +import io.opencensus.trace.Span; +import io.opencensus.trace.Status; +import io.opencensus.trace.Tracer; +import io.opencensus.trace.Tracing; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.math.BigInteger; +import java.net.FileNameMap; +import java.net.URLConnection; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedList; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; +import java.util.UUID; +import java.util.stream.Collector; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import javax.annotation.Nullable; + +public class HttpStorageRpc implements StorageRpc { + public static final String DEFAULT_PROJECTION = "full"; + public static final String NO_ACL_PROJECTION = "noAcl"; + private static final 
String ENCRYPTION_KEY_PREFIX = "x-goog-encryption-"; + private static final String SOURCE_ENCRYPTION_KEY_PREFIX = "x-goog-copy-source-encryption-"; + + // declare this HttpStatus code here as it's not included in java.net.HttpURLConnection + private static final int SC_REQUESTED_RANGE_NOT_SATISFIABLE = 416; + private static final boolean IS_RECORD_EVENTS = true; + private static final String X_GOOG_GCS_IDEMPOTENCY_TOKEN = "x-goog-gcs-idempotency-token"; + + private final StorageOptions options; + private final Storage storage; + private final Tracer tracer = Tracing.getTracer(); + private final HttpRequestInitializer batchRequestInitializer; + + private static final long MEGABYTE = 1024L * 1024L; + private static final FileNameMap FILE_NAME_MAP = URLConnection.getFileNameMap(); + + public HttpStorageRpc(StorageOptions options) { + this(options, new JacksonFactory()); + } + + public HttpStorageRpc(StorageOptions options, JsonFactory jsonFactory) { + HttpTransportOptions transportOptions = (HttpTransportOptions) options.getTransportOptions(); + HttpTransport transport = transportOptions.getHttpTransportFactory().create(); + HttpRequestInitializer initializer = transportOptions.getHttpRequestInitializer(options); + this.options = options; + + boolean isTm = + Arrays.stream(Thread.currentThread().getStackTrace()) + .anyMatch( + ste -> ste.getClassName().startsWith("com.google.cloud.storage.transfermanager")); + String tm = isTm ? 
"gccl-gcs-cmd/tm" : null; + + // Open Census initialization + String applicationName = options.getApplicationName(); + CensusHttpModule censusHttpModule = new CensusHttpModule(tracer, IS_RECORD_EVENTS); + initializer = censusHttpModule.getHttpRequestInitializer(initializer); + initializer = new InvocationIdInitializer(initializer, applicationName, tm); + batchRequestInitializer = censusHttpModule.getHttpRequestInitializer(null); + storage = + new Storage.Builder(transport, jsonFactory, initializer) + .setRootUrl(options.getHost()) + .setApplicationName(applicationName) + .build(); + } + + public Storage getStorage() { + return storage; + } + + private static final class InvocationIdInitializer implements HttpRequestInitializer { + @Nullable HttpRequestInitializer initializer; + @Nullable private final String applicationName; + @Nullable private final String tm; + + private InvocationIdInitializer( + @Nullable HttpRequestInitializer initializer, @Nullable String applicationName, String tm) { + this.initializer = initializer; + this.applicationName = applicationName; + this.tm = tm; + } + + @Override + public void initialize(HttpRequest request) throws IOException { + checkNotNull(request); + if (this.initializer != null) { + this.initializer.initialize(request); + } + request.setInterceptor( + new InvocationIdInterceptor(request.getInterceptor(), applicationName, tm)); + } + } + + private static final class InvocationIdInterceptor implements HttpExecuteInterceptor { + + private static final Collector JOINER = Collectors.joining(" "); + @Nullable private final HttpExecuteInterceptor interceptor; + @Nullable private final String applicationName; + + @Nullable private final String tm; + + private InvocationIdInterceptor( + @Nullable HttpExecuteInterceptor interceptor, @Nullable String applicationName, String tm) { + this.interceptor = interceptor; + this.applicationName = applicationName; + this.tm = tm; + } + + @Override + public void intercept(HttpRequest request) 
throws IOException { + checkNotNull(request); + if (this.interceptor != null) { + this.interceptor.intercept(request); + } + HttpRpcContext httpRpcContext = HttpRpcContext.getInstance(); + UUID invocationId = httpRpcContext.getInvocationId(); + final String signatureKey = "Signature="; // For V2 and V4 signedURLs + final String builtURL = request.getUrl().build(); + if (invocationId != null && !builtURL.contains(signatureKey)) { + HttpHeaders headers = request.getHeaders(); + String existing = (String) headers.get("x-goog-api-client"); + String invocationEntry = "gccl-invocation-id/" + invocationId; + final String newValue = + Stream.of(existing, invocationEntry, tm) + .filter(java.util.Objects::nonNull) + .collect(JOINER); + headers.set("x-goog-api-client", newValue); + headers.set(X_GOOG_GCS_IDEMPOTENCY_TOKEN, invocationId); + + String userAgent = headers.getUserAgent(); + if ((userAgent == null + || userAgent.isEmpty() + || (applicationName != null && !userAgent.contains(applicationName)))) { + headers.setUserAgent(applicationName); + } + } + } + } + + private class DefaultRpcBatch implements RpcBatch { + + // Batch size is limited as, due to some current service implementation details, the service + // performs better if the batches are split for better distribution. See + // https://github.com/googleapis/google-cloud-java/pull/952#issuecomment-213466772 for + // background. 
+ private static final int MAX_BATCH_SIZE = 100; + + private final Storage storage; + private final LinkedList batches; + private int currentBatchSize; + + private DefaultRpcBatch(Storage storage) { + this.storage = storage; + batches = new LinkedList<>(); + // add OpenCensus HttpRequestInitializer + batches.add(storage.batch(batchRequestInitializer)); + } + + @Override + public void addDelete( + StorageObject storageObject, RpcBatch.Callback callback, Map options) { + try { + if (currentBatchSize == MAX_BATCH_SIZE) { + batches.add(storage.batch()); + currentBatchSize = 0; + } + Delete call = deleteCall(storageObject, options); + addIdempotencyTokenToCall(call); + call.queue(batches.getLast(), toJsonCallback(callback)); + currentBatchSize++; + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public void addPatch( + StorageObject storageObject, + RpcBatch.Callback callback, + Map options) { + try { + if (currentBatchSize == MAX_BATCH_SIZE) { + batches.add(storage.batch()); + currentBatchSize = 0; + } + Patch call = patchCall(storageObject, options); + addIdempotencyTokenToCall(call); + call.queue(batches.getLast(), toJsonCallback(callback)); + currentBatchSize++; + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public void addGet( + StorageObject storageObject, + RpcBatch.Callback callback, + Map options) { + try { + if (currentBatchSize == MAX_BATCH_SIZE) { + batches.add(storage.batch()); + currentBatchSize = 0; + } + Get call = getCall(storageObject, options); + addIdempotencyTokenToCall(call); + call.queue(batches.getLast(), toJsonCallback(callback)); + currentBatchSize++; + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public void submit() { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_BATCH_SUBMIT); + Scope scope = tracer.withSpan(span); + try { + span.putAttribute("batch size", AttributeValue.longAttributeValue(batches.size())); + for (BatchRequest batch : batches) { + // 
TODO(hailongwen@): instrument 'google-api-java-client' to further break down the span. + // Here we only add a annotation to at least know how much time each batch takes. + span.addAnnotation("Execute batch request"); + batch.setBatchUrl( + new GenericUrl(String.format(Locale.US, "%s/batch/storage/v1", options.getHost()))); + batch.execute(); + } + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + private void addIdempotencyTokenToCall(StorageRequest call) { + HttpRpcContext instance = HttpRpcContext.getInstance(); + call.getRequestHeaders().set(X_GOOG_GCS_IDEMPOTENCY_TOKEN, instance.newInvocationId()); + instance.clearInvocationId(); + } + } + + private static JsonBatchCallback toJsonCallback(final RpcBatch.Callback callback) { + return new JsonBatchCallback() { + @Override + public void onSuccess(T response, HttpHeaders httpHeaders) throws IOException { + callback.onSuccess(response); + } + + @Override + public void onFailure(GoogleJsonError googleJsonError, HttpHeaders httpHeaders) + throws IOException { + callback.onFailure(googleJsonError); + } + }; + } + + private static StorageException translate(IOException exception) { + return StorageException.translate(exception); + } + + private static StorageException translate(GoogleJsonError exception) { + return new StorageException(exception); + } + + private static void setEncryptionHeaders( + HttpHeaders headers, String headerPrefix, Map options) { + String key = Option.CUSTOMER_SUPPLIED_KEY.getString(options); + if (key != null) { + BaseEncoding base64 = BaseEncoding.base64(); + HashFunction hashFunction = Hashing.sha256(); + headers.set(headerPrefix + "algorithm", "AES256"); + headers.set(headerPrefix + "key", key); + headers.set( + headerPrefix + "key-sha256", + base64.encode(hashFunction.hashBytes(base64.decode(key)).asBytes())); + } + } + + /** Helper 
method to start a span. */ + private Span startSpan(String spanName) { + return tracer.spanBuilder(spanName).setRecordEvents(IS_RECORD_EVENTS).startSpan(); + } + + @Override + public Bucket create(Bucket bucket, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_CREATE_BUCKET); + Scope scope = tracer.withSpan(span); + try { + Storage.Buckets.Insert insert = + storage + .buckets() + .insert(this.options.getProjectId(), bucket) + .setProjection(DEFAULT_PROJECTION) + .setPredefinedAcl(Option.PREDEFINED_ACL.getString(options)) + .setPredefinedDefaultObjectAcl( + Option.PREDEFINED_DEFAULT_OBJECT_ACL.getString(options)) + .setUserProject(Option.USER_PROJECT.getString(options)) + .setEnableObjectRetention(Option.ENABLE_OBJECT_RETENTION.getBoolean(options)); + setExtraHeaders(insert, options); + return insert.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public StorageObject create( + StorageObject storageObject, final InputStream content, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_CREATE_OBJECT); + Scope scope = tracer.withSpan(span); + try { + Storage.Objects.Insert insert = + storage + .objects() + .insert( + storageObject.getBucket(), + storageObject, + new InputStreamContent(detectContentType(storageObject, options), content)); + insert.getMediaHttpUploader().setDirectUploadEnabled(true); + Boolean disableGzipContent = Option.IF_DISABLE_GZIP_CONTENT.getBoolean(options); + if (disableGzipContent != null) { + insert.setDisableGZipContent(disableGzipContent); + } + setEncryptionHeaders(insert.getRequestHeaders(), ENCRYPTION_KEY_PREFIX, options); + setExtraHeaders(insert, options); + return insert + .setProjection(DEFAULT_PROJECTION) + .setPredefinedAcl(Option.PREDEFINED_ACL.getString(options)) + 
.setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(options)) + .setIfMetagenerationNotMatch(Option.IF_METAGENERATION_NOT_MATCH.getLong(options)) + .setIfGenerationMatch(Option.IF_GENERATION_MATCH.getLong(options)) + .setIfGenerationNotMatch(Option.IF_GENERATION_NOT_MATCH.getLong(options)) + .setUserProject(Option.USER_PROJECT.getString(options)) + .setKmsKeyName(Option.KMS_KEY_NAME.getString(options)) + .execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public Tuple> list(Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_LIST_BUCKETS); + Scope scope = tracer.withSpan(span); + try { + Storage.Buckets.List list = + storage + .buckets() + .list(this.options.getProjectId()) + .setProjection(DEFAULT_PROJECTION) + .setPrefix(Option.PREFIX.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setReturnPartialSuccess(Option.RETURN_PARTIAL_SUCCESS.getBoolean(options)) + .setFields(Option.FIELDS.getString(options)) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(list, options); + com.google.api.services.storage.model.Buckets bucketList = list.execute(); + Iterable buckets = + Iterables.concat( + firstNonNull(bucketList.getItems(), ImmutableList.of()), + bucketList.getUnreachable() != null + ? 
Lists.transform(bucketList.getUnreachable(), createUnreachableBucket()) + : ImmutableList.of()); + return Tuple.>of(bucketList.getNextPageToken(), buckets); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public Tuple> list(final String bucket, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_LIST_OBJECTS); + Scope scope = tracer.withSpan(span); + try { + Storage.Objects.List list = + storage + .objects() + .list(bucket) + .setProjection(DEFAULT_PROJECTION) + .setVersions(Option.VERSIONS.getBoolean(options)) + .setDelimiter(Option.DELIMITER.getString(options)) + .setStartOffset(Option.START_OFF_SET.getString(options)) + .setEndOffset(Option.END_OFF_SET.getString(options)) + .setMatchGlob(Option.MATCH_GLOB.getString(options)) + .setPrefix(Option.PREFIX.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .setUserProject(Option.USER_PROJECT.getString(options)) + .setSoftDeleted(Option.SOFT_DELETED.getBoolean(options)) + .setIncludeFoldersAsPrefixes(Option.INCLUDE_FOLDERS_AS_PREFIXES.getBoolean(options)) + .setIncludeTrailingDelimiter(Option.INCLUDE_TRAILING_DELIMITER.getBoolean(options)) + .setFilter(Option.OBJECT_FILTER.getString(options)); + setExtraHeaders(list, options); + Objects objects = list.execute(); + Iterable storageObjects = + Iterables.concat( + firstNonNull(objects.getItems(), ImmutableList.of()), + objects.getPrefixes() != null + ? 
Lists.transform(objects.getPrefixes(), objectFromPrefix(bucket)) + : ImmutableList.of()); + return Tuple.of(objects.getNextPageToken(), storageObjects); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + private static String detectContentType(StorageObject object, Map options) { + String contentType = object.getContentType(); + if (contentType != null) { + return contentType; + } + + if (Boolean.TRUE == Option.DETECT_CONTENT_TYPE.get(options)) { + contentType = FILE_NAME_MAP.getContentTypeFor(object.getName().toLowerCase(Locale.US)); + } + + return firstNonNull(contentType, "application/octet-stream"); + } + + private static Function createUnreachableBucket() { + return bucketName -> new Bucket().setName(bucketName).set("isUnreachable", "true"); + } + + private static Function objectFromPrefix(final String bucket) { + return new Function() { + @Override + public StorageObject apply(String prefix) { + return new StorageObject() + .set("isDirectory", true) + .setBucket(bucket) + .setName(prefix) + .setSize(BigInteger.ZERO); + } + }; + } + + @Override + public Bucket get(Bucket bucket, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_GET_BUCKET); + Scope scope = tracer.withSpan(span); + try { + Storage.Buckets.Get get = + storage + .buckets() + .get(bucket.getName()) + .setProjection(DEFAULT_PROJECTION) + .setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(options)) + .setIfMetagenerationNotMatch(Option.IF_METAGENERATION_NOT_MATCH.getLong(options)) + .setFields(Option.FIELDS.getString(options)) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(get, options); + return get.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if 
(serviceException.getCode() == HTTP_NOT_FOUND) { + return null; + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + private Storage.Objects.Get getCall(StorageObject object, Map options) + throws IOException { + Storage.Objects.Get get = storage.objects().get(object.getBucket(), object.getName()); + setEncryptionHeaders(get.getRequestHeaders(), ENCRYPTION_KEY_PREFIX, options); + setExtraHeaders(get, options); + return get.setGeneration(object.getGeneration()) + .setProjection(DEFAULT_PROJECTION) + .setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(options)) + .setIfMetagenerationNotMatch(Option.IF_METAGENERATION_NOT_MATCH.getLong(options)) + .setIfGenerationMatch(Option.IF_GENERATION_MATCH.getLong(options)) + .setIfGenerationNotMatch(Option.IF_GENERATION_NOT_MATCH.getLong(options)) + .setFields(Option.FIELDS.getString(options)) + .setUserProject(Option.USER_PROJECT.getString(options)) + .setSoftDeleted(Option.SOFT_DELETED.getBoolean(options)); + } + + @Override + public StorageObject get(StorageObject object, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_GET_OBJECT); + Scope scope = tracer.withSpan(span); + try { + return getCall(object, options).execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if (serviceException.getCode() == HTTP_NOT_FOUND) { + return null; + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public StorageObject restore(StorageObject object, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_RESTORE_OBJECT); + Scope scope = tracer.withSpan(span); + try { + Storage.Objects.Restore restore = + storage.objects().restore(object.getBucket(), object.getName(), object.getGeneration()); + setExtraHeaders(restore, options); + return 
restore + .setProjection(DEFAULT_PROJECTION) + .setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(options)) + .setIfMetagenerationNotMatch(Option.IF_METAGENERATION_NOT_MATCH.getLong(options)) + .setIfGenerationMatch(Option.IF_GENERATION_MATCH.getLong(options)) + .setIfGenerationNotMatch(Option.IF_GENERATION_NOT_MATCH.getLong(options)) + .setCopySourceAcl(Option.COPY_SOURCE_ACL.getBoolean(options)) + .setUserProject(Option.USER_PROJECT.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if (serviceException.getCode() == HTTP_NOT_FOUND) { + return null; + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public Bucket patch(Bucket bucket, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_PATCH_BUCKET); + Scope scope = tracer.withSpan(span); + try { + RetentionPolicy retentionPolicy = bucket.getRetentionPolicy(); + if (retentionPolicy != null) { + // according to https://cloud.google.com/storage/docs/json_api/v1/buckets both effectiveTime + // and isLocked are output_only. If retentionPeriod is null, null out the whole + // RetentionPolicy. + if (retentionPolicy.getRetentionPeriod() == null) { + // Using Data.nullOf here is important here so the null value is written into the request + // json. The explicit null values tells the backend to remove the policy. 
+ bucket.setRetentionPolicy(Data.nullOf(RetentionPolicy.class)); + } + } + + String projection = Option.PROJECTION.getString(options); + if (bucket.getIamConfiguration() != null + && bucket.getIamConfiguration().getBucketPolicyOnly() != null + && bucket.getIamConfiguration().getBucketPolicyOnly().getEnabled() != null + && bucket.getIamConfiguration().getBucketPolicyOnly().getEnabled()) { + // If BucketPolicyOnly is enabled, patch calls will fail if ACL information is included in + // the request + bucket.setDefaultObjectAcl(null); + bucket.setAcl(null); + + if (projection == null) { + projection = NO_ACL_PROJECTION; + } + } + Buckets.Patch patch = + storage + .buckets() + .patch(bucket.getName(), bucket) + .setProjection(projection == null ? DEFAULT_PROJECTION : projection) + .setPredefinedAcl(Option.PREDEFINED_ACL.getString(options)) + .setPredefinedDefaultObjectAcl( + Option.PREDEFINED_DEFAULT_OBJECT_ACL.getString(options)) + .setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(options)) + .setIfMetagenerationNotMatch(Option.IF_METAGENERATION_NOT_MATCH.getLong(options)) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(patch, options); + return patch.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + private Storage.Objects.Patch patchCall(StorageObject storageObject, Map options) + throws IOException { + Storage.Objects.Patch patch = + storage + .objects() + .patch(storageObject.getBucket(), storageObject.getName(), storageObject) + .setProjection(DEFAULT_PROJECTION) + .setPredefinedAcl(Option.PREDEFINED_ACL.getString(options)) + .setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(options)) + .setIfMetagenerationNotMatch(Option.IF_METAGENERATION_NOT_MATCH.getLong(options)) + .setIfGenerationMatch(Option.IF_GENERATION_MATCH.getLong(options)) + 
.setIfGenerationNotMatch(Option.IF_GENERATION_NOT_MATCH.getLong(options)) + .setOverrideUnlockedRetention(Option.OVERRIDE_UNLOCKED_RETENTION.getBoolean(options)) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(patch, options); + return patch; + } + + @Override + public StorageObject patch(StorageObject storageObject, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_PATCH_OBJECT); + Scope scope = tracer.withSpan(span); + try { + return patchCall(storageObject, options).execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public boolean delete(Bucket bucket, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_DELETE_BUCKET); + Scope scope = tracer.withSpan(span); + try { + Buckets.Delete delete = + storage + .buckets() + .delete(bucket.getName()) + .setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(options)) + .setIfMetagenerationNotMatch(Option.IF_METAGENERATION_NOT_MATCH.getLong(options)) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(delete, options); + delete.execute(); + return true; + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if (serviceException.getCode() == HTTP_NOT_FOUND) { + return false; + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + private Storage.Objects.Delete deleteCall(StorageObject blob, Map options) + throws IOException { + Storage.Objects.Delete delete = + storage + .objects() + .delete(blob.getBucket(), blob.getName()) + .setGeneration(blob.getGeneration()) + .setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(options)) + 
.setIfMetagenerationNotMatch(Option.IF_METAGENERATION_NOT_MATCH.getLong(options)) + .setIfGenerationMatch(Option.IF_GENERATION_MATCH.getLong(options)) + .setIfGenerationNotMatch(Option.IF_GENERATION_NOT_MATCH.getLong(options)) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(delete, options); + return delete; + } + + @Override + public boolean delete(StorageObject blob, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_DELETE_OBJECT); + Scope scope = tracer.withSpan(span); + try { + deleteCall(blob, options).execute(); + return true; + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if (serviceException.getCode() == HTTP_NOT_FOUND) { + return false; + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public StorageObject compose( + Iterable sources, StorageObject target, Map targetOptions) { + ComposeRequest request = new ComposeRequest(); + request.setDestination(target); + List sourceObjects = new ArrayList<>(); + for (StorageObject source : sources) { + ComposeRequest.SourceObjects sourceObject = new ComposeRequest.SourceObjects(); + sourceObject.setName(source.getName()); + Long generation = source.getGeneration(); + if (generation != null) { + sourceObject.setGeneration(generation); + sourceObject.setObjectPreconditions( + new ObjectPreconditions().setIfGenerationMatch(generation)); + } + sourceObjects.add(sourceObject); + } + request.setSourceObjects(sourceObjects); + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_COMPOSE); + Scope scope = tracer.withSpan(span); + try { + Compose compose = + storage + .objects() + .compose(target.getBucket(), target.getName(), request) + .setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(targetOptions)) + .setIfGenerationMatch(Option.IF_GENERATION_MATCH.getLong(targetOptions)) + 
.setUserProject(Option.USER_PROJECT.getString(targetOptions)); + setEncryptionHeaders(compose.getRequestHeaders(), ENCRYPTION_KEY_PREFIX, targetOptions); + setExtraHeaders(compose, targetOptions); + return compose.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public byte[] load(StorageObject from, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_LOAD); + Scope scope = tracer.withSpan(span); + try { + Storage.Objects.Get getRequest = + storage + .objects() + .get(from.getBucket(), from.getName()) + .setGeneration(from.getGeneration()) + .setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(options)) + .setIfMetagenerationNotMatch(Option.IF_METAGENERATION_NOT_MATCH.getLong(options)) + .setIfGenerationMatch(Option.IF_GENERATION_MATCH.getLong(options)) + .setIfGenerationNotMatch(Option.IF_GENERATION_NOT_MATCH.getLong(options)) + .setUserProject(Option.USER_PROJECT.getString(options)); + setEncryptionHeaders(getRequest.getRequestHeaders(), ENCRYPTION_KEY_PREFIX, options); + setExtraHeaders(getRequest, options); + if (Option.RETURN_RAW_INPUT_STREAM.getBoolean(options) != null) { + getRequest.setReturnRawInputStream(Option.RETURN_RAW_INPUT_STREAM.getBoolean(options)); + } + ByteArrayOutputStream out = new ByteArrayOutputStream(); + getRequest.executeMedia().download(out); + return out.toByteArray(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public RpcBatch createBatch() { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_CREATE_BATCH); + Scope scope = tracer.withSpan(span); + try { + return new DefaultRpcBatch(storage); + } finally { + scope.close(); + 
span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + private Get createReadRequest(StorageObject from, Map options) throws IOException { + Get req = + storage + .objects() + .get(from.getBucket(), from.getName()) + .setGeneration(from.getGeneration()) + .setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(options)) + .setIfMetagenerationNotMatch(Option.IF_METAGENERATION_NOT_MATCH.getLong(options)) + .setIfGenerationMatch(Option.IF_GENERATION_MATCH.getLong(options)) + .setIfGenerationNotMatch(Option.IF_GENERATION_NOT_MATCH.getLong(options)) + .setUserProject(Option.USER_PROJECT.getString(options)); + setEncryptionHeaders(req.getRequestHeaders(), ENCRYPTION_KEY_PREFIX, options); + setExtraHeaders(req, options); + return req; + } + + @Override + public long read( + StorageObject from, Map options, long position, OutputStream outputStream) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_READ); + Scope scope = tracer.withSpan(span); + try { + Get req = createReadRequest(from, options); + Boolean shouldReturnRawInputStream = Option.RETURN_RAW_INPUT_STREAM.getBoolean(options); + if (shouldReturnRawInputStream != null) { + req.setReturnRawInputStream(shouldReturnRawInputStream); + } else { + req.setReturnRawInputStream(false); + } + + if (position > 0) { + req.getRequestHeaders().setRange(String.format(Locale.US, "bytes=%d-", position)); + } + MediaHttpDownloader mediaHttpDownloader = req.getMediaHttpDownloader(); + mediaHttpDownloader.setDirectDownloadEnabled(true); + req.executeMedia().download(outputStream); + return mediaHttpDownloader.getNumBytesDownloaded(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if (serviceException.getCode() == SC_REQUESTED_RANGE_NOT_SATISFIABLE) { + return 0; + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public Tuple read( 
+ StorageObject from, Map options, long position, int bytes) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_READ); + Scope scope = tracer.withSpan(span); + try { + checkArgument(position >= 0, "Position should be non-negative, is " + position); + Get req = createReadRequest(from, options); + Boolean shouldReturnRawInputStream = Option.RETURN_RAW_INPUT_STREAM.getBoolean(options); + if (shouldReturnRawInputStream != null) { + req.setReturnRawInputStream(shouldReturnRawInputStream); + } else { + req.setReturnRawInputStream(true); + } + StringBuilder range = new StringBuilder(); + range.append("bytes=").append(position).append("-").append(position + bytes - 1); + HttpHeaders requestHeaders = req.getRequestHeaders(); + requestHeaders.setRange(range.toString()); + ByteArrayOutputStream output = new ByteArrayOutputStream(bytes); + req.executeMedia().download(output); + String etag = req.getLastResponseHeaders().getETag(); + return Tuple.of(etag, output.toByteArray()); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = StorageException.translate(ex); + if (serviceException.getCode() == SC_REQUESTED_RANGE_NOT_SATISFIABLE) { + return Tuple.of(null, new byte[0]); + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public void write( + String uploadId, + byte[] toWrite, + int toWriteOffset, + long destOffset, + int length, + boolean last) { + writeWithResponse(uploadId, toWrite, toWriteOffset, destOffset, length, last); + } + + @Override + public long getCurrentUploadOffset(String uploadId) { + try { + GenericUrl url = new GenericUrl(uploadId); + HttpRequest httpRequest = + storage.getRequestFactory().buildPutRequest(url, new EmptyContent()); + + httpRequest.getHeaders().setContentRange("bytes */*"); + // Turn off automatic redirects. + // HTTP 308 are returned if upload is incomplete. 
+ // See: https://cloud.google.com/storage/docs/performing-resumable-uploads + httpRequest.setFollowRedirects(false); + + HttpResponse response = null; + try { + response = httpRequest.execute(); + int code = response.getStatusCode(); + if (HttpStatusCodes.isSuccess(code)) { + // Upload completed successfully + return -1; + } + StringBuilder sb = new StringBuilder(); + sb.append("Not sure what occurred. Here's debugging information:\n"); + sb.append("Response:\n").append(response.toString()).append("\n\n"); + throw new StorageException(0, sb.toString()); + } catch (HttpResponseException ex) { + int code = ex.getStatusCode(); + if (code == 308) { + if (ex.getHeaders().getRange() == null) { + // No progress has been made. + return 0; + } + // API returns last byte received offset + String range = ex.getHeaders().getRange(); + // Return next byte offset by adding 1 to last byte received offset + return Long.parseLong(range.substring(range.indexOf("-") + 1)) + 1; + } else { + // Something else occurred like a 5xx so translate and throw. 
+ throw translate(ex); + } + } finally { + if (response != null) { + response.disconnect(); + } + } + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public StorageObject queryCompletedResumableUpload(String uploadId, long totalBytes) { + try { + GenericUrl url = new GenericUrl(uploadId); + HttpRequest req = storage.getRequestFactory().buildPutRequest(url, new EmptyContent()); + req.getHeaders().setContentRange(String.format(Locale.US, "bytes */%s", totalBytes)); + req.setParser(storage.getObjectParser()); + HttpResponse response = req.execute(); + // If the response is 200 + if (response.getStatusCode() == 200) { + return response.parseAs(StorageObject.class); + } else { + throw buildStorageException(response.getStatusCode(), response.getStatusMessage()); + } + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public StorageObject writeWithResponse( + String uploadId, + byte[] toWrite, + int toWriteOffset, + long destOffset, + int length, + boolean last) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_WRITE); + Scope scope = tracer.withSpan(span); + StorageObject updatedBlob = null; + try { + if (length == 0 && !last) { + return updatedBlob; + } + GenericUrl url = new GenericUrl(uploadId); + HttpRequest httpRequest = + storage + .getRequestFactory() + .buildPutRequest(url, new ByteArrayContent(null, toWrite, toWriteOffset, length)); + long limit = destOffset + length; + StringBuilder range = new StringBuilder("bytes "); + if (length == 0) { + range.append('*'); + } else { + range.append(destOffset).append('-').append(limit - 1); + } + range.append('/'); + if (last) { + range.append(limit); + } else { + range.append('*'); + } + httpRequest.getHeaders().setContentRange(range.toString()); + if (last) { + httpRequest.setParser(storage.getObjectParser()); + } + int code; + String message; + IOException exception = null; + HttpResponse response = null; + try { + response = httpRequest.execute(); + code = 
response.getStatusCode(); + message = response.getStatusMessage(); + String contentType = response.getContentType(); + if (last + && (code == 200 || code == 201) + && contentType != null + && contentType.startsWith("application/json")) { + updatedBlob = response.parseAs(StorageObject.class); + } + } catch (HttpResponseException ex) { + exception = ex; + code = ex.getStatusCode(); + message = ex.getStatusMessage(); + } finally { + if (response != null) { + response.disconnect(); + } + } + if (!last && code != 308 || last && !(code == 200 || code == 201)) { + if (exception != null) { + throw exception; + } + throw buildStorageException(code, message); + } + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + return updatedBlob; + } + + @Override + public String open(StorageObject object, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_OPEN); + Scope scope = tracer.withSpan(span); + try { + String kmsKeyName = object.getKmsKeyName(); + if (kmsKeyName != null && kmsKeyName.contains("cryptoKeyVersions")) { + object.setKmsKeyName(Data.nullOf(String.class)); + } + Insert req = + storage + .objects() + .insert(object.getBucket(), object) + .setName(object.getName()) + .setProjection(Option.PROJECTION.getString(options)) + .setPredefinedAcl(Option.PREDEFINED_ACL.getString(options)) + .setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(options)) + .setIfMetagenerationNotMatch(Option.IF_METAGENERATION_NOT_MATCH.getLong(options)) + .setIfGenerationMatch(Option.IF_GENERATION_MATCH.getLong(options)) + .setIfGenerationNotMatch(Option.IF_GENERATION_NOT_MATCH.getLong(options)) + .setUserProject(Option.USER_PROJECT.getString(options)) + .setKmsKeyName(Option.KMS_KEY_NAME.getString(options)); + GenericUrl url = req.buildHttpRequestUrl(); + url.setRawPath("/upload" + url.getRawPath()); + 
url.set("uploadType", "resumable"); + + JsonFactory jsonFactory = storage.getJsonFactory(); + HttpRequestFactory requestFactory = storage.getRequestFactory(); + HttpRequest httpRequest = + requestFactory.buildPostRequest(url, new JsonHttpContent(jsonFactory, object)); + HttpHeaders requestHeaders = httpRequest.getHeaders(); + requestHeaders.set("X-Upload-Content-Type", detectContentType(object, options)); + Long xUploadContentLength = Option.X_UPLOAD_CONTENT_LENGTH.getLong(options); + if (xUploadContentLength != null) { + requestHeaders.set("X-Upload-Content-Length", xUploadContentLength); + } + setEncryptionHeaders(requestHeaders, "x-goog-encryption-", options); + setExtraHeaders(Option.EXTRA_HEADERS.get(options), requestHeaders); + HttpResponse response = httpRequest.execute(); + if (response.getStatusCode() != 200) { + throw buildStorageException(response.getStatusCode(), response.getStatusMessage()); + } + String location = response.getHeaders().getLocation(); + response.disconnect(); + return location; + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public String open(String signedURL) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_OPEN); + Scope scope = tracer.withSpan(span); + try { + GenericUrl url = new GenericUrl(signedURL); + url.set("uploadType", "resumable"); + String bytesArrayParameters = ""; + byte[] bytesArray = new byte[bytesArrayParameters.length()]; + HttpRequestFactory requestFactory = storage.getRequestFactory(); + HttpRequest httpRequest = + requestFactory.buildPostRequest( + url, new ByteArrayContent("", bytesArray, 0, bytesArray.length)); + HttpHeaders requestHeaders = httpRequest.getHeaders(); + requestHeaders.set("X-Upload-Content-Type", ""); + requestHeaders.set("x-goog-resumable", "start"); + // Using the x-goog-api-client header causes a signature 
mismatch with signed URLs generated + // outside the Java storage client + requestHeaders.remove("x-goog-api-client"); + + HttpResponse response = httpRequest.execute(); + if (response.getStatusCode() != 201) { + throw buildStorageException(response.getStatusCode(), response.getStatusMessage()); + } + String location = response.getHeaders().getLocation(); + response.disconnect(); + return location; + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public StorageObject moveObject( + String bucket, + String sourceObject, + String destinationObject, + Map sourceOptions, + Map targetOptions) { + + String userProject = Option.USER_PROJECT.getString(sourceOptions); + if (userProject == null) { + userProject = Option.USER_PROJECT.getString(targetOptions); + } + try { + Move move = + storage + .objects() + .move(bucket, sourceObject, destinationObject) + .setIfSourceMetagenerationMatch( + Option.IF_SOURCE_METAGENERATION_MATCH.getLong(sourceOptions)) + .setIfSourceMetagenerationNotMatch( + Option.IF_SOURCE_METAGENERATION_NOT_MATCH.getLong(sourceOptions)) + .setIfSourceGenerationMatch(Option.IF_SOURCE_GENERATION_MATCH.getLong(sourceOptions)) + .setIfSourceGenerationNotMatch( + Option.IF_SOURCE_GENERATION_NOT_MATCH.getLong(sourceOptions)) + .setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(targetOptions)) + .setIfMetagenerationNotMatch( + Option.IF_METAGENERATION_NOT_MATCH.getLong(targetOptions)) + .setIfGenerationMatch(Option.IF_GENERATION_MATCH.getLong(targetOptions)) + .setIfGenerationNotMatch(Option.IF_GENERATION_NOT_MATCH.getLong(targetOptions)) + .setUserProject(userProject); + return move.execute(); + } catch (IOException e) { + throw translate(e); + } + } + + @Override + public RewriteResponse openRewrite(RewriteRequest rewriteRequest) { + Span span = 
startSpan(HttpStorageRpcSpans.SPAN_NAME_OPEN_REWRITE); + Scope scope = tracer.withSpan(span); + try { + return rewrite(rewriteRequest, null); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public RewriteResponse continueRewrite(RewriteResponse previousResponse) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_CONTINUE_REWRITE); + Scope scope = tracer.withSpan(span); + try { + return rewrite(previousResponse.rewriteRequest, previousResponse.rewriteToken); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + private RewriteResponse rewrite(RewriteRequest req, String token) { + try { + String userProject = Option.USER_PROJECT.getString(req.sourceOptions); + if (userProject == null) { + userProject = Option.USER_PROJECT.getString(req.targetOptions); + } + + Long maxBytesRewrittenPerCall = + req.megabytesRewrittenPerCall != null ? req.megabytesRewrittenPerCall * MEGABYTE : null; + StorageObject content = req.overrideInfo ? 
req.target : null; + Storage.Objects.Rewrite rewrite = + storage + .objects() + .rewrite( + req.source.getBucket(), + req.source.getName(), + req.target.getBucket(), + req.target.getName(), + content) + .setSourceGeneration(req.source.getGeneration()) + .setRewriteToken(token) + .setMaxBytesRewrittenPerCall(maxBytesRewrittenPerCall) + .setProjection(DEFAULT_PROJECTION) + .setIfSourceMetagenerationMatch( + Option.IF_SOURCE_METAGENERATION_MATCH.getLong(req.sourceOptions)) + .setIfSourceMetagenerationNotMatch( + Option.IF_SOURCE_METAGENERATION_NOT_MATCH.getLong(req.sourceOptions)) + .setIfSourceGenerationMatch( + Option.IF_SOURCE_GENERATION_MATCH.getLong(req.sourceOptions)) + .setIfSourceGenerationNotMatch( + Option.IF_SOURCE_GENERATION_NOT_MATCH.getLong(req.sourceOptions)) + .setIfMetagenerationMatch(Option.IF_METAGENERATION_MATCH.getLong(req.targetOptions)) + .setIfMetagenerationNotMatch( + Option.IF_METAGENERATION_NOT_MATCH.getLong(req.targetOptions)) + .setIfGenerationMatch(Option.IF_GENERATION_MATCH.getLong(req.targetOptions)) + .setIfGenerationNotMatch(Option.IF_GENERATION_NOT_MATCH.getLong(req.targetOptions)) + .setDestinationPredefinedAcl(Option.PREDEFINED_ACL.getString(req.targetOptions)) + .setUserProject(userProject) + .setDestinationKmsKeyName(Option.KMS_KEY_NAME.getString(req.targetOptions)); + rewrite.setDisableGZipContent(content == null); + HttpHeaders requestHeaders = rewrite.getRequestHeaders(); + setEncryptionHeaders(requestHeaders, SOURCE_ENCRYPTION_KEY_PREFIX, req.sourceOptions); + setEncryptionHeaders(requestHeaders, ENCRYPTION_KEY_PREFIX, req.targetOptions); + setExtraHeaders(rewrite, req.sourceOptions); + setExtraHeaders(rewrite, req.targetOptions); + com.google.api.services.storage.model.RewriteResponse rewriteResponse = rewrite.execute(); + return new RewriteResponse( + req, + rewriteResponse.getResource(), + rewriteResponse.getObjectSize().longValue(), + rewriteResponse.getDone(), + rewriteResponse.getRewriteToken(), + 
rewriteResponse.getTotalBytesRewritten().longValue()); + } catch (IOException ex) { + tracer.getCurrentSpan().setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } + } + + @Override + public BucketAccessControl getAcl(String bucket, String entity, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_GET_BUCKET_ACL); + Scope scope = tracer.withSpan(span); + try { + BucketAccessControls.Get get = + storage + .bucketAccessControls() + .get(bucket, entity) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(get, options); + return get.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if (serviceException.getCode() == HTTP_NOT_FOUND) { + return null; + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public boolean deleteAcl(String bucket, String entity, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_DELETE_BUCKET_ACL); + Scope scope = tracer.withSpan(span); + try { + BucketAccessControls.Delete delete = + storage + .bucketAccessControls() + .delete(bucket, entity) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(delete, options); + delete.execute(); + return true; + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if (serviceException.getCode() == HTTP_NOT_FOUND) { + return false; + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public BucketAccessControl createAcl(BucketAccessControl acl, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_CREATE_BUCKET_ACL); + Scope scope = tracer.withSpan(span); + try { + BucketAccessControls.Insert insert = + storage + 
.bucketAccessControls() + .insert(acl.getBucket(), acl) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(insert, options); + return insert.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public BucketAccessControl patchAcl(BucketAccessControl acl, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_PATCH_BUCKET_ACL); + Scope scope = tracer.withSpan(span); + try { + BucketAccessControls.Patch patch = + storage + .bucketAccessControls() + .patch(acl.getBucket(), acl.getEntity(), acl) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(patch, options); + return patch.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public List listAcls(String bucket, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_LIST_BUCKET_ACLS); + Scope scope = tracer.withSpan(span); + try { + BucketAccessControls.List list = + storage + .bucketAccessControls() + .list(bucket) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(list, options); + return list.execute().getItems(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public ObjectAccessControl getDefaultAcl(String bucket, String entity) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_GET_OBJECT_DEFAULT_ACL); + Scope scope = tracer.withSpan(span); + try { + DefaultObjectAccessControls.Get get = + storage.defaultObjectAccessControls().get(bucket, entity); + return get.execute(); + 
} catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if (serviceException.getCode() == HTTP_NOT_FOUND) { + return null; + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public boolean deleteDefaultAcl(String bucket, String entity) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_DELETE_OBJECT_DEFAULT_ACL); + Scope scope = tracer.withSpan(span); + try { + DefaultObjectAccessControls.Delete delete = + storage.defaultObjectAccessControls().delete(bucket, entity); + delete.execute(); + return true; + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if (serviceException.getCode() == HTTP_NOT_FOUND) { + return false; + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public ObjectAccessControl createDefaultAcl(ObjectAccessControl acl) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_CREATE_OBJECT_DEFAULT_ACL); + Scope scope = tracer.withSpan(span); + try { + DefaultObjectAccessControls.Insert insert = + storage.defaultObjectAccessControls().insert(acl.getBucket(), acl); + return insert.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public ObjectAccessControl patchDefaultAcl(ObjectAccessControl acl) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_PATCH_OBJECT_DEFAULT_ACL); + Scope scope = tracer.withSpan(span); + try { + DefaultObjectAccessControls.Patch patch = + storage.defaultObjectAccessControls().patch(acl.getBucket(), acl.getEntity(), acl); + return patch.execute(); + } catch (IOException ex) { + 
span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public List listDefaultAcls(String bucket) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_LIST_OBJECT_DEFAULT_ACLS); + Scope scope = tracer.withSpan(span); + try { + DefaultObjectAccessControls.List list = storage.defaultObjectAccessControls().list(bucket); + return list.execute().getItems(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public ObjectAccessControl getAcl(String bucket, String object, Long generation, String entity) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_GET_OBJECT_ACL); + Scope scope = tracer.withSpan(span); + try { + ObjectAccessControls.Get get = + storage.objectAccessControls().get(bucket, object, entity).setGeneration(generation); + return get.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if (serviceException.getCode() == HTTP_NOT_FOUND) { + return null; + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public boolean deleteAcl(String bucket, String object, Long generation, String entity) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_DELETE_OBJECT_ACL); + Scope scope = tracer.withSpan(span); + try { + ObjectAccessControls.Delete delete = + storage.objectAccessControls().delete(bucket, object, entity).setGeneration(generation); + delete.execute(); + return true; + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if (serviceException.getCode() == 
HTTP_NOT_FOUND) { + return false; + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public ObjectAccessControl createAcl(ObjectAccessControl acl) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_CREATE_OBJECT_ACL); + Scope scope = tracer.withSpan(span); + try { + ObjectAccessControls.Insert insert = + storage + .objectAccessControls() + .insert(acl.getBucket(), acl.getObject(), acl) + .setGeneration(acl.getGeneration()); + return insert.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public ObjectAccessControl patchAcl(ObjectAccessControl acl) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_PATCH_OBJECT_ACL); + Scope scope = tracer.withSpan(span); + try { + ObjectAccessControls.Patch patch = + storage + .objectAccessControls() + .patch(acl.getBucket(), acl.getObject(), acl.getEntity(), acl) + .setGeneration(acl.getGeneration()); + return patch.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public List listAcls(String bucket, String object, Long generation) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_LIST_OBJECT_ACLS); + Scope scope = tracer.withSpan(span); + try { + ObjectAccessControls.List list = + storage.objectAccessControls().list(bucket, object).setGeneration(generation); + return list.execute().getItems(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public HmacKey createHmacKey(String 
serviceAccountEmail, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_CREATE_HMAC_KEY); + Scope scope = tracer.withSpan(span); + String projectId = Option.PROJECT_ID.getString(options); + if (projectId == null) { + projectId = this.options.getProjectId(); + } + try { + Create create = + storage + .projects() + .hmacKeys() + .create(projectId, serviceAccountEmail) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(create, options); + return create.setDisableGZipContent(true).execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public Tuple> listHmacKeys(Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_LIST_HMAC_KEYS); + Scope scope = tracer.withSpan(span); + String projectId = Option.PROJECT_ID.getString(options); + if (projectId == null) { + projectId = this.options.getProjectId(); + } + try { + HmacKeys.List list = + storage + .projects() + .hmacKeys() + .list(projectId) + .setServiceAccountEmail(Option.SERVICE_ACCOUNT_EMAIL.getString(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setShowDeletedKeys(Option.SHOW_DELETED_KEYS.getBoolean(options)) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(list, options); + HmacKeysMetadata hmacKeysMetadata = list.execute(); + return Tuple.>of( + hmacKeysMetadata.getNextPageToken(), hmacKeysMetadata.getItems()); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public HmacKeyMetadata getHmacKey(String accessId, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_GET_HMAC_KEY); + Scope scope = 
tracer.withSpan(span); + String projectId = Option.PROJECT_ID.getString(options); + if (projectId == null) { + projectId = this.options.getProjectId(); + } + try { + HmacKeys.Get get = + storage + .projects() + .hmacKeys() + .get(projectId, accessId) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(get, options); + return get.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public HmacKeyMetadata updateHmacKey(HmacKeyMetadata hmacKeyMetadata, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_UPDATE_HMAC_KEY); + Scope scope = tracer.withSpan(span); + String projectId = hmacKeyMetadata.getProjectId(); + if (projectId == null) { + projectId = this.options.getProjectId(); + } + try { + Update update = + storage + .projects() + .hmacKeys() + .update(projectId, hmacKeyMetadata.getAccessId(), hmacKeyMetadata) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(update, options); + return update.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public void deleteHmacKey(HmacKeyMetadata hmacKeyMetadata, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_DELETE_HMAC_KEY); + Scope scope = tracer.withSpan(span); + String projectId = hmacKeyMetadata.getProjectId(); + if (projectId == null) { + projectId = this.options.getProjectId(); + } + try { + HmacKeys.Delete delete = + storage + .projects() + .hmacKeys() + .delete(projectId, hmacKeyMetadata.getAccessId()) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(delete, options); + delete.execute(); + } catch (IOException ex) { + 
span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public Policy getIamPolicy(String bucket, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_GET_BUCKET_IAM_POLICY); + Scope scope = tracer.withSpan(span); + try { + Storage.Buckets.GetIamPolicy getIamPolicy = + storage + .buckets() + .getIamPolicy(bucket) + .setUserProject(Option.USER_PROJECT.getString(options)); + if (null != Option.REQUESTED_POLICY_VERSION.getLong(options)) { + getIamPolicy.setOptionsRequestedPolicyVersion( + Option.REQUESTED_POLICY_VERSION.getLong(options).intValue()); + } + setExtraHeaders(getIamPolicy, options); + return getIamPolicy.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public Policy setIamPolicy(String bucket, Policy policy, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_SET_BUCKET_IAM_POLICY); + Scope scope = tracer.withSpan(span); + try { + SetIamPolicy setIamPolicy = + storage + .buckets() + .setIamPolicy(bucket, policy) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(setIamPolicy, options); + return setIamPolicy.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public TestIamPermissionsResponse testIamPermissions( + String bucket, List permissions, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_TEST_BUCKET_IAM_PERMISSIONS); + Scope scope = tracer.withSpan(span); + try { + TestIamPermissions testIamPermissions = + storage + .buckets() + .testIamPermissions(bucket, permissions) + 
.setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(testIamPermissions, options); + return testIamPermissions.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public boolean deleteNotification(String bucket, String notification) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_DELETE_NOTIFICATION); + Scope scope = tracer.withSpan(span); + try { + Notifications.Delete delete = storage.notifications().delete(bucket, notification); + delete.execute(); + return true; + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if (serviceException.getCode() == HTTP_NOT_FOUND) { + return false; + } + throw serviceException; + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public List listNotifications(String bucket) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_LIST_NOTIFICATIONS); + Scope scope = tracer.withSpan(span); + try { + Notifications.List list = storage.notifications().list(bucket); + return list.execute().getItems(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public Notification createNotification(String bucket, Notification notification) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_CREATE_NOTIFICATION); + Scope scope = tracer.withSpan(span); + try { + Notifications.Insert insert = storage.notifications().insert(bucket, notification); + return insert.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + 
scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public Notification getNotification(String bucket, String notification) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_GET_NOTIFICATION); + Scope scope = tracer.withSpan(span); + try { + Notifications.Get get = storage.notifications().get(bucket, notification); + return get.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + StorageException serviceException = translate(ex); + if (serviceException.getCode() == HTTP_NOT_FOUND) { + return null; + } + throw serviceException; + } finally { + scope.close(); + span.end(); + } + } + + @Override + public Bucket lockRetentionPolicy(Bucket bucket, Map options) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_LOCK_RETENTION_POLICY); + Scope scope = tracer.withSpan(span); + try { + LockRetentionPolicy lockRetentionPolicy = + storage + .buckets() + .lockRetentionPolicy( + bucket.getName(), Option.IF_METAGENERATION_MATCH.getLong(options)) + .setUserProject(Option.USER_PROJECT.getString(options)); + setExtraHeaders(lockRetentionPolicy, options); + return lockRetentionPolicy.setDisableGZipContent(true).execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + @Override + public ServiceAccount getServiceAccount(String projectId) { + Span span = startSpan(HttpStorageRpcSpans.SPAN_NAME_GET_SERVICE_ACCOUNT); + Scope scope = tracer.withSpan(span); + try { + Projects.ServiceAccount.Get get = storage.projects().serviceAccount().get(projectId); + return get.execute(); + } catch (IOException ex) { + span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage())); + throw translate(ex); + } finally { + scope.close(); + span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS); + } + } + + private static StorageException 
buildStorageException(int statusCode, String statusMessage) { + GoogleJsonError error = new GoogleJsonError(); + error.setCode(statusCode); + error.setMessage(statusMessage); + return translate(error); + } + + private static > void setExtraHeaders( + Request req, Map options) { + ImmutableMap extraHeaders = Option.EXTRA_HEADERS.get(options); + HttpHeaders headers = req.getRequestHeaders(); + setExtraHeaders(extraHeaders, headers); + } + + private static void setExtraHeaders( + @Nullable ImmutableMap extraHeaders, HttpHeaders headers) { + if (extraHeaders != null) { + for (Entry e : extraHeaders.entrySet()) { + String key = e.getKey(); + if (!headers.containsKey(key) || key.equals("authorization")) { + headers.set(key, e.getValue()); + } + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/HttpStorageRpcSpans.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/HttpStorageRpcSpans.java new file mode 100644 index 000000000000..8fa049d01980 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/HttpStorageRpcSpans.java @@ -0,0 +1,114 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.spi.v1; + +import io.opencensus.trace.EndSpanOptions; +import java.util.Locale; + +/** Helper class for instrumenting {@link HttpStorageRpc} with Open Census APIs. 
*/ +class HttpStorageRpcSpans { + // OpenCensus span name prefix, 'Sent' for client and 'RECV' for server. + static final String SPAN_NAME_CLIENT_PREFIX = "Sent"; + + static final String SPAN_NAME_CREATE_BUCKET = getTraceSpanName("create(Bucket,Map)"); + static final String SPAN_NAME_CREATE_OBJECT = + getTraceSpanName("create(StorageObject,InputStream,Map)"); + static final String SPAN_NAME_LIST_BUCKETS = getTraceSpanName("list(Map)"); + static final String SPAN_NAME_LIST_OBJECTS = getTraceSpanName("list(String,Map)"); + static final String SPAN_NAME_GET_BUCKET = getTraceSpanName("get(Bucket,Map)"); + static final String SPAN_NAME_GET_OBJECT = getTraceSpanName("get(StorageObject,Map)"); + static final String SPAN_NAME_RESTORE_OBJECT = getTraceSpanName("restore(StorageObject, Map)"); + static final String SPAN_NAME_PATCH_BUCKET = getTraceSpanName("patch(Bucket,Map)"); + static final String SPAN_NAME_PATCH_OBJECT = getTraceSpanName("patch(StorageObject,Map)"); + static final String SPAN_NAME_DELETE_BUCKET = getTraceSpanName("delete(Bucket,Map)"); + static final String SPAN_NAME_DELETE_OBJECT = getTraceSpanName("delete(StorageObject,Map)"); + static final String SPAN_NAME_CREATE_BATCH = getTraceSpanName("createBatch()"); + static final String SPAN_NAME_COMPOSE = getTraceSpanName("compose(Iterable,StorageObject,Map)"); + static final String SPAN_NAME_LOAD = getTraceSpanName("load(StorageObject,Map"); + static final String SPAN_NAME_READ = getTraceSpanName("read(StorageObject,Map,long,int)"); + static final String SPAN_NAME_OPEN = getTraceSpanName("open(StorageObject,Map)"); + static final String SPAN_NAME_WRITE = + getTraceSpanName("write(String,byte[],int,long,int,boolean)"); + static final String SPAN_NAME_OPEN_REWRITE = getTraceSpanName("openRewrite(RewriteRequest)"); + static final String SPAN_NAME_CONTINUE_REWRITE = + getTraceSpanName("continueRewrite(RewriteResponse)"); + static final String SPAN_NAME_GET_BUCKET_ACL = 
getTraceSpanName("getAcl(String,String,Map)"); + static final String SPAN_NAME_DELETE_BUCKET_ACL = + getTraceSpanName("deleteAcl(String,String,Map)"); + static final String SPAN_NAME_CREATE_BUCKET_ACL = + getTraceSpanName("createAcl(BucketAccessControl,Map)"); + static final String SPAN_NAME_PATCH_BUCKET_ACL = + getTraceSpanName("patchAcl(BucketAccessControl,Map)"); + static final String SPAN_NAME_LIST_BUCKET_ACLS = getTraceSpanName("listAcls(String,Map)"); + static final String SPAN_NAME_GET_OBJECT_DEFAULT_ACL = + getTraceSpanName("getDefaultAcl(String,String)"); + static final String SPAN_NAME_DELETE_OBJECT_DEFAULT_ACL = + getTraceSpanName("deleteDefaultAcl(String,String)"); + static final String SPAN_NAME_CREATE_OBJECT_DEFAULT_ACL = + getTraceSpanName("createDefaultAcl(ObjectAccessControl)"); + static final String SPAN_NAME_PATCH_OBJECT_DEFAULT_ACL = + getTraceSpanName("patchDefaultAcl(ObjectAccessControl)"); + static final String SPAN_NAME_LIST_OBJECT_DEFAULT_ACLS = + getTraceSpanName("listDefaultAcls(String)"); + static final String SPAN_NAME_GET_OBJECT_ACL = + getTraceSpanName("getAcl(String,String,Long,String)"); + static final String SPAN_NAME_DELETE_OBJECT_ACL = + getTraceSpanName("deleteAcl(String,String,Long,String)"); + static final String SPAN_NAME_CREATE_OBJECT_ACL = + getTraceSpanName("createAcl(ObjectAccessControl)"); + static final String SPAN_NAME_PATCH_OBJECT_ACL = + getTraceSpanName("patchAcl(ObjectAccessControl)"); + static final String SPAN_NAME_LIST_OBJECT_ACLS = getTraceSpanName("listAcls(String,String,Long)"); + static final String SPAN_NAME_CREATE_HMAC_KEY = getTraceSpanName("createHmacKey(String)"); + static final String SPAN_NAME_GET_HMAC_KEY = getTraceSpanName("getHmacKey(String)"); + static final String SPAN_NAME_DELETE_HMAC_KEY = getTraceSpanName("deleteHmacKey(String)"); + static final String SPAN_NAME_LIST_HMAC_KEYS = + getTraceSpanName("listHmacKeys(String,String,Long)"); + static final String SPAN_NAME_UPDATE_HMAC_KEY = + 
getTraceSpanName("updateHmacKey(HmacKeyMetadata)"); + static final String SPAN_NAME_GET_BUCKET_IAM_POLICY = + getTraceSpanName("getIamPolicy(String,Map)"); + static final String SPAN_NAME_SET_BUCKET_IAM_POLICY = + getTraceSpanName("setIamPolicy(String,Policy,Map)"); + static final String SPAN_NAME_TEST_BUCKET_IAM_PERMISSIONS = + getTraceSpanName("testIamPermissions(String,List,Map)"); + static final String SPAN_NAME_DELETE_NOTIFICATION = + getTraceSpanName("deleteNotification(String,String)"); + static final String SPAN_NAME_LIST_NOTIFICATIONS = getTraceSpanName("listNotifications(String)"); + static final String SPAN_NAME_CREATE_NOTIFICATION = + getTraceSpanName("createNotification(String,Notification)"); + static final String SPAN_NAME_GET_NOTIFICATION = + getTraceSpanName("getNotification(String,String)"); + static final String SPAN_LOCK_RETENTION_POLICY = + getTraceSpanName("lockRetentionPolicy(String,Long)"); + static final String SPAN_NAME_GET_SERVICE_ACCOUNT = getTraceSpanName("getServiceAccount(String)"); + static final String SPAN_NAME_BATCH_SUBMIT = + getTraceSpanName(RpcBatch.class.getName() + ".submit()"); + static final EndSpanOptions END_SPAN_OPTIONS = + EndSpanOptions.builder().setSampleToLocalSpanStore(true).build(); + + static String getTraceSpanName(String methodDescriptor) { + return String.format( + Locale.US, + "%s.%s.%s", + SPAN_NAME_CLIENT_PREFIX, + HttpStorageRpc.class.getName(), + methodDescriptor); + } + + private HttpStorageRpcSpans() {} +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/RpcBatch.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/RpcBatch.java new file mode 100644 index 000000000000..099a2e3b95d0 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/RpcBatch.java @@ -0,0 +1,63 @@ +/* + * Copyright 2016 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may 
not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.spi.v1; + +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.services.storage.model.StorageObject; +import java.util.Map; + +/** An interface for the collection of batch operations. */ +public interface RpcBatch { + + /** An interface for batch callbacks. */ + interface Callback { + + /** This method will be called upon success of the batch operation. */ + void onSuccess(T response); + + /** This method will be called upon failure of the batch operation. */ + void onFailure(GoogleJsonError googleJsonError); + } + + /** + * Adds a call to "delete storage object" to the batch, with the provided {@code callback} and + * {@code options}. + */ + void addDelete( + StorageObject storageObject, Callback callback, Map options); + + /** + * Adds a call to "patch storage object" to the batch, with the provided {@code callback} and + * {@code options}. + */ + void addPatch( + StorageObject storageObject, + Callback callback, + Map options); + + /** + * Adds a call to "get storage object" to the batch, with the provided {@code callback} and {@code + * options}. + */ + void addGet( + StorageObject storageObject, + Callback callback, + Map options); + + /** Submits a batch of requests for processing using a single RPC request to Cloud Storage. 
*/ + void submit(); +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/StorageRpc.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/StorageRpc.java new file mode 100644 index 000000000000..59a56df12022 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/StorageRpc.java @@ -0,0 +1,644 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.spi.v1; + +import com.google.api.core.InternalApi; +import com.google.api.services.storage.Storage; +import com.google.api.services.storage.model.Bucket; +import com.google.api.services.storage.model.BucketAccessControl; +import com.google.api.services.storage.model.HmacKey; +import com.google.api.services.storage.model.HmacKeyMetadata; +import com.google.api.services.storage.model.Notification; +import com.google.api.services.storage.model.ObjectAccessControl; +import com.google.api.services.storage.model.Policy; +import com.google.api.services.storage.model.ServiceAccount; +import com.google.api.services.storage.model.StorageObject; +import com.google.api.services.storage.model.TestIamPermissionsResponse; +import com.google.cloud.ServiceRpc; +import com.google.cloud.Tuple; +import com.google.cloud.storage.StorageException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +@InternalApi +public interface StorageRpc extends ServiceRpc { + + // These options are part of the Google Cloud storage header options + enum Option { + PREDEFINED_ACL("predefinedAcl"), + PREDEFINED_DEFAULT_OBJECT_ACL("predefinedDefaultObjectAcl"), + IF_METAGENERATION_MATCH("ifMetagenerationMatch"), + IF_METAGENERATION_NOT_MATCH("ifMetagenerationNotMatch"), + IF_GENERATION_MATCH("ifGenerationMatch"), + IF_GENERATION_NOT_MATCH("ifGenerationNotMatch"), + IF_SOURCE_METAGENERATION_MATCH("ifSourceMetagenerationMatch"), + IF_SOURCE_METAGENERATION_NOT_MATCH("ifSourceMetagenerationNotMatch"), + IF_SOURCE_GENERATION_MATCH("ifSourceGenerationMatch"), + IF_SOURCE_GENERATION_NOT_MATCH("ifSourceGenerationNotMatch"), + IF_DISABLE_GZIP_CONTENT("disableGzipContent"), + PREFIX("prefix"), + PROJECT_ID("projectId"), + PROJECTION("projection"), + MAX_RESULTS("maxResults"), + PAGE_TOKEN("pageToken"), + RETURN_PARTIAL_SUCCESS("returnPartialSuccess"), + DELIMITER("delimiter"), + 
START_OFF_SET("startOffset"), + END_OFF_SET("endOffset"), + MATCH_GLOB("matchGlob"), + VERSIONS("versions"), + FIELDS("fields"), + CUSTOMER_SUPPLIED_KEY("customerSuppliedKey"), + USER_PROJECT("userProject"), + KMS_KEY_NAME("kmsKeyName"), + SERVICE_ACCOUNT_EMAIL("serviceAccount"), + SHOW_DELETED_KEYS("showDeletedKeys"), + REQUESTED_POLICY_VERSION("optionsRequestedPolicyVersion"), + DETECT_CONTENT_TYPE("detectContentType"), + ENABLE_OBJECT_RETENTION("enableObjectRetention"), + RETURN_RAW_INPUT_STREAM("returnRawInputStream"), + OVERRIDE_UNLOCKED_RETENTION("overrideUnlockedRetention"), + SOFT_DELETED("softDeleted"), + COPY_SOURCE_ACL("copySourceAcl"), + GENERATION("generation"), + INCLUDE_FOLDERS_AS_PREFIXES("includeFoldersAsPrefixes"), + INCLUDE_TRAILING_DELIMITER("includeTrailingDelimiter"), + X_UPLOAD_CONTENT_LENGTH("x-upload-content-length"), + OBJECT_FILTER("objectFilter"), + /** + * An {@link com.google.common.collect.ImmutableMap ImmutableMap<String, String>} of values + * which will be set as additional headers on the request. 
+ */ + EXTRA_HEADERS("extra_headers"); + + private final String value; + + Option(String value) { + this.value = value; + } + + public String value() { + return value; + } + + @SuppressWarnings("unchecked") + T get(Map options) { + return (T) options.get(this); + } + + String getString(Map options) { + return get(options); + } + + Long getLong(Map options) { + return get(options); + } + + Boolean getBoolean(Map options) { + return get(options); + } + } + + class RewriteRequest { + + public final StorageObject source; + public final Map sourceOptions; + public final boolean overrideInfo; + public final StorageObject target; + public final Map targetOptions; + public final Long megabytesRewrittenPerCall; + + public RewriteRequest( + StorageObject source, + Map sourceOptions, + boolean overrideInfo, + StorageObject target, + Map targetOptions, + Long megabytesRewrittenPerCall) { + this.source = source; + this.sourceOptions = sourceOptions; + this.overrideInfo = overrideInfo; + this.target = target; + this.targetOptions = targetOptions; + this.megabytesRewrittenPerCall = megabytesRewrittenPerCall; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (!(obj instanceof RewriteRequest)) { + return false; + } + final RewriteRequest other = (RewriteRequest) obj; + return Objects.equals(this.source, other.source) + && Objects.equals(this.sourceOptions, other.sourceOptions) + && Objects.equals(this.overrideInfo, other.overrideInfo) + && Objects.equals(this.target, other.target) + && Objects.equals(this.targetOptions, other.targetOptions) + && Objects.equals(this.megabytesRewrittenPerCall, other.megabytesRewrittenPerCall); + } + + @Override + public int hashCode() { + return Objects.hash( + source, sourceOptions, overrideInfo, target, targetOptions, megabytesRewrittenPerCall); + } + } + + class RewriteResponse { + + public final RewriteRequest rewriteRequest; + public final StorageObject result; + public final long blobSize; + 
public final boolean isDone; + public final String rewriteToken; + public final long totalBytesRewritten; + + public RewriteResponse( + RewriteRequest rewriteRequest, + StorageObject result, + long blobSize, + boolean isDone, + String rewriteToken, + long totalBytesRewritten) { + this.rewriteRequest = rewriteRequest; + this.result = result; + this.blobSize = blobSize; + this.isDone = isDone; + this.rewriteToken = rewriteToken; + this.totalBytesRewritten = totalBytesRewritten; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (!(obj instanceof RewriteResponse)) { + return false; + } + final RewriteResponse other = (RewriteResponse) obj; + return Objects.equals(this.rewriteRequest, other.rewriteRequest) + && Objects.equals(this.result, other.result) + && Objects.equals(this.rewriteToken, other.rewriteToken) + && this.blobSize == other.blobSize + && Objects.equals(this.isDone, other.isDone) + && this.totalBytesRewritten == other.totalBytesRewritten; + } + + @Override + public int hashCode() { + return Objects.hash( + rewriteRequest, result, blobSize, isDone, rewriteToken, totalBytesRewritten); + } + } + + /** + * Creates a new bucket. + * + * @throws StorageException upon failure + */ + Bucket create(Bucket bucket, Map options); + + /** + * Creates a new storage object. + * + * @throws StorageException upon failure + */ + StorageObject create(StorageObject object, InputStream content, Map options); + + /** + * Lists the project's buckets. + * + * @throws StorageException upon failure + */ + Tuple> list(Map options); + + /** + * Lists the bucket's blobs. + * + * @throws StorageException upon failure + */ + Tuple> list(String bucket, Map options); + + /** + * Returns the requested bucket or {@code null} if not found. + * + * @throws StorageException upon failure + */ + Bucket get(Bucket bucket, Map options); + + /** + * Returns the requested storage object or {@code null} if not found. 
+ * + * @throws StorageException upon failure + */ + StorageObject get(StorageObject object, Map options); + + /** + * If an object has been soft-deleted, restores it and returns the restored object. + * + * @throws StorageException upon failure + */ + StorageObject restore(StorageObject object, Map options); + + /** + * Updates bucket information. + * + * @throws StorageException upon failure + */ + Bucket patch(Bucket bucket, Map options); + + /** + * Updates the storage object's information. Original metadata are merged with metadata in the + * provided {@code storageObject}. + * + * @throws StorageException upon failure + */ + StorageObject patch(StorageObject storageObject, Map options); + + /** + * Deletes the requested bucket. + * + * @return {@code true} if the bucket was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + boolean delete(Bucket bucket, Map options); + + /** + * Deletes the requested storage object. + * + * @return {@code true} if the storage object was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + boolean delete(StorageObject object, Map options); + + /** Creates an empty batch. */ + RpcBatch createBatch(); + + /** + * Sends a compose request. + * + * @throws StorageException upon failure + */ + StorageObject compose( + Iterable sources, StorageObject target, Map targetOptions); + + /** + * Reads all the bytes from a storage object. + * + * @throws StorageException upon failure + */ + byte[] load(StorageObject storageObject, Map options); + + /** + * Reads the given amount of bytes from a storage object at the given position. + * + * @throws StorageException upon failure + */ + Tuple read(StorageObject from, Map options, long position, int bytes); + + /** + * Reads all the bytes from a storage object at the given position in to outputstream using direct + * download. + * + * @return number of bytes downloaded, returns 0 if position higher than length. 
+ * @throws StorageException upon failure + */ + long read(StorageObject from, Map options, long position, OutputStream outputStream); + + /** + * Opens a resumable upload channel for a given storage object. + * + * @throws StorageException upon failure + */ + String open(StorageObject object, Map options); + + /** + * Opens a resumable upload channel for a given signedURL. + * + * @throws StorageException upon failure + */ + String open(String signedURL); + + /** + * Writes the provided bytes to a storage object at the provided location. + * + * @throws StorageException upon failure + */ + void write( + String uploadId, + byte[] toWrite, + int toWriteOffset, + long destOffset, + int length, + boolean last); + + /** + * Requests current byte offset from Cloud Storage API. Used to recover from a failure if some + * bytes were committed successfully to the open resumable session. + * + * @param uploadId resumable upload ID URL + * @throws StorageException upon failure + */ + long getCurrentUploadOffset(String uploadId); + + /** + * Attempts to retrieve the StorageObject from a completed resumable upload. When a resumable + * upload completes, the response will be the up-to-date StorageObject metadata. This up-to-date + * metadata can then be used to validate the total size of the object along with new generation + * and other information. + * + *

If for any reason, the response to the final PUT to a resumable upload is not received, this + * method can be used to query for the up-to-date StorageObject. If the upload is complete, this + * method can be used to access the StorageObject independently from any other liveness or + * conditional criteria requirements that are otherwise applicable when using {@link + * #get(StorageObject, Map)}. + * + * @param uploadId resumable upload ID URL + * @param totalBytes the total number of bytes that should have been written. + * @throws StorageException if the upload is incomplete or does not exist + */ + StorageObject queryCompletedResumableUpload(String uploadId, long totalBytes); + + /** + * Writes the provided bytes to a storage object at the provided location. If {@code last=true} + * returns metadata of the updated object, otherwise returns null. + * + * @param uploadId resumable upload ID + * @param toWrite a portion of the content + * @param toWriteOffset starting position in the {@code toWrite} array + * @param destOffset starting position in the destination data + * @param length the number of bytes to be uploaded + * @param last true, if {@code toWrite} is the final content portion + * @throws StorageException upon failure + * @return + */ + StorageObject writeWithResponse( + String uploadId, + byte[] toWrite, + int toWriteOffset, + long destOffset, + int length, + boolean last); + + StorageObject moveObject( + String bucket, + String sourceObject, + String destinationObject, + Map sourceOptions, + Map targetOptions); + + /** + * Sends a rewrite request to open a rewrite channel. + * + * @throws StorageException upon failure + */ + RewriteResponse openRewrite(RewriteRequest rewriteRequest); + + /** + * Continues rewriting on an already open rewrite channel. 
+ * + * @throws StorageException upon failure + */ + RewriteResponse continueRewrite(RewriteResponse previousResponse); + + /** + * Returns the ACL entry for the specified entity on the specified bucket or {@code null} if not + * found. + * + * @throws StorageException upon failure + */ + BucketAccessControl getAcl(String bucket, String entity, Map options); + + /** + * Deletes the ACL entry for the specified entity on the specified bucket. + * + * @return {@code true} if the ACL was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + boolean deleteAcl(String bucket, String entity, Map options); + + /** + * Creates a new ACL entry on the specified bucket. + * + * @throws StorageException upon failure + */ + BucketAccessControl createAcl(BucketAccessControl acl, Map options); + + /** + * Updates an ACL entry on the specified bucket. + * + * @throws StorageException upon failure + */ + BucketAccessControl patchAcl(BucketAccessControl acl, Map options); + + /** + * Lists the ACL entries for the provided bucket. + * + * @throws StorageException upon failure + */ + List listAcls(String bucket, Map options); + + /** + * Returns the default object ACL entry for the specified entity on the specified bucket or {@code + * null} if not found. + * + * @throws StorageException upon failure + */ + ObjectAccessControl getDefaultAcl(String bucket, String entity); + + /** + * Deletes the default object ACL entry for the specified entity on the specified bucket. + * + * @return {@code true} if the ACL was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + boolean deleteDefaultAcl(String bucket, String entity); + + /** + * Creates a new default object ACL entry on the specified bucket. + * + * @throws StorageException upon failure + */ + ObjectAccessControl createDefaultAcl(ObjectAccessControl acl); + + /** + * Updates a default object ACL entry on the specified bucket. 
+ * + * @throws StorageException upon failure + */ + ObjectAccessControl patchDefaultAcl(ObjectAccessControl acl); + + /** + * Lists the default object ACL entries for the provided bucket. + * + * @throws StorageException upon failure + */ + List listDefaultAcls(String bucket); + + /** + * Returns the ACL entry for the specified entity on the specified object or {@code null} if not + * found. + * + * @throws StorageException upon failure + */ + ObjectAccessControl getAcl(String bucket, String object, Long generation, String entity); + + /** + * Deletes the ACL entry for the specified entity on the specified object. + * + * @return {@code true} if the ACL was deleted, {@code false} if it was not found + * @throws StorageException upon failure + */ + boolean deleteAcl(String bucket, String object, Long generation, String entity); + + /** + * Creates a new ACL entry on the specified object. + * + * @throws StorageException upon failure + */ + ObjectAccessControl createAcl(ObjectAccessControl acl); + + /** + * Updates an ACL entry on the specified object. + * + * @throws StorageException upon failure + */ + ObjectAccessControl patchAcl(ObjectAccessControl acl); + + /** + * Lists the ACL entries for the provided object. + * + * @throws StorageException upon failure + */ + List listAcls(String bucket, String object, Long generation); + + /** + * Creates a new HMAC key for the provided service account email. + * + * @throws StorageException upon failure + */ + HmacKey createHmacKey(String serviceAccountEmail, Map options); + + /** + * Lists the HMAC keys for the provided service account email. + * + * @throws StorageException upon failure + */ + Tuple> listHmacKeys(Map options); + + /** + * Updates an HMAC key for the provided metadata object and returns the updated object. Only + * updates the State field. 
+ * + * @throws StorageException upon failure + */ + HmacKeyMetadata updateHmacKey(HmacKeyMetadata hmacKeyMetadata, Map options); + + /** + * Returns the HMAC key associated with the provided access id. + * + * @throws StorageException upon failure + */ + HmacKeyMetadata getHmacKey(String accessId, Map options); + + /** + * Deletes the HMAC key associated with the provided metadata object. + * + * @throws StorageException upon failure + */ + void deleteHmacKey(HmacKeyMetadata hmacKeyMetadata, Map options); + + /** + * Returns the IAM policy for the specified bucket. + * + * @throws StorageException upon failure + */ + Policy getIamPolicy(String bucket, Map options); + + /** + * Updates the IAM policy for the specified bucket. + * + * @throws StorageException upon failure + */ + Policy setIamPolicy(String bucket, Policy policy, Map options); + + /** + * Tests whether the caller holds the specified permissions for the specified bucket. + * + * @throws StorageException upon failure + */ + TestIamPermissionsResponse testIamPermissions( + String bucket, List permissions, Map options); + + /** + * Deletes the notification with the specified id on the bucket. + * + * @return {@code true} if the notification has been deleted, {@code false} if not found + * @throws StorageException upon failure + */ + boolean deleteNotification(String bucket, String id); + + /** + * Retrieves the list of notifications associated with the bucket. + * + * @return a list of {@link Notification} objects that exist on the bucket. + * @throws StorageException upon failure + */ + List listNotifications(String bucket); + + /** + * Creates the notification for a given bucket. + * + * @return the created notification. + * @throws StorageException upon failure + */ + Notification createNotification(String bucket, Notification notification); + + /** + * Gets the notification with the specified id. 
+ * + * @return the {@code Notification} object with the given id or {@code null} if not found + * @throws StorageException upon failure + */ + Notification getNotification(String bucket, String id); + + /** + * Lock retention policy for the provided bucket. + * + * @return a {@code Bucket} object of the locked bucket + * @throws StorageException upon failure + */ + Bucket lockRetentionPolicy(Bucket bucket, Map options); + + /** + * Returns the service account associated with the given project. + * + * @return the service account associated with the given project. + * @throws StorageException upon failure + */ + ServiceAccount getServiceAccount(String projectId); + + @InternalApi + Storage getStorage(); +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/testing/RemoteStorageHelper.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/testing/RemoteStorageHelper.java new file mode 100644 index 000000000000..404564174656 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/testing/RemoteStorageHelper.java @@ -0,0 +1,373 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.testing; + +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.paging.Page; +import com.google.api.gax.retrying.RetrySettings; +import com.google.auth.oauth2.GoogleCredentials; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.HttpStorageOptions; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import com.google.common.base.Strings; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.io.IOException; +import java.io.InputStream; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Utility to create a remote storage configuration for testing. Storage options can be obtained via + * the {@link #getOptions()} method. Returned options have custom {@link + * StorageOptions#getRetrySettings()}: {@link RetrySettings#getMaxAttempts()} is {@code 10}, {@link + * RetrySettings#getMaxRetryDelay()} is {@code 30000}, {@link RetrySettings#getTotalTimeout()} is + * {@code 120000} and {@link RetrySettings#getInitialRetryDelay()} is {@code 250}. {@link + * HttpTransportOptions#getConnectTimeout()} and {@link HttpTransportOptions#getReadTimeout()} are + * both set to {@code 60000}. 
+ */ +public class RemoteStorageHelper { + + private static final Logger log = Logger.getLogger(RemoteStorageHelper.class.getName()); + private static final String BUCKET_NAME_PREFIX = "gcloud-test-bucket-temp-"; + private final StorageOptions options; + + private RemoteStorageHelper(StorageOptions options) { + this.options = options; + } + + /** Returns a {@link StorageOptions} object to be used for testing. */ + public StorageOptions getOptions() { + return options; + } + + public static void cleanBuckets(final Storage storage, final long olderThan, long timeoutMs) { + Runnable task = + new Runnable() { + @Override + public void run() { + Page buckets = + storage.list( + Storage.BucketListOption.prefix(BUCKET_NAME_PREFIX), + Storage.BucketListOption.userProject(storage.getOptions().getProjectId())); + for (Bucket bucket : buckets.iterateAll()) { + if (bucket.getCreateTime() < olderThan) { + try { + for (Blob blob : + bucket + .list( + BlobListOption.fields( + Storage.BlobField.EVENT_BASED_HOLD, + Storage.BlobField.TEMPORARY_HOLD)) + .iterateAll()) { + if (Boolean.TRUE.equals(blob.getEventBasedHold()) + || Boolean.TRUE.equals(blob.getTemporaryHold())) { + storage.update( + blob.toBuilder().setTemporaryHold(false).setEventBasedHold(false).build(), + Storage.BlobTargetOption.userProject( + storage.getOptions().getProjectId())); + } + } + forceDelete(storage, bucket.getName()); + } catch (Exception e) { + log.info("Failed to clean buckets " + e.getMessage()); + } + } + } + } + }; + Thread thread = new Thread(task); + thread.start(); + try { + thread.join(timeoutMs); + } catch (InterruptedException e) { + log.info("cleanBuckets interrupted"); + } + } + + /** + * Deletes a bucket, even if non-empty. Objects in the bucket are listed and deleted until bucket + * deletion succeeds or {@code timeout} expires. To allow for the timeout, this method uses a + * separate thread to send the delete requests. 
Use {@link #forceDelete(Storage storage, String + * bucket)} if spawning an additional thread is undesirable, such as in the App Engine production + * runtime. + * + * @param storage the storage service to be used to issue requests + * @param bucket the bucket to be deleted + * @param timeout the maximum time to wait + * @param unit the time unit of the timeout argument + * @return true if deletion succeeded, false if timeout expired + * @throws InterruptedException if the thread deleting the bucket is interrupted while waiting + * @throws ExecutionException if an exception was thrown while deleting bucket or bucket objects + */ + public static Boolean forceDelete(Storage storage, String bucket, long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException { + return forceDelete(storage, bucket, timeout, unit, ""); + } + + /** + * Deletes a bucket, even if non-empty. Objects in the bucket are listed and deleted until bucket + * deletion succeeds or {@code timeout} expires. To allow for the timeout, this method uses a + * separate thread to send the delete requests. Use {@link #forceDelete(Storage storage, String + * bucket)} if spawning an additional thread is undesirable, such as in the App Engine production + * runtime. 
+ * + * @param storage the storage service to be used to issue requests + * @param bucket the bucket to be deleted + * @param timeout the maximum time to wait + * @param unit the time unit of the timeout argument + * @param userProject the project to bill for requester-pays buckets (or "") + * @return true if deletion succeeded, false if timeout expired + * @throws InterruptedException if the thread deleting the bucket is interrupted while waiting + * @throws ExecutionException if an exception was thrown while deleting bucket or bucket objects + */ + public static Boolean forceDelete( + Storage storage, String bucket, long timeout, TimeUnit unit, String userProject) + throws InterruptedException, ExecutionException { + ThreadFactory threadFactory = + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("forceDelete-%s").build(); + ExecutorService executor = Executors.newSingleThreadExecutor(threadFactory); + Future future = executor.submit(new DeleteBucketTask(storage, bucket, userProject)); + try { + return future.get(timeout, unit); + } catch (TimeoutException ex) { + future.cancel(true); + return false; + } finally { + executor.shutdown(); + } + } + + /** + * Deletes a bucket, even if non-empty. This method blocks until the deletion completes or fails. + * + * @param storage the storage service to be used to issue requests + * @param bucket the bucket to be deleted + * @throws StorageException if an exception is encountered during bucket deletion + */ + public static void forceDelete(Storage storage, String bucket) { + new DeleteBucketTask(storage, bucket).call(); + } + + /** Returns a bucket name generated using a random UUID. */ + public static String generateBucketName() { + return BUCKET_NAME_PREFIX + UUID.randomUUID().toString(); + } + + /** + * This method is obsolete because of a potential security risk. Use the {@link #create(String, + * GoogleCredentials)} method instead. + * + *

If you know that you will be loading credential configurations of a specific type, it is + * recommended to use a credential-type-specific `fromStream()` method. This will ensure that an + * unexpected credential type with potential for malicious intent is not loaded unintentionally. + * You might still have to do validation for certain credential types. Please follow the + * recommendation for that method. + * + *

If you are loading your credential configuration from an untrusted source and have not + * mitigated the risks (e.g. by validating the configuration yourself), make these changes as soon + * as possible to prevent security risks to your environment. + * + *

Regardless of the method used, it is always your responsibility to validate configurations + * received from external sources. + * + *

See the {@see documentation} + * for more details. + * + *

Creates a {@code RemoteStorageHelper} object for the given project id and JSON key input + * stream. + * + * @param projectId id of the project to be used for running the tests + * @param keyStream input stream for a JSON key + * @return A {@code RemoteStorageHelper} object for the provided options + * @throws com.google.cloud.storage.testing.RemoteStorageHelper.StorageHelperException if {@code + * keyStream} is not a valid JSON key stream + */ + @ObsoleteApi( + "This method is obsolete because of a potential security risk. Use the create() variant with" + + " Credential parameter instead") + public static RemoteStorageHelper create(String projectId, InputStream keyStream) + throws StorageHelperException { + try { + return create(projectId, GoogleCredentials.fromStream(keyStream)); + } catch (IOException ex) { + if (log.isLoggable(Level.WARNING)) { + log.log(Level.WARNING, ex.getMessage()); + } + throw StorageHelperException.translate(ex); + } + } + + /** + * Creates a {@code RemoteStorageHelper} object for the given project id and Credential. + * + * @param projectId id of the project to be used for running the tests + * @param credentials GoogleCredential to set to StorageOptions + * @return A {@code RemoteStorageHelper} object for the provided options + */ + public static RemoteStorageHelper create(String projectId, GoogleCredentials credentials) { + HttpTransportOptions transportOptions = + HttpStorageOptions.defaults().getDefaultTransportOptions(); + transportOptions = + transportOptions.toBuilder().setConnectTimeout(60000).setReadTimeout(60000).build(); + StorageOptions storageOptions = + StorageOptions.http() + .setCredentials(credentials) + .setProjectId(projectId) + .setRetrySettings(retrySettings()) + .setTransportOptions(transportOptions) + .build(); + return new RemoteStorageHelper(storageOptions); + } + + /** + * Creates a {@code RemoteStorageHelper} object using default project id and authentication + * credentials. 
+ */ + public static RemoteStorageHelper create() throws StorageHelperException { + HttpTransportOptions transportOptions = + HttpStorageOptions.defaults().getDefaultTransportOptions(); + transportOptions = + transportOptions.toBuilder().setConnectTimeout(60000).setReadTimeout(60000).build(); + StorageOptions storageOptions = + StorageOptions.http() + .setRetrySettings(retrySettings()) + .setTransportOptions(transportOptions) + .build(); + return new RemoteStorageHelper(storageOptions); + } + + private static RetrySettings retrySettings() { + return RetrySettings.newBuilder() + .setMaxAttempts(10) + .setMaxRetryDelayDuration(Duration.ofMillis(30000L)) + .setTotalTimeoutDuration(Duration.ofMillis(120000L)) + .setInitialRetryDelayDuration(Duration.ofMillis(250L)) + .setRetryDelayMultiplier(1.0) + .setInitialRpcTimeoutDuration(Duration.ofMillis(120000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(120000L)) + .build(); + } + + private static class DeleteBucketTask implements Callable { + + private final Storage storage; + private final String bucket; + private final String userProject; + + public DeleteBucketTask(Storage storage, String bucket) { + this.storage = storage; + this.bucket = bucket; + this.userProject = ""; + } + + public DeleteBucketTask(Storage storage, String bucket, String userProject) { + this.storage = storage; + this.bucket = bucket; + this.userProject = userProject; + } + + @Override + public Boolean call() { + while (true) { + ArrayList ids = new ArrayList<>(); + Page listedBlobs; + if (Strings.isNullOrEmpty(userProject)) { + listedBlobs = storage.list(bucket, BlobListOption.versions(true)); + } else { + listedBlobs = + storage.list( + bucket, BlobListOption.versions(true), BlobListOption.userProject(userProject)); + } + for (BlobInfo info : listedBlobs.getValues()) { + ids.add(info.getBlobId()); + } + if (!ids.isEmpty()) { + List results = storage.delete(ids); + if (!Strings.isNullOrEmpty(userProject)) { + for 
(int i = 0; i < results.size(); i++) { + if (!results.get(i)) { + // deleting that blob failed. Let's try in a different way. + storage.delete( + bucket, + ids.get(i).getName(), + Storage.BlobSourceOption.userProject(userProject)); + } + } + } + } + try { + if (Strings.isNullOrEmpty(userProject)) { + storage.delete(bucket); + } else { + storage.delete(bucket, Storage.BucketSourceOption.userProject(userProject)); + } + return true; + } catch (StorageException e) { + log.warning("Caught exception in Delete Bucket Task" + e.getMessage()); + if (e.getCode() == 409) { + try { + Thread.sleep(500); + } catch (InterruptedException interruptedException) { + Thread.currentThread().interrupt(); + throw e; + } + } else { + throw e; + } + } + } + } + } + + public static class StorageHelperException extends RuntimeException { + + private static final long serialVersionUID = -7756074894502258736L; + + public StorageHelperException(String message) { + super(message); + } + + public StorageHelperException(String message, Throwable cause) { + super(message, cause); + } + + public static StorageHelperException translate(Exception ex) { + return new StorageHelperException(ex.getMessage(), ex); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/testing/StorageRpcTestBase.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/testing/StorageRpcTestBase.java new file mode 100644 index 000000000000..8f835f5bf3f2 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/testing/StorageRpcTestBase.java @@ -0,0 +1,339 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.testing; + +import com.google.api.services.storage.Storage; +import com.google.api.services.storage.model.Bucket; +import com.google.api.services.storage.model.BucketAccessControl; +import com.google.api.services.storage.model.HmacKey; +import com.google.api.services.storage.model.HmacKeyMetadata; +import com.google.api.services.storage.model.Notification; +import com.google.api.services.storage.model.ObjectAccessControl; +import com.google.api.services.storage.model.Policy; +import com.google.api.services.storage.model.ServiceAccount; +import com.google.api.services.storage.model.StorageObject; +import com.google.api.services.storage.model.TestIamPermissionsResponse; +import com.google.cloud.Tuple; +import com.google.cloud.storage.spi.v1.RpcBatch; +import com.google.cloud.storage.spi.v1.StorageRpc; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.List; +import java.util.Map; + +/** + * A stub implementation of {@link StorageRpc} which can be used outside of the Storage module for + * testing purposes. All the methods throw an {@code UnsupportedOperationException}. 
+ */ +public class StorageRpcTestBase implements StorageRpc { + + @Override + public Bucket create(Bucket bucket, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public StorageObject create(StorageObject object, InputStream content, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public Tuple> list(Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public Tuple> list(String bucket, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public Bucket get(Bucket bucket, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public StorageObject get(StorageObject object, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public StorageObject restore(StorageObject object, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public Bucket patch(Bucket bucket, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public StorageObject patch(StorageObject storageObject, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public boolean delete(Bucket bucket, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public boolean delete(StorageObject object, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public RpcBatch createBatch() { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public StorageObject compose( + Iterable sources, StorageObject target, Map targetOptions) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public byte[] load(StorageObject storageObject, 
Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public Tuple read( + StorageObject from, Map options, long position, int bytes) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public long read( + StorageObject from, Map options, long position, OutputStream outputStream) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public String open(StorageObject object, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public String open(String signedURL) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public void write( + String uploadId, + byte[] toWrite, + int toWriteOffset, + long destOffset, + int length, + boolean last) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public long getCurrentUploadOffset(String uploadId) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public StorageObject queryCompletedResumableUpload(String uploadId, long totalBytes) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public StorageObject writeWithResponse( + String uploadId, + byte[] toWrite, + int toWriteOffset, + long destOffset, + int length, + boolean last) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public RewriteResponse openRewrite(RewriteRequest rewriteRequest) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public RewriteResponse continueRewrite(RewriteResponse previousResponse) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public BucketAccessControl getAcl(String bucket, String entity, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public boolean deleteAcl(String 
bucket, String entity, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public BucketAccessControl createAcl(BucketAccessControl acl, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public BucketAccessControl patchAcl(BucketAccessControl acl, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public List listAcls(String bucket, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public ObjectAccessControl getDefaultAcl(String bucket, String entity) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public boolean deleteDefaultAcl(String bucket, String entity) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public ObjectAccessControl createDefaultAcl(ObjectAccessControl acl) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public ObjectAccessControl patchDefaultAcl(ObjectAccessControl acl) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public List listDefaultAcls(String bucket) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public ObjectAccessControl getAcl(String bucket, String object, Long generation, String entity) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public boolean deleteAcl(String bucket, String object, Long generation, String entity) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public ObjectAccessControl createAcl(ObjectAccessControl acl) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public ObjectAccessControl patchAcl(ObjectAccessControl acl) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + 
public List listAcls(String bucket, String object, Long generation) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public HmacKey createHmacKey(String serviceAccountEmail, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public Tuple> listHmacKeys(Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public HmacKeyMetadata updateHmacKey(HmacKeyMetadata hmacKeyMetadata, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public HmacKeyMetadata getHmacKey(String accessId, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public void deleteHmacKey(HmacKeyMetadata hmacKeyMetadata, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public Policy getIamPolicy(String bucket, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public Policy setIamPolicy(String bucket, Policy policy, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public TestIamPermissionsResponse testIamPermissions( + String bucket, List permissions, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public boolean deleteNotification(String bucket, String id) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public List listNotifications(String bucket) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public Notification createNotification(String bucket, Notification notification) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public Notification getNotification(String bucket, String id) { + throw new UnsupportedOperationException("Not implemented yet"); + } 
+ + @Override + public Bucket lockRetentionPolicy(Bucket bucket, Map options) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public ServiceAccount getServiceAccount(String projectId) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public StorageObject moveObject( + String bucket, + String sourceObject, + String destinationObject, + Map sourceOptions, + Map targetOptions) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public Storage getStorage() { + throw new UnsupportedOperationException("Not implemented yet"); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/testing/package-info.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/testing/package-info.java new file mode 100644 index 000000000000..ae3f26284687 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/testing/package-info.java @@ -0,0 +1,40 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A testing helper for Google Cloud Storage. + * + *

A simple usage example: + * + *

Before the test: + * + *

{@code
+ * RemoteStorageHelper helper = RemoteStorageHelper.create();
+ * Storage storage = helper.getOptions().getService();
+ * String bucket = RemoteStorageHelper.generateBucketName();
+ * storage.create(BucketInfo.of(bucket));
+ * }
+ * + *

After the test: + * + *

{@code
+ * RemoteStorageHelper.forceDelete(storage, bucket, 5, TimeUnit.SECONDS);
+ * }
+ * + * @see Google Cloud + * Storage testing + */ +package com.google.cloud.storage.testing; diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/BucketNameMismatchException.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/BucketNameMismatchException.java new file mode 100644 index 000000000000..dd8063b9d698 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/BucketNameMismatchException.java @@ -0,0 +1,31 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.transfermanager; + +import java.util.Locale; + +public final class BucketNameMismatchException extends RuntimeException { + + public BucketNameMismatchException(String actual, String expected) { + super( + String.format( + Locale.US, + "Bucket name in produced BlobInfo did not match bucket name from config. 
(%s != %s)", + actual, + expected)); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/ChunkedDownloadCallable.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/ChunkedDownloadCallable.java new file mode 100644 index 000000000000..f64e042be375 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/ChunkedDownloadCallable.java @@ -0,0 +1,99 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.transfermanager; + +import com.google.cloud.ReadChannel; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.StorageException; +import com.google.common.io.ByteStreams; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.concurrent.Callable; + +final class ChunkedDownloadCallable implements Callable { + + private final BlobInfo originalBlob; + + private final Storage storage; + + private final Storage.BlobSourceOption[] opts; + + private final long startPosition; + + private final long endPosition; + private final Path destPath; + + ChunkedDownloadCallable( + Storage storage, + BlobInfo originalBlob, + BlobSourceOption[] opts, + Path destPath, + long startPosition, + long endPosition) { + this.originalBlob = originalBlob; + this.storage = storage; + this.opts = opts; + this.startPosition = startPosition; + this.endPosition = endPosition; + this.destPath = destPath; + } + + @Override + public DownloadSegment call() { + long bytesCopied = -1L; + try (ReadChannel rc = storage.reader(originalBlob.getBlobId(), opts); + FileChannel wc = + FileChannel.open(destPath, StandardOpenOption.WRITE, StandardOpenOption.CREATE)) { + rc.setChunkSize(0); + rc.seek(startPosition); + rc.limit(endPosition); + wc.position(startPosition); + bytesCopied = ByteStreams.copy(rc, wc); + long bytesExpected = endPosition - startPosition; + if (bytesCopied != bytesExpected) { + return DownloadSegment.newBuilder(originalBlob, TransferStatus.FAILED_TO_FINISH) + .setException( + new StorageException( + 0, + "Unexpected end of stream, read " + + bytesCopied + + " expected " + + bytesExpected + + " from object " + + originalBlob.getBlobId().toGsUtilUriWithGeneration())) + .build(); + } + } catch (Exception e) { + if (bytesCopied == -1) { + return 
DownloadSegment.newBuilder(originalBlob, TransferStatus.FAILED_TO_START) + .setException(e) + .build(); + } + return DownloadSegment.newBuilder(originalBlob, TransferStatus.FAILED_TO_FINISH) + .setException(e) + .build(); + } + DownloadSegment result = + DownloadSegment.newBuilder(originalBlob, TransferStatus.SUCCESS) + .setOutputDestination(destPath) + .build(); + return result; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DefaultQos.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DefaultQos.java new file mode 100644 index 000000000000..e466a824ebba --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DefaultQos.java @@ -0,0 +1,48 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.transfermanager; + +final class DefaultQos implements Qos { + + private final long divideAndConquerThreshold; + private final long parallelCompositeUploadThreshold; + private boolean threadThresholdMet; + + private DefaultQos( + long divideAndConquerThreshold, + long parallelCompositeUploadThreshold, + boolean threadThresholdMet) { + this.divideAndConquerThreshold = divideAndConquerThreshold; + this.parallelCompositeUploadThreshold = parallelCompositeUploadThreshold; + this.threadThresholdMet = threadThresholdMet; + } + + @Override + public boolean divideAndConquer(long objectSize) { + return objectSize > divideAndConquerThreshold; + } + + @Override + public boolean parallelCompositeUpload(long objectSize) { + return threadThresholdMet && objectSize > parallelCompositeUploadThreshold; + } + + static DefaultQos of(TransferManagerConfig config) { + return new DefaultQos( + 128L * 1024 * 1024, 4L * config.getPerWorkerBufferSize(), config.getMaxWorkers() > 2); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DirectDownloadCallable.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DirectDownloadCallable.java new file mode 100644 index 000000000000..47f782e8bd72 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DirectDownloadCallable.java @@ -0,0 +1,99 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.transfermanager; + +import com.google.cloud.ReadChannel; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.StorageException; +import com.google.common.io.ByteStreams; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.concurrent.Callable; + +final class DirectDownloadCallable implements Callable { + private final BlobInfo originalBlob; + + private final ParallelDownloadConfig parallelDownloadConfig; + private final Storage storage; + + private final Storage.BlobSourceOption[] opts; + + private final Path destPath; + + DirectDownloadCallable( + Storage storage, + BlobInfo originalBlob, + ParallelDownloadConfig parallelDownloadConfig, + BlobSourceOption[] opts, + Path destPath) { + this.originalBlob = originalBlob; + this.parallelDownloadConfig = parallelDownloadConfig; + this.storage = storage; + this.opts = opts; + this.destPath = destPath; + } + + @Override + public DownloadResult call() { + long bytesCopied = -1L; + try (ReadChannel rc = + storage.reader( + BlobId.of(parallelDownloadConfig.getBucketName(), originalBlob.getName()), opts); + FileChannel wc = + FileChannel.open( + destPath, + StandardOpenOption.WRITE, + StandardOpenOption.CREATE, + StandardOpenOption.TRUNCATE_EXISTING)) { + rc.setChunkSize(0); + bytesCopied = ByteStreams.copy(rc, wc); + if (originalBlob.getSize() != null) { + if (bytesCopied != originalBlob.getSize()) { + return DownloadResult.newBuilder(originalBlob, TransferStatus.FAILED_TO_FINISH) + .setException( + new StorageException( + 0, + "Unexpected end of stream, read " + + bytesCopied + + " expected " + + originalBlob.getSize() + + " from object " + + 
originalBlob.getBlobId().toGsUtilUriWithGeneration())) + .build(); + } + } + } catch (Exception e) { + if (bytesCopied == -1) { + return DownloadResult.newBuilder(originalBlob, TransferStatus.FAILED_TO_START) + .setException(e) + .build(); + } + return DownloadResult.newBuilder(originalBlob, TransferStatus.FAILED_TO_FINISH) + .setException(e) + .build(); + } + DownloadResult result = + DownloadResult.newBuilder(originalBlob, TransferStatus.SUCCESS) + .setOutputDestination(destPath) + .build(); + return result; + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DownloadJob.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DownloadJob.java new file mode 100644 index 000000000000..9bb6b7f59640 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DownloadJob.java @@ -0,0 +1,147 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.transfermanager; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.rpc.ApiExceptions; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import java.util.List; +import java.util.Objects; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; +import org.checkerframework.checker.nullness.qual.NonNull; + +/** + * A parallel download job sent to Transfer Manager. + * + * @see Builder + */ +public final class DownloadJob { + + @NonNull private final List> downloadResults; + + @NonNull private final ParallelDownloadConfig parallelDownloadConfig; + + private DownloadJob( + @NonNull List> downloadResults, + @NonNull ParallelDownloadConfig parallelDownloadConfig) { + this.downloadResults = downloadResults; + this.parallelDownloadConfig = parallelDownloadConfig; + } + + /** + * The list of {@link DownloadResult DownloadResults} for each download request Transfer Manager + * executed for this job. Note calling this method will block the invoking thread until all + * download requests are complete. + * + * @see Builder#setDownloadResults(List) + */ + public @NonNull List getDownloadResults() { + return ApiExceptions.callAndTranslateApiException(ApiFutures.allAsList(downloadResults)); + } + + /** + * The {@link ParallelDownloadConfig} used for this DownloadJob. 
+ * + * @see Builder#setParallelDownloadConfig(ParallelDownloadConfig) + */ + public @NonNull ParallelDownloadConfig getParallelDownloadConfig() { + return parallelDownloadConfig; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof DownloadJob)) { + return false; + } + DownloadJob that = (DownloadJob) o; + return downloadResults.equals(that.downloadResults) + && parallelDownloadConfig.equals(that.parallelDownloadConfig); + } + + @Override + public int hashCode() { + return Objects.hash(downloadResults, parallelDownloadConfig); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("downloadResults", downloadResults) + .add("parallelDownloadConfig", parallelDownloadConfig) + .toString(); + } + + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Builds an instance of DownloadJob + * + * @see DownloadJob + */ + public static final class Builder { + + private @NonNull List> downloadResults; + private @MonotonicNonNull ParallelDownloadConfig parallelDownloadConfig; + + private Builder() { + this.downloadResults = ImmutableList.of(); + } + + /** + * Sets the results for a DownloadJob being performed by Transfer Manager. + * + * @return the instance of the Builder with DownloadResults modified. + * @see DownloadJob#getDownloadResults() + */ + public Builder setDownloadResults(@NonNull List> downloadResults) { + this.downloadResults = ImmutableList.copyOf(downloadResults); + return this; + } + + /** + * Sets the {@link ParallelDownloadConfig} used for this DownloadJob. + * + * @return the instance of the Builder with ParallelDownloadConfig modified. + * @see DownloadJob#getParallelDownloadConfig() + */ + public Builder setParallelDownloadConfig( + @NonNull ParallelDownloadConfig parallelDownloadConfig) { + this.parallelDownloadConfig = parallelDownloadConfig; + return this; + } + + /** + * Creates a DownloadJob object. 
+ * + * @return {@link DownloadJob} + */ + public DownloadJob build() { + checkNotNull(downloadResults); + checkNotNull(parallelDownloadConfig); + return new DownloadJob(downloadResults, parallelDownloadConfig); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DownloadResult.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DownloadResult.java new file mode 100644 index 000000000000..7084e7553aec --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DownloadResult.java @@ -0,0 +1,218 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.transfermanager; + +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +import com.google.cloud.storage.BlobInfo; +import com.google.common.base.MoreObjects; +import java.nio.file.Path; +import java.util.Comparator; +import java.util.Objects; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; +import org.checkerframework.checker.nullness.qual.NonNull; + +/** + * Result for a single download performed by Transfer Manager. 
+ * + * @see Builder + */ +public final class DownloadResult { + static final Comparator COMPARATOR = + Comparator.comparingInt(dr -> dr.getStatus().ordinal()); + + @NonNull private final BlobInfo input; + @MonotonicNonNull private final Path outputDestination; + @NonNull private final TransferStatus status; + @MonotonicNonNull private final Exception exception; + + private DownloadResult( + @NonNull BlobInfo input, + Path outputDestination, + @NonNull TransferStatus status, + Exception exception) { + this.input = input; + this.outputDestination = outputDestination; + this.status = status; + this.exception = exception; + } + + /** + * The {@link BlobInfo} for the object requested for download. + * + * @see Builder#setInput(BlobInfo) + */ + public @NonNull BlobInfo getInput() { + return input; + } + + /** + * The destination on the Filesystem the object has been written to. This field will only be + * populated if the Transfer was a {@link TransferStatus#SUCCESS SUCCESS}. + * + * @see Builder#setOutputDestination(Path) + */ + public @NonNull Path getOutputDestination() { + checkState( + status == TransferStatus.SUCCESS, + "getOutputDestination() is only valid when status is SUCCESS but status was %s", + status); + return outputDestination; + } + + /** + * The status of the download operation. + * + * @see TransferStatus + * @see Builder#setStatus(TransferStatus) + */ + public @NonNull TransferStatus getStatus() { + return status; + } + + /** + * The exception produced by a failed download operation. 
This field will only be populated if the + * Transfer was not {@link TransferStatus#SUCCESS success}ful or {@link TransferStatus#SKIPPED + * skipped} + * + * @see Builder#setException(Exception) + */ + public @NonNull Exception getException() { + checkState( + status == TransferStatus.FAILED_TO_FINISH || status == TransferStatus.FAILED_TO_START, + "getException() is only valid when an unexpected error has occurred but status was %s", + status); + return exception; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + DownloadResult that = (DownloadResult) o; + return input.equals(that.input) + && outputDestination.equals(that.outputDestination) + && status == that.status + && exception.equals(that.exception); + } + + @Override + public int hashCode() { + return Objects.hash(input, outputDestination, status, exception); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("input", input) + .add("outputDestination", outputDestination) + .add("status", status) + .add("exception", exception) + .toString(); + } + + public static Builder newBuilder(@NonNull BlobInfo blobInfo, @NonNull TransferStatus status) { + return new Builder(blobInfo, status); + } + + /** + * Builds an instance of DownloadResult + * + * @see DownloadResult + */ + public static final class Builder { + + private @NonNull BlobInfo input; + private @MonotonicNonNull Path outputDestination; + private @NonNull TransferStatus status; + private @MonotonicNonNull Exception exception; + + private Builder(@NonNull BlobInfo input, @NonNull TransferStatus status) { + this.input = input; + this.status = status; + } + + /** + * Sets the {@link BlobInfo} for the object request for download. This field is required. + * + * @see DownloadResult#getInput() + * @return the instance of the Builder with the value for input modified. 
 + */ + public Builder setInput(@NonNull BlobInfo input) { + this.input = input; + return this; + } + + /** + * Sets the location on the Filesystem the object has been written to. This field will only be + * populated if the Transfer was {@link TransferStatus#SUCCESS success}ful. + * + * @see DownloadResult#getOutputDestination() + * @return the instance of the Builder with the value for outputDestination modified. + */ + public Builder setOutputDestination(@NonNull Path outputDestination) { + this.outputDestination = outputDestination; + return this; + } + + /** + * Sets the status of the download. This field is required. + * + * @see TransferStatus + * @return the instance of the Builder with the value for status modified. + */ + public Builder setStatus(@NonNull TransferStatus status) { + this.status = status; + return this; + } + + /** + * Sets the Exception produced by a failed download operation. This field will only be populated + * if the Transfer was not {@link TransferStatus#SUCCESS success}ful or {@link + * TransferStatus#SKIPPED skipped} + * + * @see DownloadResult#getException() + * @return the instance of the Builder with the value for exception modified. + */ + public Builder setException(@NonNull Exception exception) { + this.exception = exception; + return this; + } + + /** + * Creates a DownloadResult object. 
+ * + * @return {@link DownloadResult} + */ + public DownloadResult build() { + checkNotNull(input); + checkNotNull(status); + if (status == TransferStatus.SUCCESS) { + checkNotNull(outputDestination); + } else if (status == TransferStatus.FAILED_TO_START + || status == TransferStatus.FAILED_TO_FINISH) { + checkNotNull(exception); + } + return new DownloadResult(input, outputDestination, status, exception); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DownloadSegment.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DownloadSegment.java new file mode 100644 index 000000000000..68dcf678f4d2 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/DownloadSegment.java @@ -0,0 +1,134 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.transfermanager; + +import com.google.cloud.storage.BlobInfo; +import java.nio.file.Path; +import org.checkerframework.checker.nullness.qual.NonNull; + +final class DownloadSegment { + private final BlobInfo input; + + private final Path outputDestination; + + private final TransferStatus status; + + private final Exception exception; + + private final Long generation; + + private DownloadSegment( + BlobInfo input, + Path outputDestination, + TransferStatus status, + Exception exception, + Long generation) { + this.input = input; + this.outputDestination = outputDestination; + this.status = status; + this.exception = exception; + this.generation = generation; + } + + public BlobInfo getInput() { + return input; + } + + public Path getOutputDestination() { + return outputDestination; + } + + public TransferStatus getStatus() { + return status; + } + + public Exception getException() { + return exception; + } + + public static Builder newBuilder(BlobInfo input, TransferStatus status) { + return new Builder(input, status); + } + + public Long getGeneration() { + return generation; + } + + public DownloadResult toResult() { + DownloadResult.Builder b = DownloadResult.newBuilder(input, status); + if (exception != null) { + b.setException(exception); + } + if (outputDestination != null) { + b.setOutputDestination(outputDestination); + } + return b.build(); + } + + @NonNull + public static DownloadResult reduce( + @NonNull DownloadResult result, @NonNull DownloadSegment segment) { + if (TransferStatus.COMPARE_NULL_SAFE.compare(result.getStatus(), segment.getStatus()) <= 0) { + return result; + } else { + return segment.toResult(); + } + } + + public static final class Builder { + + private BlobInfo input; + private Path outputDestination; + private TransferStatus status; + private Exception exception; + private Long generation; + + private Builder(BlobInfo input, TransferStatus status) { + this.input = input; + this.status = 
status; + } + + public Builder setInput(BlobInfo input) { + this.input = input; + return this; + } + + public Builder setOutputDestination(Path outputDestination) { + this.outputDestination = outputDestination; + return this; + } + + public Builder setStatus(TransferStatus status) { + this.status = status; + return this; + } + + public Builder setException(Exception exception) { + this.exception = exception; + return this; + } + + public Builder setGeneration(Long generation) { + this.generation = generation; + return this; + } + + public DownloadSegment build() { + return new DownloadSegment(input, outputDestination, status, exception, generation); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/ParallelCompositeUploadCallable.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/ParallelCompositeUploadCallable.java new file mode 100644 index 000000000000..d306c4a6cff9 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/ParallelCompositeUploadCallable.java @@ -0,0 +1,94 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.storage.transfermanager; + +import com.google.api.core.ApiFuture; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BlobWriteSession; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.StorageException; +import com.google.common.io.ByteStreams; +import java.nio.channels.FileChannel; +import java.nio.channels.WritableByteChannel; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +final class ParallelCompositeUploadCallable implements Callable { + private final Storage storage; + + private final BlobInfo originalBlob; + + private final Path sourceFile; + + private final ParallelUploadConfig parallelUploadConfig; + + private final Storage.BlobWriteOption[] opts; + + public ParallelCompositeUploadCallable( + Storage storage, + BlobInfo originalBlob, + Path sourceFile, + ParallelUploadConfig parallelUploadConfig, + BlobWriteOption[] opts) { + this.storage = storage; + this.originalBlob = originalBlob; + this.sourceFile = sourceFile; + this.parallelUploadConfig = parallelUploadConfig; + this.opts = opts; + } + + public UploadResult call() { + return uploadPCU(); + } + + private UploadResult uploadPCU() { + BlobWriteSession session = storage.blobWriteSession(originalBlob, opts); + try (WritableByteChannel writableByteChannel = session.open(); + FileChannel fc = FileChannel.open(sourceFile, StandardOpenOption.READ)) { + ByteStreams.copy(fc, writableByteChannel); + } catch (StorageException e) { + if (parallelUploadConfig.isSkipIfExists() && e.getCode() == 412) { + return UploadResult.newBuilder(originalBlob, TransferStatus.SKIPPED) + .setException(e) + .build(); + } else { + return UploadResult.newBuilder(originalBlob, 
TransferStatus.FAILED_TO_FINISH) + .setException(e) + .build(); + } + } catch (Exception e) { + return UploadResult.newBuilder(originalBlob, TransferStatus.FAILED_TO_FINISH) + .setException(e) + .build(); + } + try { + ApiFuture result = session.getResult(); + BlobInfo newBlob = result.get(10, TimeUnit.SECONDS); + return UploadResult.newBuilder(originalBlob, TransferStatus.SUCCESS) + .setUploadedBlob(newBlob) + .build(); + } catch (InterruptedException | ExecutionException | TimeoutException e) { + return UploadResult.newBuilder(originalBlob, TransferStatus.FAILED_TO_FINISH) + .setException(e) + .build(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/ParallelDownloadConfig.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/ParallelDownloadConfig.java new file mode 100644 index 000000000000..3e1c6e6fd11c --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/ParallelDownloadConfig.java @@ -0,0 +1,203 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.transfermanager; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; +import java.util.Objects; +import org.checkerframework.checker.nullness.qual.NonNull; + +/** + * Configuration for performing Parallel Downloads with {@link TransferManager}. + * + * @see Builder + */ +public final class ParallelDownloadConfig { + + @NonNull private final String stripPrefix; + @NonNull private final Path downloadDirectory; + @NonNull private final String bucketName; + @NonNull private final List optionsPerRequest; + + private ParallelDownloadConfig( + @NonNull String stripPrefix, + @NonNull Path downloadDirectory, + @NonNull String bucketName, + @NonNull List optionsPerRequest) { + this.stripPrefix = stripPrefix; + this.downloadDirectory = downloadDirectory; + this.bucketName = bucketName; + this.optionsPerRequest = optionsPerRequest; + } + + /** + * A common prefix removed from an object's name before being written to the filesystem. + * + * @see Builder#setStripPrefix(String) + */ + public @NonNull String getStripPrefix() { + return stripPrefix; + } + + /** + * The base directory in which all objects will be placed when downloaded. + * + * @see Builder#setDownloadDirectory(Path) + */ + public @NonNull Path getDownloadDirectory() { + return downloadDirectory; + } + + /** + * The bucket objects are being downloaded from. + * + * @see Builder#setBucketName(String) + */ + public @NonNull String getBucketName() { + return bucketName; + } + + /** + * A list of common BlobSourceOptions that are used for each download request. Note this list of + * options will be applied to every single request. 
+ * + * @see Builder#setOptionsPerRequest(List) + */ + public @NonNull List getOptionsPerRequest() { + return optionsPerRequest; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ParallelDownloadConfig)) { + return false; + } + ParallelDownloadConfig that = (ParallelDownloadConfig) o; + return stripPrefix.equals(that.stripPrefix) + && downloadDirectory.equals(that.downloadDirectory) + && bucketName.equals(that.bucketName) + && optionsPerRequest.equals(that.optionsPerRequest); + } + + @Override + public int hashCode() { + return Objects.hash(stripPrefix, downloadDirectory, bucketName, optionsPerRequest); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("stripPrefix", stripPrefix) + .add("downloadDirectory", downloadDirectory) + .add("bucketName", bucketName) + .add("optionsPerRequest", optionsPerRequest) + .toString(); + } + + /** + * Builds an instance of ParallelDownloadConfig. + * + * @see ParallelDownloadConfig + */ + public static Builder newBuilder() { + return new Builder(); + } + + public static final class Builder { + + @NonNull private String stripPrefix; + @NonNull private Path downloadDirectory; + @NonNull private String bucketName; + @NonNull private List optionsPerRequest; + + private Builder() { + this.stripPrefix = ""; + this.downloadDirectory = Paths.get(""); + this.bucketName = ""; + this.optionsPerRequest = ImmutableList.of(); + } + + /** + * Sets the value for stripPrefix. This string will be removed from the beginning of all object + * names before they are written to the filesystem. + * + * @return the builder instance with the value for stripPrefix modified. + * @see ParallelDownloadConfig#getStripPrefix() + */ + public Builder setStripPrefix(String stripPrefix) { + this.stripPrefix = stripPrefix; + return this; + } + + /** + * Sets the base directory on the filesystem that all objects will be written to. 
+ * + * @return the builder instance with the value for downloadDirectory modified. + * @see ParallelDownloadConfig#getDownloadDirectory() + */ + public Builder setDownloadDirectory(Path downloadDirectory) { + this.downloadDirectory = downloadDirectory.toAbsolutePath().normalize(); + return this; + } + + /** + * Sets the bucketName that Transfer Manager will download from. This field is required. + * + * @return the builder instance with the value for bucketName modified. + * @see ParallelDownloadConfig#getBucketName() + */ + public Builder setBucketName(String bucketName) { + this.bucketName = bucketName; + return this; + } + + /** + * Sets the BlobSourceOptions that will be applied to each download request. Note these options + * will be applied to every single download request. + * + * @return the builder instance with the value for OptionsPerRequest modified. + * @see ParallelDownloadConfig#getOptionsPerRequest() + */ + public Builder setOptionsPerRequest(List optionsPerRequest) { + this.optionsPerRequest = ImmutableList.copyOf(optionsPerRequest); + return this; + } + + /** + * Creates a ParallelDownloadConfig object. 
+ * + * @return {@link ParallelDownloadConfig} + */ + public ParallelDownloadConfig build() { + checkNotNull(bucketName); + checkNotNull(stripPrefix); + checkNotNull(downloadDirectory); + checkNotNull(optionsPerRequest); + return new ParallelDownloadConfig( + stripPrefix, downloadDirectory, bucketName, optionsPerRequest); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/ParallelUploadConfig.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/ParallelUploadConfig.java new file mode 100644 index 000000000000..1497210f8624 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/ParallelUploadConfig.java @@ -0,0 +1,342 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.transfermanager; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import org.checkerframework.checker.nullness.qual.NonNull; + +/** + * Configuration for performing Parallel Uploads with {@link TransferManager}. 
+ * + * @see Builder + */ +public final class ParallelUploadConfig { + + private final boolean skipIfExists; + @NonNull private final String bucketName; + @NonNull private final UploadBlobInfoFactory uploadBlobInfoFactory; + + @NonNull private final List writeOptsPerRequest; + + private ParallelUploadConfig( + boolean skipIfExists, + @NonNull String bucketName, + @NonNull UploadBlobInfoFactory uploadBlobInfoFactory, + @NonNull List writeOptsPerRequest) { + this.skipIfExists = skipIfExists; + this.bucketName = bucketName; + this.uploadBlobInfoFactory = uploadBlobInfoFactory; + this.writeOptsPerRequest = applySkipIfExists(skipIfExists, writeOptsPerRequest); + } + + /** + * If set Transfer Manager will skip uploading an object if it already exists, equivalent to + * providing {@link BlobWriteOption#doesNotExist()} in {@link #getWriteOptsPerRequest()} + * + * @see Builder#setSkipIfExists(boolean) + */ + public boolean isSkipIfExists() { + return skipIfExists; + } + + /** + * A common prefix that will be applied to all object paths in the destination bucket + * + * @see Builder#setPrefix(String) + * @see Builder#setUploadBlobInfoFactory(UploadBlobInfoFactory) + * @see UploadBlobInfoFactory#prefixObjectNames(String) + */ + public @NonNull String getPrefix() { + if (uploadBlobInfoFactory instanceof PrefixObjectNames) { + PrefixObjectNames prefixObjectNames = (PrefixObjectNames) uploadBlobInfoFactory; + return prefixObjectNames.prefix; + } + return ""; + } + + /** + * The {@link UploadBlobInfoFactory} which will be used to produce a {@link BlobInfo}s based on a + * provided bucket name and file name. 
+ * + * @see Builder#setUploadBlobInfoFactory(UploadBlobInfoFactory) + * @since 2.49.0 + */ + public @NonNull UploadBlobInfoFactory getUploadBlobInfoFactory() { + return uploadBlobInfoFactory; + } + + /** + * The bucket objects are being uploaded from + * + * @see Builder#setBucketName(String) + */ + public @NonNull String getBucketName() { + return bucketName; + } + + /** + * A list of common BlobWriteOptions, note these options will be applied to each upload request. + * + * @see Builder#setWriteOptsPerRequest(List) + */ + public @NonNull List getWriteOptsPerRequest() { + return writeOptsPerRequest; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ParallelUploadConfig that = (ParallelUploadConfig) o; + return skipIfExists == that.skipIfExists + && bucketName.equals(that.bucketName) + && uploadBlobInfoFactory.equals(that.uploadBlobInfoFactory) + && writeOptsPerRequest.equals(that.writeOptsPerRequest); + } + + @Override + public int hashCode() { + return Objects.hash(skipIfExists, bucketName, uploadBlobInfoFactory, writeOptsPerRequest); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("skipIfExists", skipIfExists) + .add("bucketName", bucketName) + .add("uploadBlobInfoFactory", uploadBlobInfoFactory) + .add("writeOptsPerRequest", writeOptsPerRequest) + .toString(); + } + + public static Builder newBuilder() { + return new Builder(); + } + + private static List applySkipIfExists( + boolean skipIfExists, List writeOptsPerRequest) { + if (skipIfExists) { + return ImmutableList.copyOf( + BlobWriteOption.dedupe(writeOptsPerRequest, BlobWriteOption.doesNotExist())); + } + return writeOptsPerRequest; + } + + /** + * Builds an instance of ParallelUploadConfig. 
+ * + * @see ParallelUploadConfig + */ + public static final class Builder { + + private boolean skipIfExists; + private @NonNull String bucketName; + private @NonNull UploadBlobInfoFactory uploadBlobInfoFactory; + private @NonNull List writeOptsPerRequest; + + private Builder() { + this.bucketName = ""; + this.uploadBlobInfoFactory = UploadBlobInfoFactory.defaultInstance(); + this.writeOptsPerRequest = ImmutableList.of(); + } + + /** + * Sets the parameter for skipIfExists. When set to true Transfer Manager will skip uploading an + * object if it already exists. + * + * @return the builder instance with the value for skipIfExists modified. + * @see ParallelUploadConfig#isSkipIfExists() + */ + public Builder setSkipIfExists(boolean skipIfExists) { + this.skipIfExists = skipIfExists; + return this; + } + + /** + * Sets a common prefix that will be applied to all object paths in the destination bucket. + * + *

NOTE: this method and {@link #setUploadBlobInfoFactory(UploadBlobInfoFactory)} are + * mutually exclusive, and last invocation "wins". + * + * @return the builder instance with the value for prefix modified. + * @see ParallelUploadConfig#getPrefix() + * @see ParallelUploadConfig.Builder#setUploadBlobInfoFactory(UploadBlobInfoFactory) + * @see UploadBlobInfoFactory#prefixObjectNames(String) + */ + public Builder setPrefix(@NonNull String prefix) { + this.uploadBlobInfoFactory = UploadBlobInfoFactory.prefixObjectNames(prefix); + return this; + } + + /** + * Sets a {@link UploadBlobInfoFactory} which can be used to produce a custom BlobInfo based on + * a provided bucket name and file name. + * + *

The bucket name in the returned BlobInfo MUST be equal to the value provided to {@link + * #setBucketName(String)}, if not that upload will fail with a {@link + * TransferStatus#FAILED_TO_START} and a {@link BucketNameMismatchException}. + * + *

NOTE: this method and {@link #setPrefix(String)} are mutually exclusive, and last + * invocation "wins". + * + * @return the builder instance with the value for uploadBlobInfoFactory modified. + * @see ParallelUploadConfig#getPrefix() + * @see ParallelUploadConfig#getUploadBlobInfoFactory() + * @since 2.49.0 + */ + public Builder setUploadBlobInfoFactory(@NonNull UploadBlobInfoFactory uploadBlobInfoFactory) { + this.uploadBlobInfoFactory = uploadBlobInfoFactory; + return this; + } + + /** + * Sets the bucketName that Transfer Manager will upload to. This field is required. + * + * @return the builder instance with the value for bucketName modified. + * @see ParallelUploadConfig#getBucketName() + */ + public Builder setBucketName(@NonNull String bucketName) { + this.bucketName = bucketName; + return this; + } + + /** + * Sets the BlobWriteOptions that will be applied to each upload request. Note these options + * will be applied to every single upload request. + * + * @return the builder instance with the value for WriteOptsPerRequest modified. + * @see ParallelUploadConfig#getWriteOptsPerRequest() + */ + public Builder setWriteOptsPerRequest(@NonNull List writeOptsPerRequest) { + this.writeOptsPerRequest = writeOptsPerRequest; + return this; + } + + /** + * Creates a ParallelUploadConfig object. + * + * @return {@link ParallelUploadConfig} + */ + public ParallelUploadConfig build() { + checkNotNull(bucketName); + checkNotNull(uploadBlobInfoFactory); + checkNotNull(writeOptsPerRequest); + return new ParallelUploadConfig( + skipIfExists, bucketName, uploadBlobInfoFactory, writeOptsPerRequest); + } + } + + public interface UploadBlobInfoFactory { + + /** + * Method to produce a {@link BlobInfo} to be used for the upload to Cloud Storage. + * + *

The bucket name in the returned BlobInfo MUST be equal to the value provided to the {@link + * ParallelUploadConfig.Builder#setBucketName(String)}, if not that upload will fail with a + * {@link TransferStatus#FAILED_TO_START} and a {@link BucketNameMismatchException}. + * + * @param bucketName The name of the bucket to be uploaded to. The value provided here will be + * the value from {@link ParallelUploadConfig#getBucketName()}. + * @param fileName The String representation of the absolute path of the file to be uploaded + * @return The instance of {@link BlobInfo} that should be used to upload the file to Cloud + * Storage. + */ + BlobInfo apply(String bucketName, String fileName); + + /** + * Adapter factory to provide the same semantics as if using {@link Builder#setPrefix(String)} + */ + static UploadBlobInfoFactory prefixObjectNames(String prefix) { + return new PrefixObjectNames(prefix); + } + + /** The default instance which applies not modification to the provided {@code fileName} */ + static UploadBlobInfoFactory defaultInstance() { + return DefaultUploadBlobInfoFactory.INSTANCE; + } + + /** + * Convenience method to "lift" a {@link Function} that transforms the file name to an {@link + * UploadBlobInfoFactory} + */ + static UploadBlobInfoFactory transformFileName(Function fileNameTransformer) { + return (b, f) -> BlobInfo.newBuilder(b, fileNameTransformer.apply(f)).build(); + } + } + + private static final class DefaultUploadBlobInfoFactory implements UploadBlobInfoFactory { + private static final DefaultUploadBlobInfoFactory INSTANCE = new DefaultUploadBlobInfoFactory(); + + private DefaultUploadBlobInfoFactory() {} + + @Override + public BlobInfo apply(String bucketName, String fileName) { + return BlobInfo.newBuilder(bucketName, fileName).build(); + } + } + + private static final class PrefixObjectNames implements UploadBlobInfoFactory { + private final String prefix; + + private PrefixObjectNames(String prefix) { + this.prefix = prefix; + } + + 
@Override + public BlobInfo apply(String bucketName, String fileName) { + String separator = ""; + if (!fileName.startsWith("/")) { + separator = "/"; + } + return BlobInfo.newBuilder(bucketName, prefix + separator + fileName).build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof PrefixObjectNames)) { + return false; + } + PrefixObjectNames that = (PrefixObjectNames) o; + return Objects.equals(prefix, that.prefix); + } + + @Override + public int hashCode() { + return Objects.hashCode(prefix); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("prefix", prefix).toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/PathTraversalBlockedException.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/PathTraversalBlockedException.java new file mode 100644 index 000000000000..787142a7e82a --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/PathTraversalBlockedException.java @@ -0,0 +1,37 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.transfermanager; + +import java.nio.file.Path; +import java.util.Locale; + +/** + * Exception thrown when a download is blocked because the object name would result in a path + * traversal outside the target directory. + */ +public final class PathTraversalBlockedException extends RuntimeException { + + public PathTraversalBlockedException(String objectName, Path targetDirectory) { + super( + String.format( + Locale.US, + "Download of object '%s' was blocked because it would escape the target directory" + + " '%s'.", + objectName, + targetDirectory)); + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/Qos.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/Qos.java new file mode 100644 index 000000000000..492cf3d1f2dd --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/Qos.java @@ -0,0 +1,24 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.transfermanager; + +interface Qos { + + boolean divideAndConquer(long objectSize); + + boolean parallelCompositeUpload(long objectSize); +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/TransferManager.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/TransferManager.java new file mode 100644 index 000000000000..6a9183cd6d40 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/TransferManager.java @@ -0,0 +1,89 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.transfermanager; + +import com.google.cloud.storage.BlobInfo; +import java.io.IOException; +import java.nio.file.Path; +import java.util.List; +import org.checkerframework.checker.nullness.qual.NonNull; + +/** + * An interface for a Transfer Manager. + * + *

Transfer Manager handles Parallel Uploads and Parallel Downloads. + */ +public interface TransferManager extends AutoCloseable { + + /** + * Uploads a list of files in parallel. This operation will not block the invoking thread, + * awaiting results should be done on the returned UploadJob. + * + *

Accepts a {@link ParallelUploadConfig} which defines the constraints of parallel uploads or + * predefined defaults. + * + *

Example of creating a parallel upload with Transfer Manager. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * Path filePath = Paths.get("/path/to/my/file.txt");
+   * Path anotherFilePath = Paths.get("/path/to/another/file.txt");
+   * List files = List.of(filePath, anotherFilePath);
+   *
+   * ParallelUploadConfig parallelUploadConfig =
+   *           ParallelUploadConfig.newBuilder()
+   *               .setBucketName(bucketName)
+   *               .build();
+   *
+   * UploadJob uploadedFiles = transferManager.uploadFiles(files, config);
+   *
+   * }
+ * + * @return an {@link UploadJob} + */ + @NonNull UploadJob uploadFiles(List files, ParallelUploadConfig config) throws IOException; + + /** + * Downloads a list of blobs in parallel. This operation will not block the invoking thread, + * awaiting results should be done on the returned DownloadJob. + * + *

Accepts a {@link ParallelDownloadConfig} which defines the constraints of parallel downloads + * or predefined defaults. + * + *

Example of creating a parallel download with Transfer Manager. + * + *

{@code
+   * String bucketName = "my-unique-bucket";
+   * String blobName = "my-blob-name";
+   * BlobId blobId = BlobId.of(bucketName, blobName);
+   * BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build();
+   * Path baseDir = Paths.get("/path/to/directory/");
+   *
+   * ParallelDownloadConfig parallelDownloadConfig =
+   *           ParallelDownloadConfig.newBuilder()
+   *               .setBucketName(bucketName)
+   *               .setDownloadDirectory(baseDir)
+   *               .build();
+   *
+   * DownloadJob downloadedBlobs = transferManager.downloadBlobs(files, config);
+   *
+   * }
+ * + * @return a {@link DownloadJob} + */ + @NonNull DownloadJob downloadBlobs(List blobs, ParallelDownloadConfig config); +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/TransferManagerConfig.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/TransferManagerConfig.java new file mode 100644 index 000000000000..fa753aee7e55 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/TransferManagerConfig.java @@ -0,0 +1,298 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.transfermanager; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartNamingStrategy; +import com.google.cloud.storage.StorageOptions; +import com.google.common.base.MoreObjects; +import java.util.Objects; + +/** + * Configuration for an instance of {@link TransferManager} + * + * @see Builder + */ +public final class TransferManagerConfig { + private final int maxWorkers; + private final int perWorkerBufferSize; + private final boolean allowDivideAndConquerDownload; + private final boolean allowParallelCompositeUpload; + + private final PartNamingStrategy partNamingStrategy; + + private final StorageOptions storageOptions; + + TransferManagerConfig( + int maxWorkers, + int perWorkerBufferSize, + boolean allowDivideAndConquerDownload, + boolean allowParallelCompositeUpload, + PartNamingStrategy partNamingStrategy, + StorageOptions storageOptions) { + this.maxWorkers = maxWorkers; + this.perWorkerBufferSize = perWorkerBufferSize; + this.allowDivideAndConquerDownload = allowDivideAndConquerDownload; + this.allowParallelCompositeUpload = allowParallelCompositeUpload; + this.partNamingStrategy = partNamingStrategy; + this.storageOptions = storageOptions; + } + + /** + * Maximum amount of workers to be allocated to perform work in Transfer Manager + * + * @see Builder#setMaxWorkers(int) + */ + public int getMaxWorkers() { + return maxWorkers; + } + + /** + * Buffer size allowed to each worker + * + * @see Builder#setPerWorkerBufferSize(int) + */ + public int getPerWorkerBufferSize() { + return perWorkerBufferSize; + } + + /** + * Whether to allow Transfer Manager to perform chunked Uploads/Downloads if it determines + * chunking will be beneficial + * + * @see Builder#setAllowDivideAndConquerDownload(boolean) + */ + public boolean isAllowDivideAndConquerDownload() { + return allowDivideAndConquerDownload; + } + + /** + * Whether to 
allow Transfer Manager to perform Parallel Composite Uploads if it determines + * chunking will be beneficial + * + * @see Builder#setAllowParallelCompositeUpload(boolean) + *

Note: Performing parallel composite uploads costs more money. Class A operations + * are performed to create each part and to perform each compose. If a storage tier other than + * STANDARD + * is used, early deletion fees apply to deletion of the parts. + *

Please see the Parallel composite + * uploads documentation for a more in depth explanation of the limitations of Parallel + * composite uploads. + */ + public boolean isAllowParallelCompositeUpload() { + return allowParallelCompositeUpload; + } + + /** + * Storage options that Transfer Manager will use to interact with Google Cloud Storage + * + * @see Builder#setStorageOptions(StorageOptions) + */ + public StorageOptions getStorageOptions() { + return storageOptions; + } + + /** + * Part Naming Strategy to be used during Parallel Composite Uploads + * + * @see Builder#setParallelCompositeUploadPartNamingStrategy(PartNamingStrategy) + */ + public PartNamingStrategy getParallelCompositeUploadPartNamingStrategy() { + return partNamingStrategy; + } + + /** The service object for {@link TransferManager} */ + public TransferManager getService() { + return new TransferManagerImpl(this, DefaultQos.of(this)); + } + + public Builder toBuilder() { + return new Builder() + .setAllowDivideAndConquerDownload(allowDivideAndConquerDownload) + .setAllowParallelCompositeUpload(allowParallelCompositeUpload) + .setMaxWorkers(maxWorkers) + .setPerWorkerBufferSize(perWorkerBufferSize) + .setStorageOptions(storageOptions); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof TransferManagerConfig)) { + return false; + } + TransferManagerConfig that = (TransferManagerConfig) o; + return maxWorkers == that.maxWorkers + && perWorkerBufferSize == that.perWorkerBufferSize + && allowDivideAndConquerDownload == that.allowDivideAndConquerDownload + && allowParallelCompositeUpload == that.allowParallelCompositeUpload + && Objects.equals(storageOptions, that.storageOptions); + } + + @Override + public int hashCode() { + return Objects.hash( + maxWorkers, + perWorkerBufferSize, + allowDivideAndConquerDownload, + allowParallelCompositeUpload, + storageOptions); + } + + @Override + public String toString() { + return 
MoreObjects.toStringHelper(this) + .add("maxWorkers", maxWorkers) + .add("perWorkerBufferSize", perWorkerBufferSize) + .add("allowDivideAndConquerDownload", allowDivideAndConquerDownload) + .add("allowParallelCompositeUpload", allowParallelCompositeUpload) + .add("storageOptions", storageOptions) + .toString(); + } + + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Builds an instance of TransferManagerConfig + * + * @see TransferManagerConfig + */ + public static class Builder { + + private int maxWorkers; + private int perWorkerBufferSize; + private boolean allowDivideAndConquerDownload; + private boolean allowParallelCompositeUpload; + + private StorageOptions storageOptions; + private PartNamingStrategy partNamingStrategy; + + private Builder() { + this.perWorkerBufferSize = 16 * 1024 * 1024; + this.maxWorkers = 2 * Runtime.getRuntime().availableProcessors(); + this.allowDivideAndConquerDownload = false; + this.allowParallelCompositeUpload = false; + this.storageOptions = StorageOptions.getDefaultInstance(); + this.partNamingStrategy = PartNamingStrategy.noPrefix(); + } + + /** + * Maximum amount of workers to be allocated to perform work in Transfer Manager + * + *

Default Value: {@code 2 * }{@link Runtime#getRuntime()}{@code .}{@link + * Runtime#availableProcessors() availableProcessors()} + * + * @return the instance of Builder with the value for maxWorkers modified. + * @see TransferManagerConfig#getMaxWorkers() + */ + public Builder setMaxWorkers(int maxWorkers) { + this.maxWorkers = maxWorkers; + return this; + } + + /** + * Buffer size allowed to each worker + * + *

Default Value: 16MiB + * + * @return the instance of Builder with the value for maxWorkers modified. + * @see TransferManagerConfig#getPerWorkerBufferSize() + */ + public Builder setPerWorkerBufferSize(int perWorkerBufferSize) { + this.perWorkerBufferSize = perWorkerBufferSize; + return this; + } + + /** + * Whether to allow Transfer Manager to perform chunked Uploads/Downloads if it determines + * chunking will be beneficial + * + *

Default Value: false + * + * @return the instance of Builder with the value for allowDivideAndConquerDownload modified. + * @see TransferManagerConfig#isAllowDivideAndConquerDownload() + */ + public Builder setAllowDivideAndConquerDownload(boolean allowDivideAndConquerDownload) { + this.allowDivideAndConquerDownload = allowDivideAndConquerDownload; + return this; + } + + /** + * Whether to allow Transfer Manager to perform Parallel Composite Uploads if it determines + * chunking will be beneficial + * + *

Default Value: false + * + * @return the instance of Builder with the value for allowDivideAndConquerDownload modified. + * @see TransferManagerConfig#isAllowDivideAndConquerDownload() + */ + public Builder setAllowParallelCompositeUpload(boolean allowParallelCompositeUpload) { + this.allowParallelCompositeUpload = allowParallelCompositeUpload; + return this; + } + + /** + * Storage options that Transfer Manager will use to interact with Google Cloud Storage + * + *

Default Value: {@link StorageOptions#getDefaultInstance()} + * + * @return the instance of Builder with the value for storageOptions modified. + * @see TransferManagerConfig#getStorageOptions() + */ + public Builder setStorageOptions(StorageOptions storageOptions) { + this.storageOptions = storageOptions; + return this; + } + + /** + * Part Naming Strategy that Transfer Manager will use during Parallel Composite Upload + * + *

Default Value: {@link PartNamingStrategy#noPrefix()} + * + * @return the instance of Builder with the value for PartNamingStrategy modified. + * @see TransferManagerConfig#getParallelCompositeUploadPartNamingStrategy() + */ + public Builder setParallelCompositeUploadPartNamingStrategy( + PartNamingStrategy partNamingStrategy) { + checkNotNull(partNamingStrategy); + this.partNamingStrategy = partNamingStrategy; + return this; + } + + /** + * Creates a TransferManagerConfig object. + * + * @return {@link TransferManagerConfig} + */ + public TransferManagerConfig build() { + return new TransferManagerConfig( + maxWorkers, + perWorkerBufferSize, + allowDivideAndConquerDownload, + allowParallelCompositeUpload, + partNamingStrategy, + storageOptions); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/TransferManagerImpl.java b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/TransferManagerImpl.java new file mode 100644 index 000000000000..aa1cfadf1ec0 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/java/com/google/cloud/storage/transfermanager/TransferManagerImpl.java @@ -0,0 +1,307 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.storage.transfermanager;

import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.api.core.ListenableFutureToApiFuture;
import com.google.api.core.SettableApiFuture;
import com.google.api.gax.rpc.FixedHeaderProvider;
import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.BlobWriteSessionConfigs;
import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig;
import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.BufferAllocationStrategy;
import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.ExecutorSupplier;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.Storage.BlobWriteOption;
import com.google.cloud.storage.StorageOptions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.BinaryOperator;
import org.checkerframework.checker.nullness.qual.NonNull;

/**
 * Default implementation of {@link TransferManager}.
 *
 * <p>All transfer work runs on a fixed-size thread pool sized by {@link
 * TransferManagerConfig#getMaxWorkers()}. When parallel composite uploads (PCU) are enabled, the
 * same pool is handed to the PCU machinery as its worker pool, so queued PCU entry points are
 * drained one at a time by a single {@link PcuPoller} instead of being submitted directly (see
 * the PcuPoller javadoc for why).
 */
final class TransferManagerImpl implements TransferManager {

  private static final String USER_AGENT_ENTRY = "gcloud-tm/";
  private static final String LIBRARY_VERSION = StorageOptions.version();
  private final TransferManagerConfig transferManagerConfig;
  private final ListeningExecutorService executor;
  private final Qos qos;
  private final Storage storage;

  // Queue of PCU entry points waiting to be driven by the (single) PcuPoller.
  private final Deque<PendingPcuTask> pcuQueue;
  // define a unique object which we can use to synchronize modification of pcuPoller
  private final Object pcuPollerSync = new Object();
  // Non-null while a PcuPoller is scheduled; guarded by pcuPollerSync for writes,
  // volatile so the double-checked reads in (de)schedulePcuPoller are safe.
  private volatile ApiFuture<?> pcuPoller;

  TransferManagerImpl(TransferManagerConfig transferManagerConfig, Qos qos) {
    this.transferManagerConfig = transferManagerConfig;
    this.executor =
        MoreExecutors.listeningDecorator(
            Executors.newFixedThreadPool(transferManagerConfig.getMaxWorkers()));
    this.qos = qos;
    StorageOptions storageOptions = transferManagerConfig.getStorageOptions();
    // Tag outgoing requests with a transfer-manager specific User-Agent unless one is already set.
    String userAgent = storageOptions.getUserAgent();
    if (userAgent == null || !userAgent.contains(USER_AGENT_ENTRY)) {
      storageOptions =
          storageOptions.toBuilder()
              .setHeaderProvider(
                  FixedHeaderProvider.create(
                      ImmutableMap.of("User-Agent", USER_AGENT_ENTRY + LIBRARY_VERSION)))
              .build();
    }
    // Create the blobWriteSessionConfig for ParallelCompositeUpload
    if (transferManagerConfig.isAllowParallelCompositeUpload()) {
      ParallelCompositeUploadBlobWriteSessionConfig pcuConfig =
          BlobWriteSessionConfigs.parallelCompositeUpload()
              // PCU shares this manager's executor as its worker pool
              .withExecutorSupplier(ExecutorSupplier.useExecutor(executor))
              .withBufferAllocationStrategy(
                  BufferAllocationStrategy.fixedPool(
                      transferManagerConfig.getMaxWorkers(),
                      transferManagerConfig.getPerWorkerBufferSize()))
              .withPartNamingStrategy(
                  transferManagerConfig.getParallelCompositeUploadPartNamingStrategy());
      storageOptions = storageOptions.toBuilder().setBlobWriteSessionConfig(pcuConfig).build();
    }
    this.pcuQueue = new ConcurrentLinkedDeque<>();
    this.storage = storageOptions.getService();
  }

  @Override
  public void close() throws Exception {
    // We only want to shutdown the executor service not the provided storage instance
    executor.shutdownNow();
    executor.awaitTermination(5, TimeUnit.MINUTES);
  }

  /**
   * Submits one upload task per file and returns immediately; individual results are futures
   * inside the returned {@link UploadJob}.
   *
   * <p>Files whose computed destination bucket does not match the config fail fast with {@link
   * TransferStatus#FAILED_TO_START}. Large files are routed through the PCU queue when PCU is
   * enabled and {@link Qos#parallelCompositeUpload(long)} approves.
   *
   * @throws IOException if a file's size cannot be read
   * @throws IllegalStateException if any entry in {@code files} is a directory; note tasks
   *     submitted before the offending entry are already running at that point
   */
  @Override
  public @NonNull UploadJob uploadFiles(List<Path> files, ParallelUploadConfig config)
      throws IOException {
    Storage.BlobWriteOption[] opts =
        config.getWriteOptsPerRequest().toArray(new BlobWriteOption[0]);
    List<ApiFuture<UploadResult>> uploadTasks = new ArrayList<>();
    for (Path file : files) {
      if (Files.isDirectory(file)) throw new IllegalStateException("Directories are not supported");
      String bucketName = config.getBucketName();
      BlobInfo blobInfo =
          config.getUploadBlobInfoFactory().apply(bucketName, file.toAbsolutePath().toString());
      if (!blobInfo.getBucket().equals(bucketName)) {
        // The user-supplied factory produced a blob for a different bucket: fail this entry
        // without contacting the service.
        uploadTasks.add(
            ApiFutures.immediateFuture(
                UploadResult.newBuilder(blobInfo, TransferStatus.FAILED_TO_START)
                    .setException(new BucketNameMismatchException(blobInfo.getBucket(), bucketName))
                    .build()));
        continue;
      }
      if (transferManagerConfig.isAllowParallelCompositeUpload()
          && qos.parallelCompositeUpload(Files.size(file))) {
        // Queue for the PcuPoller rather than submitting directly; see PcuPoller javadoc.
        ParallelCompositeUploadCallable callable =
            new ParallelCompositeUploadCallable(storage, blobInfo, file, config, opts);
        SettableApiFuture<UploadResult> resultFuture = SettableApiFuture.create();
        pcuQueue.add(new PendingPcuTask(callable, resultFuture));
        uploadTasks.add(resultFuture);
        schedulePcuPoller();
      } else {
        UploadCallable callable =
            new UploadCallable(transferManagerConfig, storage, blobInfo, file, config, opts);
        uploadTasks.add(convert(executor.submit(callable)));
      }
    }
    return UploadJob.newBuilder()
        .setParallelUploadConfig(config)
        .setUploadResults(ImmutableList.copyOf(uploadTasks))
        .build();
  }

  /**
   * Submits one download task per blob and returns immediately; individual results are futures
   * inside the returned {@link DownloadJob}.
   *
   * <p>Blobs whose destination path would escape the download directory fail fast with {@link
   * TransferStatus#FAILED_TO_START}. When divide-and-conquer is enabled and {@link
   * Qos#divideAndConquer(long)} approves, a blob is fetched as multiple ranged segments whose
   * results are reduced into a single {@link DownloadResult}.
   */
  @Override
  public @NonNull DownloadJob downloadBlobs(List<BlobInfo> blobs, ParallelDownloadConfig config) {
    Storage.BlobSourceOption[] opts =
        config.getOptionsPerRequest().toArray(new Storage.BlobSourceOption[0]);
    List<ApiFuture<DownloadResult>> downloadTasks = new ArrayList<>();
    for (BlobInfo blob : blobs) {
      Path destPath = TransferManagerUtils.createAndValidateDestPath(config, blob);
      if (destPath == null) {
        // null signals a path-traversal attempt; refuse to start this transfer.
        DownloadResult skipped =
            DownloadResult.newBuilder(blob, TransferStatus.FAILED_TO_START)
                .setException(
                    new PathTraversalBlockedException(
                        blob.getName(), config.getDownloadDirectory()))
                .build();
        downloadTasks.add(ApiFutures.immediateFuture(skipped));
        continue;
      }
      if (transferManagerConfig.isAllowDivideAndConquerDownload()) {
        // Ranged reads need size and generation; fetch metadata if either is missing.
        BlobInfo validatedBlob = retrieveSizeAndGeneration(storage, blob, config.getBucketName());
        if (validatedBlob != null && qos.divideAndConquer(validatedBlob.getSize())) {
          // Assume success up front; any failed segment replaces this via the reduction below.
          DownloadResult optimisticResult =
              DownloadResult.newBuilder(validatedBlob, TransferStatus.SUCCESS)
                  .setOutputDestination(destPath)
                  .build();

          List<ApiFuture<DownloadSegment>> downloadSegmentTasks =
              computeRanges(validatedBlob.getSize(), transferManagerConfig.getPerWorkerBufferSize())
                  .stream()
                  .map(
                      r ->
                          new ChunkedDownloadCallable(
                              storage, validatedBlob, opts, destPath, r.begin, r.end))
                  .map(executor::submit)
                  .map(TransferManagerImpl::convert)
                  .collect(ImmutableList.toImmutableList());

          // Reduce all segment outcomes to the single "worst" result per DownloadResult.COMPARATOR.
          downloadTasks.add(
              ApiFutures.transform(
                  ApiFutures.allAsList(downloadSegmentTasks),
                  segments ->
                      segments.stream()
                          .reduce(
                              optimisticResult,
                              DownloadSegment::reduce,
                              BinaryOperator.minBy(DownloadResult.COMPARATOR)),
                  MoreExecutors.directExecutor()));
          continue;
        }
      }
      // Default path: single-request download of the whole object.
      DirectDownloadCallable callable =
          new DirectDownloadCallable(storage, blob, config, opts, destPath);
      downloadTasks.add(convert(executor.submit(callable)));
    }

    return DownloadJob.newBuilder()
        .setDownloadResults(downloadTasks)
        .setParallelDownloadConfig(config)
        .build();
  }

  // Double-checked: submit a PcuPoller only if one is not already scheduled.
  private void schedulePcuPoller() {
    if (pcuPoller == null) {
      synchronized (pcuPollerSync) {
        if (pcuPoller == null) {
          pcuPoller = convert(executor.submit(new PcuPoller()));
        }
      }
    }
  }

  // Clears the scheduled-poller marker so a later enqueue starts a fresh PcuPoller.
  // NOTE(review): there appears to be a window between PcuPoller observing an empty queue and
  // this clearing pcuPoller, during which uploadFiles could enqueue a task and see a non-null
  // pcuPoller, leaving that task undrained until the next schedule -- confirm intended handling.
  private void deschedulePcuPoller() {
    if (pcuPoller != null) {
      synchronized (pcuPollerSync) {
        if (pcuPoller != null) {
          pcuPoller = null;
        }
      }
    }
  }

  /** Adapts a Guava ListenableFuture to a gax ApiFuture. */
  private static <T> ApiFuture<T> convert(ListenableFuture<T> lf) {
    return new ListenableFutureToApiFuture<>(lf);
  }

  /**
   * Ensures the blob metadata needed for ranged downloads (size, generation) is present,
   * fetching it from the service when missing. Returns the input unchanged when both are set;
   * may return null if the service lookup finds no object.
   */
  private static BlobInfo retrieveSizeAndGeneration(
      Storage storage, BlobInfo blobInfo, String bucketName) {
    if (blobInfo.getGeneration() == null) {
      // No generation pinned: fetch the latest metadata for the object.
      return storage.get(BlobId.of(bucketName, blobInfo.getName()));
    } else if (blobInfo.getSize() == null) {
      // Generation pinned but size unknown: fetch metadata for that specific generation.
      return storage.get(BlobId.of(bucketName, blobInfo.getName(), blobInfo.getGeneration()));
    }
    return blobInfo;
  }

  /** Splits [0, end) into consecutive half-open ranges of at most segmentSize bytes. */
  private static ImmutableList<Range> computeRanges(long end, long segmentSize) {
    ImmutableList.Builder<Range> b = ImmutableList.builder();

    if (end <= segmentSize) {
      b.add(Range.of(0, end));
    } else {
      for (long i = 0; i < end; i += segmentSize) {
        b.add(Range.of(i, Math.min(i + segmentSize, end)));
      }
    }
    return b.build();
  }

  /** A half-open byte range [begin, end) of an object. */
  private static final class Range {
    private final long begin;
    private final long end;

    private Range(long begin, long end) {
      this.begin = begin;
      this.end = end;
    }

    public static Range of(long begin, long end) {
      return new Range(begin, end);
    }
  }

  /**
   * When performing a Parallel composite upload, the thread pool we perform work on is shared as
   * the PCU worker pool. Because of this, if we submit our work to the executor service and take
   * all the threads waiting for PCU uploads to complete, the PCU work doesn't have any threads
   * available to itself.
   *
   * <p>This class represents a single worker that will be submitted to the executor service and
   * will poll a queue to process a single PCU at a time, leaving any other threads free for PCU
   * work.
   */
  private final class PcuPoller implements Runnable {

    @Override
    public void run() {
      do {
        PendingPcuTask poll = pcuQueue.poll();
        if (poll == null) {
          // Queue drained: unmark ourselves so a future enqueue schedules a new poller.
          deschedulePcuPoller();
          return;
        }

        try {
          UploadResult result = poll.callable.call();
          poll.resultFuture.set(result);
        } catch (Throwable e) {
          // Fail this task's future, then propagate.
          // NOTE(review): the rethrow exits run() without deschedulePcuPoller(), leaving
          // pcuPoller non-null while no poller runs; remaining queued tasks would not be
          // drained until a new schedule clears it -- confirm this is intended.
          poll.resultFuture.setException(e);
          throw e;
        }

      } while (true);
    }
  }

  /** Pairing of a queued PCU callable with the future its result will be delivered on. */
  private static final class PendingPcuTask {
    private final ParallelCompositeUploadCallable callable;
    private final SettableApiFuture<UploadResult> resultFuture;

    private PendingPcuTask(
        ParallelCompositeUploadCallable callable, SettableApiFuture<UploadResult> resultFuture) {
      this.callable = callable;
      this.resultFuture = resultFuture;
    }
  }
}
package com.google.cloud.storage.transfermanager;

import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.StorageException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

/** Internal helpers shared by the Transfer Manager implementation. */
final class TransferManagerUtils {

  private TransferManagerUtils() {}

  /**
   * Resolves the on-disk destination for {@code originalBlob} beneath the configured download
   * directory, creating any missing parent directories.
   *
   * <p>Returns {@code null} when the blob name, after prefix stripping and normalization, would
   * escape the download directory (e.g. via ".." segments); callers treat that as a transfer
   * that failed to start.
   *
   * @throws StorageException if the parent directories cannot be created
   */
  static Path createAndValidateDestPath(ParallelDownloadConfig config, BlobInfo originalBlob) {
    Path base = config.getDownloadDirectory();
    // NOTE(review): replaceFirst interprets the strip prefix as a regex; this assumes the
    // configured prefix contains no regex metacharacters -- confirm against
    // ParallelDownloadConfig's documented contract.
    String relativeName = originalBlob.getName().replaceFirst(config.getStripPrefix(), "");
    Path dest = base.resolve(relativeName).normalize();

    // Security check: after normalization the destination must still live under the download
    // directory; otherwise a crafted object name could write outside of it.
    if (!dest.startsWith(base)) {
      return null;
    }

    Path parent = dest.getParent();
    if (!Files.exists(parent)) {
      try {
        Files.createDirectories(parent);
      } catch (IOException e) {
        throw new StorageException(e);
      }
    }
    return dest;
  }
}
import java.util.Comparator;

/** The status of an Upload/Download operation performed by Transfer Manager. */
public enum TransferStatus {
  /** The transfer failed before bytes could be moved. */
  FAILED_TO_START,
  /** The transfer failed after bytes could be moved. */
  FAILED_TO_FINISH,
  /**
   * The transfer failed because the object/file already exists and skipIfExists was set to true.
   *
   * @see ParallelUploadConfig.Builder#setSkipIfExists(boolean)
   */
  SKIPPED,
  /** The transfer was successful. */
  SUCCESS;

  /**
   * Declaration-order comparison with null sorting after every concrete value. Enum natural
   * order is ordinal order, so this matches comparing ordinals directly.
   */
  static final Comparator<TransferStatus> COMPARE_NULL_SAFE =
      Comparator.nullsLast(Comparator.naturalOrder());
}
package com.google.cloud.storage.transfermanager;

import com.google.cloud.storage.Blob;
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.Storage.BlobWriteOption;
import com.google.cloud.storage.StorageException;
import java.nio.file.Path;
import java.util.concurrent.Callable;

/**
 * Uploads a single local file to its destination blob. Transfer failures never propagate as
 * exceptions from {@link #call()}; they are encoded in the returned {@link UploadResult}.
 */
final class UploadCallable implements Callable<UploadResult> {

  private final TransferManagerConfig transferManagerConfig;
  private final Storage storage;
  private final BlobInfo originalBlob;
  private final Path sourceFile;
  private final ParallelUploadConfig parallelUploadConfig;
  private final Storage.BlobWriteOption[] opts;

  public UploadCallable(
      TransferManagerConfig transferManagerConfig,
      Storage storage,
      BlobInfo originalBlob,
      Path sourceFile,
      ParallelUploadConfig parallelUploadConfig,
      BlobWriteOption[] opts) {
    this.transferManagerConfig = transferManagerConfig;
    this.storage = storage;
    this.originalBlob = originalBlob;
    this.sourceFile = sourceFile;
    this.parallelUploadConfig = parallelUploadConfig;
    this.opts = opts;
  }

  @Override
  public UploadResult call() throws Exception {
    // TODO: Check for chunking
    return uploadWithoutChunking();
  }

  /** Single-request upload of the entire file, mapping outcomes onto TransferStatus values. */
  private UploadResult uploadWithoutChunking() {
    try {
      Blob created = storage.createFrom(originalBlob, sourceFile, opts);
      return UploadResult.newBuilder(originalBlob, TransferStatus.SUCCESS)
          .setUploadedBlob(created.asBlobInfo())
          .build();
    } catch (StorageException e) {
      // HTTP 412 (precondition failed) together with skipIfExists means the destination object
      // already exists and the caller asked us to treat that as a skip, not a failure.
      boolean skippedExisting = parallelUploadConfig.isSkipIfExists() && e.getCode() == 412;
      // TODO: check for FAILED_TO_START conditions
      TransferStatus status =
          skippedExisting ? TransferStatus.SKIPPED : TransferStatus.FAILED_TO_FINISH;
      return UploadResult.newBuilder(originalBlob, status).setException(e).build();
    } catch (Exception e) {
      return UploadResult.newBuilder(originalBlob, TransferStatus.FAILED_TO_FINISH)
          .setException(e)
          .build();
    }
  }
}
package com.google.cloud.storage.transfermanager;

import static com.google.common.base.Preconditions.checkNotNull;

import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.api.gax.rpc.ApiExceptions;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
import java.util.List;
import java.util.Objects;
import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
import org.checkerframework.checker.nullness.qual.NonNull;

/**
 * A parallel upload job sent to Transfer Manager.
 *
 * @see Builder
 */
public final class UploadJob {

  @NonNull private final List<ApiFuture<UploadResult>> uploadResults;

  @NonNull private final ParallelUploadConfig parallelUploadConfig;

  private UploadJob(
      @NonNull List<ApiFuture<UploadResult>> uploadResults,
      @NonNull ParallelUploadConfig parallelUploadConfig) {
    this.uploadResults = uploadResults;
    this.parallelUploadConfig = parallelUploadConfig;
  }

  /**
   * The list of {@link UploadResult UploadResults} for each upload request Transfer Manager
   * executed for this job. Note calling this method will block the invoking thread until all
   * upload requests are complete.
   *
   * @see Builder#setUploadResults(List)
   */
  public List<UploadResult> getUploadResults() {
    ApiFuture<List<UploadResult>> allResults = ApiFutures.allAsList(uploadResults);
    return ApiExceptions.callAndTranslateApiException(allResults);
  }

  /**
   * The {@link ParallelUploadConfig} used for this UploadJob.
   *
   * @see Builder#setParallelUploadConfig(ParallelUploadConfig)
   */
  public ParallelUploadConfig getParallelUploadConfig() {
    return parallelUploadConfig;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof UploadJob)) {
      return false;
    }
    UploadJob other = (UploadJob) o;
    // Both fields are non-null by construction, so Objects.equals matches direct equals.
    return Objects.equals(uploadResults, other.uploadResults)
        && Objects.equals(parallelUploadConfig, other.parallelUploadConfig);
  }

  @Override
  public int hashCode() {
    return Objects.hash(uploadResults, parallelUploadConfig);
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this)
        .add("uploadResults", uploadResults)
        .add("parallelUploadConfig", parallelUploadConfig)
        .toString();
  }

  public static Builder newBuilder() {
    return new Builder();
  }

  /**
   * Builds an instance of UploadJob
   *
   * @see UploadJob
   */
  public static final class Builder {

    private @NonNull List<ApiFuture<UploadResult>> uploadResults;

    private @MonotonicNonNull ParallelUploadConfig parallelUploadConfig;

    private Builder() {
      // Start with an empty, immutable result list; config has no sensible default and is
      // validated in build().
      this.uploadResults = ImmutableList.of();
    }

    /**
     * Sets the results for a UploadJob being performed by Transfer Manager.
     *
     * @return the instance of the Builder with UploadResults modified.
     * @see UploadJob#getUploadResults()
     */
    public Builder setUploadResults(@NonNull List<ApiFuture<UploadResult>> uploadResults) {
      this.uploadResults = ImmutableList.copyOf(uploadResults);
      return this;
    }

    /**
     * Sets the {@link ParallelUploadConfig} used for this UploadJob.
     *
     * @return the instance of the Builder with ParallelUploadConfig modified.
     * @see UploadJob#getParallelUploadConfig()
     */
    public Builder setParallelUploadConfig(@NonNull ParallelUploadConfig parallelUploadConfig) {
      this.parallelUploadConfig = parallelUploadConfig;
      return this;
    }

    /**
     * Creates a UploadJob object.
     *
     * @return {@link UploadJob}
     */
    public UploadJob build() {
      checkNotNull(uploadResults);
      checkNotNull(parallelUploadConfig);
      return new UploadJob(uploadResults, parallelUploadConfig);
    }
  }
}
+ * + * @see Builder + */ +public final class UploadResult { + + @NonNull private final BlobInfo input; + @NonNull private final TransferStatus status; + @MonotonicNonNull private final BlobInfo uploadedBlob; + @MonotonicNonNull private final Exception exception; + + private UploadResult( + @NonNull BlobInfo input, + @NonNull TransferStatus status, + BlobInfo uploadedBlob, + Exception exception) { + this.input = input; + this.status = status; + this.uploadedBlob = uploadedBlob; + this.exception = exception; + } + + /** + * The {@link BlobInfo} for the object requested for upload. + * + * @see Builder#setInput(BlobInfo) + */ + public @NonNull BlobInfo getInput() { + return input; + } + + /** + * The status of the upload operation. + * + * @see TransferStatus + * @see Builder#setStatus(TransferStatus) + */ + public @NonNull TransferStatus getStatus() { + return status; + } + + /** + * The {@link BlobInfo} for the Uploaded object. This field will only be populated if the Transfer + * was {@link TransferStatus#SUCCESS success}ful. + * + * @see Builder#setUploadedBlob(BlobInfo) + */ + public @NonNull BlobInfo getUploadedBlob() { + checkState( + status == TransferStatus.SUCCESS, + "getUploadedBlob() only valid when status is SUCCESS but status was %s", + status); + return uploadedBlob; + } + + /** + * The exception produced by a failed upload operation. 
This field will only be populated if the + * Transfer was not {@link TransferStatus#SUCCESS success}ful or {@link TransferStatus#SKIPPED + * skipped} + * + * @see Builder#setException(Exception) + */ + public @NonNull Exception getException() { + checkState( + status == TransferStatus.FAILED_TO_START || status == TransferStatus.FAILED_TO_FINISH, + "getException() is only valid when an unexpected error has occurred but status was %s", + status); + return exception; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + UploadResult that = (UploadResult) o; + return input.equals(that.input) + && status == that.status + && uploadedBlob.equals(that.uploadedBlob) + && exception.equals(that.exception); + } + + @Override + public int hashCode() { + return Objects.hash(input, status, uploadedBlob, exception); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("input", input) + .add("status", status) + .add("uploadedBlob", uploadedBlob) + .add("exception", exception) + .toString(); + } + + public static Builder newBuilder(@NonNull BlobInfo input, @NonNull TransferStatus status) { + return new Builder(input, status); + } + + /** + * Builds an instance of UploadResult + * + * @see UploadResult + */ + public static final class Builder { + + private @NonNull BlobInfo input; + private @NonNull TransferStatus status; + private @MonotonicNonNull BlobInfo uploadedBlob; + private @MonotonicNonNull Exception exception; + + private Builder(@NonNull BlobInfo input, @NonNull TransferStatus status) { + this.input = input; + this.status = status; + } + + /** + * Sets the {@link BlobInfo} for the object request for upload. This field is required. + * + * @return the Builder instance with the value for BlobInfo modified. 
+ * @see UploadResult#getInput() + */ + public Builder setInput(@NonNull BlobInfo input) { + this.input = input; + return this; + } + + /** + * Sets the Status of the Upload request. This field is required. + * + * @return the Builder instance with the value for status modified. + * @see TransferStatus + * @see UploadResult#getStatus() + */ + public Builder setStatus(@NonNull TransferStatus status) { + this.status = status; + return this; + } + + /** + * Sets the {@link BlobInfo} for the uploaded object. This field will only be populated if the + * Transfer was a {@link TransferStatus#SUCCESS SUCCESS}. + * + * @return the Builder instance with the value for uploadedBlob modified. + */ + public Builder setUploadedBlob(@NonNull BlobInfo uploadedBlob) { + this.uploadedBlob = uploadedBlob; + return this; + } + + /** + * Sets the exception produced by a failed upload operation. This field will only be populated + * if the Transfer was not {@link TransferStatus#SUCCESS success}ful or {@link + * TransferStatus#SKIPPED skipped} + * + * @see UploadResult#getException() + * @return the Builder instance with the value for exception modified. + */ + public Builder setException(@NonNull Exception exception) { + this.exception = exception; + return this; + } + + /** + * Creates an UploadResult object. 
+ * + * @return {@link UploadResult} + */ + public UploadResult build() { + checkNotNull(input); + checkNotNull(status); + if (status == TransferStatus.SUCCESS) { + checkNotNull(uploadedBlob); + } else if (status == TransferStatus.FAILED_TO_START + || status == TransferStatus.FAILED_TO_FINISH) { + checkNotNull(exception); + } + return new UploadResult(input, status, uploadedBlob, exception); + } + } +} diff --git a/java-storage/google-cloud-storage/src/main/resources/META-INF/native-image/com/google/cloud/storage/proxy-config.json b/java-storage/google-cloud-storage/src/main/resources/META-INF/native-image/com/google/cloud/storage/proxy-config.json new file mode 100644 index 000000000000..3fe6f8471fa2 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/resources/META-INF/native-image/com/google/cloud/storage/proxy-config.json @@ -0,0 +1,5 @@ +[ + { + "interfaces":["com.google.cloud.storage.spi.v1.StorageRpc"] + } +] diff --git a/java-storage/google-cloud-storage/src/main/resources/META-INF/native-image/com/google/cloud/storage/reflect-config.json b/java-storage/google-cloud-storage/src/main/resources/META-INF/native-image/com/google/cloud/storage/reflect-config.json new file mode 100644 index 000000000000..68a198fc7457 --- /dev/null +++ b/java-storage/google-cloud-storage/src/main/resources/META-INF/native-image/com/google/cloud/storage/reflect-config.json @@ -0,0 +1,4 @@ +[{ + "name":"com.google.cloud.storage.BlobInfo$ImmutableEmptyMap", + "methods":[{"name":"","parameterTypes":[] }]} +] \ No newline at end of file diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/AclTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/AclTest.java new file mode 100644 index 000000000000..a4d6253cd3ed --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/AclTest.java @@ -0,0 +1,137 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static org.junit.Assert.assertEquals; + +import com.google.api.services.storage.model.BucketAccessControl; +import com.google.api.services.storage.model.ObjectAccessControl; +import com.google.cloud.storage.Acl.Domain; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.Acl.Entity.Type; +import com.google.cloud.storage.Acl.Group; +import com.google.cloud.storage.Acl.Project; +import com.google.cloud.storage.Acl.Project.ProjectRole; +import com.google.cloud.storage.Acl.RawEntity; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Conversions.Codec; +import org.junit.Test; + +public class AclTest { + + private static final Role ROLE = Role.OWNER; + private static final Entity ENTITY = User.ofAllAuthenticatedUsers(); + private static final String ETAG = "etag"; + private static final String ID = "id"; + private static final Acl ACL = Acl.newBuilder(ENTITY, ROLE).setEtag(ETAG).setId(ID).build(); + private static final Codec<Entity, String> CODEC_ENTITY = Conversions.json().entity(); + private static final Codec<Acl, ObjectAccessControl> CODEC_ACL_OBJECT = + Conversions.json().objectAcl(); + private static final Codec<Acl, BucketAccessControl> CODEC_ACL_BUCKET = + Conversions.json().bucketAcl(); + + static { + } + + @Test + public void testBuilder() { + assertEquals(ROLE, ACL.getRole()); + assertEquals(ENTITY, ACL.getEntity()); + assertEquals(ETAG, ACL.getEtag()); + 
assertEquals(ID, ACL.getId()); + } + + @Test + public void testToBuilder() { + assertEquals(ACL, ACL.toBuilder().build()); + Acl acl = + ACL.toBuilder() + .setEtag("otherEtag") + .setId("otherId") + .setRole(Role.READER) + .setEntity(User.ofAllUsers()) + .build(); + assertEquals(Role.READER, acl.getRole()); + assertEquals(User.ofAllUsers(), acl.getEntity()); + assertEquals("otherEtag", acl.getEtag()); + assertEquals("otherId", acl.getId()); + } + + @Test + public void testToAndFromPb() { + assertEquals(ACL, CODEC_ACL_BUCKET.decode(CODEC_ACL_BUCKET.encode(ACL))); + assertEquals(ACL, CODEC_ACL_OBJECT.decode(CODEC_ACL_OBJECT.encode(ACL))); + } + + @Test + public void testDomainEntity() { + Domain acl = new Domain("d1"); + assertEquals("d1", acl.getDomain()); + assertEquals(Type.DOMAIN, acl.getType()); + String pb = CODEC_ENTITY.encode(acl); + assertEquals(acl, CODEC_ENTITY.decode(pb)); + } + + @Test + public void testGroupEntity() { + Group acl = new Group("g1"); + assertEquals("g1", acl.getEmail()); + assertEquals(Type.GROUP, acl.getType()); + String pb = CODEC_ENTITY.encode(acl); + assertEquals(acl, CODEC_ENTITY.decode(pb)); + } + + @Test + public void testUserEntity() { + User acl = new User("u1"); + assertEquals("u1", acl.getEmail()); + assertEquals(Type.USER, acl.getType()); + String pb = CODEC_ENTITY.encode(acl); + assertEquals(acl, CODEC_ENTITY.decode(pb)); + } + + @Test + public void testProjectEntity() { + Project acl = new Project(ProjectRole.VIEWERS, "p1"); + assertEquals(ProjectRole.VIEWERS, acl.getProjectRole()); + assertEquals("p1", acl.getProjectId()); + assertEquals(Type.PROJECT, acl.getType()); + String pb = CODEC_ENTITY.encode(acl); + assertEquals(acl, CODEC_ENTITY.decode(pb)); + } + + @Test + public void testRawEntity() { + Entity acl = new RawEntity("bla"); + assertEquals("bla", acl.getValue()); + assertEquals(Type.UNKNOWN, acl.getType()); + String pb = CODEC_ENTITY.encode(acl); + assertEquals(acl, CODEC_ENTITY.decode(pb)); + } + + @Test + public 
void testOf() { + Acl acl = Acl.of(User.ofAllUsers(), Role.READER); + assertEquals(User.ofAllUsers(), acl.getEntity()); + assertEquals(Role.READER, acl.getRole()); + ObjectAccessControl objectPb = CODEC_ACL_OBJECT.encode(acl); + assertEquals(acl, CODEC_ACL_OBJECT.decode(objectPb)); + BucketAccessControl bucketPb = CODEC_ACL_BUCKET.encode(acl); + assertEquals(acl, CODEC_ACL_BUCKET.decode(bucketPb)); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ApiFutureUtilsTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ApiFutureUtilsTest.java new file mode 100644 index 000000000000..5ed14f2cb890 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ApiFutureUtilsTest.java @@ -0,0 +1,64 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ApiFutureUtils.await; +import static com.google.cloud.storage.ApiFutureUtils.just; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.testing.junit4.StdErrCaptureRule; +import com.google.cloud.testing.junit4.StdOutCaptureRule; +import com.google.common.collect.ImmutableList; +import java.util.List; +import org.junit.Rule; +import org.junit.Test; + +public final class ApiFutureUtilsTest { + + @Rule public final StdOutCaptureRule stdOut = new StdOutCaptureRule(); + @Rule public final StdErrCaptureRule stdErr = new StdErrCaptureRule(); + + @Test + public void quietAllAsList_returnsFirstFailureAndDoesNotLogLaterExceptions() throws Exception { + + // define a couple futures that will fail later + SettableApiFuture b = SettableApiFuture.create(); + SettableApiFuture c = SettableApiFuture.create(); + + ImmutableList> futures = ImmutableList.of(just("a"), b, c, just("d")); + + ApiFuture> all = ApiFutureUtils.quietAllAsList(futures); + + b.setException(new Kaboom()); + c.setException(new RuntimeException()); + + assertAll( + () -> assertThrows(Kaboom.class, () -> await(all)), + () -> assertWithMessage("stdout").that(stdOut.getCapturedOutputAsUtf8String()).isEmpty(), + () -> assertWithMessage("stderr").that(stdErr.getCapturedOutputAsUtf8String()).isEmpty()); + } + + private static final class Kaboom extends RuntimeException { + private Kaboom() { + super("Kaboom!!!"); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/AsyncAppendingQueueTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/AsyncAppendingQueueTest.java new file mode 100644 index 000000000000..d4c107ef6659 --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/AsyncAppendingQueueTest.java @@ -0,0 +1,276 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.rpc.ApiExceptions; +import com.google.cloud.storage.AsyncAppendingQueue.ShortCircuitException; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.util.NoSuchElementException; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public final class AsyncAppendingQueueTest { + + private static ExecutorService exec; + + @BeforeClass + public static void 
beforeClass() { + ThreadFactory threadFactory = + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("test-aaqt-%d").build(); + exec = Executors.newCachedThreadPool(threadFactory); + } + + @AfterClass + public static void afterClass() { + if (exec != null) { + exec.shutdownNow(); + } + } + + @Test + public void attemptingToAppendAfterClose_errors() { + Executor exec = MoreExecutors.newDirectExecutorService(); + AsyncAppendingQueue q = AsyncAppendingQueue.of(exec, 3, AsyncAppendingQueueTest::agg); + + q.append(ApiFutures.immediateFuture("a")); + q.close(); + + IllegalStateException iae = + assertThrows(IllegalStateException.class, () -> q.append(ApiFutures.immediateFuture("b"))); + + assertThat(iae).hasMessageThat().contains("closed"); + } + + @Test + public void getResultPendingUntilClose() + throws ExecutionException, InterruptedException, TimeoutException { + Executor exec = MoreExecutors.newDirectExecutorService(); + AsyncAppendingQueue q = AsyncAppendingQueue.of(exec, 3, AsyncAppendingQueueTest::agg); + + q.append(ApiFutures.immediateFuture("a")); + ApiFuture result = q.getResult(); + assertThrows(TimeoutException.class, () -> result.get(3, TimeUnit.MILLISECONDS)); + q.close(); + String s = result.get(10, TimeUnit.MILLISECONDS); + + assertThat(s).isEqualTo("a"); + } + + @Test + public void getResultAlwaysReturnsTheSameFuture() { + Executor exec = MoreExecutors.newDirectExecutorService(); + try (AsyncAppendingQueue q = + AsyncAppendingQueue.of(exec, 3, AsyncAppendingQueueTest::agg)) { + + q.append(ApiFutures.immediateFuture("a")); + ApiFuture result1 = q.getResult(); + ApiFuture result2 = q.getResult(); + + assertThat(result1).isSameInstanceAs(result2); + } + } + + @Test + public void closingWithoutAppending_throwNoSuchElementException() { + Executor exec = MoreExecutors.newDirectExecutorService(); + //noinspection resource + AsyncAppendingQueue q = AsyncAppendingQueue.of(exec, 3, AsyncAppendingQueueTest::agg); + + ApiFuture result = q.getResult(); + 
NoSuchElementException nse1 = assertThrows(NoSuchElementException.class, q::close); + NoSuchElementException nse2 = + assertThrows( + NoSuchElementException.class, () -> ApiExceptions.callAndTranslateApiException(result)); + + assertThat(nse1).hasMessageThat().contains("Never appended to"); + assertThat(nse2).hasMessageThat().contains("Never appended to"); + } + + @SuppressWarnings("resource") + @Test + public void factoryMethodMustNotAccept_nullExecutor() { + assertThrows(NullPointerException.class, () -> AsyncAppendingQueue.of(null, 5, null)); + } + + @SuppressWarnings("resource") + @Test + public void factoryMethodMustNotAccept_maxElementsPerCompact_lte_1() { + Executor exec = MoreExecutors.newDirectExecutorService(); + assertThrows(IllegalArgumentException.class, () -> AsyncAppendingQueue.of(exec, 1, null)); + assertThrows(IllegalArgumentException.class, () -> AsyncAppendingQueue.of(exec, 0, null)); + assertThrows(IllegalArgumentException.class, () -> AsyncAppendingQueue.of(exec, -10, null)); + } + + @Test + public void happyPath() throws Exception { + int arity = 2; + ApiFuture result; + + try (AsyncAppendingQueue q = + AsyncAppendingQueue.of(exec, arity, AsyncAppendingQueueTest::agg)) { + + q.append(immediate("a")) + .append(immediate("b")) + .append(immediate("c")) + .append(immediate("d")) + .append(immediate("e")) + .append(immediate("f")) + .append(immediate("g")); + + result = q.getResult(); + } + assertThat(result).isNotNull(); + String s = result.get(); + assertThat(s).isEqualTo("abcdefg"); + } + + @Test + public void appendShouldShortCircuit() { + Executor exec = MoreExecutors.newDirectExecutorService(); + + AtomicInteger aggCounter = new AtomicInteger(0); + ApiFunction, String> agg = + ss -> { + aggCounter.getAndIncrement(); + return agg(ss); + }; + + AsyncAppendingQueue q = AsyncAppendingQueue.of(exec, 5, agg); + q.append(immediate("a")).append(immediate("b")).append(immediate("c")); + + q.append(ApiFutures.immediateFailedFuture(new Kaboom())); + + 
assertThrows(ShortCircuitException.class, () -> q.append(immediate("d"))); + assertThrows(CancellationException.class, q::await); + + q.close(); + + assertThat(aggCounter.get()).isEqualTo(0); + } + + @Test + public void resultFailureIfLastAppendFutureFails() { + + AsyncAppendingQueue q = AsyncAppendingQueue.of(exec, 2, AsyncAppendingQueueTest::agg); + SettableApiFuture d = SettableApiFuture.create(); + q.append(immediate("a")) + .append(immediate("b")) + .append(immediate("b")) + .append(immediate("c")) + .append(d); + + q.close(); + d.setException(new Kaboom()); + assertThrows(Kaboom.class, q::await); + } + + @Test + public void resultFailureIfFinalCompactFails() { + ApiFunction, String> agg = + ss -> { + if (ss.equals(ImmutableList.of("abc", "d"))) { + throw new Kaboom(); + } else { + return agg(ss); + } + }; + + AsyncAppendingQueue q = AsyncAppendingQueue.of(exec, 2, agg); + q.append(immediate("a")).append(immediate("b")).append(immediate("c")).append(immediate("d")); + + q.close(); + assertThrows(Kaboom.class, q::await); + } + + @Test + public void append_multipleFailingFuturesWillAlwaysReturnTheFirstFailure() { + + // define a couple futures that will fail later + SettableApiFuture b = SettableApiFuture.create(); + SettableApiFuture c = SettableApiFuture.create(); + + AsyncAppendingQueue q = AsyncAppendingQueue.of(exec, 6, AsyncAppendingQueueTest::agg); + q.append(immediate("a")).append(b).append(c).append(immediate("d")); + + b.setException(new Kaboom()); + c.setException(new RuntimeException()); + q.close(); + + assertThrows(Kaboom.class, q::await); + } + + @Test + public void shortCircuitOnlyHappensBeforeClose_affirmative() { + Executor exec = MoreExecutors.newDirectExecutorService(); + + AsyncAppendingQueue q = AsyncAppendingQueue.of(exec, 5, AsyncAppendingQueueTest::agg); + q.append(immediate("a")); + q.append(ApiFutures.immediateFailedFuture(new Kaboom())); + + assertThrows(ShortCircuitException.class, () -> q.append(immediate("d"))); + 
assertThrows(CancellationException.class, q::await); + + q.close(); + } + + @Test + public void shortCircuitOnlyHappensBeforeClose_negative() { + + AsyncAppendingQueue q = AsyncAppendingQueue.of(exec, 5, AsyncAppendingQueueTest::agg); + q.append(immediate("a")); + SettableApiFuture d = SettableApiFuture.create(); + q.append(d); + + q.close(); + d.setException(new Kaboom()); + + assertThrows(Kaboom.class, q::await); + } + + static String agg(ImmutableList ss) { + return ss.stream().reduce("", String::concat); + } + + static ApiFuture immediate(String s) { + return ApiFutures.immediateFuture(s); + } + + private static final class Kaboom extends RuntimeException { + private Kaboom() { + super("Kaboom!!!"); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/AutoClosableFixture.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/AutoClosableFixture.java new file mode 100644 index 000000000000..01f0d4252cc3 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/AutoClosableFixture.java @@ -0,0 +1,65 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.common.base.Preconditions; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.rules.TestRule; +import org.junit.runner.Description; +import org.junit.runners.model.Statement; + +final class AutoClosableFixture<@NonNull T extends AutoCloseable> implements TestRule { + private final ThrowingSupplier supplier; + + @Nullable private T instance; + + private AutoClosableFixture(ThrowingSupplier supplier) { + this.supplier = supplier; + } + + @NonNull + public T getInstance() { + Preconditions.checkState(instance != null, "getInstance() called outside active lifecycle."); + return instance; + } + + @Override + public Statement apply(Statement base, Description description) { + return new Statement() { + @Override + public void evaluate() throws Throwable { + try (T t = supplier.get()) { + instance = t; + base.evaluate(); + } finally { + instance = null; + } + } + }; + } + + static <@NonNull T extends AutoCloseable> AutoClosableFixture of( + ThrowingSupplier supplier) { + return new AutoClosableFixture<>(supplier); + } + + @FunctionalInterface + interface ThrowingSupplier<@NonNull T> { + T get() throws Exception; + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BackoffTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BackoffTest.java new file mode 100644 index 000000000000..4c128a8c5d17 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BackoffTest.java @@ -0,0 +1,242 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; +import static java.time.Duration.ZERO; +import static java.time.Duration.ofSeconds; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.storage.Backoff.BackoffDuration; +import com.google.cloud.storage.Backoff.BackoffResult; +import com.google.cloud.storage.Backoff.BackoffResults; +import com.google.cloud.storage.Backoff.Jitterer; +import java.time.Duration; +import org.junit.Test; + +public final class BackoffTest { + + @Test + public void interruptedBackoffOnlyAddsActualElapsedTimeToCumulative() { + Backoff backoff = + Backoff.newBuilder() + .setInitialBackoff(Duration.ofSeconds(2)) + .setMaxBackoff(Duration.ofSeconds(11)) + // this value is critical, if instead it is 35 seconds, the test can still succeed + // even if the interrupted backoff duration isn't corrected + .setTimeout(Duration.ofSeconds(34)) + .setJitterer(Jitterer.noJitter()) + .setRetryDelayMultiplier(2.0) + .build(); + + // operation failed after 1s + BackoffResult r1 = backoff.nextBackoff(Duration.ofSeconds(1)); + // start backoff of 2s + assertThat(r1).isEqualTo(BackoffDuration.of(Duration.ofSeconds(2))); + // higher level failures happens only 300ms into our 2s + BackoffResult r2 = backoff.nextBackoff(Duration.ofMillis(300)); + // backoff 4s (previous was 2s w/ 2.0 multiplier = 4s) + // even though the previous backoff duration wasn't fully consumed, still use it as the basis + // for the next backoff + 
assertThat(r2).isEqualTo(BackoffDuration.of(Duration.ofSeconds(4))); + // another failure 3s after the 4s backoff finished + BackoffResult r3 = backoff.nextBackoff(Duration.ofSeconds(7)); + assertThat(r3).isEqualTo(BackoffDuration.of(Duration.ofSeconds(8))); + // another failure 5s after the 8s backoff finished + BackoffResult r4 = backoff.nextBackoff(Duration.ofSeconds(13)); + // 11s backoff because 11s is maxBackoff + assertThat(r4).isEqualTo(BackoffDuration.of(Duration.ofSeconds(11))); + // another failure 7s after the 11s backoff finished + BackoffResult r5 = backoff.nextBackoff(Duration.ofSeconds(18)); + // at this point it has been ~39s, which is more than our timeout of 34s + assertThat(r5).isEqualTo(BackoffResults.EXHAUSTED); + } + + @Test + public void simple() { + Backoff backoff = defaultBackoff(); + + final BackoffResult r01 = backoff.nextBackoff(Duration.ofSeconds(2)); + assertThat(r01).isEqualTo(BackoffDuration.of(Duration.ofSeconds(2))); + BackoffResult r02 = backoff.nextBackoff(((BackoffDuration) r01).getDuration()); + assertThat(r02).isEqualTo(BackoffDuration.of(Duration.ofSeconds(4))); + BackoffResult r03 = backoff.nextBackoff(((BackoffDuration) r02).getDuration()); + assertThat(r03).isEqualTo(BackoffDuration.of(Duration.ofSeconds(8))); + BackoffResult r04 = backoff.nextBackoff(((BackoffDuration) r03).getDuration()); + assertThat(r04).isEqualTo(BackoffDuration.of(Duration.ofSeconds(16))); + BackoffResult r05 = backoff.nextBackoff(((BackoffDuration) r04).getDuration()); + assertThat(r05).isEqualTo(BackoffDuration.of(Duration.ofSeconds(32))); + BackoffResult r06 = backoff.nextBackoff(((BackoffDuration) r05).getDuration()); + assertThat(r06).isEqualTo(BackoffDuration.of(Duration.ofSeconds(57))); + BackoffResult r07 = backoff.nextBackoff(((BackoffDuration) r06).getDuration()); + assertThat(r07).isEqualTo(BackoffDuration.of(Duration.ofSeconds(57))); + BackoffResult r08 = backoff.nextBackoff(((BackoffDuration) r07).getDuration()); + 
assertThat(r08).isEqualTo(BackoffDuration.of(Duration.ofSeconds(57))); + BackoffResult r09 = backoff.nextBackoff(((BackoffDuration) r08).getDuration()); + assertThat(r09).isEqualTo(BackoffDuration.of(Duration.ofSeconds(57))); + BackoffResult r10 = backoff.nextBackoff(((BackoffDuration) r09).getDuration()); + assertThat(r10).isEqualTo(BackoffDuration.of(Duration.ofSeconds(57))); + BackoffResult r11 = backoff.nextBackoff(((BackoffDuration) r10).getDuration()); + assertThat(r11).isEqualTo(BackoffDuration.of(Duration.ofSeconds(57))); + BackoffResult r12 = backoff.nextBackoff(((BackoffDuration) r11).getDuration()); + assertThat(r12).isEqualTo(BackoffDuration.of(Duration.ofSeconds(14))); + BackoffResult r13 = backoff.nextBackoff(((BackoffDuration) r12).getDuration()); + assertThat(r13).isEqualTo(BackoffResults.EXHAUSTED); + } + + @Test + public void backoffDuration_min_of_backoff_maxBackoff_remainingFromTimeout() { + Backoff backoff = defaultBackoff(); + + Duration elapsed = Duration.ofMinutes(6).plusSeconds(58); + assertThat(backoff.nextBackoff(elapsed)).isEqualTo(BackoffDuration.of(Duration.ofSeconds(2))); + assertThat(backoff.nextBackoff(Duration.ofSeconds(2))).isEqualTo(BackoffResults.EXHAUSTED); + } + + @Test + public void elapsedDurationProvidedToNextBackoffMustBeGtEqZero() { + Backoff backoff = defaultBackoff(); + + Duration elapsed = Duration.ofSeconds(-1); + IllegalArgumentException iae = + assertThrows(IllegalArgumentException.class, () -> backoff.nextBackoff(elapsed)); + + assertThat(iae).hasMessageThat().isEqualTo("elapsed must be >= PT0S (PT-1S >= PT0S)"); + } + + @Test + public void resetWorks() { + Backoff backoff = + Backoff.newBuilder() + .setInitialBackoff(Duration.ofSeconds(2)) + .setMaxBackoff(Duration.ofSeconds(5)) + .setTimeout(Duration.ofSeconds(6)) + .setJitterer(Jitterer.noJitter()) + .setRetryDelayMultiplier(2.0) + .build(); + + assertThat(backoff.nextBackoff(Duration.ofSeconds(4))) + .isEqualTo(BackoffDuration.of(Duration.ofSeconds(2))); + 
assertThat(backoff.nextBackoff(Duration.ofSeconds(2))).isEqualTo(BackoffResults.EXHAUSTED); + backoff.reset(); + assertThat(backoff.nextBackoff(Duration.ofSeconds(10))).isEqualTo(BackoffResults.EXHAUSTED); + } + + @Test + public void onceExhaustedStaysExhaustedUntilReset() { + Backoff backoff = + Backoff.newBuilder() + .setInitialBackoff(Duration.ofSeconds(2)) + .setMaxBackoff(Duration.ofSeconds(5)) + .setTimeout(Duration.ofSeconds(5)) + .setJitterer(Jitterer.noJitter()) + .setRetryDelayMultiplier(1.0) + .build(); + + assertThat(backoff.nextBackoff(Duration.ofSeconds(5))).isEqualTo(BackoffResults.EXHAUSTED); + assertThat(backoff.nextBackoff(ZERO)).isEqualTo(BackoffResults.EXHAUSTED); + backoff.reset(); + assertThat(backoff.nextBackoff(ZERO)).isEqualTo(BackoffDuration.of(Duration.ofSeconds(2))); + } + + /** + * If a next computed backoff would exceed the timeout, truncate the backoff to the amount of time + * remaining until timeout. + * + *

This is primarily here to preserve behavior of {@link com.google.cloud.RetryHelper}. + */ + @Test + public void ifANextBackoffWouldExceedTheTimeoutTheBackoffDurationShouldBeTruncated_single() { + Backoff backoff = + Backoff.newBuilder() + .setInitialBackoff(Duration.ofSeconds(2)) + .setMaxBackoff(Duration.ofSeconds(6)) + .setTimeout(Duration.ofSeconds(24)) + .setJitterer(Jitterer.noJitter()) + .setRetryDelayMultiplier(2.0) + .build(); + + assertThat(backoff.nextBackoff(Duration.ofSeconds(22))) + .isEqualTo(BackoffDuration.of(ofSeconds(2))); + assertThat(backoff.nextBackoff(Duration.ofSeconds(2))).isEqualTo(BackoffResults.EXHAUSTED); + } + + /** + * If a next computed backoff would exceed the timeout, truncate the backoff to the amount of time + * remaining until timeout. + * + *

This is primarily here to preserve behavior of {@link com.google.cloud.RetryHelper}. + */ + @Test + public void ifANextBackoffWouldExceedTheTimeoutTheBackoffDurationShouldBeTruncated_multiple() { + Duration timeout = ofSeconds(24); + Backoff backoff = + Backoff.newBuilder() + .setInitialBackoff(Duration.ofSeconds(2)) + .setMaxBackoff(Duration.ofSeconds(6)) + .setTimeout(timeout) + .setJitterer(Jitterer.noJitter()) + .setRetryDelayMultiplier(2.0) + .build(); + + assertThat(backoff.getCumulativeBackoff()).isEqualTo(Duration.ZERO); + BackoffResult r1 = backoff.nextBackoff(Duration.ofSeconds(21)); + assertThat(backoff.getCumulativeBackoff()).isEqualTo(Duration.ofSeconds(21)); + assertThat(r1).isEqualTo(BackoffDuration.of(Duration.ofSeconds(2))); + BackoffResult r2 = backoff.nextBackoff(((BackoffDuration) r1).getDuration()); + assertThat(backoff.getCumulativeBackoff()).isEqualTo(Duration.ofSeconds(23)); + assertThat(r2).isEqualTo(BackoffDuration.of(Duration.ofSeconds(1))); + BackoffResult r3 = backoff.nextBackoff(((BackoffDuration) r2).getDuration()); + assertThat(backoff.getCumulativeBackoff()).isEqualTo(Duration.ofSeconds(24)); + assertThat(r3).isEqualTo(BackoffResults.EXHAUSTED); + } + + @Test + public void noJitter_alwaysReturnsInput() { + Jitterer jitterer = Jitterer.noJitter(); + Duration _5s = Duration.ofSeconds(5); + Duration _10s = Duration.ofSeconds(10); + Duration _30s = Duration.ofSeconds(30); + assertThat(jitterer.jitter(_5s)).isEqualTo(_5s); + assertThat(jitterer.jitter(_10s)).isEqualTo(_10s); + assertThat(jitterer.jitter(_30s)).isEqualTo(_30s); + } + + @Test + public void threadLocalRandomJitter_works() { + Jitterer jitterer = Jitterer.threadLocalRandom(); + Duration min = Duration.ofNanos(-1); + Duration _5s = Duration.ofSeconds(5); + Duration _10s = Duration.ofSeconds(10); + Duration _30s = Duration.ofSeconds(30); + assertThat(jitterer.jitter(_5s)).isGreaterThan(min); + assertThat(jitterer.jitter(_10s)).isGreaterThan(min); + 
assertThat(jitterer.jitter(_30s)).isGreaterThan(min); + + assertThat(jitterer.jitter(min)).isEqualTo(min); + } + + private static Backoff defaultBackoff() { + return Backoff.newBuilder() + .setInitialBackoff(Duration.ofSeconds(2)) + .setMaxBackoff(Duration.ofSeconds(57)) + .setTimeout(Duration.ofMinutes(7)) + .setJitterer(Jitterer.noJitter()) + .setRetryDelayMultiplier(2.0) + .build(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BackwardCompatibilityUtilsTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BackwardCompatibilityUtilsTest.java new file mode 100644 index 000000000000..b19c7a28ff61 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BackwardCompatibilityUtilsTest.java @@ -0,0 +1,100 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.api.services.storage.model.Bucket.Lifecycle.Rule; +import com.google.cloud.storage.BucketInfo.AgeDeleteRule; +import com.google.cloud.storage.BucketInfo.CreatedBeforeDeleteRule; +import com.google.cloud.storage.BucketInfo.DeleteRule; +import com.google.cloud.storage.BucketInfo.IsLiveDeleteRule; +import com.google.cloud.storage.BucketInfo.LifecycleRule; +import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleAction; +import com.google.cloud.storage.BucketInfo.NumNewerVersionsDeleteRule; +import com.google.cloud.storage.BucketInfo.RawDeleteRule; +import com.google.cloud.storage.Conversions.Codec; +import org.junit.Test; + +public final class BackwardCompatibilityUtilsTest { + private static final Codec codec = + BackwardCompatibilityUtils.deleteRuleCodec.andThen(Conversions.json().lifecycleRule()); + + @Test + public void testDeleteRules_conversionRoundTrip_age() { + AgeDeleteRule ageRule = new AgeDeleteRule(10); + assertEquals(10, ageRule.getDaysToLive()); + assertEquals(DeleteRule.Type.AGE, ageRule.getType()); + verifyConversionRoundTrip(ageRule); + } + + @Test + public void testDeleteRules_conversionRoundTrip_createBefore() { + CreatedBeforeDeleteRule createBeforeRule = new CreatedBeforeDeleteRule(1); + assertEquals(1, createBeforeRule.getTimeMillis()); + assertEquals(DeleteRule.Type.CREATE_BEFORE, createBeforeRule.getType()); + verifyConversionRoundTrip(createBeforeRule); + } + + @Test + public void testDeleteRules_conversionRoundTrip_numNewerVersions() { + NumNewerVersionsDeleteRule versionsRule = new NumNewerVersionsDeleteRule(2); + assertEquals(2, versionsRule.getNumNewerVersions()); + assertEquals(DeleteRule.Type.NUM_NEWER_VERSIONS, versionsRule.getType()); + verifyConversionRoundTrip(versionsRule); + } + + @Test + public void 
testDeleteRules_conversionRoundTrip_isLive() { + IsLiveDeleteRule isLiveRule = new IsLiveDeleteRule(true); + assertTrue(isLiveRule.isLive()); + assertEquals(DeleteRule.Type.IS_LIVE, isLiveRule.getType()); + verifyConversionRoundTrip(isLiveRule); + } + + @Test + public void testDeleteRules_conversionRoundTrip_rawRule() { + Rule rule = new Rule().set("a", "b"); + RawDeleteRule rawRule = new RawDeleteRule(rule); + verifyConversionRoundTrip(rawRule); + } + + @Test + public void testDeleteRules_conversionRoundTrip_unsupportedRule() { + Rule unsupportedRule = + new Rule().setAction(new Rule.Action().setType("This action doesn't exist")); + // if this doesn't throw an exception, unsupported rules work + codec.decode(unsupportedRule); + } + + @Test + public void testRuleMappingIsCorrect_setDeleteRules_nonEmpty() { + DeleteRule deleteRule = new AgeDeleteRule(5); + LifecycleRule lifecycleRule = BackwardCompatibilityUtils.deleteRuleCodec.encode(deleteRule); + + assertThat(lifecycleRule.getAction()).isEqualTo(LifecycleAction.newDeleteAction()); + assertThat(lifecycleRule.getCondition().getAge()).isEqualTo(5); + } + + private void verifyConversionRoundTrip(DeleteRule delRule) { + Rule encode = codec.encode(delRule); + DeleteRule decode = codec.decode(encode); + assertThat(decode).isEqualTo(delRule); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BaseConvertablePropertyTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BaseConvertablePropertyTest.java new file mode 100644 index 000000000000..549c62a4937a --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BaseConvertablePropertyTest.java @@ -0,0 +1,152 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.JqwikTest.report; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.api.client.json.GenericJson; +import com.google.cloud.storage.Conversions.Codec; +import com.google.protobuf.Message; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.Arrays; +import java.util.Optional; +import net.jqwik.api.Example; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; +import net.jqwik.api.providers.TypeUsage; +import org.checkerframework.checker.nullness.qual.Nullable; + +abstract class BaseConvertablePropertyTest< + ModelT, ProtoT extends Message, ApiaryT extends GenericJson> { + + private final TypeUsage modelTType; + private final TypeUsage protoTType; + private final TypeUsage apiaryTType; + + BaseConvertablePropertyTest() { + TypeUsage baseTypeUsage = findBaseTypeUsage(this.getClass()); + modelTType = baseTypeUsage.getTypeArgument(0); + protoTType = baseTypeUsage.getTypeArgument(1); + apiaryTType = baseTypeUsage.getTypeArgument(2); + } + + /** Provide the codec instance used to convert between {@code ModelT} and {@code ProtoT} */ + Codec grpcCodec() { + GrpcConversions instance = Conversions.grpc(); + return resolveCodec(instance, protoTType); + } + + /** Provide the codec instance used to convert between {@code ModelT} and {@code ProtoT} */ + Codec apiaryCodec() { + JsonConversions instance = Conversions.json(); + return 
resolveCodec(instance, apiaryTType); + } + + /** Report on detected edge cases for {@code ProtoT} */ + @Example + final void edgeCases() { + TypeUsage baseTypeUsage = findBaseTypeUsage(this.getClass()); + TypeUsage protoTType = baseTypeUsage.getTypeArgument(1); + report(protoTType); + } + + /** + * Ensure that {@code @ForAll ProtoT} the codec provided by {@link #grpcCodec} can round trip each + * {@code ProtoT} such that the provided value is equal to the round tripped value. + * + *

Note: round trip means A -> B -> A, in this case ProtoT -> ModelT -> ProtoT + */ + @Property + final void codecRoundTrip(@ForAll ProtoT p) { + Codec codec = grpcCodec(); + ModelT model = codec.decode(p); + ProtoT proto = codec.encode(model); + + assertThat(proto).isEqualTo(p); + } + + /** + * Ensure that {@code @ForAll ProtoT} the conversion chain ProtoT -> ModelT -> ApiaryT -> ModelT + * -> ProtoT results in an equal value. + * + *

This test is intended to ensue compatibility of our conversions across both gRPC and apiary + * models. + */ + @Property + final void codecCompatibilityRoundTrip(@ForAll ProtoT p) { + Codec codecG = grpcCodec(); + Codec codecA = apiaryCodec(); + + ModelT model = codecG.decode(p); + + ApiaryT apiary = codecA.encode(model); + ModelT model2 = codecA.decode(apiary); + + ProtoT actual = codecG.encode(model2); + + assertThat(actual).isEqualTo(p); + } + + private static TypeUsage findBaseTypeUsage( + @SuppressWarnings("rawtypes") Class c) { + TypeUsage curr = TypeUsage.of(c); + while (curr.getRawType() != BaseConvertablePropertyTest.class) { + Optional superclass = curr.getSuperclass(); + if (!superclass.isPresent()) { + throw new IllegalStateException( + "Unable to locate base class" + BaseConvertablePropertyTest.class.getName()); + } + curr = superclass.get(); + } + return curr; + } + + @SuppressWarnings("unchecked") + @Nullable + private Codec resolveCodec(Object instance, TypeUsage xType) { + Method[] declaredMethods = instance.getClass().getDeclaredMethods(); + Method method = findCodecMethod(declaredMethods, instance.getClass(), xType); + try { + return (Codec) method.invoke(instance); + } catch (IllegalAccessException | InvocationTargetException e) { + throw new AssertionError("error attempting to resolve codec", e); + } + } + + private Method findCodecMethod( + Method[] declaredMethods, Class conversionsClass, TypeUsage serializedType) { + Optional collect = + Arrays.stream(declaredMethods) + .filter(m -> m.getReturnType().isAssignableFrom(Codec.class)) + .filter( + m -> { + TypeUsage returnType = TypeUsage.forType(m.getGenericReturnType()); + return modelTType.equals(returnType.getTypeArgument(0)) + && serializedType.equals(returnType.getTypeArgument(1)); + }) + .findFirst(); + assertWithMessage( + "Unable to locate Codec<%s, %s> method in %s", + modelTType, serializedType, conversionsClass) + .that(collect.isPresent()) + .isTrue(); + return collect.get(); + } +} 
diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiAppendableUnbufferedWritableByteChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiAppendableUnbufferedWritableByteChannelTest.java
new file mode 100644
index 000000000000..0364ddaf18c2
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiAppendableUnbufferedWritableByteChannelTest.java
@@ -0,0 +1,121 @@
/*
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.common.truth.Truth.assertThat;

import com.google.api.core.SettableApiFuture;
import com.google.api.gax.grpc.GrpcCallContext;
import com.google.cloud.storage.BidiUploadState.AppendableUploadState;
import com.google.cloud.storage.ITAppendableUploadFakeTest.FakeStorage;
import com.google.cloud.storage.it.ChecksummedTestContent;
import com.google.common.collect.ImmutableList;
import com.google.storage.v2.BidiWriteObjectResponse;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.time.OffsetDateTime;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.atomic.AtomicLong;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;

/**
 * Unit test for {@link BidiAppendableUnbufferedWritableByteChannel} driven by an in-process fake
 * stream whose acks are delivered asynchronously.
 */
public final class BidiAppendableUnbufferedWritableByteChannelTest {
  @Rule public final TestName testName = new TestName();

  /**
   * Write 27 bytes through a channel whose outbound queue only holds 16 bytes, then finalize.
   * The fake server delays acks for offsets 4 and 8 to simulate ack latency; the test verifies
   * that the finalizing request is only sent once every byte has actually been consumed, and that
   * finish_write is sent at the full 27-byte offset.
   */
  @Test
  public void appendAndFinalizeOnlyPerformedIfAllBytesConsumed() throws IOException {
    ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
    ChecksummedTestContent ctc = ChecksummedTestContent.gen(27);
    // maxBytes = 16: the channel can only have 16 unacknowledged bytes outstanding at once.
    AppendableUploadState state =
        BidiUploadState.appendableNew(
            BidiUploadTest.appendRequestNew,
            GrpcCallContext::createDefault,
            16,
            SettableApiFuture.create(),
            Crc32cValue.zero());
    // Records the offset at which the server saw finish_write; -1 until then.
    AtomicLong finishWriteOffset = new AtomicLong(-1);
    BidiUploadStreamingStream stream =
        new BidiUploadStreamingStream(
            state,
            executor,
            BidiUploadTestUtils.adaptOnlySend(
                respond ->
                    request -> {
                      if (request.getFinishWrite()) {
                        finishWriteOffset.set(
                            request.getWriteOffset()
                                + request.getChecksummedData().getContent().size());
                      }
                      // Respond asynchronously, keyed off the request's write offset.
                      executor.submit(
                          () -> {
                            switch ((int) request.getWriteOffset()) {
                              case 0:
                                respond.onResponse(BidiUploadTest.resourceWithSize(0));
                                break;
                              case 4:
                              case 8:
                                // do not ack any bytes until we receive 16, this simulates
                                // latency on the bytes being ack'd.
                                break;
                              case 12:
                                respond.onResponse(BidiUploadTestUtils.incremental(8));
                                break;
                              case 16:
                                respond.onResponse(BidiUploadTestUtils.incremental(12));
                                break;
                              case 20:
                                respond.onResponse(BidiUploadTestUtils.incremental(16));
                                break;
                              case 24:
                                // Final segment: respond with the full resource marked finalized.
                                BidiWriteObjectResponse.Builder b =
                                    BidiUploadTest.resourceFor(ctc).toBuilder();
                                b.getResourceBuilder()
                                    .setFinalizeTime(
                                        Conversions.grpc()
                                            .timestampCodec
                                            .encode(OffsetDateTime.now()));
                                respond.onResponse(b.build());
                                break;
                              default:
                                respond.onError(
                                    FakeStorage.unexpectedRequest(request, ImmutableList.of()));
                                break;
                            }
                          });
                    }),
            3,
            RetryContext.neverRetry());
    // Segments of at most 4 bytes, at most 2 segments per flush.
    ChunkSegmenter chunkSegmenter =
        new ChunkSegmenter(Hasher.enabled(), ByteStringStrategy.copy(), 4, 2);
    BidiAppendableUnbufferedWritableByteChannel channel =
        new BidiAppendableUnbufferedWritableByteChannel(stream, chunkSegmenter, 4, 0);

    ByteBuffer buf = ctc.asByteBuffer();
    int written1 = channel.write(buf);
    // fill up the outbound queue
    assertThat(written1).isEqualTo(16);

    // asynchronously bytes will be ack'd 4 at a time, eventually there will be enough space in the
    // outbound queue to allow writeAndClose to start consuming bytes.
    channel.nextWriteShouldFinalize();
    int written2 = channel.writeAndClose(buf);
    // 27 total - 16 already written = 11 remaining bytes consumed by writeAndClose.
    assertThat(written2).isEqualTo(11);
    assertThat(finishWriteOffset.get()).isEqualTo(ctc.length());
  }
}
diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadStreamingStreamPropertyTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadStreamingStreamPropertyTest.java
new file mode 100644
index 000000000000..4fcbd070c1fd
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadStreamingStreamPropertyTest.java
@@ -0,0 +1,459 @@
/*
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.cloud.storage.BidiUploadTestUtils.incremental;
import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth.assertWithMessage;
import static org.junit.Assert.fail;

import com.google.api.core.SettableApiFuture;
import com.google.api.gax.grpc.GrpcCallContext;
import com.google.cloud.storage.BidiUploadState.AppendableUploadState;
import com.google.cloud.storage.it.ChecksummedTestContent;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.Message;
import com.google.storage.v2.AppendObjectSpec;
import com.google.storage.v2.BidiWriteObjectRequest;
import com.google.storage.v2.ChecksummedData;
import com.google.storage.v2.Object;
import com.google.storage.v2.WriteObjectSpec;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import net.jqwik.api.Arbitraries;
import net.jqwik.api.Arbitrary;
import net.jqwik.api.Combinators;
import net.jqwik.api.Example;
import net.jqwik.api.ForAll;
import net.jqwik.api.Property;
import net.jqwik.api.Provide;
import net.jqwik.api.Tuple;
import net.jqwik.api.providers.TypeUsage;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Property tests for {@code BidiUploadState.sendVia}: given an arbitrary queue of enqueued
 * requests and an arbitrary "last sent" index, draining the queue must produce a contiguous,
 * properly-offset message sequence (with finish_write, if present, strictly last).
 *
 * <p>The {@code _s1}..{@code _s6} examples pin down previously-observed scenarios; the
 * {@code @Property} method generalizes them.
 */
final class BidiUploadStreamingStreamPropertyTest {

  /** Report the edge cases jqwik detects for the generated scenarios. */
  @Example
  public void edgeCases() {
    JqwikTest.report(
        TypeUsage.of(ScenarioWithLastWrittenRequest.class),
        arbitrarySendViaScenarioWithLastWrittenRequest());
  }

  @Example
  public void sendViaShouldCompactWithLastWrittenRequest_s1() {

    ScenarioWithLastWrittenRequest scenario =
        makeScenario(
            /* lastSentRequest= */ 0,
            /* beginOffset= */ 0L,
            BidiUploadTest.appendRequestNew,
            ImmutableList.of(ChecksummedTestContent.gen(1).asChecksummedData()),
            BidiUploadTestUtils.finishAt(0));
    sendViaShouldCompactWithLastWrittenRequest(scenario);
  }

  @Example
  public void sendViaShouldCompactWithLastWrittenRequest_s2() {
    ScenarioWithLastWrittenRequest scenario =
        makeScenario(
            /* lastSentRequest= */ 2,
            /* beginOffset= */ 0L,
            BidiUploadTest.appendRequestNew,
            ImmutableList.of(
                ChecksummedTestContent.gen(1).asChecksummedData(),
                ChecksummedTestContent.gen(1).asChecksummedData()),
            BidiUploadTestUtils.finishAt(2));
    sendViaShouldCompactWithLastWrittenRequest(scenario);
  }

  @Example
  public void sendViaShouldCompactWithLastWrittenRequest_s3() {
    // Non-zero begin offset with three 1-byte data messages.
    ScenarioWithLastWrittenRequest scenario =
        makeScenario(
            /* lastSentRequest= */ 0,
            /* beginOffset= */ 1L,
            BidiUploadTest.appendRequestNew,
            ImmutableList.of(
                ChecksummedTestContent.gen(1).asChecksummedData(),
                ChecksummedTestContent.gen(1).asChecksummedData(),
                ChecksummedTestContent.gen(1).asChecksummedData()),
            BidiUploadTestUtils.finishAt(4));
    sendViaShouldCompactWithLastWrittenRequest(scenario);
  }

  @Example
  public void sendViaShouldCompactWithLastWrittenRequest_s4() {
    // Hand-built message list beginning with a flush+state_lookup rather than data.
    ScenarioWithLastWrittenRequest scenario =
        new ScenarioWithLastWrittenRequest(
            /* lastSentRequest= */ -1,
            /* beginOffset= */ 10L,
            BidiUploadTest.appendRequestNew,
            ImmutableList.of(
                BidiWriteObjectRequest.newBuilder()
                    .setWriteOffset(10)
                    .setFlush(true)
                    .setStateLookup(true)
                    .build(),
                ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(10).build(),
                ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(11).build(),
                BidiUploadTestUtils.finishAt(12)));

    sendViaShouldCompactWithLastWrittenRequest(scenario);
  }

  @Example
  public void sendViaShouldCompactWithLastWrittenRequest_s5() {
    // Queue where the first message (write_object_spec) is itself still unsent.
    ScenarioWithLastWrittenRequest scenario =
        new ScenarioWithLastWrittenRequest(
            /* lastSentRequest= */ -1,
            /* beginOffset= */ 0,
            BidiUploadTest.appendRequestNew,
            ImmutableList.of(
                BidiUploadTest.appendRequestNew,
                ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(0).build(),
                ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(1).build(),
                ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(2).build(),
                BidiUploadTestUtils.finishAt(3)));

    sendViaShouldCompactWithLastWrittenRequest(scenario);
  }

  @Example
  public void sendViaShouldCompactWithLastWrittenRequest_s6() {
    ScenarioWithLastWrittenRequest scenario =
        new ScenarioWithLastWrittenRequest(
            /* lastSentRequest= */ -1,
            /* beginOffset= */ 1,
            BidiUploadTest.appendRequestNew,
            ImmutableList.of(
                BidiUploadTest.appendRequestNew,
                ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(1).build(),
                ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(2).build(),
                ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(3).build(),
                ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(4).build(),
                ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(5).build(),
                BidiUploadTestUtils.finishAt(6)));

    sendViaShouldCompactWithLastWrittenRequest(scenario);
  }

  @Example
  public void makeScenario_4() {

    ScenarioWithLastWrittenRequest scenario =
        makeScenario(
            /* lastSentRequest= */ 1,
            /* beginOffset= */ 0L,
            BidiUploadTest.appendRequestNew,
            ImmutableList.of(ChecksummedTestContent.gen(1).asChecksummedData()),
            BidiUploadTestUtils.finishAt(1));
    sendViaShouldCompactWithLastWrittenRequest(scenario);
  }

  @Example
  public void makeScenario_5() {

    ScenarioWithLastWrittenRequest scenario =
        makeScenario(
            /* lastSentRequest= */ 0,
            /* beginOffset= */ 0L,
            BidiUploadTest.appendRequestNew,
            ImmutableList.of(
                ChecksummedTestContent.gen(1).asChecksummedData(),
                ChecksummedTestContent.gen(1).asChecksummedData()),
            BidiUploadTestUtils.finishAt(2));
    sendViaShouldCompactWithLastWrittenRequest(scenario);
  }

  /** A queue containing only the first message should drain to exactly that message. */
  @Example
  public void sendViaWithOnlyAFirstMessage_shouldSendCleanly() {
    AppendableUploadState state =
        BidiUploadState.appendableNew(
            BidiUploadTest.appendRequestNew,
            GrpcCallContext::createDefault,
            37,
            SettableApiFuture.create(),
            Crc32cValue.zero());
    state.updateStateFromResponse(incremental(0));
    assertThat(state.offer(BidiUploadTest.appendRequestNew)).isTrue();

    List expected = ImmutableList.of(BidiUploadTest.appendRequestNew);

    List actual = sinkToList(state);

    String actualS = fmt(actual);
    String expectedS = fmt(expected);

    assertThat(actualS).isEqualTo(expectedS);
  }

  /**
   * Core property: after offering {@code s.messages} and marking {@code s.lastSentRequestIndex}
   * as already sent, draining via sendVia must emit only the unsent suffix as a sane (contiguous,
   * finish-last) sequence, and must advance lastSentRequestIndex to the end of the queue.
   */
  @Property(tries = 1_000)
  public void sendViaShouldCompactWithLastWrittenRequest(
      @ForAll("sendViaScenarioWithLastWrittenRequest") ScenarioWithLastWrittenRequest s) {
    AppendableUploadState state = s.makeBidiUploadState();
    assertThat(state.onResponse(incremental(s.beginFromOffset))).isNull();
    for (BidiWriteObjectRequest m : s.messages) {
      assertThat(state.offer(m)).isTrue();
    }
    state.lastSentRequestIndex = s.lastSentRequestIndex;

    List actual = sinkToList(state);
    assertThat(state.lastSentRequestIndex).isEqualTo(s.messages.size() - 1);

    if (actual.isEmpty()) {
      // nothing emitted is only legal when everything was already sent
      assertThat(s.lastSentRequestIndex).isEqualTo(s.messages.size() - 1);
    }

    long writeOffset = getExpectedBeginOffset(s);
    assertSaneMessageSequence(actual, s.lastSentRequestIndex, writeOffset);
  }

  /** Drain {@code state} through sendVia, collecting every emitted request into a list. */
  static List sinkToList(BidiUploadState state) {
    ImmutableList.Builder b = ImmutableList.builder();
    state.sendVia(b::add);
    return b.build();
  }

  /**
   * Compute the write offset the first emitted message should carry: the offset of the first
   * unsent message, or {@code beginFromOffset} when that message is a first_message.
   */
  private static long getExpectedBeginOffset(ScenarioWithLastWrittenRequest s) {
    long writeOffset = s.beginFromOffset;
    if (!s.messages.isEmpty()) {
      // walk backwards to the first unsent message; its offset wins
      for (int i = s.messages.size() - 1; i > s.lastSentRequestIndex; i--) {
        BidiWriteObjectRequest msg = s.messages.get(i);
        if (msg.hasOneof(BidiUploadState.FIRST_MESSAGE_DESCRIPTOR)) {
          writeOffset = s.beginFromOffset;
        } else {
          writeOffset = msg.getWriteOffset();
        }
      }
    }
    return writeOffset;
  }

  /**
   * Assert the emitted sequence is sane: a first_message only appears if nothing was sent yet,
   * offsets are contiguous, and finish_write (if present) is the last message.
   */
  private static void assertSaneMessageSequence(
      List actual, int lastWrittenRequest, long beginFromOffset) {
    String msg = "Actual message sequence: " + fmt(actual);
    if (!actual.isEmpty() && actual.get(0).hasOneof(BidiUploadState.FIRST_MESSAGE_DESCRIPTOR)) {
      assertWithMessage("Received an unexpected first_message " + msg)
          .that(lastWrittenRequest)
          .isEqualTo(-1);
    }

    long startOffset = beginFromOffset;
    for (int i = 0, actualSize = actual.size(), lastIdx = actualSize - 1; i < actualSize; i++) {
      BidiWriteObjectRequest req = actual.get(i);
      assertWithMessage("Non-contiguous message " + msg)
          .that(req.getWriteOffset())
          .isEqualTo(startOffset);
      if (req.getFinishWrite()) {
        assertWithMessage("finish_write: true not last " + msg).that(i).isEqualTo(lastIdx);
      }
      startOffset = req.getWriteOffset() + req.getChecksummedData().getContent().size();
    }
  }

  /**
   * Generate scenarios: a begin offset, a first message, 1-5 small data messages, a trailing
   * flush/finish message, and a lastWrittenRequest index in [-1, messageCount).
   */
  @Provide("sendViaScenarioWithLastWrittenRequest")
  Arbitrary arbitrarySendViaScenarioWithLastWrittenRequest() {
    return beginOffset()
        .flatMap(
            beginOffset ->
                Combinators.combine(
                        Arbitraries.just(beginOffset),
                        firstMessage(),
                        dataMessage(),
                        finishMessage())
                    .as(Tuple::of))
        .flatMap(
            t -> {
              Long beginOffset = t.get1();
              BidiWriteObjectRequest first = t.get2();
              List<@NonNull ChecksummedData> data = t.get3();
              BidiWriteObjectRequest finish = t.get4();

              int messageCount = data.size();
              if (first != null) {
                messageCount++;
              }
              if (finish != null) {
                messageCount++;
              }
              // maybe select one of our existing messages as the lastWrittenRequest
              return Arbitraries.integers()
                  .between(-1, messageCount - 1)
                  .map(lwr -> makeScenario(lwr, beginOffset, first, data, finish));
            });
  }

  /**
   * Assemble a scenario: assign contiguous write offsets to each data payload starting at
   * {@code beginOffset}, and stamp the final message with the resulting end offset.
   */
  private static @NonNull ScenarioWithLastWrittenRequest makeScenario(
      int lastSentRequest,
      Long beginOffset,
      BidiWriteObjectRequest first,
      List cds,
      BidiWriteObjectRequest last) {
    long offset = beginOffset;
    List data = new ArrayList<>();
    data.add(first);
    for (ChecksummedData cd : cds) {
      data.add(
          BidiWriteObjectRequest.newBuilder()
              .setWriteOffset(offset)
              .setChecksummedData(cd)
              .build());
      offset += cd.getContent().size();
    }

    if (last != null) {
      BidiWriteObjectRequest lastWithOffset = last.toBuilder().setWriteOffset(offset).build();
      data.add(lastWithOffset);
    }

    return new ScenarioWithLastWrittenRequest(
        /* lastWrittenRequest= */ lastSentRequest, /* beginFromOffset= */ beginOffset, first, data);
  }

  // offsets up to 256 KiB
  Arbitrary beginOffset() {
    return Arbitraries.longs().between(0, 256 * 1024);
  }

  /** First message variants: new appendable object, or takeover of an existing one. */
  Arbitrary<@NonNull BidiWriteObjectRequest> firstMessage() {
    return Arbitraries.of(
        BidiWriteObjectRequest.newBuilder()
            .setWriteObjectSpec(
                WriteObjectSpec.newBuilder()
                    .setResource(
                        Object.newBuilder().setBucket("projects/_/buckets/b").setName("o").build())
                    .build())
            .build(),
        BidiWriteObjectRequest.newBuilder()
            .setAppendObjectSpec(
                AppendObjectSpec.newBuilder()
                    .setBucket("projects/_/buckets/b")
                    .setObject("o")
                    .build())
            .build() /*,
        BidiWriteObjectRequest.newBuilder()
            .setUploadId("upload-id")
            .build(),*/);
  }

  Arbitrary<@NonNull List> dataMessage() {
    // keep data fairly small, we are mainly testing message handling not data handling
    return Arbitraries.integers()
        .between(1, 17)
        .map(
            numBytes -> {
              ChecksummedTestContent content =
                  ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(numBytes));
              return content.asChecksummedData();
            })
        .list()
        .ofMinSize(1)
        .ofMaxSize(5);
  }

  /** Trailing message: either finish_write or a flush+state_lookup (never actually null). */
  Arbitrary<@Nullable BidiWriteObjectRequest> finishMessage() {
    return Arbitraries.of(
        BidiWriteObjectRequest.newBuilder().setFinishWrite(true).build(),
        BidiWriteObjectRequest.newBuilder().setFlush(true).setStateLookup(true).build());
  }

  private static String fmt(List l) {
    return l.stream().map(StorageV2ProtoUtils::fmtProto).collect(BidiUploadTest.joiner);
  }

  private static String fmt(Message msg) {
    if (msg == null) {
      return "null";
    }
    return fmtProto(msg);
  }

  /**
   * Immutable test scenario: a message queue, the offset the upload begins from, and the index of
   * the last request already considered sent (-1 = nothing sent).
   */
  static final class ScenarioWithLastWrittenRequest {
    private static final long MAX_BYTES = 50_000;
    private final int lastSentRequestIndex;
    private final long beginFromOffset;
    private final BidiWriteObjectRequest firstMessage;
    private final List messages;

    private ScenarioWithLastWrittenRequest(
        int lastWrittenRequest,
        long beginFromOffset,
        BidiWriteObjectRequest firstMessage,
        List messages) {
      this.lastSentRequestIndex = lastWrittenRequest;
      this.beginFromOffset = beginFromOffset;
      this.firstMessage = firstMessage;
      this.messages = messages;
    }

    /**
     * Build the upload state matching {@code firstMessage}: a fresh appendable state for a
     * write_object_spec, or a takeover state (with reconciliation applied) for an
     * append_object_spec.
     */
    public @NonNull AppendableUploadState makeBidiUploadState() {
      if (firstMessage.hasWriteObjectSpec()) {
        AppendableUploadState state =
            BidiUploadState.appendableNew(
                firstMessage,
                GrpcCallContext::createDefault,
                MAX_BYTES,
                SettableApiFuture.create(),
                Crc32cValue.zero());
        state.totalSentBytes = beginFromOffset;
        return state;
      } else if (firstMessage.hasAppendObjectSpec()) {
        AppendableUploadState state =
            BidiUploadState.appendableTakeover(
                firstMessage,
                GrpcCallContext::createDefault,
                MAX_BYTES,
                SettableApiFuture.create(),
                Crc32cValue.zero());
        state.awaitTakeoverStateReconciliation(
            () -> {
              state.retrying();
              assertThat(
                      state.onResponse(BidiUploadTest.resourceFor(firstMessage, beginFromOffset)))
                  .isNull();
            });
        return state;
      } else {
        //noinspection JUnit5AssertionsConverter
        fail("Unhandled firstMessage type: " + fmtProto(firstMessage));
        return null;
      }
    }

    @Override
    public String toString() {
      return MoreObjects.toStringHelper(this)
          .add("\n  lastWrittenRequest", lastSentRequestIndex)
          .add("\n  beginFromOffset", beginFromOffset)
          .add("\n  maxBytes", MAX_BYTES)
          .add("\n  firstMessage", BidiUploadStreamingStreamPropertyTest.fmt(firstMessage))
          .add("\n  messages", fmt(messages))
          .addValue("\n")
          .toString();
    }

    private static String fmt(List msgs) {
      return msgs.stream()
          .map(BidiUploadStreamingStreamPropertyTest::fmt)
          .collect(Collectors.joining(",\n    ", "[\n    ", "\n  ]"));
    }
  }
}
diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTest.java
new file mode 100644
index 000000000000..2774981ecd62
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTest.java
@@ -0,0 +1,2275 @@
/*
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.cloud.storage.BidiUploadState.appendableNew;
import static com.google.cloud.storage.BidiUploadTestUtils.adaptOnlySend;
import static com.google.cloud.storage.BidiUploadTestUtils.alwaysErrorBidiStreamingCallable;
import static com.google.cloud.storage.BidiUploadTestUtils.createSegment;
import static com.google.cloud.storage.BidiUploadTestUtils.finishAt;
import static com.google.cloud.storage.BidiUploadTestUtils.incremental;
import static com.google.cloud.storage.BidiUploadTestUtils.makeRedirect;
import static com.google.cloud.storage.BidiUploadTestUtils.packRedirectIntoAbortedException;
import static com.google.cloud.storage.BidiUploadTestUtils.timestampNow;
import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto;
import static com.google.cloud.storage.TestUtils.GRPC_STATUS_DETAILS_KEY;
import static com.google.cloud.storage.TestUtils.assertAll;
import static com.google.cloud.storage.TestUtils.defaultRetryingDeps;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth.assertWithMessage;
import static java.lang.String.format;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.fail;

import com.google.api.core.ApiFuture;
import com.google.api.core.SettableApiFuture;
import com.google.api.gax.grpc.GrpcCallContext;
import com.google.api.gax.grpc.GrpcStatusCode;
import com.google.api.gax.rpc.AbortedException;
import com.google.api.gax.rpc.ApiExceptionFactory;
import com.google.api.gax.rpc.ErrorDetails;
import com.google.api.gax.rpc.ResponseObserver;
import com.google.api.gax.rpc.StreamController;
import com.google.cloud.storage.Backoff.Jitterer;
import com.google.cloud.storage.BidiUploadState.AppendableUploadState;
import com.google.cloud.storage.BidiUploadState.BaseUploadState;
import com.google.cloud.storage.BidiUploadState.State;
import com.google.cloud.storage.BidiUploadStreamingStream.RedirectHandlingResponseObserver;
import com.google.cloud.storage.BidiUploadStreamingStream.StreamRetryContextDecorator;
import com.google.cloud.storage.BidiUploadStreamingStream.StreamingResponseObserver;
import com.google.cloud.storage.ChunkSegmenter.ChunkSegment;
import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown;
import com.google.cloud.storage.ITAppendableUploadFakeTest.FakeStorage;
import com.google.cloud.storage.it.ChecksummedTestContent;
import com.google.common.collect.BoundType;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Range;
import com.google.protobuf.Any;
import com.google.protobuf.ByteString;
import com.google.protobuf.TextFormat;
import com.google.rpc.Code;
import com.google.storage.v2.AppendObjectSpec;
import com.google.storage.v2.BidiWriteHandle;
import com.google.storage.v2.BidiWriteObjectRedirectedError;
import com.google.storage.v2.BidiWriteObjectRequest;
import com.google.storage.v2.BidiWriteObjectResponse;
import com.google.storage.v2.Object;
import com.google.storage.v2.ObjectChecksums;
import com.google.storage.v2.WriteObjectSpec;
import io.grpc.Metadata;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.function.UnaryOperator;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.runners.Enclosed;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

/**
 * Test suite (run via {@link Enclosed}) covering bidi upload state handling: shared fixtures
 * (request/content constants) plus nested parameterized test classes.
 */
@RunWith(Enclosed.class)
@SuppressWarnings({"unused", "UnnecessaryLocalVariable", "SameParameterValue"})
public final class BidiUploadTest {
  // Shared pretty-print collector used when formatting message sequences in failure output.
  static final Collector joiner = joiner(1);

  /** Build a joining collector which indents each element by {@code indentation} levels. */
  private static Collector joiner(int indentation) {
    String i0 = " ";
    String i_1 = IntStream.range(0, indentation - 1).mapToObj(x -> i0).reduce("", String::concat);
    String i = IntStream.range(0, indentation).mapToObj(x -> i0).reduce("", String::concat);

    return Collectors.joining(",\n" + i, "[\n" + i, "\n" + i_1 + "]");
  }

  // 64 bytes of deterministic content shared by the byte-slice request constants below.
  public static final ChecksummedTestContent content =
      ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(64));

  // First-message for a brand new appendable object.
  static final BidiWriteObjectRequest appendRequestNew =
      BidiWriteObjectRequest.newBuilder()
          .setWriteObjectSpec(
              WriteObjectSpec.newBuilder()
                  .setResource(
                      Object.newBuilder().setBucket("projects/_/buckets/b").setName("o").build())
                  .setAppendable(true)
                  .build())
          .build();
  // First-message for taking over an existing appendable object (generation pinned).
  static final BidiWriteObjectRequest appendRequestTakeover =
      BidiWriteObjectRequest.newBuilder()
          .setAppendObjectSpec(
              AppendObjectSpec.newBuilder()
                  .setBucket("projects/_/buckets/b")
                  .setObject("o")
                  .setGeneration(1)
                  .build())
          .build();

  static final BidiWriteObjectRequest uploadId =
      BidiWriteObjectRequest.newBuilder().setUploadId("uploadId").build();
  static final BidiWriteObjectRequest writeObjectSpec =
      BidiWriteObjectRequest.newBuilder()
          .setWriteObjectSpec(WriteObjectSpec.newBuilder().setIfGenerationMatch(0).build())
          .build();
  static final BidiWriteObjectRequest appendableObjectSpec =
      BidiWriteObjectRequest.newBuilder()
          .setAppendObjectSpec(AppendObjectSpec.newBuilder().setObject("obj").build())
          .build();

  // 10-byte data-only requests at offsets 0/10/20/30, sliced from `content`.
  static final BidiWriteObjectRequest onlyBytes_00 =
      BidiWriteObjectRequest.newBuilder()
          .setChecksummedData(content.slice(0, 10).asChecksummedData())
          .setWriteOffset(0)
          .build();
  static final BidiWriteObjectRequest onlyBytes_10 =
      BidiWriteObjectRequest.newBuilder()
          .setChecksummedData(content.slice(10, 10).asChecksummedData())
          .setWriteOffset(10)
          .build();
  static final BidiWriteObjectRequest onlyBytes_20 =
      BidiWriteObjectRequest.newBuilder()
          .setChecksummedData(content.slice(20, 10).asChecksummedData())
          .setWriteOffset(20)
          .build();
  static final BidiWriteObjectRequest onlyBytes_30 =
      BidiWriteObjectRequest.newBuilder()
          .setChecksummedData(content.slice(30, 10).asChecksummedData())
          .setWriteOffset(30)
          .build();

  static final BidiWriteObjectRequest onlyFlush =
      BidiWriteObjectRequest.newBuilder().setFlush(true).build();
  static final BidiWriteObjectRequest onlyFinishWrite =
      BidiWriteObjectRequest.newBuilder().setFinishWrite(true).build();

  /**
   * Behavior common to every {@link BidiUploadState} flavor; parameterized over the factories
   * below (new appendable vs. takeover).
   */
  @SuppressWarnings("ClassEscapesDefinedScope")
  @RunWith(Parameterized.class)
  public static final class BidiUploadStateCommonTest {

    // Short single-line proto formatter for assertion messages; tolerates null.
    private static final Function fmt =
        message -> message == null ? "null" : TextFormat.printer().shortDebugString(message);

    private final BidiUploadStateFactory factory;

    public BidiUploadStateCommonTest(BidiUploadStateFactory factory) {
      this.factory = factory;
    }

    @Parameters(name = "{0}")
    public static ImmutableList factories() {
      return ImmutableList.of(new AppendableNewFactory(), new AppendableTakeoverFactory());
    }

    /** Offers are accepted until maxBytes is reached; queue head/tail and counters track them. */
    @Test
    public void offer() {
      BidiUploadState state = factory.createInitialized();
      assertThat(state.offer(uploadId)).isTrue();
      assertThat(state.offer(onlyBytes_00)).isTrue();
      assertThat(state.offer(onlyBytes_10)).isTrue();
      // third data message exceeds the state's capacity and must be rejected
      assertThat(state.offer(onlyBytes_20)).isFalse();

      assertThat(state.peekFirst()).isSameInstanceAs(uploadId);
      assertThat(state.peekLast()).isSameInstanceAs(onlyBytes_10);
      assertThat(state.getTotalSentBytes()).isEqualTo(20);
      assertThat(state.getConfirmedBytes()).isEqualTo(0);
    }

    /** Acking 10 bytes evicts the fully-confirmed first data message from the queue. */
    @Test
    public void setConfirmedBytesOffset_oneFullMessage() {
      BidiUploadState state = factory.createInitialized();
      assertThat(state.offer(onlyBytes_00)).isTrue();
      assertThat(state.offer(onlyBytes_10)).isTrue();
      assertThat(state.getTotalSentBytes()).isEqualTo(20);
      assertThat(state.getConfirmedBytes()).isEqualTo(0);

      BidiUploadTestUtils.sinkToList(state);
      state.updateStateFromResponse(BidiUploadTestUtils.incremental(10));

      assertThat(state.peekFirst()).isSameInstanceAs(onlyBytes_10);
      assertThat(state.peekLast()).isSameInstanceAs(onlyBytes_10);
      assertThat(state.getTotalSentBytes()).isEqualTo(20);
      assertThat(state.getConfirmedBytes()).isEqualTo(10);
    }

    /** An ack should also evict any non-data first message (e.g. upload id) ahead of the data. */
    @Test
    public void setConfirmedBytesOffset_anyFirstMessageEvicted() {
      BidiUploadState state = factory.createInitialized();
      assertThat(state.offer(uploadId)).isTrue();
      assertThat(state.offer(onlyBytes_00)).isTrue();
      assertThat(state.offer(onlyBytes_10)).isTrue();
      assertThat(state.getTotalSentBytes()).isEqualTo(20);
      assertThat(state.getConfirmedBytes()).isEqualTo(0);
assertThat(state.peekFirst()).isSameInstanceAs(uploadId); + + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(10)); + + assertThat(state.peekFirst()).isSameInstanceAs(onlyBytes_10); + assertThat(state.peekLast()).isSameInstanceAs(onlyBytes_10); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(10); + } + + @Test + public void setConfirmedBytesOffset_onlyFullMessagesAreEvicted() { + BidiUploadState state = factory.createInitialized(); + assertThat(state.offer(onlyBytes_00)).isTrue(); + assertThat(state.offer(onlyBytes_10)).isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(0); + + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(11)); + + assertThat(state.peekFirst()).isSameInstanceAs(onlyBytes_10); + assertThat(state.peekLast()).isSameInstanceAs(onlyBytes_10); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(10); + } + + @Test + public void setConfirmedBytesOffset_allMessagesAreEvicted() { + BidiUploadState state = factory.createInitialized(); + assertThat(state.offer(onlyBytes_00)).isTrue(); + assertThat(state.offer(onlyBytes_10)).isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(0); + + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(20)); + + assertThat(state.peekFirst()).isNull(); + assertThat(state.peekLast()).isNull(); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(20); + } + + @Test + public void multipleOfferAckCycles() { + BidiUploadState state = factory.createInitialized(); + + assertThat(state.getConfirmedBytes()).isEqualTo(0); + + assertThat(state.offer(onlyBytes_00)).isTrue(); + 
assertThat(state.getTotalSentBytes()).isEqualTo(10); + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(10)); + assertThat(state.getConfirmedBytes()).isEqualTo(10); + + assertThat(state.offer(onlyBytes_10)).isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(20)); + assertThat(state.getConfirmedBytes()).isEqualTo(20); + + assertThat(state.offer(onlyBytes_20)).isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(30); + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(30)); + assertThat(state.getConfirmedBytes()).isEqualTo(30); + + assertThat(state.peekFirst()).isNull(); + assertThat(state.peekLast()).isNull(); + } + + @Test + public void ackOfferLessThanSent() { + BidiUploadState state = factory.createInitialized(); + + assertThat(state.offer(uploadId)).isTrue(); + assertThat(state.offer(onlyBytes_00)).isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(10); + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(0)); + assertThat(state.getTotalSentBytes()).isEqualTo(10); + assertThat(state.getConfirmedBytes()).isEqualTo(0); + + assertThat(state.peekFirst()).isSameInstanceAs(onlyBytes_00); + assertThat(state.peekLast()).isSameInstanceAs(onlyBytes_00); + } + + @Test + public void offerWithBytesRejectedIfNoAvailableCapacity() { + BidiUploadState state = factory.createInitialized(4); + + assertThat(state.availableCapacity()).isEqualTo(4); + assertThat( + state.offer( + BidiUploadTestUtils.createSegment(content.slice(0, 5).asChecksummedData()))) + .isFalse(); + } + + @Test + public void initialRequestMessage_shouldNotMergeIntoDataRequest() { + BidiUploadState state = factory.createInitialized(17); + + 
assertThat(state.offer(BidiUploadTestUtils.createSegment(onlyBytes_00.getChecksummedData()))) + .isTrue(); + + assertThat(state.peekFirst()).isEqualTo(onlyBytes_00); + } + + @Test + public void redirectToken_appendable_previousSuccessfulFlush() throws Exception { + BidiWriteObjectRequest req = appendRequestNew; + GrpcCallContext baseContext = + GrpcCallContext.createDefault() + .withExtraHeaders(ImmutableMap.of("something", ImmutableList.of("or", "other"))); + BidiUploadState state = factory.createInitialized(() -> baseContext, 17); + + state.updateStateFromResponse(BidiUploadTestUtils.incremental(10)); + state.pendingRetry(); + state.updateFromRedirect( + BidiWriteObjectRedirectedError.newBuilder() + .setGeneration(1) + .setRoutingToken("routing-token") + .setWriteHandle( + BidiWriteHandle.newBuilder().setHandle(ByteString.copyFromUtf8("handle")).build()) + .build()); + state.retrying(); + + GrpcCallContext actualCtx = state.enqueueFirstMessageAndGetGrpcCallContext(); + List requests = BidiUploadTestUtils.sinkToList(state); + + BidiWriteObjectRequest expectedRequest = + appendRequestNew.toBuilder() + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket("projects/_/buckets/b") + .setObject("o") + .setGeneration(1) + .setWriteHandle( + BidiWriteHandle.newBuilder() + .setHandle(ByteString.copyFromUtf8("handle")) + .build()) + .setRoutingToken("routing-token") + .build()) + .setStateLookup(true) + .build(); + ImmutableMap> expectedHeaders = + ImmutableMap.of( + "something", + ImmutableList.of("or", "other"), + "x-goog-request-params", + ImmutableList.of( + "bucket=projects/_/buckets/b&appendable=true&routing_token=routing-token")); + assertAll( + () -> assertThat(requests).isEqualTo(ImmutableList.of(expectedRequest)), + () -> assertThat(actualCtx).isNotEqualTo(baseContext), + () -> assertThat(actualCtx.getExtraHeaders()).isEqualTo(expectedHeaders)); + } + + @Test + public void sendVia_onlySendsFirstMessageWhenRetrying() { + BidiUploadState state = 
factory.create(20); + assertThat(state.enqueueFirstMessageAndGetGrpcCallContext()).isNotNull(); + List requests1 = BidiUploadTestUtils.sinkToList(state); + assertThat(requests1).hasSize(1); + assertThat(state.onResponse(resourceWithSize(0))).isNull(); + assertThat(state.getState()).isEqualTo(State.RUNNING); + + ChecksummedTestContent.gen(20).chunkup(5).stream() + .map(ChecksummedTestContent::asChecksummedData) + .map(BidiUploadTestUtils::createSegment) + .forEach(s -> assertThat(state.offer(s)).isTrue()); + + List requests2 = BidiUploadTestUtils.sinkToList(state); + assertThat(requests2).hasSize(4); + assertThat(requests2).containsNoneIn(requests1); + + assertThat(state.onResponse(BidiUploadTestUtils.incremental(4))).isNull(); + + state.pendingRetry(); + state.retrying(); + assertThat(state.enqueueFirstMessageAndGetGrpcCallContext()).isNotNull(); + + List requests3 = BidiUploadTestUtils.sinkToList(state); + + assertThat(requests3) + .isEqualTo( + ImmutableList.of( + BidiWriteObjectRequest.newBuilder() + .setStateLookup(true) + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket("projects/_/buckets/b") + .setObject("o") + .setGeneration(1) + .build()) + .build())); + } + + @Test + public void redirectToken_appendable_noPreviousSuccessfulFlush() throws Exception { + GrpcCallContext baseContext = + GrpcCallContext.createDefault() + .withExtraHeaders(ImmutableMap.of("something", ImmutableList.of("or", "other"))); + BidiUploadState state = factory.create(() -> baseContext, 17); + + GrpcCallContext actualCtx = state.enqueueFirstMessageAndGetGrpcCallContext(); + List requests = BidiUploadTestUtils.sinkToList(state); + + ImmutableMap> expectedHeaders = + ImmutableMap.of( + "something", + ImmutableList.of("or", "other"), + "x-goog-request-params", + ImmutableList.of("bucket=projects/_/buckets/b&appendable=true")); + assertAll( + () -> { + // because we're running parameterized, we don't know which initial request is + // specifically needed for this 
assertion. But we do know all the valid request chains. + // Enumerate them here and validate that one of them is matched. + List> all = + ImmutableList.of( + ImmutableList.of(appendRequestNew), ImmutableList.of(appendRequestTakeover)); + boolean contains = all.contains(requests); + String msg = + format( + "Requests does not match a valid list of expected requests.%n" + + "expected: %s" + + "%n" + + "%n" + + "but was: %s", + all.stream() + .map(l -> l.stream().map(StorageV2ProtoUtils::fmtProto).collect(joiner(2))) + .collect(joiner), + requests.stream().map(StorageV2ProtoUtils::fmtProto).collect(joiner)); + assertWithMessage(msg).that(contains).isTrue(); + }, + () -> assertThat(actualCtx).isNotEqualTo(baseContext), + () -> assertThat(actualCtx.getExtraHeaders()).isEqualTo(expectedHeaders)); + } + + @Test + public void awaitAck_alreadyThere() throws InterruptedException { + BidiUploadState state = factory.createInitialized(17); + + assertThat(state.offer(createSegment(2))).isTrue(); + assertThat(state.onResponse(incremental(2))).isNull(); + + state.awaitAck(2); + } + + @Test + public void awaitAck_multipleResponses() + throws InterruptedException, ExecutionException, TimeoutException { + BidiUploadState state = factory.createInitialized(17); + + assertThat(state.offer(createSegment(4))).isTrue(); + ExecutorService exec = Executors.newSingleThreadExecutor(); + try { + Future f = + exec.submit( + () -> { + try { + Thread.sleep(10); + assertThat(state.onResponse(incremental(2))).isNull(); + Thread.sleep(10); + assertThat(state.onResponse(incremental(4))).isNull(); + return 3; + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + + state.awaitAck(4); + assertThat(f.get(3, TimeUnit.SECONDS)).isEqualTo(3); + } finally { + exec.shutdownNow(); + } + } + + private abstract static class BidiUploadStateFactory { + final BidiUploadState createInitialized() { + return createInitialized(25); + } + + final BidiUploadState createInitialized(long 
maxBytes) { + return createInitialized( + GrpcCallContext::createDefault, + maxBytes, + SettableApiFuture.create(), + Crc32cValue.zero()); + } + + final BidiUploadState createInitialized( + Supplier grpcContextSupplier, long maxBytes) { + return createInitialized( + grpcContextSupplier, maxBytes, SettableApiFuture.create(), Crc32cValue.zero()); + } + + final BidiUploadState create() { + return create(25); + } + + final BidiUploadState create(long maxBytes) { + return create( + GrpcCallContext::createDefault, + maxBytes, + SettableApiFuture.create(), + Crc32cValue.zero()); + } + + final BidiUploadState create(Supplier grpcContextSupplier, long maxBytes) { + return create( + grpcContextSupplier, maxBytes, SettableApiFuture.create(), Crc32cValue.zero()); + } + + abstract BidiUploadState createInitialized( + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c); + + abstract BidiUploadState create( + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c); + + @Override + public final String toString() { + return this.getClass().getSimpleName(); + } + } + + private static final class AppendableNewFactory extends BidiUploadStateFactory { + @Override + BidiUploadState createInitialized( + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + BidiUploadState state = create(baseCallContext, maxBytes, resultFuture, initialCrc32c); + state.enqueueFirstMessageAndGetGrpcCallContext(); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(0)); + return state; + } + + @Override + BidiUploadState create( + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + return appendableNew( + appendRequestNew, baseCallContext, maxBytes, resultFuture, initialCrc32c); + } + } + + private static final class 
AppendableTakeoverFactory extends BidiUploadStateFactory { + @Override + BidiUploadState createInitialized( + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + BidiUploadState state = create(baseCallContext, maxBytes, resultFuture, initialCrc32c); + state.awaitTakeoverStateReconciliation( + () -> { + state.retrying(); + assertThat(state.onResponse(BidiUploadTest.resourceFor(appendRequestTakeover, 0))) + .isNull(); + }); + return state; + } + + @Override + BidiUploadState create( + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + return BidiUploadState.appendableTakeover( + appendRequestTakeover, baseCallContext, maxBytes, resultFuture, initialCrc32c); + } + } + } + + public static final class BidiUploadStateConcatenateTest { + + @Test + public void concatenate_bothChecksummedData_resultsInIllegalArgument() { + IllegalArgumentException iae = + assertThrows( + IllegalArgumentException.class, + () -> BidiUploadState.concatenate(onlyBytes_00, onlyBytes_10)); + } + + @Test + public void concatenate_writeOffsetLesserWhenBothSpecified() { + BidiWriteObjectRequest expected = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(10) + .setFinishWrite(true) + .setChecksummedData(onlyBytes_10.getChecksummedData()) + .build(); + BidiWriteObjectRequest finish_20 = onlyFinishWrite.toBuilder().setWriteOffset(20).build(); + BidiWriteObjectRequest concatenated = BidiUploadState.concatenate(onlyBytes_10, finish_20); + assertThat(concatenated).isEqualTo(expected); + } + + @Test + public void concatenate_left0_right10_shouldBe0() { + BidiWriteObjectRequest expected = + BidiWriteObjectRequest.newBuilder() + .setFinishWrite(true) + .setChecksummedData(onlyBytes_00.getChecksummedData()) + .build(); + BidiWriteObjectRequest finish_10 = onlyFinishWrite.toBuilder().setWriteOffset(10).build(); + BidiWriteObjectRequest concatenated = 
BidiUploadState.concatenate(onlyBytes_00, finish_10); + assertThat(concatenated).isEqualTo(expected); + } + + @Test + public void concatenate_leftFirst_rightFinish10_shouldHaveWriteOffset10() { + BidiWriteObjectRequest expected = + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec(appendRequestNew.getWriteObjectSpec()) + .setWriteOffset(10) + .setFinishWrite(true) + .build(); + BidiWriteObjectRequest finish_10 = onlyFinishWrite.toBuilder().setWriteOffset(10).build(); + BidiWriteObjectRequest concatenated = + BidiUploadState.concatenate(appendRequestNew, finish_10); + assertThat(concatenated).isEqualTo(expected); + } + + @Test + public void concatenate_leftFirstWith00_rightFinish10_shouldHaveWriteOffset0() { + BidiWriteObjectRequest expected = + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec(appendRequestNew.getWriteObjectSpec()) + .setChecksummedData(onlyBytes_00.getChecksummedData()) + .setWriteOffset(0) + .setFinishWrite(true) + .build(); + BidiWriteObjectRequest first_00 = + appendRequestNew.toBuilder().mergeFrom(onlyBytes_00).build(); + BidiWriteObjectRequest finish_10 = onlyFinishWrite.toBuilder().setWriteOffset(10).build(); + BidiWriteObjectRequest concatenated = BidiUploadState.concatenate(first_00, finish_10); + assertThat(concatenated).isEqualTo(expected); + } + + @Test + public void concatenate_left00_rightFinish10_shouldHaveWriteOffset0() { + BidiWriteObjectRequest expected = + BidiWriteObjectRequest.newBuilder() + .setChecksummedData(onlyBytes_00.getChecksummedData()) + .setWriteOffset(0) + .setFinishWrite(true) + .build(); + BidiWriteObjectRequest first_00 = onlyBytes_00; + BidiWriteObjectRequest finish_10 = finishAt(10); + BidiWriteObjectRequest concatenated = BidiUploadState.concatenate(first_00, finish_10); + assertThat(concatenated).isEqualTo(expected); + } + + @Test + public void concatenate_left10_rightFinish21_shouldThrowIllegalArgumentException() { + IllegalArgumentException iae = + assertThrows( + 
IllegalArgumentException.class, + () -> BidiUploadState.concatenate(onlyBytes_10, finishAt(21))); + } + } + + public static final class AppendableUploadStateTest { + private static @NonNull AppendableUploadState getAppendable() { + AppendableUploadState uploadState = + appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 50_000, + SettableApiFuture.create(), + Crc32cValue.zero()); + uploadState.enqueueFirstMessageAndGetGrpcCallContext(); + BidiUploadTestUtils.sinkToList(uploadState); + assertThat(uploadState.onResponse(resourceWithSize(0))).isNull(); + ChecksummedTestContent.gen(10).chunkup(1).stream() + .map(ChecksummedTestContent::asChecksummedData) + .map(BidiUploadTestUtils::createSegment) + .forEach(c -> assertThat(uploadState.offer(c)).isTrue()); + return uploadState; + } + + private static void runExpectNoException( + AppendableUploadState state, @NonNull BidiWriteObjectResponse response) { + StorageException se = state.onResponse(response); + assertThat(se).isNull(); + } + + private static void runExpectException( + AppendableUploadState state, + BidiWriteObjectResponse response, + UploadFailureScenario scenario) { + StorageException se = state.onResponse(response); + assertThat(se).isNotNull(); + assertThat(se).hasMessageThat().contains(scenario.getMessage()); + } + + @Test + public void onResponse_responseWithoutPersistedSizeAndWithoutResource_IllegalStateException() { + AppendableUploadState state = getAppendable(); + + IllegalStateException ise = + assertThrows( + IllegalStateException.class, + () -> { + BidiWriteObjectResponse response = BidiWriteObjectResponse.getDefaultInstance(); + StorageException se = state.onResponse(response); + if (se != null) { + throw se; + } + }); + + assertThat(ise).hasMessageThat().contains("persistedSize > -1"); + } + + @Test + public void onResponse_writeHandleUpdated() { + BidiWriteHandle handle = + BidiWriteHandle.newBuilder().setHandle(ByteString.copyFromUtf8("new-handle")).build(); + 
AppendableUploadState state = getAppendable(); + BidiUploadTestUtils.sinkToList(state); + runExpectNoException( + state, BidiUploadTestUtils.incremental(10).toBuilder().setWriteHandle(handle).build()); + assertThat(state.writeHandle).isEqualTo(handle); + } + + @Test + public void onResponse_notFinalizing_incremental_ackEq() { + AppendableUploadState state = getAppendable(); + BidiUploadTestUtils.sinkToList(state); + runExpectNoException(state, BidiUploadTestUtils.incremental(10)); + assertThat(state.confirmedBytes).isEqualTo(10); + } + + @Test + public void onResponse_notFinalizing_incremental_ackLt() { + AppendableUploadState state = getAppendable(); + BidiUploadTestUtils.sinkToList(state); + runExpectNoException(state, BidiUploadTestUtils.incremental(9)); + assertThat(state.confirmedBytes).isEqualTo(9); + } + + @Test + public void onResponse_notFinalizing_incremental_ackGt() { + AppendableUploadState state = getAppendable(); + BidiUploadTestUtils.sinkToList(state); + runExpectException( + state, BidiUploadTestUtils.incremental(11), UploadFailureScenario.SCENARIO_7); + } + + @Test + public void onResponse_notFinalizing_notIncremental_ackEq() { + AppendableUploadState state = getAppendable(); + BidiUploadTestUtils.sinkToList(state); + runExpectNoException(state, resourceWithSize(10)); + } + + @Test + public void onResponse_finalizing_notIncremental_ackLt_inRangeOfBufferedBytes() { + AppendableUploadState state = getAppendable(); + state.offer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + BidiUploadTestUtils.sinkToList(state); + // runExpectException(state, resourceWithSize(9), UploadFailureScenario.SCENARIO_9); + runExpectNoException(state, resourceWithSize(9)); + assertThat(state.confirmedBytes).isEqualTo(9); + } + + @Test + public void onResponse_finalizing_notIncremental_ackLt_outOfRangeOfBufferedBytes() { + AppendableUploadState state = getAppendable(); + state.offer( + 
BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(3)); + runExpectException(state, resourceWithSize(2), UploadFailureScenario.SCENARIO_4_1); + } + + @Test + public void onResponse_finalizing_notIncremental_ackGt() { + AppendableUploadState state = getAppendable(); + state.offer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + BidiUploadTestUtils.sinkToList(state); + runExpectException(state, resourceWithSize(11), UploadFailureScenario.SCENARIO_4_2); + } + + @Test + public void onResponse_finalizing_incremental_ackLt_inRangeOfBufferedBytes() { + AppendableUploadState state = getAppendable(); + state.offer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + BidiUploadTestUtils.sinkToList(state); + // runExpectException(state, incremental(9), UploadFailureScenario.SCENARIO_9); + runExpectNoException(state, BidiUploadTestUtils.incremental(9)); + assertThat(state.confirmedBytes).isEqualTo(9); + } + + @Test + public void onResponse_finalizing_incremental_ackLt_outOfRangeOfBufferedBytes() { + AppendableUploadState state = getAppendable(); + state.offer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(9)); + runExpectException( + state, BidiUploadTestUtils.incremental(8), UploadFailureScenario.SCENARIO_3); + } + + @Test + public void onResponse_finalizing_incremental_ackEq() { + AppendableUploadState state = getAppendable(); + state.offer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + BidiUploadTestUtils.sinkToList(state); + 
runExpectNoException(state, BidiUploadTestUtils.incremental(10)); + assertThat(state.confirmedBytes).isEqualTo(10); + } + + @Test + public void onResponse_finalizing_incremental_ackGt() { + // BidiUploadState state = new TestState(Flag.FINALIZING); + AppendableUploadState state = getAppendable(); + // updateBasedOnResponseRunExpectFailure(state, UploadFailureScenario.SCENARIO_2, + // incremental(11)); + } + + @Test + public void onResponse_finalizing_notIncremental_ackEq() { + AppendableUploadState state = getAppendable(); + state.offer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + BidiUploadTestUtils.sinkToList(state); + runExpectNoException(state, resourceWithSize(10)); + assertThat(state.confirmedBytes).isEqualTo(10); + } + + @Test + public void complexSequence_1() throws Exception { + AppendableUploadState state = + appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 50_000, + SettableApiFuture.create(), + Crc32cValue.zero()); + GrpcCallContext actualCtx = state.enqueueFirstMessageAndGetGrpcCallContext(); + assertThat(actualCtx).isNotNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(0), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(-1), + () -> assertThat(state.confirmedBytes).isEqualTo(-1), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(Crc32cValue.zero())); + + ChecksummedTestContent b_10 = ChecksummedTestContent.gen(10); + Crc32cLengthKnown cumulative0 = Crc32cValue.of(b_10.getCrc32c(), b_10.length()); + b_10.chunkup(1).stream() + .map(ChecksummedTestContent::asChecksummedData) + .map(BidiUploadTestUtils::createSegment) + .forEach(c -> assertThat(state.offer(c)).isTrue()); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(10), + () -> assertThat(state.lastSentRequestIndex).isIn(range(-1, state.queue.size())), + () -> assertThat(state.confirmedBytes).isEqualTo(-1), + 
() -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(cumulative0)); + + BidiUploadTestUtils.sinkToList(state); + assertThat(state.onResponse(resourceWithSize(1))).isNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(10), + () -> assertThat(state.lastSentRequestIndex).isIn(range(-1, state.queue.size())), + () -> assertThat(state.confirmedBytes).isEqualTo(1), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(cumulative0)); + + BidiUploadTestUtils.sinkToList(state); + assertThat(state.onResponse(BidiUploadTestUtils.incremental(10))).isNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(10), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(-1), + () -> assertThat(state.confirmedBytes).isEqualTo(10), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(cumulative0)); + + ChunkSegment segment1 = BidiUploadTestUtils.createSegment(1); + Crc32cLengthKnown cumulative1 = cumulative0.concat(segment1.getCrc32c()); + assertThat(state.offer(segment1)).isTrue(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(11), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(-1), + () -> assertThat(state.confirmedBytes).isEqualTo(10), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(cumulative1)); + BidiUploadTestUtils.sinkToList(state); + assertThat(state.lastSentRequestIndex).isEqualTo(0); + + ChunkSegment segment2 = BidiUploadTestUtils.createSegment(2); + Crc32cLengthKnown cumulative2 = cumulative1.concat(segment2.getCrc32c()); + assertThat(state.offer(segment2)).isTrue(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(13), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(0), + () -> assertThat(state.confirmedBytes).isEqualTo(10), + () -> assertThat(state.isFinalizing()).isFalse(), + 
() -> assertThat(state.cumulativeCrc32c).isEqualTo(cumulative2)); + BidiUploadTestUtils.sinkToList(state); + assertThat(state.lastSentRequestIndex).isEqualTo(1); + + BidiUploadTestUtils.sinkToList(state); + assertThat(state.onResponse(BidiUploadTestUtils.incremental(11))).isNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(13), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(0), + () -> assertThat(state.confirmedBytes).isEqualTo(11), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(cumulative2)); + } + + @Test + public void complexScenario_2_retries() throws Exception { + ChecksummedTestContent b_10 = ChecksummedTestContent.of("ABCDEFGHIJ"); + Crc32cLengthKnown cumulative0 = Crc32cValue.of(b_10.getCrc32c(), b_10.length()); + ChecksummedTestContent abc = b_10.slice(0, 3); + ChecksummedTestContent def = b_10.slice(3, 3); + ChecksummedTestContent ghi = b_10.slice(6, 3); + ChecksummedTestContent j = b_10.slice(9, 1); + + AppendableUploadState state = + appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 50_000, + SettableApiFuture.create(), + Crc32cValue.zero()); + GrpcCallContext actualCtx = state.enqueueFirstMessageAndGetGrpcCallContext(); + assertThat(actualCtx).isNotNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(0), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(-1), + () -> assertThat(state.confirmedBytes).isEqualTo(-1), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(Crc32cValue.zero())); + + assertThat(state.offer(BidiUploadTestUtils.createSegment(abc.asChecksummedData()))).isTrue(); + assertThat(BidiUploadTestUtils.sinkToList(state)).hasSize(1); + assertThat(state.lastSentRequestIndex).isEqualTo(1); + + assertThat(state.offer(BidiUploadTestUtils.createSegment(def.asChecksummedData()))).isTrue(); + assertThat(BidiUploadTestUtils.sinkToList(state)).hasSize(1); + 
assertThat(state.lastSentRequestIndex).isEqualTo(2); + + assertThat(state.offer(BidiUploadTestUtils.createSegment(ghi.asChecksummedData()))).isTrue(); + assertThat(BidiUploadTestUtils.sinkToList(state)).hasSize(1); + assertThat(state.lastSentRequestIndex).isEqualTo(3); + + assertThat(state.offer(BidiUploadTestUtils.createSegment(j.asChecksummedData()))).isTrue(); + assertThat(state.offer(flushOffset(10))).isTrue(); + assertThat(BidiUploadTestUtils.sinkToList(state)).hasSize(1); + assertThat(state.lastSentRequestIndex).isEqualTo(5); + + // send incremental response, ack'ing 3 bytes + assertThat(state.onResponse(resourceFor(abc))).isNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(10), + () -> assertThat(state.confirmedBytes).isEqualTo(3), + () -> assertThat(state.isFinalizing()).isFalse()); + + // error returned, transition to pending retry + state.pendingRetry(); + // error is retryable, and backoff has elapsed, transition to retrying + state.retrying(); + // resolve the opening request and call context + // todo: better method name + state.enqueueFirstMessageAndGetGrpcCallContext(); + + BidiWriteObjectRequest reconnect = + BidiWriteObjectRequest.newBuilder() + .setStateLookup(true) + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket(appendRequestNew.getWriteObjectSpec().getResource().getBucket()) + .setObject(appendRequestNew.getWriteObjectSpec().getResource().getName()) + .setGeneration(1) + .build()) + .build(); + assertThat(state.lastSentRequestIndex).isEqualTo(-1); + ApiFuture reconciliation = state.beginReconciliation(); + assertThat(BidiUploadTestUtils.sinkToList(state)).isEqualTo(ImmutableList.of(reconnect)); + + assertThat(state.onResponse(BidiUploadTestUtils.incremental(6))).isNull(); + reconciliation.get(137, TimeUnit.MILLISECONDS); + + assertThat(BidiUploadTestUtils.sinkToList(state)).hasSize(2); + } + + @Test + public void resultFutureNotResolvedForResourceWithoutFinalizeTime() throws Exception { + 
AppendableUploadState state = + appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 50_000, + SettableApiFuture.create(), + Crc32cValue.zero()); + GrpcCallContext actualCtx = state.enqueueFirstMessageAndGetGrpcCallContext(); + assertThat(actualCtx).isNotNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(0), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(-1), + () -> assertThat(state.confirmedBytes).isEqualTo(-1), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(Crc32cValue.zero())); + + // simulate a scenario where multiple messages are sent to gcs before we receive any + // resource response. Even if this resource is the expected size, we should not resolved the + // result future when it doesn't have finalize_time set. + ChecksummedTestContent b_10 = ChecksummedTestContent.gen(10); + Crc32cLengthKnown cumulative0 = Crc32cValue.of(b_10.getCrc32c(), b_10.length()); + b_10.chunkup(1).stream() + .map(ChecksummedTestContent::asChecksummedData) + .map(BidiUploadTestUtils::createSegment) + .forEach(c -> assertThat(state.offer(c)).isTrue()); + assertThat(state.offer(finishAt(10))).isTrue(); + assertThat(state.onResponse(resourceFor(appendRequestNew, 10))).isNull(); + assertThat(state.getResultFuture().isDone()).isFalse(); + + BidiWriteObjectResponse response = + resourceFor( + appendRequestNew, + b -> + b.setSize(10) + .setFinalizeTime(timestampNow()) + .setChecksums( + ObjectChecksums.newBuilder().setCrc32C(b_10.getCrc32c()).build())); + assertThat(state.onResponse(response)).isNull(); + assertThat(state.getResultFuture().isDone()).isTrue(); + assertThat(state.getResultFuture().get()).isEqualTo(response); + } + + private Range range(int min, int maxExclusive) { + return Range.range(min, BoundType.CLOSED, maxExclusive, BoundType.OPEN); + } + } + + public static final class StreamingStreamTest { + + public static final int MAX_REDIRECTS_ALLOWED = 3; + @Rule public 
final TestName name = new TestName(); + + @Test + public void simple() throws InterruptedException { + SettableApiFuture resultFuture = SettableApiFuture.create(); + BidiUploadState state = + BidiUploadState.appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 2 * 1024 * 1024, + SettableApiFuture.create(), + Crc32cValue.zero()); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + adaptOnlySend( + respond -> + request -> { + long writeOffset = request.getWriteOffset(); + ByteString content = request.getChecksummedData().getContent(); + respond.onResponse( + BidiWriteObjectResponse.newBuilder() + .setPersistedSize(writeOffset + content.size()) + .build()); + }), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + ChecksummedTestContent content = + ChecksummedTestContent.of( + DataGenerator.base64Characters().genBytes(4 * 1024 * 1024 + 17)); + List chunked = content.chunkup(2 * 1024 * 1024); + + for (ChecksummedTestContent checksummedTestContent : chunked) { + int attemptCounter = 0; + boolean accepted; + do { + attemptCounter++; + accepted = + stream.append( + BidiUploadTestUtils.createSegment(checksummedTestContent.asChecksummedData())); + if (!accepted) { + if (attemptCounter == 3) { + fail(); + } + Thread.sleep(300); + } + } while (!accepted); + } + } + + @Test + public void finishWrite_emptyObject() + throws InterruptedException, ExecutionException, TimeoutException { + SettableApiFuture resultFuture = SettableApiFuture.create(); + AppendableUploadState state = + BidiUploadState.appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 2 * 1024 * 1024, + SettableApiFuture.create(), + Crc32cValue.zero()); + + ObjectChecksums expectedObjectChecksums = + ObjectChecksums.newBuilder().setCrc32C(Crc32cValue.zero().getValue()).build(); + + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + 
RetryContext.directScheduledExecutorService(), + adaptOnlySend( + respond -> + request -> { + if (request.equals(appendRequestNew)) { + respond.onResponse(BidiUploadTestUtils.incremental(0)); + } else { + assertThat(request.getFinishWrite()).isTrue(); + long writeOffset = request.getWriteOffset(); + assertThat(writeOffset).isEqualTo(0); + ObjectChecksums objectChecksums = request.getObjectChecksums(); + assertThat(objectChecksums).isEqualTo(expectedObjectChecksums); + respond.onResponse( + BidiWriteObjectResponse.newBuilder() + .setResource( + appendRequestNew + .getWriteObjectSpec() + .getResource() + .toBuilder() + .setGeneration(1) + .setChecksums(objectChecksums) + .setFinalizeTime(timestampNow()) + .build()) + .build()); + respond.onComplete(); + } + }), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + stream.finishWrite(0); + BidiWriteObjectResponse response = stream.getResultFuture().get(3, TimeUnit.SECONDS); + + assertThat(response.hasResource()).isTrue(); + Object resource = response.getResource(); + assertThat(resource.getSize()).isEqualTo(0); + assertThat(resource.getChecksums()).isEqualTo(expectedObjectChecksums); + assertThat(resource.getGeneration()).isGreaterThan(0); + assertThat(state.peekLast()).isNull(); + } + + @Test + public void finishWrite_2MessageObject() + throws InterruptedException, ExecutionException, TimeoutException { + AppendableUploadState state = + BidiUploadState.appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 2 * 1024 * 1024, + SettableApiFuture.create(), + Crc32cValue.zero()); + + ObjectChecksums expectedObjectChecksums = + ObjectChecksums.newBuilder() + .setCrc32C(content.slice(0, 20).asChecksummedData().getCrc32C()) + .build(); + + BidiWriteObjectRequest baseWith00 = + appendRequestNew.toBuilder().mergeFrom(onlyBytes_00).build(); + BidiWriteObjectRequest expectedFinish = + BidiWriteObjectRequest.newBuilder() + .setFinishWrite(true) + .setWriteOffset(20) + .setObjectChecksums( + 
ObjectChecksums.newBuilder().setCrc32C(content.slice(0, 20).getCrc32c()).build()) + .build(); + + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + adaptOnlySend( + respond -> + request -> { + if (request.equals(baseWith00)) { + respond.onResponse(BidiUploadTestUtils.incremental(10)); + } else if (request.equals(onlyBytes_10)) { + respond.onResponse(BidiUploadTestUtils.incremental(20)); + } else if (request.equals(expectedFinish)) { + respond.onResponse( + BidiWriteObjectResponse.newBuilder() + .setResource( + appendRequestNew + .getWriteObjectSpec() + .getResource() + .toBuilder() + .setSize(20) + .setGeneration(1) + .setChecksums(expectedFinish.getObjectChecksums()) + .setFinalizeTime(timestampNow()) + .build()) + .build()); + // respond.onComplete(); + } else { + respond.onError( + FakeStorage.unexpectedRequest( + request, + ImmutableList.of(baseWith00, onlyBytes_10, expectedFinish))); + } + }), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + assertThat( + stream.append( + BidiUploadTestUtils.createSegment(content.slice(0, 10).asChecksummedData()))) + .isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(10); + assertThat( + stream.append( + BidiUploadTestUtils.createSegment(content.slice(10, 10).asChecksummedData()))) + .isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(stream.finishWrite(20)).isTrue(); + BidiWriteObjectResponse response = stream.getResultFuture().get(3, TimeUnit.SECONDS); + + assertThat(response.hasResource()).isTrue(); + Object resource = response.getResource(); + assertThat(resource.getSize()).isEqualTo(20); + assertThat(resource.getChecksums()).isEqualTo(expectedObjectChecksums); + assertThat(resource.getGeneration()).isGreaterThan(0); + assertThat(state.peekFirst()).isNull(); + assertThat(state.peekLast()).isNull(); + } + + @Test + public void appendDoesNotSendWhenStateDoesNotAcceptOffer() { + BidiUploadState state = 
+ new BidiUploadState(name.getMethodName()) { + @Override + public boolean offer(@NonNull ChunkSegment data) { + return false; + } + }; + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + alwaysErrorBidiStreamingCallable(Status.UNIMPLEMENTED), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + assertThat(stream.append(BidiUploadTestUtils.createSegment(content.asChecksummedData()))) + .isFalse(); + } + + @Test + public void finishWriteDoesNotSendWhenStateDoesNotAcceptOffer() { + BidiUploadState state = + new BidiUploadState(name.getMethodName()) { + @Override + public boolean offer(@NonNull BidiWriteObjectRequest e) { + return false; + } + + @Override + Crc32cValue.@Nullable Crc32cLengthKnown getCumulativeCrc32c() { + return Crc32cValue.zero(); + } + + @Override + boolean isFinalizing() { + return false; + } + + @Override + long getTotalSentBytes() { + return 0; + } + }; + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + alwaysErrorBidiStreamingCallable(Status.UNIMPLEMENTED), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + assertThat(stream.finishWrite(0)).isFalse(); + } + + @Test + public void available() { + AtomicLong available = new AtomicLong(2 * 1024 * 1024); + BidiUploadState state = + new BidiUploadState(name.getMethodName()) { + @Override + public long availableCapacity() { + return available.get(); + } + }; + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + alwaysErrorBidiStreamingCallable(Status.UNIMPLEMENTED), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + assertThat(stream.availableCapacity()).isEqualTo(2 * 1024 * 1024); + available.set(MAX_REDIRECTS_ALLOWED); + assertThat(stream.availableCapacity()).isEqualTo(MAX_REDIRECTS_ALLOWED); + } + + @Test + public void redirect() throws 
ExecutionException, InterruptedException, TimeoutException { + SettableApiFuture resultFuture = SettableApiFuture.create(); + BaseUploadState state = + BidiUploadState.appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 20, + SettableApiFuture.create(), + Crc32cValue.zero()); + BidiWriteObjectRequest expectedRedirectRequest1 = + BidiWriteObjectRequest.newBuilder() + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket("projects/_/buckets/b") + .setObject("o") + .setGeneration(1) + .setRoutingToken("token") + .setWriteHandle( + BidiWriteHandle.newBuilder() + .setHandle(ByteString.copyFromUtf8("handle")) + .build()) + .build()) + .setStateLookup(true) + .build(); + BidiWriteObjectRequest baseWith00 = + appendRequestNew.toBuilder().mergeFrom(onlyBytes_00).build(); + BidiWriteObjectRequest finish_20 = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(20) + .setFinishWrite(true) + .setObjectChecksums( + ObjectChecksums.newBuilder().setCrc32C(content.slice(0, 20).getCrc32c()).build()) + .build(); + BidiWriteObjectRequest finish_20with10 = + finish_20.toBuilder().mergeFrom(onlyBytes_10).build(); + AtomicInteger bytes10SeenCount = new AtomicInteger(0); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + adaptOnlySend( + respond -> + request -> { + if (request.equals(baseWith00)) { + respond.onResponse(BidiUploadTestUtils.incremental(10)); + } else if (request.equals(onlyBytes_10)) { + int i = bytes10SeenCount.getAndIncrement(); + if (i == 0) { + BidiWriteObjectRedirectedError redirect = + BidiWriteObjectRedirectedError.newBuilder() + .setWriteHandle( + BidiWriteHandle.newBuilder() + .setHandle(ByteString.copyFromUtf8("handle")) + .build()) + .setRoutingToken("token") + .setGeneration(1) + .build(); + + com.google.rpc.Status grpcStatusDetails = + com.google.rpc.Status.newBuilder() + .setCode(Code.ABORTED_VALUE) + .setMessage("redirect") + 
.addDetails(Any.pack(redirect)) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + StatusRuntimeException statusRuntimeException = + Status.ABORTED + .withDescription("redirect") + .asRuntimeException(trailers); + respond.onError( + ApiExceptionFactory.createException( + statusRuntimeException, + GrpcStatusCode.of(Status.Code.ABORTED), + true, + ErrorDetails.builder() + .setRawErrorMessages(grpcStatusDetails.getDetailsList()) + .build())); + } else { + respond.onResponse(BidiUploadTestUtils.incremental(10)); + } + } else if (request.equals(expectedRedirectRequest1)) { + respond.onResponse(BidiUploadTestUtils.incremental(10)); + } else if (request.equals(finish_20) || request.equals(finish_20with10)) { + respond.onResponse( + BidiWriteObjectResponse.newBuilder() + .setResource( + Object.newBuilder() + .setBucket("projects/_/buckets/b") + .setName("o") + .setGeneration(1) + .setSize(20) + .setFinalizeTime(timestampNow()) + .build()) + .build()); + respond.onComplete(); + } else { + respond.onError( + FakeStorage.unexpectedRequest( + request, + ImmutableList.of( + baseWith00, + onlyBytes_10, + expectedRedirectRequest1, + finish_20, + finish_20with10))); + } + }), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + assertThat( + stream.append(BidiUploadTestUtils.createSegment(onlyBytes_00.getChecksummedData()))) + .isTrue(); + assertThat( + stream.append(BidiUploadTestUtils.createSegment(onlyBytes_10.getChecksummedData()))) + .isTrue(); + assertThat(stream.finishWrite(20)).isTrue(); + BidiWriteObjectResponse response = stream.getResultFuture().get(1_500, TimeUnit.MILLISECONDS); + assertThat(response.hasResource()).isTrue(); + assertThat(response.getResource().getSize()).isEqualTo(20); + } + + @Test + public void canNotOpenStreamAfterFirstOpenButCanEnqueueForBackgroundRetry() { + SettableApiFuture resultFuture = SettableApiFuture.create(); + AtomicInteger streamOpenCounter = new AtomicInteger(); + 
BidiUploadState state = + appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 20, + SettableApiFuture.create(), + Crc32cValue.zero()); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + adaptOnlySend( + respond -> { + streamOpenCounter.getAndIncrement(); + return request -> {}; + }), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + assertThat( + stream.append(BidiUploadTestUtils.createSegment(onlyBytes_00.getChecksummedData()))) + .isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(10); + // TODO: remove when state reconciliation is better + state.updateStateFromResponse(BidiUploadTestUtils.incremental(0)); + stream.reset(); + assertThat( + stream.append(BidiUploadTestUtils.createSegment(onlyBytes_10.getChecksummedData()))) + .isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(stream.finishWrite(20)).isTrue(); + assertThat(streamOpenCounter.get()).isEqualTo(1); + } + + @Test + public void reset_forwardsAnyUncaughtThrowableToRetryContext() { + AtomicBoolean recordErrorCalled = new AtomicBoolean(false); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + new BidiUploadState(name.getMethodName()) { + @Override + void pendingRetry() { + //noinspection DataFlowIssue + checkState(false, "bad state"); + } + + @Override + long getTotalSentBytes() { + return 0; + } + + @Override + boolean offer(@NonNull BidiWriteObjectRequest e) { + return true; + } + + @Override + State getState() { + return State.INITIALIZING; + } + + @Override + @NonNull GrpcCallContext enqueueFirstMessageAndGetGrpcCallContext() { + return GrpcCallContext.createDefault(); + } + + @Override + @Nullable BidiWriteObjectRequest peekLast() { + return null; + } + + @Override + void sendVia(Consumer consumer) {} + }, + RetryContext.directScheduledExecutorService(), + adaptOnlySend(respond -> request -> {}), + /* maxRedirectsAllowed= */ 3, + new 
RetryContext() { + @Override + public boolean inBackoff() { + return false; + } + + @Override + public void reset() {} + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + assertThat(t).isInstanceOf(IllegalStateException.class); + recordErrorCalled.set(true); + } + }); + + stream.flush(); + stream.reset(); + + assertThat(recordErrorCalled.get()).isTrue(); + } + + @Test + public void restart_reconciliationErrorPropagation_failure() throws Exception { + SettableApiFuture beginReconciliation = SettableApiFuture.create(); + RuntimeException boomBoom = new RuntimeException("boom boom"); + + AtomicBoolean recordErrorCalled = new AtomicBoolean(false); + AtomicInteger sendViaCallCount = new AtomicInteger(0); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + new BidiUploadState(name.getMethodName()) { + @Override + void retrying() {} + + @Override + ApiFuture beginReconciliation() { + return beginReconciliation; + } + + @Override + State getState() { + return State.INITIALIZING; + } + + @Override + void pendingRetry() {} + + @Override + @NonNull GrpcCallContext enqueueFirstMessageAndGetGrpcCallContext() { + return GrpcCallContext.createDefault(); + } + + @Override + void sendVia(Consumer consumer) { + sendViaCallCount.getAndIncrement(); + } + }, + RetryContext.directScheduledExecutorService(), + adaptOnlySend(respond -> request -> {}), + /* maxRedirectsAllowed= */ 3, + new RetryContext() { + @Override + public boolean inBackoff() { + return false; + } + + @Override + public void reset() {} + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + assertThat(t).isSameInstanceAs(boomBoom); + recordErrorCalled.set(true); + } + }); + + stream.restart(); + beginReconciliation.setException(boomBoom); + + assertAll( + () -> assertThat(recordErrorCalled.get()).isTrue(), + () -> assertThat(sendViaCallCount.get()).isEqualTo(1)); + } + + @Test + public void 
restart_reconciliationErrorPropagation_success() throws Exception { + SettableApiFuture beginReconciliation = SettableApiFuture.create(); + + AtomicBoolean recordErrorCalled = new AtomicBoolean(false); + AtomicInteger sendViaCallCount = new AtomicInteger(0); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + new BidiUploadState(name.getMethodName()) { + @Override + void retrying() {} + + @Override + ApiFuture beginReconciliation() { + return beginReconciliation; + } + + @Override + State getState() { + return State.INITIALIZING; + } + + @Override + @NonNull GrpcCallContext enqueueFirstMessageAndGetGrpcCallContext() { + return GrpcCallContext.createDefault(); + } + + @Override + void sendVia(Consumer consumer) { + sendViaCallCount.getAndIncrement(); + } + }, + RetryContext.directScheduledExecutorService(), + adaptOnlySend(respond -> request -> {}), + /* maxRedirectsAllowed= */ 3, + new RetryContext() { + @Override + public boolean inBackoff() { + return false; + } + + @Override + public void reset() {} + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + fail("unexpected recordError call"); + } + }); + + stream.restart(); + beginReconciliation.set(null); + + assertAll(() -> assertThat(sendViaCallCount.get()).isEqualTo(2)); + } + + /** + * imagine a reconciliation that happens across multiple retries or redirects. The stream would + * attempt to register its reconciliation callback. Make sure it's only actually registered + * once. 
+ */ + @Test + public void longRunningReconciliationFailureOnlyReportsToRetryContextOnce() throws Exception { + SettableApiFuture resultFuture = SettableApiFuture.create(); + + BidiWriteObjectRequest flush3 = flushOffset(3); + List recordedErrors = Collections.synchronizedList(new ArrayList<>()); + AtomicInteger sendViaCallCount = new AtomicInteger(0); + AtomicInteger redirectCount = new AtomicInteger(0); + ScheduledExecutorService exec1 = Executors.newSingleThreadScheduledExecutor(); + ExecutorService exec2 = Executors.newCachedThreadPool(); + ScheduledExecutorService exec3 = Executors.newSingleThreadScheduledExecutor(); + RetryContext retryContext = + RetryContext.of(exec3, defaultRetryingDeps(), Retrying.neverRetry(), Jitterer.noJitter()); + CountDownLatch cdl = new CountDownLatch(2); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 15, + resultFuture, + Crc32cValue.zero()), + exec1, + adaptOnlySend( + respond -> + request -> + exec2.execute( + () -> + respond.onError( + packRedirectIntoAbortedException( + makeRedirect( + String.format( + "{redirect_%02d}", + redirectCount.incrementAndGet())))))), + /* maxRedirectsAllowed= */ 3, + new RetryContext() { + @Override + public boolean inBackoff() { + return retryContext.inBackoff(); + } + + @Override + public void reset() { + retryContext.reset(); + } + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + recordedErrors.add(t); + retryContext.recordError(t, onSuccess, onFailure); + cdl.countDown(); + } + }); + + try { + stream.flush(); + assertThat(cdl.await(3, TimeUnit.SECONDS)).isTrue(); + ExecutionException ee = + assertThrows( + ExecutionException.class, () -> stream.getResultFuture().get(3, TimeUnit.SECONDS)); + assertThat(ee).hasCauseThat().isInstanceOf(StorageException.class); + assertThat(ee).hasCauseThat().hasCauseThat().isInstanceOf(AbortedException.class); + + ImmutableList 
errorsForAssertion = ImmutableList.copyOf(recordedErrors); + + assertAll( + () -> assertThat(redirectCount.get()).isEqualTo(4), + () -> assertThat(errorsForAssertion).hasSize(2), + () -> + assertThat( + errorsForAssertion.stream() + .filter(t -> t instanceof AbortedException) + .count()) + .isEqualTo(1), + () -> + assertThat( + errorsForAssertion.stream() + .filter(t -> t instanceof CancellationException) + .count()) + .isEqualTo(1)); + } finally { + exec3.shutdownNow(); + exec2.shutdownNow(); + exec1.shutdownNow(); + } + } + } + + public static final class BidiUploadStreamingStreamResponseObserverTest { + @Rule public final TestName name = new TestName(); + + @Test + public void onError() { + RetryContext retryContext = RetryContext.neverRetry(); + AtomicReference failure = new AtomicReference<>(); + @NonNull BidiUploadState state = + new BidiUploadStreamingStreamResponseObserverTest.TestState( + BidiUploadStreamingStreamResponseObserverTest.Flag.NOT_FINALIZING); + StreamingResponseObserver obs = + new StreamingResponseObserver( + state, retryContext, RetryContextTest.failOnSuccess(), failure::set); + obs.onStart(TestUtils.nullStreamController()); + + RuntimeException t = new RuntimeException("Kablamo~~~"); + obs.onError(t); + + assertThat(failure.get()).isSameInstanceAs(t); + } + + enum Flag { + FINALIZING, + NOT_FINALIZING + } + + private class TestState extends BidiUploadState { + private final BidiUploadStreamingStreamResponseObserverTest.Flag flag; + + private TestState(BidiUploadStreamingStreamResponseObserverTest.Flag flag) { + super(name.getMethodName()); + this.flag = flag; + } + + @Override + public boolean isFinalizing() { + return flag == BidiUploadStreamingStreamResponseObserverTest.Flag.FINALIZING; + } + + @Override + @Nullable BidiWriteObjectRequest peekLast() { + return BidiWriteObjectRequest.newBuilder() + .setChecksummedData( + ChecksummedTestContent.gen(Math.toIntExact(getTotalSentBytes())) + .asChecksummedData()) + .build(); + } + + @Override + 
void updateStateFromResponse(BidiWriteObjectResponse response) { + fail("unexpected call to setConfirmedBytesOffset(" + response + ")"); + } + + @Override + long getTotalSentBytes() { + return 10; + } + } + } + + public static final class RedirectHandlingResponseObserverTest { + @Rule public final TestName name = new TestName(); + + @Test + public void tombstoned_noop() throws Exception { + RedirectHandlingResponseObserver obs = + new RedirectHandlingResponseObserver( + new BidiUploadState(name.getMethodName()) {}, + new TestResponseObserver(), + new AtomicInteger(0), + 3, + () -> fail("beforeRedirect()"), + () -> fail("onRedirect")); + obs.flagTombstoned(); + assertAll( + () -> obs.onStart(TestUtils.nullStreamController()), + () -> obs.onResponse(BidiUploadTestUtils.incremental(10)), + obs::onComplete, + () -> obs.onError(new RuntimeException("should not cause error"))); + } + + @Test + public void onError_shouldNotDelegateWhenARedirectErrorIsSpecified() { + BidiWriteObjectRedirectedError redirect = BidiUploadTestUtils.makeRedirect("routing-token"); + + AbortedException abortedException = + BidiUploadTestUtils.packRedirectIntoAbortedException(redirect); + + AtomicBoolean beforeRedirectCalled = new AtomicBoolean(false); + AtomicBoolean onRedirectCalled = new AtomicBoolean(false); + AtomicBoolean updateFromRedirectCalled = new AtomicBoolean(false); + + RedirectHandlingResponseObserver obs = + new RedirectHandlingResponseObserver( + new BidiUploadState(name.getMethodName()) { + @Override + void updateFromRedirect(@NonNull BidiWriteObjectRedirectedError r) { + assertThat(beforeRedirectCalled.get()).isTrue(); + assertThat(r).isEqualTo(redirect); + updateFromRedirectCalled.set(true); + } + }, + new TestResponseObserver(), + new AtomicInteger(0), + 3, + () -> beforeRedirectCalled.set(true), + () -> { + assertThat(beforeRedirectCalled.get()).isTrue(); + onRedirectCalled.set(true); + }); + + obs.onError(abortedException); + + 
assertThat(updateFromRedirectCalled.get()).isTrue(); + assertThat(onRedirectCalled.get()).isTrue(); + } + + @Test + public void onError_shouldDelegateWhenNoRedirectErrorIsSpecified() throws Exception { + + AbortedException abortedException = BidiUploadTestUtils.newAbortedException("{aborted}"); + + AtomicBoolean delegateOnErrorCalled = new AtomicBoolean(false); + + RedirectHandlingResponseObserver obs = + new RedirectHandlingResponseObserver( + new BidiUploadState(name.getMethodName()) {}, + new TestResponseObserver() { + @Override + public void onError(Throwable t) { + assertThat(t).isEqualTo(abortedException); + delegateOnErrorCalled.set(true); + } + }, + new AtomicInteger(0), + 3, + () -> fail("beforeRedirect()"), + () -> fail("onRedirect")); + + obs.onError(abortedException); + + assertThat(delegateOnErrorCalled.get()).isTrue(); + } + + @Test + public void onError_shouldDelegateWhenMaxRedirectsExceeded() throws Exception { + + BidiWriteObjectRedirectedError redirect1 = BidiUploadTestUtils.makeRedirect("{token 1}"); + BidiWriteObjectRedirectedError redirect2 = BidiUploadTestUtils.makeRedirect("{token 2}"); + BidiWriteObjectRedirectedError redirect3 = BidiUploadTestUtils.makeRedirect("{token 3}"); + BidiWriteObjectRedirectedError redirect4 = BidiUploadTestUtils.makeRedirect("{token 4}"); + AbortedException abortedException1 = + BidiUploadTestUtils.packRedirectIntoAbortedException(redirect1); + AbortedException abortedException2 = + BidiUploadTestUtils.packRedirectIntoAbortedException(redirect2); + AbortedException abortedException3 = + BidiUploadTestUtils.packRedirectIntoAbortedException(redirect3); + AbortedException abortedException4 = + BidiUploadTestUtils.packRedirectIntoAbortedException(redirect4); + + AtomicInteger beforeRedirectCalled = new AtomicInteger(0); + AtomicInteger onRedirectCalled = new AtomicInteger(0); + AtomicInteger onErrorCalled = new AtomicInteger(0); + + int maxRedirectsAllowed = 3; + // the closure passed to the constructor of obs needs 
to do things with the obs instance + // but obs hasn't finished initializing yet. make an indirect reference to it which can be + // accessed in the closure. + AtomicReference lifecycleIsDifficult = + new AtomicReference<>(); + List redirects = new ArrayList<>(); + RedirectHandlingResponseObserver obs = + new RedirectHandlingResponseObserver( + new BidiUploadState(name.getMethodName()) { + @Override + void updateFromRedirect(@NonNull BidiWriteObjectRedirectedError redirect) { + redirects.add(redirect); + } + }, + new TestResponseObserver() { + @Override + public void onError(Throwable t) { + assertThat(t).isEqualTo(abortedException4); + assertThat(t.getSuppressed()).hasLength(1); + assertThat(t.getSuppressed()[0]) + .isInstanceOf(MaxRedirectsExceededException.class); + onErrorCalled.getAndIncrement(); + } + }, + new AtomicInteger(0), + maxRedirectsAllowed, + beforeRedirectCalled::getAndIncrement, + () -> { + int i = onRedirectCalled.getAndIncrement(); + switch (i) { + case 0: + lifecycleIsDifficult.get().onError(abortedException2); + break; + case 1: + lifecycleIsDifficult.get().onError(abortedException3); + break; + case 2: + lifecycleIsDifficult.get().onError(abortedException4); + break; + default: + fail("invocation: " + i); + break; + } + }); + lifecycleIsDifficult.set(obs); + + obs.onError(abortedException1); + + assertAll( + () -> assertThat(beforeRedirectCalled.get()).isEqualTo(maxRedirectsAllowed), + () -> assertThat(onRedirectCalled.get()).isEqualTo(maxRedirectsAllowed), + () -> assertThat(onErrorCalled.get()).isEqualTo(1), + () -> assertThat(redirects).isEqualTo(ImmutableList.of(redirect1, redirect2, redirect3))); + } + + private static class TestResponseObserver implements ResponseObserver { + + @Override + public void onStart(StreamController controller) { + fail("onStart(" + controller + ")"); + } + + @Override + public void onResponse(BidiWriteObjectResponse response) { + fail("onResponse(" + fmtProto(response) + ")"); + } + + @Override + public void 
onError(Throwable t) { + fail("onError(" + t.getMessage() + ")"); + } + + @Override + public void onComplete() { + fail("onComplete()"); + } + } + } + + public static final class StreamRetryContextDecoratorTest { + @Test + public void onRecordError_calledBeforeRecordError() { + AtomicBoolean onRecordErrorCalled = new AtomicBoolean(false); + AtomicBoolean recordErrorCalled = new AtomicBoolean(false); + RetryContext ctx = + new RetryContext() { + @Override + public boolean inBackoff() { + return false; + } + + @Override + public void reset() {} + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + assertThat(onRecordErrorCalled.get()).isTrue(); + recordErrorCalled.set(true); + } + }; + StreamRetryContextDecorator dec = + new StreamRetryContextDecorator( + ctx, new ReentrantLock(), () -> onRecordErrorCalled.set(true)); + + dec.recordError( + new RuntimeException("blamo"), + RetryContextTest.failOnSuccess(), + RetryContextTest.failOnFailure()); + assertThat(recordErrorCalled.get()).isTrue(); + } + } + + public static final class StreamingResponseObserverTest { + @Rule public final TestName name = new TestName(); + + @Test + public void onResponse_stateErrorForwardedToRetryContext() { + AtomicBoolean recordErrorCalled = new AtomicBoolean(false); + StreamingResponseObserver obs = + new StreamingResponseObserver( + new BidiUploadState(name.getMethodName()) { + @Override + StorageException onResponse(BidiWriteObjectResponse response) { + return new StorageException(0, "test-error", null); + } + }, + new RetryContext() { + @Override + public boolean inBackoff() { + return false; + } + + @Override + public void reset() {} + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + assertThat(t).isInstanceOf(StorageException.class); + assertThat(((StorageException) t).getCode()).isEqualTo(0); + recordErrorCalled.set(true); + } + }, + RetryContextTest.failOnSuccess(), + RetryContextTest.failOnFailure()); 
+ + obs.onStart(TestUtils.nullStreamController()); + obs.onResponse(resourceWithSize(0)); + + assertThat(recordErrorCalled.get()).isTrue(); + } + + @Test + public void onResponse_exceptionFromStateOnResponseForwardedToRetryContext() { + AtomicBoolean recordErrorCalled = new AtomicBoolean(false); + StreamingResponseObserver obs = + new StreamingResponseObserver( + new BidiUploadState(name.getMethodName()) { + @Override + StorageException onResponse(BidiWriteObjectResponse response) { + //noinspection DataFlowIssue + checkState(false, "kblamo"); + return null; + } + }, + new RetryContext() { + @Override + public boolean inBackoff() { + return false; + } + + @Override + public void reset() {} + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + assertThat(t).isInstanceOf(IllegalStateException.class); + assertThat(t).hasMessageThat().contains("kblamo"); + recordErrorCalled.set(true); + } + }, + RetryContextTest.failOnSuccess(), + RetryContextTest.failOnFailure()); + + obs.onStart(TestUtils.nullStreamController()); + obs.onResponse(resourceWithSize(0)); + + assertThat(recordErrorCalled.get()).isTrue(); + } + + @Test + public void onResponse_resetsRetryContextToEnsureRetriesArePossibleForLongWrites() { + AtomicBoolean resetCalled = new AtomicBoolean(false); + AtomicBoolean recordErrorCalled = new AtomicBoolean(false); + StreamingResponseObserver obs = + new StreamingResponseObserver( + new BidiUploadState(name.getMethodName()) { + @Override + StorageException onResponse(BidiWriteObjectResponse response) { + return null; + } + }, + new RetryContext() { + @Override + public boolean inBackoff() { + return false; + } + + @Override + public void reset() { + resetCalled.set(true); + } + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + recordErrorCalled.set(true); + } + }, + RetryContextTest.failOnSuccess(), + RetryContextTest.failOnFailure()); + + obs.onStart(TestUtils.nullStreamController()); 
+ obs.onResponse(resourceWithSize(0)); + + assertThat(resetCalled.get()).isTrue(); + assertThat(recordErrorCalled.get()).isFalse(); + } + } + + static BidiWriteObjectRequest flushOffset(long offset) { + return onlyFlush.toBuilder().setWriteOffset(offset).setStateLookup(true).build(); + } + + static @NonNull BidiWriteObjectResponse resourceWithSize(int size) { + return resourceFor(appendRequestNew, size); + } + + static @NonNull BidiWriteObjectResponse resourceFor(ChecksummedTestContent ctc) { + return resourceFor( + appendRequestNew, + b -> + b.setSize(ctc.length()) + .setChecksums(ObjectChecksums.newBuilder().setCrc32C(ctc.getCrc32c()).build())); + } + + static @NonNull BidiWriteObjectResponse resourceFor(BidiWriteObjectRequest req, long size) { + return resourceFor(req, b -> b.setSize(size)); + } + + static @NonNull BidiWriteObjectResponse resourceFor( + BidiWriteObjectRequest req, UnaryOperator f) { + Object.Builder b = Object.newBuilder(); + if (req.hasWriteObjectSpec()) { + WriteObjectSpec spec = req.getWriteObjectSpec(); + b.setBucket(spec.getResource().getBucket()) + .setName(spec.getResource().getName()) + .setGeneration(1); + } else if (req.hasAppendObjectSpec()) { + AppendObjectSpec spec = req.getAppendObjectSpec(); + b.setBucket(spec.getBucket()).setName(spec.getObject()).setGeneration(spec.getGeneration()); + } else { + fail("Unhandled request shape: " + fmtProto(req)); + } + return BidiWriteObjectResponse.newBuilder().setResource(f.apply(b)).build(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTestUtils.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTestUtils.java new file mode 100644 index 000000000000..110fdd4510f4 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTestUtils.java @@ -0,0 +1,200 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.AbortedException; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientStream; +import com.google.api.gax.rpc.ClientStreamReadyObserver; +import com.google.api.gax.rpc.ErrorDetails; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.StreamController; +import com.google.cloud.storage.ChunkSegmenter.ChunkSegment; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.Message; +import com.google.protobuf.Timestamp; +import com.google.rpc.Code; +import com.google.storage.v2.BidiWriteObjectRedirectedError; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.ChecksummedData; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.nio.ByteBuffer; +import java.time.OffsetDateTime; +import java.util.List; +import java.util.function.Function; +import org.checkerframework.checker.nullness.qual.NonNull; + +final class BidiUploadTestUtils 
{ + + private BidiUploadTestUtils() {} + + static @NonNull BidiWriteObjectRedirectedError makeRedirect(String routingToken) { + return BidiWriteObjectRedirectedError.newBuilder() + .setRoutingToken(routingToken) + .setGeneration(1) + .build(); + } + + static @NonNull AbortedException newAbortedException(String message) { + return new AbortedException(message, null, GrpcStatusCode.of(Status.Code.ABORTED), false); + } + + static @NonNull AbortedException packRedirectIntoAbortedException( + BidiWriteObjectRedirectedError redirect) { + String description = fmtProto(redirect); + com.google.rpc.Status grpcStatusDetails = + com.google.rpc.Status.newBuilder() + .setCode(Code.ABORTED_VALUE) + .setMessage(description) + .addDetails(Any.pack(redirect)) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(TestUtils.GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + StatusRuntimeException statusRuntimeException = + Status.ABORTED.withDescription(description).asRuntimeException(trailers); + ErrorDetails errorDetails = + ErrorDetails.builder().setRawErrorMessages(grpcStatusDetails.getDetailsList()).build(); + return new AbortedException( + statusRuntimeException, GrpcStatusCode.of(Status.Code.ABORTED), true, errorDetails); + } + + static @NonNull BidiWriteObjectResponse incremental(long persistedSize) { + return BidiWriteObjectResponse.newBuilder().setPersistedSize(persistedSize).build(); + } + + static ChunkSegment createSegment(int length) { + return createSegment(ChecksummedTestContent.gen(length).asChecksummedData()); + } + + static ChunkSegment createSegment(ChecksummedData cd) { + ByteString content = cd.getContent(); + ChunkSegmenter segmenter = + new ChunkSegmenter( + Hasher.enabled(), ByteStringStrategy.copy(), content.size(), content.size()); + ChunkSegment[] segments = + segmenter.segmentBuffers(new ByteBuffer[] {content.asReadOnlyByteBuffer()}); + assertThat(segments).hasLength(1); + return segments[0]; + } + + static List sinkToList(BidiUploadState state) { 
+ ImmutableList.Builder b = ImmutableList.builder(); + state.sendVia(b::add); + return b.build(); + } + + static @NonNull BidiWriteObjectRequest finishAt(int totalOffset) { + return BidiWriteObjectRequest.newBuilder() + .setFinishWrite(true) + .setWriteOffset(totalOffset) + .build(); + } + + static BidiWriteObjectRequest withRedirectToken( + BidiWriteObjectRequest redirectReconcile, String routingToken) { + BidiWriteObjectRequest.Builder b = redirectReconcile.toBuilder(); + b.getAppendObjectSpecBuilder().setRoutingToken(routingToken); + return b.build(); + } + + static BidiWriteObjectRequest withFlushAndStateLookup(BidiWriteObjectRequest orig) { + return orig.toBuilder().setFlush(true).setStateLookup(true).build(); + } + + static Timestamp timestampNow() { + return Conversions.grpc().timestampCodec.encode(OffsetDateTime.now()); + } + + static BidiStreamingCallable + alwaysErrorBidiStreamingCallable(Status status) { + return adaptOnlySend(respond -> request -> respond.onError(status.asRuntimeException())); + } + + static BidiStreamingCallable adaptOnlySend( + Function, OnlySendClientStream> func) { + return adapt(func::apply); + } + + static BidiStreamingCallable adapt( + Function, ClientStream> func) { + return adapt( + (respond, onReady, context) -> { + ClientStream clientStream = func.apply(respond); + StreamController controller = TestUtils.nullStreamController(); + respond.onStart(controller); + return clientStream; + }); + } + + /** + * BidiStreamingCallable isn't functional even though it's a single abstract method. + * + *

Define a method that can adapt a TriFunc as the required implementation of {@link + * BidiStreamingCallable#internalCall(ResponseObserver, ClientStreamReadyObserver, + * ApiCallContext)}. + * + *

Saves several lines of boilerplate in each test. + */ + static BidiStreamingCallable adapt( + TriFunc< + ResponseObserver, + ClientStreamReadyObserver, + ApiCallContext, + ClientStream> + func) { + return new BidiStreamingCallable() { + @Override + public ClientStream internalCall( + ResponseObserver respond, + ClientStreamReadyObserver onReady, + ApiCallContext context) { + return func.apply(respond, onReady, context); + } + }; + } + + @FunctionalInterface + interface TriFunc { + R apply(A a, B b, C c); + } + + @FunctionalInterface + interface OnlySendClientStream extends ClientStream { + @Override + default void closeSendWithError(Throwable t) {} + + @Override + default void closeSend() {} + + @Override + default boolean isSendReady() { + return true; + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobIdPropertyTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobIdPropertyTest.java new file mode 100644 index 000000000000..529273179b88 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobIdPropertyTest.java @@ -0,0 +1,77 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.PackagePrivateMethodWorkarounds.ifNonNull; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.Conversions.Codec; +import com.google.cloud.storage.jqwik.StorageArbitraries; +import com.google.storage.v2.BucketName; +import com.google.storage.v2.Object; +import net.jqwik.api.Arbitrary; +import net.jqwik.api.ArbitrarySupplier; +import net.jqwik.api.Combinators; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; + +final class BlobIdPropertyTest { + + @Property + void codecRoundTrip(@ForAll(supplier = ObjectRefs.class) Object message) { + Codec codec = Conversions.grpc().blobId(); + BlobId model = codec.decode(message); + Object proto = codec.encode(model); + + assertThat(proto).isEqualTo(message); + } + + @Property + void codecCompatibilityRoundTrip(@ForAll(supplier = ObjectRefs.class) Object p) { + Codec codecG = Conversions.grpc().blobId(); + Codec codecA = Conversions.json().blobId(); + + BlobId model = codecG.decode(p); + + StorageObject apiary = codecA.encode(model); + BlobId model2 = codecA.decode(apiary); + + Object actual = codecG.encode(model2); + + assertThat(actual).isEqualTo(p); + } + + private static final class ObjectRefs implements ArbitrarySupplier { + + @Override + public Arbitrary get() { + return Combinators.combine( + StorageArbitraries.objects().name(), + StorageArbitraries.buckets().name(), + StorageArbitraries.generation().injectNull(0.5)) + .as( + (n, b, g) -> { + Object.Builder out = Object.newBuilder(); + ifNonNull(n, out::setName); + ifNonNull(b, BucketName::toString, out::setBucket); + ifNonNull(g, out::setGeneration); + return out.build(); + }); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobIdTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobIdTest.java 
new file mode 100644 index 000000000000..37cb5a14865f --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobIdTest.java @@ -0,0 +1,69 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static org.junit.Assert.assertEquals; + +import org.junit.Test; + +public class BlobIdTest { + + private static final BlobId BLOB = BlobId.of("b", "n"); + + @Test + public void testOf() { + BlobId blobId = BlobId.of("b", "n"); + assertEquals("b", blobId.getBucket()); + assertEquals("n", blobId.getName()); + } + + @Test + public void testToFromGsUtilUri() { + BlobId blobId = BlobId.fromGsUtilUri("gs://bucket/path/to/blob"); + assertEquals("bucket", blobId.getBucket()); + assertEquals("path/to/blob", blobId.getName()); + assertEquals("gs://bucket/path/to/blob", blobId.toGsUtilUri()); + } + + @Test + public void testToFromGsUtilUriWithGeneration() { + BlobId blobId = BlobId.fromGsUtilUri("gs://bucket/path/to/blob#1360887697105000"); + assertEquals("bucket", blobId.getBucket()); + assertEquals("path/to/blob", blobId.getName()); + assertEquals(Long.valueOf(1360887697105000L), blobId.getGeneration()); + assertEquals("gs://bucket/path/to/blob", blobId.toGsUtilUri()); + assertEquals("gs://bucket/path/to/blob#1360887697105000", blobId.toGsUtilUriWithGeneration()); + } + + @Test + public void testEquals() { + compareBlobIds(BLOB, BlobId.of("b", "n")); + } + + 
private void compareBlobIds(BlobId expected, BlobId value) { + assertEquals(expected, value); + assertEquals(expected.getBucket(), value.getBucket()); + assertEquals(expected.getName(), value.getName()); + assertEquals(expected.hashCode(), value.hashCode()); + } + + @Test + public void testToPbAndFromPb() { + compareBlobIds( + BLOB, Conversions.json().blobId().decode(Conversions.json().blobId().encode(BLOB))); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobInfoPropertyTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobInfoPropertyTest.java new file mode 100644 index 000000000000..e70b07b32d17 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobInfoPropertyTest.java @@ -0,0 +1,23 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.services.storage.model.StorageObject; +import com.google.storage.v2.Object; + +final class BlobInfoPropertyTest + extends BaseConvertablePropertyTest {} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobInfoTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobInfoTest.java new file mode 100644 index 000000000000..6ecb483918ad --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobInfoTest.java @@ -0,0 +1,406 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.Acl.Project.ProjectRole.VIEWERS; +import static com.google.cloud.storage.Acl.Role.READER; +import static com.google.cloud.storage.Acl.Role.WRITER; +import static com.google.cloud.storage.TestUtils.hashMapOf; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.Acl.Project; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.BlobInfo.CustomerEncryption; +import com.google.cloud.storage.BlobInfo.ObjectContexts; +import com.google.cloud.storage.BlobInfo.ObjectCustomContextPayload; +import com.google.cloud.storage.UnifiedOpts.NamedField; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import java.math.BigInteger; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import org.junit.Test; + +public class BlobInfoTest { + + private static final List ACL = + ImmutableList.of( + Acl.of(User.ofAllAuthenticatedUsers(), READER), + Acl.of(new Project(VIEWERS, "p1"), WRITER)); + private static final Integer COMPONENT_COUNT = 2; + private static final String CONTENT_TYPE = "text/html"; + private static final String CACHE_CONTROL = "cache"; + private static final String CONTENT_DISPOSITION = "content-disposition"; + private static final String CONTENT_ENCODING = "UTF-8"; + private static final String CONTENT_LANGUAGE = "En"; + private static final String CRC32 = "FF00"; + private static final String CRC32_HEX_STRING = "145d34"; + private static final String CRC32_HEX_STRING_LEADING_ZEROS = "005d34"; + private static 
final String CRC32_BASE64_LEADING_ZEROS = "AF00"; + private static final Long DELETE_TIME = System.currentTimeMillis(); + private static final String ETAG = "0xFF00"; + private static final Long GENERATION = 1L; + private static final String GENERATED_ID = "B/N:1"; + private static final String MD5 = "FF00"; + private static final String MD5_HEX_STRING = "145d34"; + private static final String MD5_HEX_STRING_LEADING_ZEROS = "0006a7de52b4e0b82602ce09809523ca"; + private static final String MD5_BASE64_LEADING_ZEROS = "AAan3lK04LgmAs4JgJUjyg=="; + private static final String MEDIA_LINK = "http://media/b/n"; + private static final Map METADATA = ImmutableMap.of("n1", "v1", "n2", "v2"); + private static final Long META_GENERATION = 10L; + private static final User OWNER = new User("user@gmail.com"); + private static final String SELF_LINK = "http://storage/b/n"; + private static final Long SIZE = 1024L; + private static final Long UPDATE_TIME = DELETE_TIME - 1L; + private static final Long CREATE_TIME = UPDATE_TIME - 1L; + private static final Long CUSTOM_TIME = CREATE_TIME - 1L; + private static final String ENCRYPTION_ALGORITHM = "AES256"; + private static final String KEY_SHA256 = "keySha"; + private static final CustomerEncryption CUSTOMER_ENCRYPTION = + new CustomerEncryption(ENCRYPTION_ALGORITHM, KEY_SHA256); + private static final String KMS_KEY_NAME = + "projects/p/locations/kr-loc/keyRings/kr/cryptoKeys/key"; + private static final StorageClass STORAGE_CLASS = StorageClass.COLDLINE; + private static final Long TIME_STORAGE_CLASS_UPDATED = CREATE_TIME; + private static final Boolean EVENT_BASED_HOLD = true; + private static final Boolean TEMPORARY_HOLD = true; + private static final Long RETENTION_EXPIRATION_TIME = 10L; + private static final ObjectCustomContextPayload payload = + ObjectCustomContextPayload.newBuilder().setValue("contextValue").build(); + private static final Map customContexts = + Collections.singletonMap("contextKey", payload); + private 
static final ObjectContexts OBJECT_CONTEXTS = + ObjectContexts.newBuilder().setCustom(customContexts).build(); + + private static final BlobInfo BLOB_INFO = + BlobInfo.newBuilder("b", "n", GENERATION) + .setAcl(ACL) + .setComponentCount(COMPONENT_COUNT) + .setContentType(CONTENT_TYPE) + .setCacheControl(CACHE_CONTROL) + .setContentDisposition(CONTENT_DISPOSITION) + .setContentEncoding(CONTENT_ENCODING) + .setContentLanguage(CONTENT_LANGUAGE) + .setCustomerEncryption(CUSTOMER_ENCRYPTION) + .setCrc32c(CRC32) + .setDeleteTime(DELETE_TIME) + .setEtag(ETAG) + .setGeneratedId(GENERATED_ID) + .setMd5(MD5) + .setMediaLink(MEDIA_LINK) + .setMetadata(METADATA) + .setMetageneration(META_GENERATION) + .setOwner(OWNER) + .setSelfLink(SELF_LINK) + .setSize(SIZE) + .setUpdateTime(UPDATE_TIME) + .setCreateTime(CREATE_TIME) + .setCustomTime(CUSTOM_TIME) + .setStorageClass(STORAGE_CLASS) + .setTimeStorageClassUpdated(TIME_STORAGE_CLASS_UPDATED) + .setKmsKeyName(KMS_KEY_NAME) + .setEventBasedHold(EVENT_BASED_HOLD) + .setTemporaryHold(TEMPORARY_HOLD) + .setRetentionExpirationTime(RETENTION_EXPIRATION_TIME) + .setContexts(OBJECT_CONTEXTS) + .build(); + private static final BlobInfo DIRECTORY_INFO = + BlobInfo.newBuilder("b", "n/").setSize(0L).setIsDirectory(true).build(); + + @Test + public void testCustomerEncryption() { + assertEquals(ENCRYPTION_ALGORITHM, CUSTOMER_ENCRYPTION.getEncryptionAlgorithm()); + assertEquals(KEY_SHA256, CUSTOMER_ENCRYPTION.getKeySha256()); + } + + @Test + public void testToBuilder() { + compareBlobs(BLOB_INFO, BLOB_INFO.toBuilder().build()); + BlobInfo blobInfo = + BLOB_INFO.toBuilder().setBlobId(BlobId.of("b2", "n2")).setSize(200L).build(); + assertEquals("n2", blobInfo.getName()); + assertEquals("b2", blobInfo.getBucket()); + assertEquals(Long.valueOf(200), blobInfo.getSize()); + blobInfo = + blobInfo.toBuilder().setBlobId(BlobId.of("b", "n", GENERATION)).setSize(SIZE).build(); + compareBlobs(BLOB_INFO, blobInfo); + } + + @Test + public void 
testToBuilderSetMd5FromHexString() { + BlobInfo blobInfo = + BlobInfo.newBuilder(BlobId.of("b2", "n2")).setMd5FromHexString(MD5_HEX_STRING).build(); + assertEquals(MD5, blobInfo.getMd5()); + } + + @Test + public void testToBuilderSetMd5FromHexStringLeadingZeros() { + BlobInfo blobInfo = + BlobInfo.newBuilder(BlobId.of("b2", "n2")) + .setMd5FromHexString(MD5_HEX_STRING_LEADING_ZEROS) + .build(); + assertEquals(MD5_BASE64_LEADING_ZEROS, blobInfo.getMd5()); + } + + @Test + public void testToBuilderSetCrc32cFromHexString() { + BlobInfo blobInfo = + BlobInfo.newBuilder(BlobId.of("b2", "n2")).setCrc32cFromHexString(CRC32_HEX_STRING).build(); + assertEquals(CRC32, blobInfo.getCrc32c()); + } + + @Test + public void testToBuilderSetCrc32cFromHexStringLeadingZeros() { + BlobInfo blobInfo = + BlobInfo.newBuilder(BlobId.of("b2", "n2")) + .setCrc32cFromHexString(CRC32_HEX_STRING_LEADING_ZEROS) + .build(); + assertEquals(CRC32_BASE64_LEADING_ZEROS, blobInfo.getCrc32c()); + } + + @Test + public void testToBuilderIncomplete() { + BlobInfo incompleteBlobInfo = BlobInfo.newBuilder(BlobId.of("b2", "n2")).build(); + compareBlobs(incompleteBlobInfo, incompleteBlobInfo.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals("b", BLOB_INFO.getBucket()); + assertEquals("n", BLOB_INFO.getName()); + assertEquals(ACL, BLOB_INFO.getAcl()); + assertEquals(COMPONENT_COUNT, BLOB_INFO.getComponentCount()); + assertEquals(CONTENT_TYPE, BLOB_INFO.getContentType()); + assertEquals(CACHE_CONTROL, BLOB_INFO.getCacheControl()); + assertEquals(CONTENT_DISPOSITION, BLOB_INFO.getContentDisposition()); + assertEquals(CONTENT_ENCODING, BLOB_INFO.getContentEncoding()); + assertEquals(CONTENT_LANGUAGE, BLOB_INFO.getContentLanguage()); + assertEquals(CUSTOMER_ENCRYPTION, BLOB_INFO.getCustomerEncryption()); + assertEquals(CRC32, BLOB_INFO.getCrc32c()); + assertEquals(CRC32_HEX_STRING, BLOB_INFO.getCrc32cToHexString()); + assertEquals(DELETE_TIME, BLOB_INFO.getDeleteTime()); + 
assertEquals(ETAG, BLOB_INFO.getEtag()); + assertEquals(GENERATION, BLOB_INFO.getGeneration()); + assertEquals(GENERATED_ID, BLOB_INFO.getGeneratedId()); + assertEquals(MD5, BLOB_INFO.getMd5()); + assertEquals(MD5_HEX_STRING, BLOB_INFO.getMd5ToHexString()); + assertEquals(MEDIA_LINK, BLOB_INFO.getMediaLink()); + assertEquals(METADATA, BLOB_INFO.getMetadata()); + assertEquals(META_GENERATION, BLOB_INFO.getMetageneration()); + assertEquals(OWNER, BLOB_INFO.getOwner()); + assertEquals(SELF_LINK, BLOB_INFO.getSelfLink()); + assertEquals(SIZE, BLOB_INFO.getSize()); + assertEquals(UPDATE_TIME, BLOB_INFO.getUpdateTime()); + assertEquals(CREATE_TIME, BLOB_INFO.getCreateTime()); + assertEquals(CUSTOM_TIME, BLOB_INFO.getCustomTime()); + assertEquals(STORAGE_CLASS, BLOB_INFO.getStorageClass()); + assertEquals(TIME_STORAGE_CLASS_UPDATED, BLOB_INFO.getTimeStorageClassUpdated()); + assertEquals(KMS_KEY_NAME, BLOB_INFO.getKmsKeyName()); + assertEquals(EVENT_BASED_HOLD, BLOB_INFO.getEventBasedHold()); + assertEquals(TEMPORARY_HOLD, BLOB_INFO.getTemporaryHold()); + assertEquals(RETENTION_EXPIRATION_TIME, BLOB_INFO.getRetentionExpirationTime()); + assertFalse(BLOB_INFO.isDirectory()); + assertEquals("b", DIRECTORY_INFO.getBucket()); + assertEquals("n/", DIRECTORY_INFO.getName()); + assertNull(DIRECTORY_INFO.getAcl()); + assertNull(DIRECTORY_INFO.getComponentCount()); + assertNull(DIRECTORY_INFO.getContentType()); + assertNull(DIRECTORY_INFO.getCacheControl()); + assertNull(DIRECTORY_INFO.getContentDisposition()); + assertNull(DIRECTORY_INFO.getContentEncoding()); + assertNull(DIRECTORY_INFO.getContentLanguage()); + assertNull(DIRECTORY_INFO.getCustomerEncryption()); + assertNull(DIRECTORY_INFO.getCrc32c()); + assertNull(DIRECTORY_INFO.getCrc32cToHexString()); + assertNull(DIRECTORY_INFO.getCreateTime()); + assertNull(DIRECTORY_INFO.getCustomTime()); + assertNull(DIRECTORY_INFO.getDeleteTime()); + assertNull(DIRECTORY_INFO.getEtag()); + assertNull(DIRECTORY_INFO.getGeneration()); + 
assertNull(DIRECTORY_INFO.getGeneratedId()); + assertNull(DIRECTORY_INFO.getMd5()); + assertNull(DIRECTORY_INFO.getMd5ToHexString()); + assertNull(DIRECTORY_INFO.getMediaLink()); + assertNull(DIRECTORY_INFO.getMetadata()); + assertNull(DIRECTORY_INFO.getMetageneration()); + assertNull(DIRECTORY_INFO.getOwner()); + assertNull(DIRECTORY_INFO.getSelfLink()); + assertEquals(0L, (long) DIRECTORY_INFO.getSize()); + assertNull(DIRECTORY_INFO.getUpdateTime()); + assertTrue(DIRECTORY_INFO.isDirectory()); + } + + private void compareBlobs(BlobInfo expected, BlobInfo value) { + assertEquals(expected, value); + assertEquals(expected.getBucket(), value.getBucket()); + assertEquals(expected.getName(), value.getName()); + assertEquals(expected.getAcl(), value.getAcl()); + assertEquals(expected.getComponentCount(), value.getComponentCount()); + assertEquals(expected.getContentType(), value.getContentType()); + assertEquals(expected.getCacheControl(), value.getCacheControl()); + assertEquals(expected.getContentDisposition(), value.getContentDisposition()); + assertEquals(expected.getContentEncoding(), value.getContentEncoding()); + assertEquals(expected.getContentLanguage(), value.getContentLanguage()); + assertEquals(expected.getCustomerEncryption(), value.getCustomerEncryption()); + assertEquals(expected.getCrc32c(), value.getCrc32c()); + assertEquals(expected.getCrc32cToHexString(), value.getCrc32cToHexString()); + assertEquals(expected.getCreateTime(), value.getCreateTime()); + assertEquals(expected.getDeleteTime(), value.getDeleteTime()); + assertEquals(expected.getEtag(), value.getEtag()); + assertEquals(expected.getGeneration(), value.getGeneration()); + assertEquals(expected.getGeneratedId(), value.getGeneratedId()); + assertEquals(expected.getMd5(), value.getMd5()); + assertEquals(expected.getMd5ToHexString(), value.getMd5ToHexString()); + assertEquals(expected.getMediaLink(), value.getMediaLink()); + assertEquals(expected.getMetadata(), value.getMetadata()); + 
assertEquals(expected.getMetageneration(), value.getMetageneration()); + assertEquals(expected.getOwner(), value.getOwner()); + assertEquals(expected.getSelfLink(), value.getSelfLink()); + assertEquals(expected.getSize(), value.getSize()); + assertEquals(expected.getCustomTime(), value.getCustomTime()); + assertEquals(expected.getUpdateTime(), value.getUpdateTime()); + assertEquals(expected.getStorageClass(), value.getStorageClass()); + assertEquals(expected.getTimeStorageClassUpdated(), value.getTimeStorageClassUpdated()); + assertEquals(expected.getKmsKeyName(), value.getKmsKeyName()); + assertEquals(expected.getEventBasedHold(), value.getEventBasedHold()); + assertEquals(expected.getTemporaryHold(), value.getTemporaryHold()); + assertEquals(expected.getRetentionExpirationTime(), value.getRetentionExpirationTime()); + } + + private void compareCustomerEncryptions(CustomerEncryption expected, CustomerEncryption value) { + assertEquals(expected, value); + assertEquals(expected.getEncryptionAlgorithm(), value.getEncryptionAlgorithm()); + assertEquals(expected.getKeySha256(), value.getKeySha256()); + assertEquals(expected.hashCode(), value.hashCode()); + } + + @Test + public void testToPbAndFromPb() { + compareCustomerEncryptions( + CUSTOMER_ENCRYPTION, + Conversions.json() + .customerEncryption() + .decode(Conversions.json().customerEncryption().encode(CUSTOMER_ENCRYPTION))); + compareBlobs( + BLOB_INFO, + Conversions.json().blobInfo().decode(Conversions.json().blobInfo().encode(BLOB_INFO))); + BlobInfo blobInfo = BlobInfo.newBuilder(BlobId.of("b", "n")).build(); + compareBlobs( + blobInfo, + Conversions.json().blobInfo().decode(Conversions.json().blobInfo().encode(blobInfo))); + StorageObject object = + new StorageObject() + .setName("n/") + .setBucket("b") + .setSize(BigInteger.ZERO) + .set("isDirectory", true); + blobInfo = Conversions.json().blobInfo().decode(object); + assertEquals("b", blobInfo.getBucket()); + assertEquals("n/", blobInfo.getName()); + 
assertNull(blobInfo.getAcl()); + assertNull(blobInfo.getComponentCount()); + assertNull(blobInfo.getContentType()); + assertNull(blobInfo.getCacheControl()); + assertNull(blobInfo.getContentDisposition()); + assertNull(blobInfo.getContentEncoding()); + assertNull(blobInfo.getContentLanguage()); + assertNull(blobInfo.getCustomerEncryption()); + assertNull(blobInfo.getCrc32c()); + assertNull(blobInfo.getCrc32cToHexString()); + assertNull(blobInfo.getCreateTime()); + assertNull(blobInfo.getCustomTime()); + assertNull(blobInfo.getDeleteTime()); + assertNull(blobInfo.getEtag()); + assertNull(blobInfo.getGeneration()); + assertNull(blobInfo.getGeneratedId()); + assertNull(blobInfo.getMd5()); + assertNull(blobInfo.getMd5ToHexString()); + assertNull(blobInfo.getMediaLink()); + assertNull(blobInfo.getMetadata()); + assertNull(blobInfo.getMetageneration()); + assertNull(blobInfo.getOwner()); + assertNull(blobInfo.getSelfLink()); + assertEquals(0L, (long) blobInfo.getSize()); + assertNull(blobInfo.getUpdateTime()); + assertNull(blobInfo.getStorageClass()); + assertNull(blobInfo.getTimeStorageClassUpdated()); + assertNull(blobInfo.getKmsKeyName()); + assertNull(blobInfo.getEventBasedHold()); + assertNull(blobInfo.getTemporaryHold()); + assertNull(blobInfo.getRetentionExpirationTime()); + assertTrue(blobInfo.isDirectory()); + } + + @Test + public void testBlobId() { + assertEquals(BlobId.of("b", "n", GENERATION), BLOB_INFO.getBlobId()); + } + + @Test + public void deepFieldDiffDetectionWorksCorrectly_mutateRetrievedObject() { + BlobInfo info = + BlobInfo.newBuilder("bucket", "object") + .setContexts( + ObjectContexts.newBuilder() + .setCustom( + hashMapOf( + "c1", ObjectCustomContextPayload.newBuilder().setValue("C1").build(), + "c2", ObjectCustomContextPayload.newBuilder().setValue("C2").build())) + .build()) + .setMetadata( + hashMapOf( + "m1", "M1", + "m2", "M2")) + .build(); + + BlobInfo modified = + info.toBuilder() + .setMetadata(hashMapOf("m2", null)) + 
.setContexts(ObjectContexts.newBuilder().setCustom(hashMapOf("k2", null)).build()) + .build(); + Set modifiedFields = + modified.getModifiedFields().stream() + .map(NamedField::getGrpcName) + .collect(Collectors.toSet()); + + assertThat(modifiedFields).isEqualTo(ImmutableSet.of("contexts.custom.k2", "metadata.m2")); + } + + @Test + public void deepFieldDiffDetectionWorksCorrectly_declaredDiff() { + BlobInfo modified = + BlobInfo.newBuilder("bucket", "object") + .setMetadata(hashMapOf("m2", null)) + .setContexts(ObjectContexts.newBuilder().setCustom(hashMapOf("k2", null)).build()) + .build(); + Set modifiedFields = + modified.getModifiedFields().stream() + .map(UnifiedOpts.NamedField::getGrpcName) + .collect(Collectors.toSet()); + + assertThat(modifiedFields).isEqualTo(ImmutableSet.of("contexts.custom.k2", "metadata.m2")); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobTest.java new file mode 100644 index 000000000000..d52e1b7d6c65 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobTest.java @@ -0,0 +1,614 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiClock; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.ReadChannel; +import com.google.cloud.storage.Acl.Project; +import com.google.cloud.storage.Acl.Project.ProjectRole; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Blob.BlobSourceOption; +import com.google.cloud.storage.BlobInfo.BuilderImpl; +import com.google.cloud.storage.BlobInfo.ObjectContexts; +import com.google.cloud.storage.BlobInfo.ObjectCustomContextPayload; +import com.google.cloud.storage.Storage.CopyRequest; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.io.BaseEncoding; +import java.io.File; +import java.net.URL; +import java.nio.file.Path; +import java.security.Key; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.crypto.spec.SecretKeySpec; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + +public class BlobTest { + + private static final Acl ACL = Acl.of(User.ofAllAuthenticatedUsers(), Role.OWNER); + private static final Acl OTHER_ACL = Acl.of(new Project(ProjectRole.OWNERS, "p"), Role.READER); + private static final List ACLS = ImmutableList.of(ACL, OTHER_ACL); + private static final Integer COMPONENT_COUNT = 2; + private static final String CONTENT_TYPE = 
"text/html"; + private static final String CACHE_CONTROL = "cache"; + private static final String CONTENT_DISPOSITION = "content-disposition"; + private static final String CONTENT_ENCODING = "UTF-8"; + private static final String CONTENT_LANGUAGE = "En"; + private static final String CRC32 = "FF00"; + private static final String CRC32_HEX_STRING = "145d34"; + private static final Long DELETE_TIME = System.currentTimeMillis(); + private static final String ETAG = "0xFF00"; + private static final Long GENERATION = 1L; + private static final String GENERATED_ID = "B/N:1"; + private static final String MD5 = "FF00"; + private static final String MD5_HEX_STRING = "145d34"; + private static final String MEDIA_LINK = "http://media/b/n"; + private static final Map METADATA = ImmutableMap.of("n1", "v1", "n2", "v2"); + private static final Long META_GENERATION = 10L; + private static final User OWNER = new User("user@gmail.com"); + private static final String SELF_LINK = "http://storage/b/n"; + private static final Long SIZE = 1024L; + private static final Long UPDATE_TIME = DELETE_TIME - 1L; + private static final Long CREATE_TIME = UPDATE_TIME - 1L; + private static final Long CUSTOM_TIME = CREATE_TIME - 1L; + private static final StorageClass STORAGE_CLASS = StorageClass.COLDLINE; + private static final Long TIME_STORAGE_CLASS_UPDATED = CREATE_TIME; + private static final String ENCRYPTION_ALGORITHM = "AES256"; + private static final String KEY_SHA256 = "keySha"; + private static final BlobInfo.CustomerEncryption CUSTOMER_ENCRYPTION = + new BlobInfo.CustomerEncryption(ENCRYPTION_ALGORITHM, KEY_SHA256); + private static final String KMS_KEY_NAME = + "projects/p/locations/kr-loc/keyRings/kr/cryptoKeys/key"; + private static final Boolean EVENT_BASED_HOLD = true; + private static final Boolean TEMPORARY_HOLD = true; + private static final Long RETENTION_EXPIRATION_TIME = 10L; + private static final ObjectCustomContextPayload payload = + 
ObjectCustomContextPayload.newBuilder().setValue("contextValue").build(); + private static final Map customContexts = + Collections.singletonMap("contextKey", payload); + private static final ObjectContexts OBJECT_CONTEXTS = + ObjectContexts.newBuilder().setCustom(customContexts).build(); + private static final BlobInfo FULL_BLOB_INFO = + BlobInfo.newBuilder("b", "n", GENERATION) + .setAcl(ACLS) + .setComponentCount(COMPONENT_COUNT) + .setContentType(CONTENT_TYPE) + .setCacheControl(CACHE_CONTROL) + .setContentDisposition(CONTENT_DISPOSITION) + .setContentEncoding(CONTENT_ENCODING) + .setContentLanguage(CONTENT_LANGUAGE) + .setCrc32c(CRC32) + .setDeleteTime(DELETE_TIME) + .setEtag(ETAG) + .setGeneratedId(GENERATED_ID) + .setMd5(MD5) + .setMediaLink(MEDIA_LINK) + .setMetadata(METADATA) + .setMetageneration(META_GENERATION) + .setOwner(OWNER) + .setSelfLink(SELF_LINK) + .setSize(SIZE) + .setUpdateTime(UPDATE_TIME) + .setCreateTime(CREATE_TIME) + .setCustomTime(CUSTOM_TIME) + .setStorageClass(STORAGE_CLASS) + .setTimeStorageClassUpdated(TIME_STORAGE_CLASS_UPDATED) + .setCustomerEncryption(CUSTOMER_ENCRYPTION) + .setKmsKeyName(KMS_KEY_NAME) + .setEventBasedHold(EVENT_BASED_HOLD) + .setTemporaryHold(TEMPORARY_HOLD) + .setRetentionExpirationTime(RETENTION_EXPIRATION_TIME) + .setContexts(OBJECT_CONTEXTS) + .build(); + private static final BlobInfo BLOB_INFO = + BlobInfo.newBuilder("b", "n", 12345678L).setMetageneration(42L).build(); + private static final BlobInfo BLOB_INFO_NO_GENERATION = + BlobInfo.newBuilder(BLOB_INFO.getBucket(), BLOB_INFO.getName()) + .setMetageneration(42L) + .build(); + private static final BlobInfo DIRECTORY_INFO = + BlobInfo.newBuilder("b", "n/").setSize(0L).setIsDirectory(true).build(); + private static final String BASE64_KEY = "JVzfVl8NLD9FjedFuStegjRfES5ll5zc59CIXw572OA="; + private static final Key KEY = + new SecretKeySpec(BaseEncoding.base64().decode(BASE64_KEY), "AES256"); + + // This retrying setting is used by test 
testDownloadWithRetries. This unit test is setup + // to write one byte and then throw retryable exception, it then writes another bytes on + // second call succeeds. + private static final RetrySettings RETRY_SETTINGS = + RetrySettings.newBuilder().setMaxAttempts(2).build(); + private static final ApiClock API_CLOCK = + new ApiClock() { + @Override + public long nanoTime() { + return 42_000_000_000L; + } + + @Override + public long millisTime() { + return 42_000L; + } + }; + + private Storage storage; + private Blob blob; + private Blob expectedBlob; + private Storage serviceMockReturnsOptions = Mockito.mock(Storage.class); + private HttpStorageOptions mockOptions = Mockito.mock(HttpStorageOptions.class); + private final HttpRetryAlgorithmManager retryAlgorithmManager = + HttpStorageOptions.getDefaultInstance().getRetryAlgorithmManager(); + + @Before + public void setUp() { + storage = Mockito.mock(Storage.class); + } + + private void initializeExpectedBlob() { + when(serviceMockReturnsOptions.getOptions()).thenReturn(mockOptions); + when(mockOptions.getRetryAlgorithmManager()).thenReturn(retryAlgorithmManager); + expectedBlob = new Blob(serviceMockReturnsOptions, new BlobInfo.BuilderImpl(BLOB_INFO)); + } + + private void initializeBlob() { + blob = new Blob(storage, new BlobInfo.BuilderImpl(BLOB_INFO)); + } + + @Test + public void testExists_True() throws Exception { + initializeExpectedBlob(); + Storage.BlobGetOption[] expectedOptions = {Storage.BlobGetOption.fields()}; + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.get(expectedBlob.getBlobId(), expectedOptions)).thenReturn(expectedBlob); + initializeBlob(); + assertTrue(blob.exists()); + + verify(storage).getOptions(); + verify(storage).get(expectedBlob.getBlobId(), expectedOptions); + } + + @Test + public void testExists_False() throws Exception { + Storage.BlobGetOption[] expectedOptions = {Storage.BlobGetOption.fields()}; + when(storage.getOptions()).thenReturn(mockOptions); + 
when(storage.get(BLOB_INFO.getBlobId(), expectedOptions)).thenReturn(null); + initializeBlob(); + assertFalse(blob.exists()); + + verify(storage).getOptions(); + verify(storage).get(BLOB_INFO.getBlobId(), expectedOptions); + } + + @Test + public void testContent() throws Exception { + initializeExpectedBlob(); + byte[] content = {1, 2}; + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.readAllBytes(BLOB_INFO.getBlobId())).thenReturn(content); + initializeBlob(); + assertArrayEquals(content, blob.getContent()); + + verify(storage).getOptions(); + verify(storage).readAllBytes(BLOB_INFO.getBlobId()); + } + + @Test + public void testContentWithDecryptionKey() throws Exception { + initializeExpectedBlob(); + byte[] content = {1, 2}; + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.readAllBytes( + BLOB_INFO.getBlobId(), Storage.BlobSourceOption.decryptionKey(BASE64_KEY))) + .thenReturn(content); + initializeBlob(); + assertArrayEquals(content, blob.getContent(BlobSourceOption.decryptionKey(BASE64_KEY))); + assertArrayEquals(content, blob.getContent(BlobSourceOption.decryptionKey(KEY))); + + verify(storage).getOptions(); + verify(storage, times(2)) + .readAllBytes(BLOB_INFO.getBlobId(), Storage.BlobSourceOption.decryptionKey(BASE64_KEY)); + } + + @Test + public void testReload() throws Exception { + initializeExpectedBlob(); + Blob expectedReloadedBlob = expectedBlob.toBuilder().setCacheControl("c").build(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.get(BLOB_INFO_NO_GENERATION.getBlobId(), new Storage.BlobGetOption[0])) + .thenReturn(expectedReloadedBlob); + initializeBlob(); + Blob updatedBlob = blob.reload(); + assertEquals(expectedReloadedBlob, updatedBlob); + + verify(storage).getOptions(); + verify(storage).get(BLOB_INFO_NO_GENERATION.getBlobId(), new Storage.BlobGetOption[0]); + } + + @Test + public void testReloadNull() throws Exception { + initializeExpectedBlob(); + 
when(storage.getOptions()).thenReturn(mockOptions); + when(storage.get(BLOB_INFO_NO_GENERATION.getBlobId(), new Storage.BlobGetOption[0])) + .thenReturn(null); + initializeBlob(); + Blob reloadedBlob = blob.reload(); + assertNull(reloadedBlob); + + verify(storage).getOptions(); + verify(storage).get(BLOB_INFO_NO_GENERATION.getBlobId(), new Storage.BlobGetOption[0]); + } + + @Test + public void testReloadWithOptions() throws Exception { + initializeExpectedBlob(); + Blob expectedReloadedBlob = expectedBlob.toBuilder().setCacheControl("c").build(); + Storage.BlobGetOption[] options = {Storage.BlobGetOption.metagenerationMatch(42L)}; + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.get(BLOB_INFO_NO_GENERATION.getBlobId(), options)) + .thenReturn(expectedReloadedBlob); + initializeBlob(); + Blob updatedBlob = blob.reload(BlobSourceOption.metagenerationMatch()); + assertEquals(expectedReloadedBlob, updatedBlob); + + verify(storage).getOptions(); + verify(storage).get(BLOB_INFO_NO_GENERATION.getBlobId(), options); + } + + @Test + public void testUpdate() throws Exception { + initializeExpectedBlob(); + Blob expectedUpdatedBlob = expectedBlob.toBuilder().setCacheControl("c").build(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.update(eq(expectedUpdatedBlob), new Storage.BlobTargetOption[0])) + .thenReturn(expectedUpdatedBlob); + initializeBlob(); + Blob updatedBlob = new Blob(storage, new BlobInfo.BuilderImpl(expectedUpdatedBlob)); + Blob actualUpdatedBlob = updatedBlob.update(); + assertEquals(expectedUpdatedBlob, actualUpdatedBlob); + + verify(storage, times(2)).getOptions(); + verify(storage).update(eq(expectedUpdatedBlob), new Storage.BlobTargetOption[0]); + } + + @Test + public void testDelete() throws Exception { + initializeExpectedBlob(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.delete(BLOB_INFO.getBlobId(), new Storage.BlobSourceOption[0])).thenReturn(true); + initializeBlob(); + 
assertTrue(blob.delete()); + + verify(storage).getOptions(); + verify(storage).delete(BLOB_INFO.getBlobId(), new Storage.BlobSourceOption[0]); + } + + @Test + public void testCopyToBucket() throws Exception { + initializeExpectedBlob(); + BlobInfo target = BlobInfo.newBuilder(BlobId.of("bt", "n")).build(); + CopyWriter copyWriter = Mockito.mock(CopyWriter.class); + ArgumentCaptor capturedCopyRequest = ArgumentCaptor.forClass(CopyRequest.class); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.copy(capturedCopyRequest.capture())).thenReturn(copyWriter); + initializeBlob(); + CopyWriter returnedCopyWriter = blob.copyTo("bt"); + assertEquals(copyWriter, returnedCopyWriter); + assertEquals(BLOB_INFO_NO_GENERATION.getBlobId(), capturedCopyRequest.getValue().getSource()); + assertEquals(target, capturedCopyRequest.getValue().getTarget()); + assertFalse(capturedCopyRequest.getValue().overrideInfo()); + assertTrue(capturedCopyRequest.getValue().getSourceOptions().isEmpty()); + assertTrue(capturedCopyRequest.getValue().getTargetOptions().isEmpty()); + + verify(storage).getOptions(); + verify(storage).copy(capturedCopyRequest.capture()); + } + + @Test + public void testCopyTo() throws Exception { + initializeExpectedBlob(); + BlobInfo target = BlobInfo.newBuilder(BlobId.of("bt", "nt")).build(); + CopyWriter copyWriter = Mockito.mock(CopyWriter.class); + ArgumentCaptor capturedCopyRequest = ArgumentCaptor.forClass(CopyRequest.class); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.copy(capturedCopyRequest.capture())).thenReturn(copyWriter); + initializeBlob(); + CopyWriter returnedCopyWriter = blob.copyTo("bt", "nt"); + assertEquals(copyWriter, returnedCopyWriter); + assertEquals(BLOB_INFO_NO_GENERATION.getBlobId(), capturedCopyRequest.getValue().getSource()); + assertEquals(target, capturedCopyRequest.getValue().getTarget()); + assertFalse(capturedCopyRequest.getValue().overrideInfo()); + 
assertTrue(capturedCopyRequest.getValue().getSourceOptions().isEmpty()); + assertTrue(capturedCopyRequest.getValue().getTargetOptions().isEmpty()); + + verify(storage).getOptions(); + verify(storage).copy(capturedCopyRequest.capture()); + } + + @Test + public void testCopyToBlobId() throws Exception { + initializeExpectedBlob(); + BlobInfo target = BlobInfo.newBuilder(BlobId.of("bt", "nt")).build(); + BlobId targetId = BlobId.of("bt", "nt"); + CopyWriter copyWriter = Mockito.mock(CopyWriter.class); + ArgumentCaptor capturedCopyRequest = ArgumentCaptor.forClass(CopyRequest.class); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.copy(capturedCopyRequest.capture())).thenReturn(copyWriter); + initializeBlob(); + CopyWriter returnedCopyWriter = blob.copyTo(targetId); + assertEquals(copyWriter, returnedCopyWriter); + assertEquals(BLOB_INFO_NO_GENERATION.getBlobId(), capturedCopyRequest.getValue().getSource()); + assertEquals(target, capturedCopyRequest.getValue().getTarget()); + assertFalse(capturedCopyRequest.getValue().overrideInfo()); + assertTrue(capturedCopyRequest.getValue().getSourceOptions().isEmpty()); + assertTrue(capturedCopyRequest.getValue().getTargetOptions().isEmpty()); + + verify(storage).getOptions(); + verify(storage).copy(capturedCopyRequest.capture()); + } + + @Test + public void testReader() throws Exception { + initializeExpectedBlob(); + ReadChannel channel = Mockito.mock(ReadChannel.class); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.reader(BLOB_INFO.getBlobId())).thenReturn(channel); + initializeBlob(); + assertSame(channel, blob.reader()); + + verify(storage).getOptions(); + verify(storage).reader(BLOB_INFO.getBlobId()); + } + + @Test + public void testReaderWithDecryptionKey() throws Exception { + initializeExpectedBlob(); + ReadChannel channel = Mockito.mock(ReadChannel.class); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.reader(BLOB_INFO.getBlobId(), 
Storage.BlobSourceOption.decryptionKey(BASE64_KEY))) + .thenReturn(channel); + initializeBlob(); + assertSame(channel, blob.reader(BlobSourceOption.decryptionKey(BASE64_KEY))); + assertSame(channel, blob.reader(BlobSourceOption.decryptionKey(KEY))); + + verify(storage).getOptions(); + verify(storage, times(2)) + .reader(BLOB_INFO.getBlobId(), Storage.BlobSourceOption.decryptionKey(BASE64_KEY)); + } + + @Test + public void testSignUrl() throws Exception { + initializeExpectedBlob(); + URL url = new URL("http://localhost:123/bla"); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.signUrl(expectedBlob, 100, TimeUnit.SECONDS)).thenReturn(url); + initializeBlob(); + assertEquals(url, blob.signUrl(100, TimeUnit.SECONDS)); + + verify(storage).getOptions(); + verify(storage).signUrl(expectedBlob, 100, TimeUnit.SECONDS); + } + + @Test + public void testGetAcl() throws Exception { + initializeExpectedBlob(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.getAcl(BLOB_INFO.getBlobId(), User.ofAllAuthenticatedUsers())).thenReturn(ACL); + initializeBlob(); + assertEquals(ACL, blob.getAcl(User.ofAllAuthenticatedUsers())); + + verify(storage).getOptions(); + verify(storage).getAcl(BLOB_INFO.getBlobId(), User.ofAllAuthenticatedUsers()); + } + + @Test + public void testDeleteAcl() throws Exception { + initializeExpectedBlob(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.deleteAcl(BLOB_INFO.getBlobId(), User.ofAllAuthenticatedUsers())).thenReturn(true); + initializeBlob(); + assertTrue(blob.deleteAcl(User.ofAllAuthenticatedUsers())); + + verify(storage).getOptions(); + verify(storage).deleteAcl(BLOB_INFO.getBlobId(), User.ofAllAuthenticatedUsers()); + } + + @Test + public void testCreateAcl() throws Exception { + initializeExpectedBlob(); + when(storage.getOptions()).thenReturn(mockOptions); + Acl returnedAcl = ACL.toBuilder().setEtag("ETAG").setId("ID").build(); + when(storage.createAcl(BLOB_INFO.getBlobId(), 
ACL)).thenReturn(returnedAcl); + initializeBlob(); + assertEquals(returnedAcl, blob.createAcl(ACL)); + + verify(storage).getOptions(); + verify(storage).createAcl(BLOB_INFO.getBlobId(), ACL); + } + + @Test + public void testUpdateAcl() throws Exception { + initializeExpectedBlob(); + when(storage.getOptions()).thenReturn(mockOptions); + Acl returnedAcl = ACL.toBuilder().setEtag("ETAG").setId("ID").build(); + when(storage.updateAcl(BLOB_INFO.getBlobId(), ACL)).thenReturn(returnedAcl); + initializeBlob(); + assertEquals(returnedAcl, blob.updateAcl(ACL)); + + verify(storage).getOptions(); + verify(storage).updateAcl(BLOB_INFO.getBlobId(), ACL); + } + + @Test + public void testListAcls() throws Exception { + initializeExpectedBlob(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.listAcls(BLOB_INFO.getBlobId())).thenReturn(ACLS); + initializeBlob(); + assertEquals(ACLS, blob.listAcls()); + + verify(storage).getOptions(); + verify(storage).listAcls(BLOB_INFO.getBlobId()); + } + + @Test + public void testToBuilder() { + when(storage.getOptions()).thenReturn(mockOptions); + Blob fullBlob = new Blob(storage, new BlobInfo.BuilderImpl(FULL_BLOB_INFO)); + assertEquals(fullBlob, fullBlob.toBuilder().build()); + Blob simpleBlob = new Blob(storage, new BlobInfo.BuilderImpl(BLOB_INFO)); + assertEquals(simpleBlob, simpleBlob.toBuilder().build()); + Blob directory = new Blob(storage, new BlobInfo.BuilderImpl(DIRECTORY_INFO)); + assertEquals(directory, directory.toBuilder().build()); + } + + @Test + public void testBuilder() { + initializeExpectedBlob(); + when(storage.getOptions()).thenReturn(mockOptions); + Blob.Builder builder = new Blob.Builder(new Blob(storage, new BlobInfo.BuilderImpl(BLOB_INFO))); + Blob blob = + builder + .setAcl(ACLS) + .setComponentCount(COMPONENT_COUNT) + .setContentType(CONTENT_TYPE) + .setCacheControl(CACHE_CONTROL) + .setContentDisposition(CONTENT_DISPOSITION) + .setContentEncoding(CONTENT_ENCODING) + 
.setContentLanguage(CONTENT_LANGUAGE) + .setCrc32c(CRC32) + .setCreateTime(CREATE_TIME) + .setCustomTime(CUSTOM_TIME) + .setStorageClass(STORAGE_CLASS) + .setTimeStorageClassUpdated(TIME_STORAGE_CLASS_UPDATED) + .setCustomerEncryption(CUSTOMER_ENCRYPTION) + .setKmsKeyName(KMS_KEY_NAME) + .setEventBasedHold(EVENT_BASED_HOLD) + .setTemporaryHold(TEMPORARY_HOLD) + .setRetentionExpirationTime(RETENTION_EXPIRATION_TIME) + .setDeleteTime(DELETE_TIME) + .setEtag(ETAG) + .setGeneratedId(GENERATED_ID) + .setMd5(MD5) + .setMediaLink(MEDIA_LINK) + .setMetadata(METADATA) + .setMetageneration(META_GENERATION) + .setOwner(OWNER) + .setSelfLink(SELF_LINK) + .setSize(SIZE) + .setUpdateTime(UPDATE_TIME) + .build(); + assertEquals("b", blob.getBucket()); + assertEquals("n", blob.getName()); + assertEquals(ACLS, blob.getAcl()); + assertEquals(COMPONENT_COUNT, blob.getComponentCount()); + assertEquals(CONTENT_TYPE, blob.getContentType()); + assertEquals(CACHE_CONTROL, blob.getCacheControl()); + assertEquals(CONTENT_DISPOSITION, blob.getContentDisposition()); + assertEquals(CONTENT_ENCODING, blob.getContentEncoding()); + assertEquals(CONTENT_LANGUAGE, blob.getContentLanguage()); + assertEquals(CRC32, blob.getCrc32c()); + assertEquals(CRC32_HEX_STRING, blob.getCrc32cToHexString()); + assertEquals(CREATE_TIME, blob.getCreateTime()); + assertEquals(CUSTOM_TIME, blob.getCustomTime()); + assertEquals(STORAGE_CLASS, blob.getStorageClass()); + assertEquals(TIME_STORAGE_CLASS_UPDATED, blob.getTimeStorageClassUpdated()); + assertEquals(CUSTOMER_ENCRYPTION, blob.getCustomerEncryption()); + assertEquals(KMS_KEY_NAME, blob.getKmsKeyName()); + assertEquals(EVENT_BASED_HOLD, blob.getEventBasedHold()); + assertEquals(TEMPORARY_HOLD, blob.getTemporaryHold()); + assertEquals(RETENTION_EXPIRATION_TIME, blob.getRetentionExpirationTime()); + assertEquals(DELETE_TIME, blob.getDeleteTime()); + assertEquals(ETAG, blob.getEtag()); + assertEquals(GENERATED_ID, blob.getGeneratedId()); + assertEquals(MD5, 
blob.getMd5()); + assertEquals(MD5_HEX_STRING, blob.getMd5ToHexString()); + assertEquals(MEDIA_LINK, blob.getMediaLink()); + assertEquals(METADATA, blob.getMetadata()); + assertEquals(META_GENERATION, blob.getMetageneration()); + assertEquals(OWNER, blob.getOwner()); + assertEquals(SELF_LINK, blob.getSelfLink()); + assertEquals(SIZE, blob.getSize()); + assertEquals(UPDATE_TIME, blob.getUpdateTime()); + assertEquals(storage.getOptions(), blob.getStorage().getOptions()); + assertFalse(blob.isDirectory()); + builder = new Blob.Builder(new Blob(storage, new BlobInfo.BuilderImpl(DIRECTORY_INFO))); + blob = builder.setBlobId(BlobId.of("b", "n/")).setIsDirectory(true).setSize(0L).build(); + assertEquals("b", blob.getBucket()); + assertEquals("n/", blob.getName()); + assertNull(blob.getAcl()); + assertNull(blob.getComponentCount()); + assertNull(blob.getContentType()); + assertNull(blob.getCacheControl()); + assertNull(blob.getContentDisposition()); + assertNull(blob.getContentEncoding()); + assertNull(blob.getContentLanguage()); + assertNull(blob.getCrc32c()); + assertNull(blob.getCrc32cToHexString()); + assertNull(blob.getCreateTime()); + assertNull(blob.getStorageClass()); + assertNull(blob.getTimeStorageClassUpdated()); + assertNull(blob.getCustomerEncryption()); + assertNull(blob.getKmsKeyName()); + assertNull(blob.getEventBasedHold()); + assertNull(blob.getTemporaryHold()); + assertNull(blob.getRetentionExpirationTime()); + assertNull(blob.getDeleteTime()); + assertNull(blob.getEtag()); + assertNull(blob.getGeneratedId()); + assertNull(blob.getMd5()); + assertNull(blob.getMd5ToHexString()); + assertNull(blob.getMediaLink()); + assertNull(blob.getMetadata()); + assertNull(blob.getMetageneration()); + assertNull(blob.getOwner()); + assertNull(blob.getSelfLink()); + assertEquals(0L, (long) blob.getSize()); + assertNull(blob.getUpdateTime()); + assertNull(blob.getCustomTime()); + assertTrue(blob.isDirectory()); + } + + @Test + public void testDownloadTo() throws 
Exception { + File file = File.createTempFile("blob", ".tmp"); + Path path = file.toPath(); + + Storage s = Mockito.mock(Storage.class); + Blob blob = new Blob(s, new BuilderImpl(BlobInfo.newBuilder("buck", "obj").build())); + + Mockito.doNothing().when(s).downloadTo(blob.getBlobId(), path); + blob.downloadTo(path); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BucketInfoPropertyTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BucketInfoPropertyTest.java new file mode 100644 index 000000000000..1cfdc48d01d9 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BucketInfoPropertyTest.java @@ -0,0 +1,23 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.storage.v2.Bucket; + +final class BucketInfoPropertyTest + extends BaseConvertablePropertyTest< + BucketInfo, Bucket, com.google.api.services.storage.model.Bucket> {} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BucketInfoTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BucketInfoTest.java new file mode 100644 index 000000000000..2e3cb4bd9f4b --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BucketInfoTest.java @@ -0,0 +1,538 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * Copyright 2015 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.cloud.storage.Acl.Project.ProjectRole.VIEWERS;
import static com.google.cloud.storage.TestUtils.assertAll;
import static com.google.common.truth.Truth.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import com.google.api.client.json.JsonGenerator;
import com.google.api.client.json.jackson2.JacksonFactory;
import com.google.api.client.util.DateTime;
import com.google.api.services.storage.model.Bucket;
import com.google.api.services.storage.model.Bucket.Lifecycle;
import com.google.api.services.storage.model.Bucket.Lifecycle.Rule;
import com.google.cloud.storage.Acl.Project;
import com.google.cloud.storage.Acl.Role;
import com.google.cloud.storage.Acl.User;
import com.google.cloud.storage.BucketInfo.AgeDeleteRule;
import com.google.cloud.storage.BucketInfo.DeleteRule;
import com.google.cloud.storage.BucketInfo.IamConfiguration;
import com.google.cloud.storage.BucketInfo.LifecycleRule;
import com.google.cloud.storage.BucketInfo.LifecycleRule.AbortIncompleteMPUAction;
import com.google.cloud.storage.BucketInfo.LifecycleRule.DeleteLifecycleAction;
import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleAction;
import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleCondition;
import com.google.cloud.storage.BucketInfo.LifecycleRule.SetStorageClassLifecycleAction;
import com.google.cloud.storage.BucketInfo.PublicAccessPrevention;
import com.google.cloud.storage.Conversions.Codec;
import com.google.common.collect.ImmutableList;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.junit.Test;

/**
 * Unit tests for {@link BucketInfo}: builder behavior, JSON codec round-trips, lifecycle rule
 * encoding/decoding, and IAM configuration mapping.
 *
 * <p>NOTE(review): reconstructed from diff-mangled text — generic type parameters that the
 * extraction stripped (e.g. {@code List<Acl>}, {@code Codec<BucketInfo, Bucket>}) were restored;
 * verify against the upstream file. {@code JacksonFactory} is deprecated upstream in favor of
 * {@code GsonFactory} — presumably kept here for compatibility; confirm before changing.
 */
public class BucketInfoTest {

  private static final List<Acl> ACL =
      ImmutableList.of(
          Acl.of(User.ofAllAuthenticatedUsers(), Role.READER),
          Acl.of(new Project(VIEWERS, "p1"), Role.WRITER));
  private static final String ETAG = "0xFF00";
  private static final String GENERATED_ID = "B/N:1";
  private static final Long META_GENERATION = 10L;
  private static final User OWNER = new User("user@gmail.com");
  private static final String SELF_LINK = "http://storage/b/n";
  private static final Long CREATE_TIME = System.currentTimeMillis();
  private static final Long UPDATE_TIME = CREATE_TIME;
  private static final List<Cors> CORS = Collections.singletonList(Cors.newBuilder().build());
  private static final List<Acl> DEFAULT_ACL =
      Collections.singletonList(Acl.of(User.ofAllAuthenticatedUsers(), Role.WRITER));

  @SuppressWarnings({"unchecked", "deprecation"})
  private static final List<? extends DeleteRule> DELETE_RULES =
      Collections.singletonList(new AgeDeleteRule(5));

  private static final List<LifecycleRule> LIFECYCLE_RULES =
      Collections.singletonList(
          new BucketInfo.LifecycleRule(
              LifecycleAction.newDeleteAction(),
              LifecycleCondition.newBuilder().setAge(5).build()));
  private static final String INDEX_PAGE = "index.html";
  private static final BucketInfo.IamConfiguration IAM_CONFIGURATION =
      BucketInfo.IamConfiguration.newBuilder()
          .setIsUniformBucketLevelAccessEnabled(true)
          .setUniformBucketLevelAccessLockedTime(System.currentTimeMillis())
          .setPublicAccessPrevention(BucketInfo.PublicAccessPrevention.ENFORCED)
          .build();
  private static final BucketInfo.Logging LOGGING =
      BucketInfo.Logging.newBuilder()
          .setLogBucket("test-bucket")
          .setLogObjectPrefix("test-")
          .build();
  private static final String NOT_FOUND_PAGE = "error.html";
  private static final String LOCATION = "ASIA";
  private static final StorageClass STORAGE_CLASS = StorageClass.STANDARD;
  private static final StorageClass ARCHIVE_STORAGE_CLASS = StorageClass.ARCHIVE;
  private static final String DEFAULT_KMS_KEY_NAME =
      "projects/p/locations/kr-loc/keyRings/kr/cryptoKeys/key";
  private static final Boolean VERSIONING_ENABLED = true;
  // hashMapOf (not ImmutableMap) because label2 maps to null.
  private static final Map<String, String> BUCKET_LABELS =
      TestUtils.hashMapOf("label1", "value1", "label2", null);

  private static final Boolean REQUESTER_PAYS = true;
  private static final Boolean DEFAULT_EVENT_BASED_HOLD = true;
  private static final Long RETENTION_EFFECTIVE_TIME = 10L;
  private static final Long RETENTION_PERIOD = 10L;
  private static final Boolean RETENTION_POLICY_IS_LOCKED = false;
  private static final List<String> LOCATION_TYPES =
      ImmutableList.of("multi-region", "region", "dual-region");
  private static final String LOCATION_TYPE = "multi-region";

  /**
   * Builds a fully-populated BucketInfo; the two fixtures below previously duplicated this entire
   * builder chain and differed only in storage class, so it is factored out here.
   */
  @SuppressWarnings({"unchecked", "deprecation"})
  private static BucketInfo allFieldsBucketInfo(StorageClass storageClass) {
    return BucketInfo.newBuilder("b")
        .setAcl(ACL)
        .setEtag(ETAG)
        .setGeneratedId(GENERATED_ID)
        .setMetageneration(META_GENERATION)
        .setOwner(OWNER)
        .setSelfLink(SELF_LINK)
        .setCors(CORS)
        .setCreateTime(CREATE_TIME)
        .setUpdateTime(UPDATE_TIME)
        .setDefaultAcl(DEFAULT_ACL)
        .setDeleteRules(DELETE_RULES)
        .setLifecycleRules(LIFECYCLE_RULES)
        .setIndexPage(INDEX_PAGE)
        .setIamConfiguration(IAM_CONFIGURATION)
        .setNotFoundPage(NOT_FOUND_PAGE)
        .setLocation(LOCATION)
        .setLocationType(LOCATION_TYPE)
        .setStorageClass(storageClass)
        .setVersioningEnabled(VERSIONING_ENABLED)
        .setLabels(BUCKET_LABELS)
        .setRequesterPays(REQUESTER_PAYS)
        .setDefaultKmsKeyName(DEFAULT_KMS_KEY_NAME)
        .setDefaultEventBasedHold(DEFAULT_EVENT_BASED_HOLD)
        .setRetentionEffectiveTime(RETENTION_EFFECTIVE_TIME)
        .setRetentionPeriod(RETENTION_PERIOD)
        .setRetentionPolicyIsLocked(RETENTION_POLICY_IS_LOCKED)
        .setLogging(LOGGING)
        .build();
  }

  private static final BucketInfo BUCKET_INFO = allFieldsBucketInfo(STORAGE_CLASS);
  private static final BucketInfo BUCKET_INFO_ARCHIVE =
      allFieldsBucketInfo(ARCHIVE_STORAGE_CLASS);

  private static final Lifecycle EMPTY_LIFECYCLE = lifecycle(Collections.emptyList());

  @Test
  public void testToBuilder() throws Exception {
    compareBuckets(BUCKET_INFO, BUCKET_INFO.toBuilder().build());
    BucketInfo bucketInfo = BUCKET_INFO.toBuilder().setName("B").setGeneratedId("id").build();
    assertEquals("B", bucketInfo.getName());
    assertEquals("id", bucketInfo.getGeneratedId());
    bucketInfo = bucketInfo.toBuilder().setName("b").setGeneratedId(GENERATED_ID).build();
    compareBuckets(BUCKET_INFO, bucketInfo);
    assertEquals(ARCHIVE_STORAGE_CLASS, BUCKET_INFO_ARCHIVE.getStorageClass());
  }

  @Test
  public void testToBuilderIncomplete() throws Exception {
    BucketInfo incompleteBucketInfo = BucketInfo.newBuilder("b").build();
    compareBuckets(incompleteBucketInfo, incompleteBucketInfo.toBuilder().build());
  }

  @Test
  public void testOf() {
    BucketInfo bucketInfo = BucketInfo.of("bucket");
    assertEquals("bucket", bucketInfo.getName());
  }

  /** Every getter must return the value set on the all-fields fixture. */
  @Test
  @SuppressWarnings({"unchecked", "deprecation"})
  public void testBuilder() throws Exception {
    assertAll(
        () -> assertEquals("b", BUCKET_INFO.getName()),
        () -> assertEquals(ACL, BUCKET_INFO.getAcl()),
        () -> assertEquals(ETAG, BUCKET_INFO.getEtag()),
        () -> assertEquals(GENERATED_ID, BUCKET_INFO.getGeneratedId()),
        () -> assertEquals(META_GENERATION, BUCKET_INFO.getMetageneration()),
        () -> assertEquals(OWNER, BUCKET_INFO.getOwner()),
        () -> assertEquals(SELF_LINK, BUCKET_INFO.getSelfLink()),
        () -> assertEquals(CREATE_TIME, BUCKET_INFO.getCreateTime()),
        () -> assertEquals(UPDATE_TIME, BUCKET_INFO.getUpdateTime()),
        () -> assertEquals(CORS, BUCKET_INFO.getCors()),
        () -> assertEquals(DEFAULT_ACL, BUCKET_INFO.getDefaultAcl()),
        () -> assertEquals(DELETE_RULES, BUCKET_INFO.getDeleteRules()),
        () -> assertEquals(INDEX_PAGE, BUCKET_INFO.getIndexPage()),
        () -> assertEquals(IAM_CONFIGURATION, BUCKET_INFO.getIamConfiguration()),
        () -> assertEquals(NOT_FOUND_PAGE, BUCKET_INFO.getNotFoundPage()),
        () -> assertEquals(LOCATION, BUCKET_INFO.getLocation()),
        () -> assertEquals(STORAGE_CLASS, BUCKET_INFO.getStorageClass()),
        () -> assertEquals(DEFAULT_KMS_KEY_NAME, BUCKET_INFO.getDefaultKmsKeyName()),
        () -> assertEquals(VERSIONING_ENABLED, BUCKET_INFO.versioningEnabled()),
        () -> assertEquals(BUCKET_LABELS, BUCKET_INFO.getLabels()),
        () -> assertEquals(REQUESTER_PAYS, BUCKET_INFO.requesterPays()),
        () -> assertEquals(DEFAULT_EVENT_BASED_HOLD, BUCKET_INFO.getDefaultEventBasedHold()),
        () -> assertEquals(RETENTION_EFFECTIVE_TIME, BUCKET_INFO.getRetentionEffectiveTime()),
        () -> assertEquals(RETENTION_PERIOD, BUCKET_INFO.getRetentionPeriod()),
        () -> assertEquals(RETENTION_POLICY_IS_LOCKED, BUCKET_INFO.retentionPolicyIsLocked()),
        () -> assertTrue(LOCATION_TYPES.contains(BUCKET_INFO.getLocationType())),
        () -> assertEquals(LOGGING, BUCKET_INFO.getLogging()));
  }

  /** JSON codec round-trip: encode to the API model and decode back, for full and partial infos. */
  @Test
  @SuppressWarnings({"unchecked", "deprecation"})
  public void testToPbAndFromPb() throws Exception {
    Codec<BucketInfo, Bucket> codec = Conversions.json().bucketInfo();

    Bucket encode1 = codec.encode(BUCKET_INFO);
    BucketInfo decode1 = codec.decode(encode1);
    compareBuckets(BUCKET_INFO, decode1);

    BucketInfo bucketInfo =
        BucketInfo.newBuilder("b")
            .setDeleteRules(DELETE_RULES)
            .setLifecycleRules(LIFECYCLE_RULES)
            .setLogging(LOGGING)
            .build();
    Bucket encode2 = codec.encode(bucketInfo);
    BucketInfo decode2 = codec.decode(encode2);
    compareBuckets(bucketInfo, decode2);
  }

  /** Field-by-field equality check; assertAll reports every mismatch, not just the first. */
  private void compareBuckets(BucketInfo expected, BucketInfo value) throws Exception {
    assertAll(
        () -> assertEquals(expected.getName(), value.getName()),
        () -> assertEquals(expected.getAcl(), value.getAcl()),
        () -> assertEquals(expected.getEtag(), value.getEtag()),
        () -> assertEquals(expected.getGeneratedId(), value.getGeneratedId()),
        () -> assertEquals(expected.getMetageneration(), value.getMetageneration()),
        () -> assertEquals(expected.getOwner(), value.getOwner()),
        () -> assertEquals(expected.getSelfLink(), value.getSelfLink()),
        () ->
            assertEquals(
                expected.getCreateTimeOffsetDateTime(), value.getCreateTimeOffsetDateTime()),
        () ->
            assertEquals(
                expected.getUpdateTimeOffsetDateTime(), value.getUpdateTimeOffsetDateTime()),
        () -> assertEquals(expected.getCors(), value.getCors()),
        () -> assertEquals(expected.getDefaultAcl(), value.getDefaultAcl()),
        () -> assertEquals(expected.getDeleteRules(), value.getDeleteRules()),
        () -> assertEquals(expected.getLifecycleRules(), value.getLifecycleRules()),
        () -> assertEquals(expected.getIndexPage(), value.getIndexPage()),
        () -> assertEquals(expected.getIamConfiguration(), value.getIamConfiguration()),
        () -> assertEquals(expected.getNotFoundPage(), value.getNotFoundPage()),
        () -> assertEquals(expected.getLocation(), value.getLocation()),
        () -> assertEquals(expected.getStorageClass(), value.getStorageClass()),
        () -> assertEquals(expected.getDefaultKmsKeyName(), value.getDefaultKmsKeyName()),
        () -> assertEquals(expected.versioningEnabled(), value.versioningEnabled()),
        () -> assertEquals(expected.getLabels(), value.getLabels()),
        () -> assertEquals(expected.requesterPays(), value.requesterPays()),
        () -> assertEquals(expected.getDefaultEventBasedHold(), value.getDefaultEventBasedHold()),
        () ->
            assertEquals(
                expected.getRetentionEffectiveTimeOffsetDateTime(),
                value.getRetentionEffectiveTimeOffsetDateTime()),
        () ->
            assertEquals(expected.getRetentionPeriodDuration(), value.getRetentionPeriodDuration()),
        () -> assertEquals(expected.retentionPolicyIsLocked(), value.retentionPolicyIsLocked()),
        () -> assertEquals(expected.getLogging(), value.getLogging()),
        () -> assertEquals(expected, value));
  }

  /** Covers encode/decode of delete, setStorageClass, abort-MPU, and unknown lifecycle actions. */
  @Test
  public void testLifecycleRules() {
    Rule deleteLifecycleRule =
        Conversions.json()
            .lifecycleRule()
            .encode(
                new LifecycleRule(
                    LifecycleAction.newDeleteAction(),
                    LifecycleCondition.newBuilder()
                        .setAge(10)
                        .setMatchesPrefix(Arrays.asList("abc", "ijk"))
                        .setMatchesSuffix(Arrays.asList("xyz"))
                        .build()));

    assertEquals(
        LifecycleRule.DeleteLifecycleAction.TYPE, deleteLifecycleRule.getAction().getType());
    assertEquals(10, deleteLifecycleRule.getCondition().getAge().intValue());
    assertEquals(2, deleteLifecycleRule.getCondition().getMatchesPrefix().size());
    assertEquals("abc", (String) deleteLifecycleRule.getCondition().getMatchesPrefix().get(0));
    assertEquals("ijk", (String) deleteLifecycleRule.getCondition().getMatchesPrefix().get(1));
    assertEquals(1, deleteLifecycleRule.getCondition().getMatchesSuffix().size());
    assertEquals("xyz", deleteLifecycleRule.getCondition().getMatchesSuffix().get(0));

    LifecycleRule lcr = Conversions.json().lifecycleRule().decode(deleteLifecycleRule);
    assertEquals(LifecycleRule.DeleteLifecycleAction.TYPE, lcr.getAction().getActionType());
    assertEquals(10, lcr.getCondition().getAge().intValue());
    assertEquals(2, lcr.getCondition().getMatchesPrefix().size());
    assertEquals("abc", (String) lcr.getCondition().getMatchesPrefix().get(0));
    assertEquals("ijk", (String) lcr.getCondition().getMatchesPrefix().get(1));
    assertEquals(1, lcr.getCondition().getMatchesSuffix().size());
    assertEquals("xyz", lcr.getCondition().getMatchesSuffix().get(0));

    assertTrue(lcr.getAction() instanceof DeleteLifecycleAction);

    Rule setStorageClassLifecycleRule =
        Conversions.json()
            .lifecycleRule()
            .encode(
                new LifecycleRule(
                    LifecycleAction.newSetStorageClassAction(StorageClass.COLDLINE),
                    LifecycleCondition.newBuilder()
                        .setIsLive(true)
                        .setNumberOfNewerVersions(10)
                        .build()));

    assertEquals(
        StorageClass.COLDLINE.toString(),
        setStorageClassLifecycleRule.getAction().getStorageClass());
    assertTrue(setStorageClassLifecycleRule.getCondition().getIsLive());
    assertEquals(10, setStorageClassLifecycleRule.getCondition().getNumNewerVersions().intValue());
    assertTrue(
        Conversions.json().lifecycleRule().decode(setStorageClassLifecycleRule).getAction()
            instanceof SetStorageClassLifecycleAction);

    Rule lifecycleRule =
        Conversions.json()
            .lifecycleRule()
            .encode(
                new LifecycleRule(
                    LifecycleAction.newSetStorageClassAction(StorageClass.COLDLINE),
                    LifecycleCondition.newBuilder()
                        .setIsLive(true)
                        .setNumberOfNewerVersions(10)
                        .setDaysSinceNoncurrentTime(30)
                        .setNoncurrentTimeBefore(new DateTime(System.currentTimeMillis()))
                        .setCustomTimeBefore(new DateTime(System.currentTimeMillis()))
                        .setDaysSinceCustomTime(30)
                        .setMatchesSuffix(Collections.singletonList("-suffix"))
                        .setMatchesPrefix(Collections.singletonList("prefix-"))
                        .build()));
    assertEquals(StorageClass.COLDLINE.toString(), lifecycleRule.getAction().getStorageClass());
    assertTrue(lifecycleRule.getCondition().getIsLive());
    assertEquals(10, lifecycleRule.getCondition().getNumNewerVersions().intValue());
    assertEquals(30, lifecycleRule.getCondition().getDaysSinceNoncurrentTime().intValue());
    assertNotNull(lifecycleRule.getCondition().getNoncurrentTimeBefore());
    assertEquals(StorageClass.COLDLINE.toString(), lifecycleRule.getAction().getStorageClass());
    assertEquals(30, lifecycleRule.getCondition().getDaysSinceCustomTime().intValue());
    assertNotNull(lifecycleRule.getCondition().getCustomTimeBefore());
    assertEquals("prefix-", lifecycleRule.getCondition().getMatchesPrefix().get(0));
    assertEquals("-suffix", lifecycleRule.getCondition().getMatchesSuffix().get(0));
    assertTrue(
        Conversions.json().lifecycleRule().decode(lifecycleRule).getAction()
            instanceof SetStorageClassLifecycleAction);

    Rule abortMpuLifecycleRule =
        Conversions.json()
            .lifecycleRule()
            .encode(
                new LifecycleRule(
                    LifecycleAction.newAbortIncompleteMPUploadAction(),
                    LifecycleCondition.newBuilder().setAge(10).build()));
    assertEquals(AbortIncompleteMPUAction.TYPE, abortMpuLifecycleRule.getAction().getType());
    assertEquals(10, abortMpuLifecycleRule.getCondition().getAge().intValue());
    LifecycleRule decode = Conversions.json().lifecycleRule().decode(abortMpuLifecycleRule);
    assertThat(decode.getAction()).isInstanceOf(AbortIncompleteMPUAction.class);

    Rule unsupportedRule =
        Conversions.json()
            .lifecycleRule()
            .encode(
                new LifecycleRule(
                    LifecycleAction.newLifecycleAction("This action type doesn't exist"),
                    LifecycleCondition.newBuilder().setAge(10).build()));
    unsupportedRule.setAction(
        unsupportedRule.getAction().setType("This action type also doesn't exist"));

    Conversions.json()
        .lifecycleRule()
        .decode(
            unsupportedRule); // If this doesn't throw an exception, unsupported rules are working
  }

  @Test
  public void testIamConfiguration() {
    Bucket.IamConfiguration iamConfiguration =
        Conversions.json()
            .iamConfiguration()
            .encode(
                IamConfiguration.newBuilder()
                    .setIsUniformBucketLevelAccessEnabled(true)
                    .setUniformBucketLevelAccessLockedTime(System.currentTimeMillis())
                    .setPublicAccessPrevention(PublicAccessPrevention.ENFORCED)
                    .build());

    assertEquals(Boolean.TRUE, iamConfiguration.getUniformBucketLevelAccess().getEnabled());
    assertNotNull(iamConfiguration.getUniformBucketLevelAccess().getLockedTime());
    assertEquals(
        BucketInfo.PublicAccessPrevention.ENFORCED.getValue(),
        iamConfiguration.getPublicAccessPrevention());
  }

  /** UNKNOWN public-access-prevention must be omitted from the serialized JSON entirely. */
  @Test
  public void testPublicAccessPrevention_ensureAbsentWhenUnknown() throws IOException {
    StringWriter stringWriter = new StringWriter();
    JsonGenerator jsonGenerator =
        JacksonFactory.getDefaultInstance().createJsonGenerator(stringWriter);

    jsonGenerator.serialize(
        Conversions.json()
            .iamConfiguration()
            .encode(
                IamConfiguration.newBuilder()
                    .setIsUniformBucketLevelAccessEnabled(true)
                    .setUniformBucketLevelAccessLockedTime(System.currentTimeMillis())
                    .setPublicAccessPrevention(PublicAccessPrevention.UNKNOWN)
                    .build()));
    jsonGenerator.flush();

    assertFalse(stringWriter.getBuffer().toString().contains("publicAccessPrevention"));
  }

  /** An unrecognized wire value decodes to UNKNOWN rather than failing. */
  @Test
  public void testPapValueOfIamConfiguration() {
    Bucket.IamConfiguration iamConfiguration = new Bucket.IamConfiguration();
    Bucket.IamConfiguration.UniformBucketLevelAccess uniformBucketLevelAccess =
        new Bucket.IamConfiguration.UniformBucketLevelAccess();
    iamConfiguration.setUniformBucketLevelAccess(uniformBucketLevelAccess);
    iamConfiguration.setPublicAccessPrevention("random-string");
    IamConfiguration fromPb = Conversions.json().iamConfiguration().decode(iamConfiguration);

    assertEquals(PublicAccessPrevention.UNKNOWN, fromPb.getPublicAccessPrevention());
  }

  @Test
  public void testLogging() {
    Bucket.Logging logging =
        Conversions.json()
            .logging()
            .encode(
                BucketInfo.Logging.newBuilder()
                    .setLogBucket("test-bucket")
                    .setLogObjectPrefix("test-")
                    .build());
    assertEquals("test-bucket", logging.getLogBucket());
    assertEquals("test-", logging.getLogObjectPrefix());
  }

  // The tests below pin the exact lifecycle mapping: untouched rules -> null on the wire,
  // explicitly emptied/deleted rules -> an empty Lifecycle, null delete rules -> null.

  @Test
  public void testRuleMappingIsCorrect_noMutations() {
    Bucket bucket = Conversions.json().bucketInfo().encode(bi().build());
    assertNull(bucket.getLifecycle());
  }

  @Test
  public void testRuleMappingIsCorrect_deleteLifecycleRules() {
    Bucket bucket = Conversions.json().bucketInfo().encode(bi().deleteLifecycleRules().build());
    assertEquals(EMPTY_LIFECYCLE, bucket.getLifecycle());
  }

  @Test
  @SuppressWarnings({"deprecation"})
  public void testRuleMappingIsCorrect_setDeleteRules_null() {
    Bucket bucket = Conversions.json().bucketInfo().encode(bi().setDeleteRules(null).build());
    assertNull(bucket.getLifecycle());
  }

  @Test
  @SuppressWarnings({"deprecation"})
  public void testRuleMappingIsCorrect_setDeleteRules_empty() {
    Codec<BucketInfo, Bucket> codec = Conversions.json().bucketInfo();
    BucketInfo bucketInfo = bi().setDeleteRules(Collections.emptyList()).build();
    Bucket bucket = codec.encode(bucketInfo);
    Lifecycle actual = bucket.getLifecycle();
    assertThat(actual).isEqualTo(EMPTY_LIFECYCLE);
  }

  @Test
  public void testRuleMappingIsCorrect_setLifecycleRules_empty() {
    Bucket bucket =
        Conversions.json()
            .bucketInfo()
            .encode(bi().setLifecycleRules(Collections.emptyList()).build());
    assertEquals(EMPTY_LIFECYCLE, bucket.getLifecycle());
  }

  @Test
  public void testRuleMappingIsCorrect_setLifeCycleRules_nonEmpty() {
    LifecycleRule lifecycleRule =
        new LifecycleRule(
            LifecycleAction.newDeleteAction(), LifecycleCondition.newBuilder().setAge(10).build());
    Rule lifecycleDeleteAfter10 = Conversions.json().lifecycleRule().encode(lifecycleRule);
    Bucket bucket =
        Conversions.json()
            .bucketInfo()
            .encode(bi().setLifecycleRules(ImmutableList.of(lifecycleRule)).build());
    assertEquals(lifecycle(lifecycleDeleteAfter10), bucket.getLifecycle());
  }

  /** Varargs convenience overload for building a Lifecycle fixture. */
  private static Lifecycle lifecycle(Rule... rules) {
    return lifecycle(Arrays.asList(rules));
  }

  /** Builds an API-model Lifecycle wrapping the given rules. */
  private static Lifecycle lifecycle(List<Rule> rules) {
    Lifecycle lifecycle = new Lifecycle();
    lifecycle.setRule(rules);
    return lifecycle;
  }

  /** Fresh builder for a minimal bucket used by the rule-mapping tests. */
  private static BucketInfo.Builder bi() {
    String bucketId = "bucketId";
    return BucketInfo.newBuilder(bucketId);
  }
}
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.Acl.Role.WRITER; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.gax.paging.Page; +import com.google.cloud.PageImpl; +import com.google.cloud.storage.Acl.Project; +import com.google.cloud.storage.Acl.Project.ProjectRole; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.BucketInfo.AgeDeleteRule; +import com.google.cloud.storage.BucketInfo.DeleteRule; +import com.google.cloud.storage.BucketInfo.LifecycleRule; +import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleAction; +import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleCondition; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.common.base.Function; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.hash.Hashing; +import com.google.common.io.BaseEncoding; +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.security.Key; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import javax.crypto.spec.SecretKeySpec; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +public class BucketTest { + + private static final Acl ACL = Acl.of(User.ofAllAuthenticatedUsers(), Role.OWNER); + private static final Acl OTHER_ACL = Acl.of(new Project(ProjectRole.OWNERS, "p"), Role.READER); + private static final List 
ACLS = ImmutableList.of(ACL, OTHER_ACL); + private static final String ETAG = "0xFF00"; + private static final String GENERATED_ID = "B/N:1"; + private static final Long META_GENERATION = 10L; + private static final User OWNER = new User("user@gmail.com"); + private static final String SELF_LINK = "http://storage/b/n"; + private static final Long CREATE_TIME = System.currentTimeMillis(); + private static final Long UPDATE_TIME = CREATE_TIME - 1L; + private static final List CORS = Collections.singletonList(Cors.newBuilder().build()); + private static final List DEFAULT_ACL = + Collections.singletonList(Acl.of(User.ofAllAuthenticatedUsers(), WRITER)); + + @SuppressWarnings({"unchecked", "deprecation"}) + private static final List DELETE_RULES = + Collections.singletonList(new AgeDeleteRule(5)); + + private static final List LIFECYCLE_RULES = + Collections.singletonList( + new LifecycleRule( + LifecycleAction.newDeleteAction(), + LifecycleCondition.newBuilder().setAge(5).build())); + private static final String INDEX_PAGE = "index.html"; + private static final String NOT_FOUND_PAGE = "error.html"; + private static final String LOCATION = "ASIA"; + private static final StorageClass STORAGE_CLASS = StorageClass.STANDARD; + private static final String DEFAULT_KMS_KEY_NAME = + "projects/p/locations/kr-loc/keyRings/kr/cryptoKeys/key"; + private static final Boolean VERSIONING_ENABLED = true; + private static final Map BUCKET_LABELS = ImmutableMap.of("label1", "value1"); + private static final Boolean REQUESTER_PAYS = true; + private static final String USER_PROJECT = "test-project"; + private static final Boolean DEFAULT_EVENT_BASED_HOLD = true; + private static final Long RETENTION_EFFECTIVE_TIME = 10L; + private static final Long RETENTION_PERIOD = 10L; + private static final Boolean RETENTION_POLICY_IS_LOCKED = false; + private static final List LOCATION_TYPES = + ImmutableList.of("multi-region", "region", "dual-region"); + private static final String LOCATION_TYPE = 
"multi-region"; + + @SuppressWarnings({"unchecked", "deprecation"}) + private static final BucketInfo FULL_BUCKET_INFO = + BucketInfo.newBuilder("b") + .setAcl(ACLS) + .setEtag(ETAG) + .setGeneratedId(GENERATED_ID) + .setMetageneration(META_GENERATION) + .setOwner(OWNER) + .setSelfLink(SELF_LINK) + .setCors(CORS) + .setCreateTime(CREATE_TIME) + .setUpdateTime(UPDATE_TIME) + .setDefaultAcl(DEFAULT_ACL) + .setDeleteRules(DELETE_RULES) + .setLifecycleRules(LIFECYCLE_RULES) + .setIndexPage(INDEX_PAGE) + .setNotFoundPage(NOT_FOUND_PAGE) + .setLocation(LOCATION) + .setStorageClass(STORAGE_CLASS) + .setVersioningEnabled(VERSIONING_ENABLED) + .setLabels(BUCKET_LABELS) + .setRequesterPays(REQUESTER_PAYS) + .setDefaultKmsKeyName(DEFAULT_KMS_KEY_NAME) + .setDefaultEventBasedHold(DEFAULT_EVENT_BASED_HOLD) + .setRetentionEffectiveTime(RETENTION_EFFECTIVE_TIME) + .setRetentionPeriod(RETENTION_PERIOD) + .setRetentionPolicyIsLocked(RETENTION_POLICY_IS_LOCKED) + .build(); + + private static final BucketInfo BUCKET_INFO = + BucketInfo.newBuilder("b").setMetageneration(42L).build(); + private static final String CONTENT_TYPE = "text/plain"; + private static final String BASE64_KEY = "JVzfVl8NLD9FjedFuStegjRfES5ll5zc59CIXw572OA="; + private static final Key KEY = + new SecretKeySpec(BaseEncoding.base64().decode(BASE64_KEY), "AES256"); + private final HttpRetryAlgorithmManager retryAlgorithmManager = + HttpStorageOptions.getDefaultInstance().getRetryAlgorithmManager(); + + private Storage storage; + private Storage serviceMockReturnsOptions = Mockito.mock(Storage.class); + private HttpStorageOptions mockOptions = Mockito.mock(HttpStorageOptions.class); + private Bucket bucket; + private Bucket expectedBucket; + private List blobResults; + + @Before + public void setUp() { + storage = Mockito.mock(Storage.class); + } + + private void initializeExpectedBucket() { + when(serviceMockReturnsOptions.getOptions()).thenReturn(mockOptions); + 
when(mockOptions.getRetryAlgorithmManager()).thenReturn(retryAlgorithmManager); + expectedBucket = new Bucket(serviceMockReturnsOptions, new BucketInfo.BuilderImpl(BUCKET_INFO)); + blobResults = + ImmutableList.of( + new Blob( + serviceMockReturnsOptions, + new BlobInfo.BuilderImpl(BlobInfo.newBuilder("b", "n1").build())), + new Blob( + serviceMockReturnsOptions, + new BlobInfo.BuilderImpl(BlobInfo.newBuilder("b", "n2").build())), + new Blob( + serviceMockReturnsOptions, + new BlobInfo.BuilderImpl(BlobInfo.newBuilder("b", "n3").build()))); + } + + private void initializeBucket() { + bucket = new Bucket(storage, new BucketInfo.BuilderImpl(BUCKET_INFO)); + } + + @Test + public void testExists_True() throws Exception { + initializeExpectedBucket(); + Storage.BucketGetOption[] expectedOptions = {Storage.BucketGetOption.fields()}; + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.get(BUCKET_INFO.getName(), expectedOptions)).thenReturn(expectedBucket); + initializeBucket(); + assertTrue(bucket.exists()); + + verify(storage).getOptions(); + verify(storage).get(BUCKET_INFO.getName(), expectedOptions); + } + + @Test + public void testExists_False() throws Exception { + initializeExpectedBucket(); + Storage.BucketGetOption[] expectedOptions = {Storage.BucketGetOption.fields()}; + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.get(BUCKET_INFO.getName(), expectedOptions)).thenReturn(null); + initializeBucket(); + assertFalse(bucket.exists()); + + verify(storage).getOptions(); + verify(storage).get(BUCKET_INFO.getName(), expectedOptions); + } + + @Test + public void testReload() throws Exception { + initializeExpectedBucket(); + BucketInfo updatedInfo = BUCKET_INFO.toBuilder().setNotFoundPage("p").build(); + Bucket expectedUpdatedBucket = + new Bucket(serviceMockReturnsOptions, new BucketInfo.BuilderImpl(updatedInfo)); + when(storage.getOptions()).thenReturn(mockOptions); + 
when(storage.get(updatedInfo.getName())).thenReturn(expectedUpdatedBucket); + initializeBucket(); + Bucket updatedBucket = bucket.reload(); + assertEquals(expectedUpdatedBucket, updatedBucket); + + verify(storage).getOptions(); + verify(storage).get(updatedInfo.getName()); + } + + @Test + public void testReloadNull() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.get(BUCKET_INFO.getName())).thenReturn(null); + initializeBucket(); + assertNull(bucket.reload()); + + verify(storage).getOptions(); + verify(storage).get(BUCKET_INFO.getName()); + } + + @Test + public void testReloadWithOptions() throws Exception { + initializeExpectedBucket(); + BucketInfo updatedInfo = BUCKET_INFO.toBuilder().setNotFoundPage("p").build(); + Bucket expectedUpdatedBucket = + new Bucket(serviceMockReturnsOptions, new BucketInfo.BuilderImpl(updatedInfo)); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.get(updatedInfo.getName(), Storage.BucketGetOption.metagenerationMatch(42L))) + .thenReturn(expectedUpdatedBucket); + initializeBucket(); + Bucket updatedBucket = bucket.reload(Bucket.BucketSourceOption.metagenerationMatch()); + assertEquals(expectedUpdatedBucket, updatedBucket); + + verify(storage).getOptions(); + verify(storage).get(updatedInfo.getName(), Storage.BucketGetOption.metagenerationMatch(42L)); + } + + @Test + public void testUpdate() throws Exception { + initializeExpectedBucket(); + Bucket expectedUpdatedBucket = expectedBucket.toBuilder().setNotFoundPage("p").build(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.update(expectedUpdatedBucket)).thenReturn(expectedUpdatedBucket); + initializeBucket(); + Bucket updatedBucket = new Bucket(storage, new BucketInfo.BuilderImpl(expectedUpdatedBucket)); + Bucket actualUpdatedBucket = updatedBucket.update(); + assertEquals(expectedUpdatedBucket, actualUpdatedBucket); + + verify(storage, times(2)).getOptions(); + 
verify(storage).update(expectedUpdatedBucket); + } + + @Test + public void testDelete() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.delete(BUCKET_INFO.getName())).thenReturn(true); + initializeBucket(); + assertTrue(bucket.delete()); + + verify(storage).getOptions(); + verify(storage).delete(BUCKET_INFO.getName()); + } + + @Test + public void testList() throws Exception { + initializeExpectedBucket(); + PageImpl expectedBlobPage = new PageImpl<>(null, "c", blobResults); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.list(BUCKET_INFO.getName())).thenReturn(expectedBlobPage); + initializeBucket(); + Page blobPage = bucket.list(); + Iterator blobInfoIterator = blobPage.getValues().iterator(); + Iterator blobIterator = blobPage.getValues().iterator(); + while (blobInfoIterator.hasNext() && blobIterator.hasNext()) { + assertEquals(blobInfoIterator.next(), blobIterator.next()); + } + assertFalse(blobInfoIterator.hasNext()); + assertFalse(blobIterator.hasNext()); + assertEquals(expectedBlobPage.getNextPageToken(), blobPage.getNextPageToken()); + + verify(storage).getOptions(); + verify(storage).list(BUCKET_INFO.getName()); + } + + @Test + public void testGet() throws Exception { + initializeExpectedBucket(); + Blob expectedBlob = + new Blob( + serviceMockReturnsOptions, + new BlobInfo.BuilderImpl(BlobInfo.newBuilder("b", "n").build())); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.get(BlobId.of(expectedBucket.getName(), "n"), new Storage.BlobGetOption[0])) + .thenReturn(expectedBlob); + initializeBucket(); + Blob blob = bucket.get("n"); + assertEquals(expectedBlob, blob); + + verify(storage).getOptions(); + verify(storage).get(BlobId.of(expectedBucket.getName(), "n"), new Storage.BlobGetOption[0]); + } + + @Test + public void testGetAllArray() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + List 
blobIds = + Lists.transform( + blobResults, + new Function() { + @Override + public BlobId apply(Blob blob) { + return blob.getBlobId(); + } + }); + when(storage.get(blobIds)).thenReturn(blobResults); + initializeBucket(); + assertEquals(blobResults, bucket.get("n1", "n2", "n3")); + + verify(storage).getOptions(); + verify(storage).get(blobIds); + } + + @Test + public void testGetAllIterable() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + List blobIds = + Lists.transform( + blobResults, + new Function() { + @Override + public BlobId apply(Blob blob) { + return blob.getBlobId(); + } + }); + when(storage.get(blobIds)).thenReturn(blobResults); + initializeBucket(); + assertEquals(blobResults, bucket.get(ImmutableList.of("n1", "n2", "n3"))); + + verify(storage).getOptions(); + verify(storage).get(blobIds); + } + + @Test + public void testCreate() throws Exception { + initializeExpectedBucket(); + BlobInfo info = BlobInfo.newBuilder("b", "n").setContentType(CONTENT_TYPE).build(); + Blob expectedBlob = new Blob(serviceMockReturnsOptions, new BlobInfo.BuilderImpl(info)); + byte[] content = {0xD, 0xE, 0xA, 0xD}; + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.create(info, content)).thenReturn(expectedBlob); + initializeBucket(); + Blob blob = bucket.create("n", content, CONTENT_TYPE); + assertEquals(expectedBlob, blob); + + verify(storage).getOptions(); + verify(storage).create(info, content); + } + + @Test + public void testCreateNoContentType() throws Exception { + initializeExpectedBucket(); + BlobInfo info = BlobInfo.newBuilder("b", "n").build(); + Blob expectedBlob = new Blob(serviceMockReturnsOptions, new BlobInfo.BuilderImpl(info)); + byte[] content = {0xD, 0xE, 0xA, 0xD}; + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.create(info, content)).thenReturn(expectedBlob); + initializeBucket(); + Blob blob = bucket.create("n", content); + assertEquals(expectedBlob, blob); 
+ + verify(storage).getOptions(); + verify(storage).create(info, content); + } + + @Test + public void testCreateWithOptions() throws Exception { + initializeExpectedBucket(); + BlobInfo info = BlobInfo.newBuilder(BlobId.of("b", "n")).setContentType(CONTENT_TYPE).build(); + Blob expectedBlob = new Blob(serviceMockReturnsOptions, new BlobInfo.BuilderImpl(info)); + byte[] content = {0xD, 0xE, 0xA, 0xD}; + Storage.PredefinedAcl acl = Storage.PredefinedAcl.ALL_AUTHENTICATED_USERS; + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.create( + info, + content, + new BlobTargetOption(UnifiedOpts.generationMatch(42L)), + new BlobTargetOption(UnifiedOpts.metagenerationMatch(24L)), + Storage.BlobTargetOption.predefinedAcl(acl), + Storage.BlobTargetOption.encryptionKey(BASE64_KEY), + Storage.BlobTargetOption.userProject(USER_PROJECT))) + .thenReturn(expectedBlob); + initializeBucket(); + Blob blob = + bucket.create( + "n", + content, + CONTENT_TYPE, + Bucket.BlobTargetOption.generationMatch(42L), + Bucket.BlobTargetOption.metagenerationMatch(24L), + Bucket.BlobTargetOption.predefinedAcl(acl), + Bucket.BlobTargetOption.encryptionKey(BASE64_KEY), + Bucket.BlobTargetOption.userProject(USER_PROJECT)); + assertEquals(expectedBlob, blob); + + verify(storage).getOptions(); + verify(storage) + .create( + info, + content, + new BlobTargetOption(UnifiedOpts.generationMatch(42L)), + new BlobTargetOption(UnifiedOpts.metagenerationMatch(24L)), + Storage.BlobTargetOption.predefinedAcl(acl), + Storage.BlobTargetOption.encryptionKey(BASE64_KEY), + Storage.BlobTargetOption.userProject(USER_PROJECT)); + } + + @Test + public void testCreateWithEncryptionKey() throws Exception { + initializeExpectedBucket(); + BlobInfo info = BlobInfo.newBuilder(BlobId.of("b", "n")).setContentType(CONTENT_TYPE).build(); + Blob expectedBlob = new Blob(serviceMockReturnsOptions, new BlobInfo.BuilderImpl(info)); + byte[] content = {0xD, 0xE, 0xA, 0xD}; + 
when(storage.getOptions()).thenReturn(mockOptions); + when(storage.create(info, content, Storage.BlobTargetOption.encryptionKey(KEY))) + .thenReturn(expectedBlob); + initializeBucket(); + Blob blob = + bucket.create("n", content, CONTENT_TYPE, Bucket.BlobTargetOption.encryptionKey(KEY)); + assertEquals(expectedBlob, blob); + + verify(storage).getOptions(); + verify(storage).create(info, content, Storage.BlobTargetOption.encryptionKey(KEY)); + } + + @Test + public void testCreateWithKmsKeyName() throws Exception { + initializeExpectedBucket(); + BlobInfo info = BlobInfo.newBuilder(BlobId.of("b", "n")).setContentType(CONTENT_TYPE).build(); + Blob expectedBlob = new Blob(serviceMockReturnsOptions, new BlobInfo.BuilderImpl(info)); + byte[] content = {0xD, 0xE, 0xA, 0xD}; + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.create(info, content, Storage.BlobTargetOption.kmsKeyName(DEFAULT_KMS_KEY_NAME))) + .thenReturn(expectedBlob); + initializeBucket(); + Blob blob = + bucket.create( + "n", content, CONTENT_TYPE, Bucket.BlobTargetOption.kmsKeyName(DEFAULT_KMS_KEY_NAME)); + assertEquals(expectedBlob, blob); + + verify(storage).getOptions(); + verify(storage) + .create(info, content, Storage.BlobTargetOption.kmsKeyName(DEFAULT_KMS_KEY_NAME)); + } + + @Test + public void testCreateNotExists() throws Exception { + initializeExpectedBucket(); + BlobInfo info = BlobInfo.newBuilder(BlobId.of("b", "n")).setContentType(CONTENT_TYPE).build(); + Blob expectedBlob = new Blob(serviceMockReturnsOptions, new BlobInfo.BuilderImpl(info)); + byte[] content = {0xD, 0xE, 0xA, 0xD}; + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.create(info, content, new BlobTargetOption(UnifiedOpts.doesNotExist()))) + .thenReturn(expectedBlob); + initializeBucket(); + Blob blob = bucket.create("n", content, CONTENT_TYPE, Bucket.BlobTargetOption.doesNotExist()); + assertEquals(expectedBlob, blob); + + verify(storage).getOptions(); + verify(storage).create(info, 
content, new BlobTargetOption(UnifiedOpts.doesNotExist())); + } + + @Test + @SuppressWarnings({"unchecked", "deprecation"}) + public void testCreateFromStream() throws Exception { + initializeExpectedBucket(); + BlobInfo info = BlobInfo.newBuilder("b", "n").setContentType(CONTENT_TYPE).build(); + Blob expectedBlob = new Blob(serviceMockReturnsOptions, new BlobInfo.BuilderImpl(info)); + byte[] content = {0xD, 0xE, 0xA, 0xD}; + InputStream streamContent = new ByteArrayInputStream(content); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.create(info, streamContent)).thenReturn(expectedBlob); + initializeBucket(); + Blob blob = bucket.create("n", streamContent, CONTENT_TYPE); + assertEquals(expectedBlob, blob); + + verify(storage).getOptions(); + verify(storage).create(info, streamContent); + } + + @Test + @SuppressWarnings({"unchecked", "deprecation"}) + public void testCreateFromStreamNoContentType() throws Exception { + initializeExpectedBucket(); + BlobInfo info = BlobInfo.newBuilder("b", "n").build(); + Blob expectedBlob = new Blob(serviceMockReturnsOptions, new BlobInfo.BuilderImpl(info)); + byte[] content = {0xD, 0xE, 0xA, 0xD}; + InputStream streamContent = new ByteArrayInputStream(content); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.create(info, streamContent)).thenReturn(expectedBlob); + initializeBucket(); + Blob blob = bucket.create("n", streamContent); + assertEquals(expectedBlob, blob); + + verify(storage).getOptions(); + verify(storage).create(info, streamContent); + } + + @Test + @SuppressWarnings({"unchecked", "deprecation"}) + public void testCreateFromStreamWithOptions() throws Exception { + initializeExpectedBucket(); + BlobInfo info = BlobInfo.newBuilder(BlobId.of("b", "n")).setContentType(CONTENT_TYPE).build(); + Blob expectedBlob = info.asBlob(serviceMockReturnsOptions); + byte[] content = {0xD, 0xE, 0xA, 0xD}; + String crc32c = Utils.crc32cCodec.encode(Hashing.crc32c().hashBytes(content).asInt()); + 
Storage.PredefinedAcl acl = Storage.PredefinedAcl.ALL_AUTHENTICATED_USERS; + InputStream streamContent = new ByteArrayInputStream(content); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.create( + info, + streamContent, + new BlobWriteOption(UnifiedOpts.generationMatch(42L)), + new BlobWriteOption(UnifiedOpts.metagenerationMatch(24L)), + Storage.BlobWriteOption.predefinedAcl(acl), + new BlobWriteOption(UnifiedOpts.crc32cMatch(crc32c)), + new BlobWriteOption(UnifiedOpts.md5Match("md5")), + Storage.BlobWriteOption.encryptionKey(BASE64_KEY), + Storage.BlobWriteOption.userProject(USER_PROJECT))) + .thenReturn(expectedBlob); + initializeBucket(); + Blob blob = + bucket.create( + "n", + streamContent, + CONTENT_TYPE, + Bucket.BlobWriteOption.generationMatch(42L), + Bucket.BlobWriteOption.metagenerationMatch(24L), + Bucket.BlobWriteOption.predefinedAcl(acl), + Bucket.BlobWriteOption.crc32cMatch(crc32c), + Bucket.BlobWriteOption.md5Match("md5"), + Bucket.BlobWriteOption.encryptionKey(BASE64_KEY), + Bucket.BlobWriteOption.userProject(USER_PROJECT)); + assertEquals(expectedBlob, blob); + + verify(storage).getOptions(); + verify(storage) + .create( + info, + streamContent, + new BlobWriteOption(UnifiedOpts.generationMatch(42L)), + new BlobWriteOption(UnifiedOpts.metagenerationMatch(24L)), + Storage.BlobWriteOption.predefinedAcl(acl), + new BlobWriteOption(UnifiedOpts.crc32cMatch(crc32c)), + new BlobWriteOption(UnifiedOpts.md5Match("md5")), + Storage.BlobWriteOption.encryptionKey(BASE64_KEY), + Storage.BlobWriteOption.userProject(USER_PROJECT)); + } + + @Test + @SuppressWarnings({"unchecked", "deprecation"}) + public void testCreateFromStreamWithEncryptionKey() throws Exception { + initializeExpectedBucket(); + BlobInfo info = BlobInfo.newBuilder(BlobId.of("b", "n")).setContentType(CONTENT_TYPE).build(); + Blob expectedBlob = new Blob(serviceMockReturnsOptions, new BlobInfo.BuilderImpl(info)); + byte[] content = {0xD, 0xE, 0xA, 0xD}; + InputStream 
streamContent = new ByteArrayInputStream(content); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.create(info, streamContent, Storage.BlobWriteOption.encryptionKey(KEY))) + .thenReturn(expectedBlob); + initializeBucket(); + Blob blob = + bucket.create("n", streamContent, CONTENT_TYPE, Bucket.BlobWriteOption.encryptionKey(KEY)); + assertEquals(expectedBlob, blob); + + verify(storage).getOptions(); + verify(storage).create(info, streamContent, Storage.BlobWriteOption.encryptionKey(KEY)); + } + + @Test + @SuppressWarnings({"unchecked", "deprecation"}) + public void testCreateFromStreamNotExists() throws Exception { + initializeExpectedBucket(); + BlobInfo info = BlobInfo.newBuilder(BlobId.of("b", "n")).setContentType(CONTENT_TYPE).build(); + Blob expectedBlob = info.asBlob(serviceMockReturnsOptions); + byte[] content = {0xD, 0xE, 0xA, 0xD}; + InputStream streamContent = new ByteArrayInputStream(content); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.create(info, streamContent, new BlobWriteOption(UnifiedOpts.doesNotExist()))) + .thenReturn(expectedBlob); + initializeBucket(); + Blob blob = + bucket.create("n", streamContent, CONTENT_TYPE, Bucket.BlobWriteOption.doesNotExist()); + assertEquals(expectedBlob, blob); + + verify(storage).getOptions(); + verify(storage).create(info, streamContent, new BlobWriteOption(UnifiedOpts.doesNotExist())); + } + + @Test + public void testGetAcl() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.getAcl(BUCKET_INFO.getName(), User.ofAllAuthenticatedUsers())).thenReturn(ACL); + initializeBucket(); + assertEquals(ACL, bucket.getAcl(User.ofAllAuthenticatedUsers())); + + verify(storage).getOptions(); + verify(storage).getAcl(BUCKET_INFO.getName(), User.ofAllAuthenticatedUsers()); + } + + @Test + public void testDeleteAcl() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + 
when(storage.deleteAcl(BUCKET_INFO.getName(), User.ofAllAuthenticatedUsers())).thenReturn(true); + initializeBucket(); + assertTrue(bucket.deleteAcl(User.ofAllAuthenticatedUsers())); + + verify(storage).getOptions(); + verify(storage).deleteAcl(BUCKET_INFO.getName(), User.ofAllAuthenticatedUsers()); + } + + @Test + public void testCreateAcl() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + Acl returnedAcl = ACL.toBuilder().setEtag("ETAG").setId("ID").build(); + when(storage.createAcl(BUCKET_INFO.getName(), ACL)).thenReturn(returnedAcl); + initializeBucket(); + assertEquals(returnedAcl, bucket.createAcl(ACL)); + + verify(storage).getOptions(); + verify(storage).createAcl(BUCKET_INFO.getName(), ACL); + } + + @Test + public void testUpdateAcl() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + Acl returnedAcl = ACL.toBuilder().setEtag("ETAG").setId("ID").build(); + when(storage.updateAcl(BUCKET_INFO.getName(), ACL)).thenReturn(returnedAcl); + initializeBucket(); + assertEquals(returnedAcl, bucket.updateAcl(ACL)); + verify(storage).getOptions(); + verify(storage).updateAcl(BUCKET_INFO.getName(), ACL); + } + + @Test + public void testListAcls() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.listAcls(BUCKET_INFO.getName())).thenReturn(ACLS); + initializeBucket(); + assertEquals(ACLS, bucket.listAcls()); + + verify(storage).getOptions(); + verify(storage).listAcls(BUCKET_INFO.getName()); + } + + @Test + public void testGetDefaultAcl() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.getDefaultAcl(BUCKET_INFO.getName(), User.ofAllAuthenticatedUsers())) + .thenReturn(ACL); + initializeBucket(); + assertEquals(ACL, bucket.getDefaultAcl(User.ofAllAuthenticatedUsers())); + + verify(storage).getOptions(); + 
verify(storage).getDefaultAcl(BUCKET_INFO.getName(), User.ofAllAuthenticatedUsers()); + } + + @Test + public void testDeleteDefaultAcl() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.deleteDefaultAcl(BUCKET_INFO.getName(), User.ofAllAuthenticatedUsers())) + .thenReturn(true); + initializeBucket(); + assertTrue(bucket.deleteDefaultAcl(User.ofAllAuthenticatedUsers())); + + verify(storage).getOptions(); + verify(storage).deleteDefaultAcl(BUCKET_INFO.getName(), User.ofAllAuthenticatedUsers()); + } + + @Test + public void testCreateDefaultAcl() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + Acl returnedAcl = ACL.toBuilder().setEtag("ETAG").setId("ID").build(); + when(storage.createDefaultAcl(BUCKET_INFO.getName(), ACL)).thenReturn(returnedAcl); + initializeBucket(); + assertEquals(returnedAcl, bucket.createDefaultAcl(ACL)); + + verify(storage).getOptions(); + verify(storage).createDefaultAcl(BUCKET_INFO.getName(), ACL); + } + + @Test + public void testUpdateDefaultAcl() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + Acl returnedAcl = ACL.toBuilder().setEtag("ETAG").setId("ID").build(); + when(storage.updateDefaultAcl(BUCKET_INFO.getName(), ACL)).thenReturn(returnedAcl); + initializeBucket(); + assertEquals(returnedAcl, bucket.updateDefaultAcl(ACL)); + + verify(storage).getOptions(); + verify(storage).updateDefaultAcl(BUCKET_INFO.getName(), ACL); + } + + @Test + public void testListDefaultAcls() throws Exception { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.listDefaultAcls(BUCKET_INFO.getName())).thenReturn(ACLS); + initializeBucket(); + assertEquals(ACLS, bucket.listDefaultAcls()); + + verify(storage).getOptions(); + verify(storage).listDefaultAcls(BUCKET_INFO.getName()); + } + + @Test + public void testLockRetention() throws 
Exception { + initializeExpectedBucket(); + Bucket expectedRetentionLockedBucket = + expectedBucket.toBuilder() + .setRetentionPeriod(RETENTION_PERIOD) + .setRetentionPolicyIsLocked(true) + .build(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.lockRetentionPolicy( + expectedRetentionLockedBucket, + Storage.BucketTargetOption.metagenerationMatch(), + Storage.BucketTargetOption.userProject(USER_PROJECT))) + .thenReturn(expectedRetentionLockedBucket); + initializeBucket(); + Bucket lockedRetentionPolicyBucket = + new Bucket(storage, new BucketInfo.BuilderImpl(expectedRetentionLockedBucket)); + Bucket actualRetentionLockedBucket = + lockedRetentionPolicyBucket.lockRetentionPolicy( + Storage.BucketTargetOption.metagenerationMatch(), + Storage.BucketTargetOption.userProject(USER_PROJECT)); + assertEquals(expectedRetentionLockedBucket, actualRetentionLockedBucket); + + verify(storage, times(2)).getOptions(); + verify(storage) + .lockRetentionPolicy( + expectedRetentionLockedBucket, + Storage.BucketTargetOption.metagenerationMatch(), + Storage.BucketTargetOption.userProject(USER_PROJECT)); + } + + @Test + @SuppressWarnings({"unchecked", "deprecation"}) + public void testToBuilder() { + when(storage.getOptions()).thenReturn(mockOptions); + Bucket fullBucket = new Bucket(storage, new BucketInfo.BuilderImpl(FULL_BUCKET_INFO)); + assertEquals(fullBucket, fullBucket.toBuilder().build()); + Bucket simpleBlob = new Bucket(storage, new BucketInfo.BuilderImpl(BUCKET_INFO)); + assertEquals(simpleBlob, simpleBlob.toBuilder().build()); + } + + @Test + @SuppressWarnings({"unchecked", "deprecation"}) + public void testBuilder() { + initializeExpectedBucket(); + when(storage.getOptions()).thenReturn(mockOptions); + Bucket.Builder builder = + new Bucket.Builder(new Bucket(storage, new BucketInfo.BuilderImpl(BUCKET_INFO))); + Bucket bucket = + builder + .setAcl(ACLS) + .setEtag(ETAG) + .setGeneratedId(GENERATED_ID) + .setMetageneration(META_GENERATION) + 
.setOwner(OWNER) + .setSelfLink(SELF_LINK) + .setCors(CORS) + .setCreateTime(CREATE_TIME) + .setUpdateTime(UPDATE_TIME) + .setDefaultAcl(DEFAULT_ACL) + .setDeleteRules(DELETE_RULES) + .setLifecycleRules(LIFECYCLE_RULES) + .setIndexPage(INDEX_PAGE) + .setNotFoundPage(NOT_FOUND_PAGE) + .setLocation(LOCATION) + .setLocationType(LOCATION_TYPE) + .setStorageClass(STORAGE_CLASS) + .setVersioningEnabled(VERSIONING_ENABLED) + .setLabels(BUCKET_LABELS) + .setRequesterPays(REQUESTER_PAYS) + .setDefaultKmsKeyName(DEFAULT_KMS_KEY_NAME) + .setDefaultEventBasedHold(DEFAULT_EVENT_BASED_HOLD) + .setRetentionEffectiveTime(RETENTION_EFFECTIVE_TIME) + .setRetentionPeriod(RETENTION_PERIOD) + .setRetentionPolicyIsLocked(RETENTION_POLICY_IS_LOCKED) + .build(); + assertEquals("b", bucket.getName()); + assertEquals(ACLS, bucket.getAcl()); + assertEquals(ETAG, bucket.getEtag()); + assertEquals(GENERATED_ID, bucket.getGeneratedId()); + assertEquals(META_GENERATION, bucket.getMetageneration()); + assertEquals(OWNER, bucket.getOwner()); + assertEquals(SELF_LINK, bucket.getSelfLink()); + assertEquals(CREATE_TIME, bucket.getCreateTime()); + assertEquals(UPDATE_TIME, bucket.getUpdateTime()); + assertEquals(CORS, bucket.getCors()); + assertEquals(DEFAULT_ACL, bucket.getDefaultAcl()); + assertEquals(DELETE_RULES, bucket.getDeleteRules()); + assertEquals(LIFECYCLE_RULES, bucket.getLifecycleRules()); + assertEquals(INDEX_PAGE, bucket.getIndexPage()); + assertEquals(NOT_FOUND_PAGE, bucket.getNotFoundPage()); + assertEquals(LOCATION, bucket.getLocation()); + assertEquals(STORAGE_CLASS, bucket.getStorageClass()); + assertEquals(VERSIONING_ENABLED, bucket.versioningEnabled()); + assertEquals(BUCKET_LABELS, bucket.getLabels()); + assertEquals(REQUESTER_PAYS, bucket.requesterPays()); + assertEquals(DEFAULT_KMS_KEY_NAME, bucket.getDefaultKmsKeyName()); + assertEquals(DEFAULT_EVENT_BASED_HOLD, bucket.getDefaultEventBasedHold()); + assertEquals(RETENTION_EFFECTIVE_TIME, bucket.getRetentionEffectiveTime()); + 
assertEquals(RETENTION_PERIOD, bucket.getRetentionPeriod()); + assertEquals(RETENTION_POLICY_IS_LOCKED, bucket.retentionPolicyIsLocked()); + assertEquals(storage.getOptions(), bucket.getStorage().getOptions()); + assertTrue(LOCATION_TYPES.contains(LOCATION_TYPE)); + } + + @Test + public void testDeleteLifecycleRules() { + initializeExpectedBucket(); + Bucket bucket = + new Bucket(serviceMockReturnsOptions, new BucketInfo.BuilderImpl(FULL_BUCKET_INFO)); + assertThat(bucket.getLifecycleRules()).hasSize(1); + Bucket expectedUpdatedBucket = bucket.toBuilder().deleteLifecycleRules().build(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.update(expectedUpdatedBucket)).thenReturn(expectedUpdatedBucket); + initializeBucket(); + Bucket updatedBucket = new Bucket(storage, new BucketInfo.BuilderImpl(expectedUpdatedBucket)); + Bucket actualUpdatedBucket = updatedBucket.update(); + assertThat(actualUpdatedBucket.getLifecycleRules()).hasSize(0); + + verify(storage, times(2)).getOptions(); + verify(storage).update(expectedUpdatedBucket); + } + + @Test + public void testUpdateBucketLogging() { + initializeExpectedBucket(); + BucketInfo.Logging logging = + BucketInfo.Logging.newBuilder() + .setLogBucket("logs-bucket") + .setLogObjectPrefix("test-logs") + .build(); + BucketInfo bucketInfo = BucketInfo.newBuilder("b").setLogging(logging).build(); + Bucket bucket = new Bucket(serviceMockReturnsOptions, new BucketInfo.BuilderImpl(bucketInfo)); + assertThat(bucket.getLogging().getLogBucket()).isEqualTo("logs-bucket"); + assertThat(bucket.getLogging().getLogObjectPrefix()).isEqualTo("test-logs"); + Bucket expectedUpdatedBucket = bucket.toBuilder().setLogging(null).build(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.update(expectedUpdatedBucket)).thenReturn(expectedUpdatedBucket); + initializeBucket(); + Bucket updatedBucket = new Bucket(storage, new BucketInfo.BuilderImpl(expectedUpdatedBucket)); + Bucket actualUpdatedBucket = 
updatedBucket.update(); + assertThat(actualUpdatedBucket.getLogging().getLogBucket()).isNull(); + assertThat(actualUpdatedBucket.getLogging().getLogObjectPrefix()).isNull(); + + verify(storage, times(2)).getOptions(); + verify(storage).update(expectedUpdatedBucket); + } + + @Test + public void testRemoveBucketCORS() { + initializeExpectedBucket(); + List origins = ImmutableList.of(Cors.Origin.of("http://cloud.google.com")); + List httpMethods = ImmutableList.of(HttpMethod.GET); + List responseHeaders = ImmutableList.of("Content-Type"); + Cors cors = + Cors.newBuilder() + .setOrigins(origins) + .setMethods(httpMethods) + .setResponseHeaders(responseHeaders) + .setMaxAgeSeconds(100) + .build(); + BucketInfo bucketInfo = BucketInfo.newBuilder("b").setCors(ImmutableList.of(cors)).build(); + Bucket bucket = new Bucket(serviceMockReturnsOptions, new BucketInfo.BuilderImpl(bucketInfo)); + assertThat(bucket.getCors()).isNotNull(); + assertThat(bucket.getCors().get(0).getMaxAgeSeconds()).isEqualTo(100); + assertThat(bucket.getCors().get(0).getMethods()).isEqualTo(httpMethods); + assertThat(bucket.getCors().get(0).getOrigins()).isEqualTo(origins); + assertThat(bucket.getCors().get(0).getResponseHeaders()).isEqualTo(responseHeaders); + + // Remove bucket CORS configuration. 
+ Bucket expectedUpdatedBucket = bucket.toBuilder().setCors(null).build(); + when(storage.getOptions()).thenReturn(mockOptions); + when(storage.update(expectedUpdatedBucket)).thenReturn(expectedUpdatedBucket); + initializeBucket(); + Bucket updatedBucket = new Bucket(storage, new BucketInfo.BuilderImpl(expectedUpdatedBucket)); + Bucket actualUpdatedBucket = updatedBucket.update(); + assertThat(actualUpdatedBucket.getCors()).isEmpty(); + + verify(storage, times(2)).getOptions(); + verify(storage).update(expectedUpdatedBucket); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BufferHandlePoolTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BufferHandlePoolTest.java new file mode 100644 index 000000000000..85faeb885da6 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BufferHandlePoolTest.java @@ -0,0 +1,96 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.collect.Sets.newHashSet; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.storage.BufferHandlePool.FixedBufferHandlePool; +import com.google.cloud.storage.BufferHandlePool.PooledBuffer; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.util.HashSet; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public final class BufferHandlePoolTest { + + private static ExecutorService exec; + + @BeforeClass + public static void beforeClass() { + ThreadFactory threadFactory = + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("test-bhmt-%d").build(); + exec = Executors.newCachedThreadPool(threadFactory); + } + + @AfterClass + public static void afterClass() { + if (exec != null) { + exec.shutdownNow(); + } + } + + @Test + public void fixedPool_doesNotAllowTheSameBufferToBeReturnedWhilePresent() { + BufferHandle b1 = BufferHandle.allocate(10); + BufferHandle b2 = BufferHandle.allocate(10); + + PooledBuffer p1 = PooledBuffer.of(b1); + PooledBuffer p2 = PooledBuffer.of(b2); + HashSet pooledBuffers = newHashSet(p1, p2); + FixedBufferHandlePool pool = new FixedBufferHandlePool(pooledBuffers); + + PooledBuffer g1 = pool.getBuffer(); + PooledBuffer g2 = pool.getBuffer(); + + pool.returnBuffer(g1); + pool.returnBuffer(g1); + + assertThat(pool.pool).isEqualTo(newHashSet(g1)); + } + + @Test + public void fixedPool_getBuffer_blocksIfEmpty() { + FixedBufferHandlePool pool = FixedBufferHandlePool.of(1, 10); + PooledBuffer p1 = pool.getBuffer(); + + Future f = exec.submit(pool::getBuffer); + 
assertThrows(TimeoutException.class, () -> f.get(10, TimeUnit.MILLISECONDS)); + } + + @Test + public void fixedPool_returnBuffer_blocksIfFull() { + FixedBufferHandlePool pool = FixedBufferHandlePool.of(1, 10); + + PooledBuffer imposter = PooledBuffer.of(BufferHandle.allocate(5)); + Future f = + exec.submit( + () -> { + pool.returnBuffer(imposter); + return null; + }); + assertThrows(TimeoutException.class, () -> f.get(10, TimeUnit.MILLISECONDS)); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BufferHandleTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BufferHandleTest.java new file mode 100644 index 000000000000..d88b56b2d6b9 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BufferHandleTest.java @@ -0,0 +1,114 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.BufferHandle.LazyBufferHandle; +import java.nio.ByteBuffer; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.Assert; +import org.junit.Test; + +public final class BufferHandleTest { + + @Test + public void lazyBufferHandle_methodsBehaveTheSameAsAnEmptyByteBuffer() { + int capacity = 10; + ByteBuffer baseline = ByteBuffer.allocate(capacity); + LazyBufferHandle handle = + new LazyBufferHandle( + capacity, + i -> { + Assert.fail("should not be called"); + return null; + }); + + assertThat(handle.remaining()).isEqualTo(baseline.remaining()); + assertThat(handle.position()).isEqualTo(baseline.position()); + assertThat(handle.capacity()).isEqualTo(baseline.capacity()); + } + + @Test + public void lazyBufferHandle_afterAllocationOnlyBackingIsReferenced() { + int capacity = 10; + ByteBuffer baseline = ByteBuffer.allocate(capacity); + AtomicBoolean alloc = new AtomicBoolean(false); + LazyBufferHandle handle = + new LazyBufferHandle( + capacity, + i -> { + alloc.compareAndSet(false, true); + return ByteBuffer.allocate(capacity); + }); + + assertThat(handle.remaining()).isEqualTo(baseline.remaining()); + assertThat(handle.position()).isEqualTo(baseline.position()); + assertThat(handle.capacity()).isEqualTo(baseline.capacity()); + + byte[] bytes = new byte[] {(byte) 'a', (byte) 'b'}; + handle.get().put(bytes); + assertThat(alloc.get()).isTrue(); + assertThat(handle.remaining()).isEqualTo(8); + assertThat(handle.position()).isEqualTo(2); + assertThat(handle.capacity()).isEqualTo(capacity); + } + + @Test + public void lazyBufferHandle_initIsThreadSafe() throws ExecutionException, InterruptedException { + int capacity = 10; + ExecutorService exec = 
Executors.newFixedThreadPool(2); + AtomicBoolean alloc = new AtomicBoolean(false); + LazyBufferHandle handle = + new LazyBufferHandle( + capacity, + i -> { + alloc.compareAndSet(false, true); + return ByteBuffer.allocate(capacity); + }); + + Future f1 = exec.submit(handle::get); + Future f2 = exec.submit(handle::get); + + assertThat(f1.get()).isSameInstanceAs(f2.get()); + + assertThat(handle.get().capacity()).isEqualTo(capacity); + } + + @Test + public void eagerBufferHandle_methodsBehaveTheSameAsAnEmptyByteBuffer() { + int capacity = 10; + ByteBuffer baseline = ByteBuffer.allocate(capacity); + BufferHandle handle = BufferHandle.handleOf(baseline); + + assertThat(handle.remaining()).isEqualTo(baseline.remaining()); + assertThat(handle.position()).isEqualTo(baseline.position()); + assertThat(handle.capacity()).isEqualTo(baseline.capacity()); + + byte[] bytes = new byte[] {(byte) 'a', (byte) 'b'}; + baseline.put(bytes); + assertThat(handle.remaining()).isEqualTo(8); + assertThat(handle.position()).isEqualTo(2); + assertThat(handle.capacity()).isEqualTo(capacity); + + assertThat(handle.get()).isSameInstanceAs(baseline); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BufferToDiskThenUploadTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BufferToDiskThenUploadTest.java new file mode 100644 index 000000000000..04ddbd6c10fb --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BufferToDiskThenUploadTest.java @@ -0,0 +1,174 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.BlobWriteSessionConfig.WriterFactory; +import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.cloud.storage.it.ChecksummedTestContent; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.WritableByteChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.atomic.AtomicReference; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestName; + +public final class BufferToDiskThenUploadTest { + + @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); + @Rule public final TestName testName = new TestName(); + + @Test + public void happyPath() throws IOException { + Path tempDir = temporaryFolder.newFolder(testName.getMethodName()).toPath(); + + BufferToDiskThenUpload btdtu = + BlobWriteSessionConfigs.bufferToDiskThenUpload(tempDir).withIncludeLoggingSink(); + TestClock clock = TestClock.tickBy(Instant.EPOCH, Duration.ofSeconds(1)); + WriterFactory factory = btdtu.createFactory(clock); + + BlobInfo blobInfo = BlobInfo.newBuilder("bucket", "object").build(); + AtomicReference actualBytes = new AtomicReference<>(null); + WritableByteChannelSession writeSession = + factory.writeSession( + 
new StorageInternal() { + @Override + public BlobInfo internalCreateFrom( + Path path, BlobInfo info, Opts opts) throws IOException { + byte[] actual = Files.readAllBytes(path); + actualBytes.compareAndSet(null, actual); + return info; + } + }, + blobInfo, + Opts.empty()); + + byte[] bytes = DataGenerator.base64Characters().genBytes(128); + try (WritableByteChannel open = writeSession.open()) { + open.write(ByteBuffer.wrap(bytes)); + } + String xxdActual = xxd(actualBytes.get()); + String xxdExpected = xxd(bytes); + assertThat(xxdActual).isEqualTo(xxdExpected); + } + + @Test + public void crc32c_default() throws IOException { + Path tempDir = temporaryFolder.newFolder(testName.getMethodName()).toPath(); + + BufferToDiskThenUpload btdtu = BlobWriteSessionConfigs.bufferToDiskThenUpload(tempDir); + TestClock clock = TestClock.tickBy(Instant.EPOCH, Duration.ofSeconds(1)); + WriterFactory factory = btdtu.createFactory(clock); + + BlobInfo blobInfo = BlobInfo.newBuilder("bucket", "object").build(); + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(128)); + WritableByteChannelSession writeSession = + factory.writeSession( + new StorageInternal() { + @Override + public BlobInfo internalCreateFrom( + Path path, BlobInfo info, Opts opts) { + assertThat(info.getCrc32c()).isEqualTo(testContent.getCrc32cBase64()); + assertThat(opts) + .isEqualTo(Opts.from(UnifiedOpts.crc32cMatch(testContent.getCrc32c()))); + return info; + } + }, + blobInfo, + Opts.empty()); + + try (WritableByteChannel open = writeSession.open()) { + open.write(ByteBuffer.wrap(testContent.getBytes())); + } + } + + @Test + public void userProvidedCrc32cTakesPriority() throws IOException { + Path tempDir = temporaryFolder.newFolder(testName.getMethodName()).toPath(); + + BufferToDiskThenUpload btdtu = BlobWriteSessionConfigs.bufferToDiskThenUpload(tempDir); + TestClock clock = TestClock.tickBy(Instant.EPOCH, Duration.ofSeconds(1)); + WriterFactory 
factory = btdtu.createFactory(clock); + + BlobInfo blobInfo = + BlobInfo.newBuilder("bucket", "object") + .setCrc32c(Utils.crc32cCodec.encode(737)) + .setMd5("something") + .build(); + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(128)); + Opts origOpts = Opts.from(UnifiedOpts.crc32cMatch(737)); + WritableByteChannelSession writeSession = + factory.writeSession( + new StorageInternal() { + @Override + public BlobInfo internalCreateFrom( + Path path, BlobInfo info, Opts opts) { + assertThat(Utils.crc32cCodec.decode(info.getCrc32c())).isEqualTo(737); + assertThat(opts).isEqualTo(origOpts); + return info; + } + }, + blobInfo, + origOpts); + + try (WritableByteChannel open = writeSession.open()) { + open.write(ByteBuffer.wrap(testContent.getBytes())); + } + } + + @Test + public void userProvidedMd5TakesPriority() throws IOException { + Path tempDir = temporaryFolder.newFolder(testName.getMethodName()).toPath(); + + BufferToDiskThenUpload btdtu = BlobWriteSessionConfigs.bufferToDiskThenUpload(tempDir); + TestClock clock = TestClock.tickBy(Instant.EPOCH, Duration.ofSeconds(1)); + WriterFactory factory = btdtu.createFactory(clock); + + BlobInfo blobInfo = BlobInfo.newBuilder("bucket", "object").setMd5("something").build(); + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(128)); + Opts origOpts = Opts.from(UnifiedOpts.md5Match("something")); + WritableByteChannelSession writeSession = + factory.writeSession( + new StorageInternal() { + @Override + public BlobInfo internalCreateFrom( + Path path, BlobInfo info, Opts opts) { + assertThat(info.getMd5()).isEqualTo("something"); + assertThat(opts).isEqualTo(origOpts); + return info; + } + }, + blobInfo, + origOpts); + + try (WritableByteChannel open = writeSession.open()) { + open.write(ByteBuffer.wrap(testContent.getBytes())); + } + } +} diff --git 
a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BuffersTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BuffersTest.java new file mode 100644 index 000000000000..f0d760cb8559 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/BuffersTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import java.nio.ByteBuffer; +import java.security.SecureRandom; +import org.junit.Test; + +public final class BuffersTest { + + @Test + public void copy() { + SecureRandom rand = new SecureRandom(); + ByteBuffer content = DataGenerator.rand(rand).genByteBuffer(2048); + + ByteBuffer[] bufs = { + ByteBuffer.allocate(1), + ByteBuffer.allocate(2), + ByteBuffer.allocate(4), + ByteBuffer.allocate(8), + ByteBuffer.allocate(27), + }; + + long copy = 0; + for (long read = 0; content.hasRemaining() && (read = Buffers.copy(content, bufs)) != -1; ) { + for (ByteBuffer buf : bufs) { + if (!buf.hasRemaining()) { + buf.clear(); + } + } + copy += read; + } + assertThat(copy).isEqualTo(2048); + } + + @Test + public void allocateAligned_nonDivisible_capacityGtAlignment() { + ByteBuffer b1 = Buffers.allocateAligned(3, 2); + assertThat(b1.capacity()).isEqualTo(4); + } + + @Test + public void 
allocateAligned_nonDivisible_capacityLtAlignment() { + ByteBuffer b1 = Buffers.allocateAligned(1, 2); + assertThat(b1.capacity()).isEqualTo(2); + } + + @Test + public void allocateAligned_evenlyDivisible_capacityLtAlignment() { + ByteBuffer b1 = Buffers.allocateAligned(2, 4); + assertThat(b1.capacity()).isEqualTo(4); + } + + @Test + public void allocateAligned_evenlyDivisible_capacityGtAlignment() { + ByteBuffer b1 = Buffers.allocateAligned(8, 4); + assertThat(b1.capacity()).isEqualTo(8); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ByteRangeSpecTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ByteRangeSpecTest.java new file mode 100644 index 000000000000..66e1bc9688e2 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ByteRangeSpecTest.java @@ -0,0 +1,840 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteRangeSpec.EFFECTIVE_INFINITY; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static java.util.Objects.requireNonNull; + +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Streams; +import com.google.storage.v2.ReadObjectRequest; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.Arrays; +import java.util.Locale; +import java.util.stream.Stream; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.Test; +import org.junit.experimental.runners.Enclosed; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Enclosed.class) +public final class ByteRangeSpecTest { + + public static final class Behavior { + + @Test + public void negativeBeginOffset() throws Exception { + ByteRangeSpec rel = ByteRangeSpec.relativeLength(-5L, null); + ByteRangeSpec exO = ByteRangeSpec.explicit(-5L, null); + ByteRangeSpec exC = ByteRangeSpec.explicitClosed(-5L, null); + threeWayEqual(exO, exC, rel); + } + + @Test + public void negativeBeginOffset_fromNull() { + ByteRangeSpec spec = ByteRangeSpec.nullRange().withNewBeginOffset(-5L); + assertThat(spec.getHttpRangeHeader()).isEqualTo("bytes=-5"); + } + + @Test + public void beginNonNullZero_endNonNullNonInfinity() throws Exception { + ByteRangeSpec rel = ByteRangeSpec.relativeLength(0L, 52L); + ByteRangeSpec exO = ByteRangeSpec.explicit(0L, 52L); + ByteRangeSpec exC = ByteRangeSpec.explicitClosed(0L, 51L); + + threeWayEqual(exO, exC, rel); + } + + @Test + public void 
beginNonNullNonZero_endNonNullNonInfinity() throws Exception { + ByteRangeSpec rel = ByteRangeSpec.relativeLength(10L, 10L); + ByteRangeSpec exO = ByteRangeSpec.explicit(10L, 20L); + ByteRangeSpec exC = ByteRangeSpec.explicitClosed(10L, 19L); + + threeWayEqual(exO, exC, rel); + } + + @Test + public void beginNull_endNonNullNonInfinity() throws Exception { + ByteRangeSpec rel = ByteRangeSpec.relativeLength(null, 10L); + ByteRangeSpec exO = ByteRangeSpec.explicit(null, 10L); + ByteRangeSpec exC = ByteRangeSpec.explicitClosed(null, 9L); + + threeWayEqual(exO, exC, rel); + } + + @Test + public void beginNonNullNonZero_endNull() throws Exception { + ByteRangeSpec rel = ByteRangeSpec.relativeLength(10L, null); + ByteRangeSpec exO = ByteRangeSpec.explicit(10L, null); + ByteRangeSpec exC = ByteRangeSpec.explicitClosed(10L, null); + + threeWayEqual(exO, exC, rel); + } + + @Test + public void bothNull_relative() { + assertThat(ByteRangeSpec.relativeLength(null, null)) + .isSameInstanceAs(ByteRangeSpec.nullRange()); + } + + @Test + public void bothNull_explicit() { + assertThat(ByteRangeSpec.explicit(null, null)).isSameInstanceAs(ByteRangeSpec.nullRange()); + } + + @Test + public void bothNull_explicitClosed() { + assertThat(ByteRangeSpec.explicitClosed(null, null)) + .isSameInstanceAs(ByteRangeSpec.nullRange()); + } + + @Test + public void httpRangeHeaderIsCached() { + ByteRangeSpec relative = ByteRangeSpec.relativeLength(5L, null); + + String header1 = relative.getHttpRangeHeader(); + String header2 = relative.getHttpRangeHeader(); + + assertThat(header1).isSameInstanceAs(header2); + } + + @Test + public void withNewBeginOffset_sameInstanceIfNotDifferent_relative() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + assertThat(spec.withNewBeginOffset(3L)).isSameInstanceAs(spec); + } + + @Test + public void withNewBeginOffset_sameInstanceIfNotDifferent_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + 
assertThat(spec.withNewBeginOffset(0)).isSameInstanceAs(spec); + } + + @Test + public void withNewBeginOffset_sameInstanceIfNotDifferent_leftClosed() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + assertThat(spec.withNewBeginOffset(3L)).isSameInstanceAs(spec); + } + + @Test + public void withNewBeginOffset_sameInstanceIfNotDifferent_leftClosedRightOpen() { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 10L); + assertThat(spec.withNewBeginOffset(3L)).isSameInstanceAs(spec); + } + + @Test + public void withNewBeginOffset_sameInstanceIfNotDifferent_leftClosedRightClosed() { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + assertThat(spec.withNewBeginOffset(3L)).isSameInstanceAs(spec); + } + + @Test + public void withShiftBeginOffset_sameInstanceIfNotDifferent_relative() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + assertThat(spec.withShiftBeginOffset(0)).isSameInstanceAs(spec); + } + + @Test + public void withShiftBeginOffset_sameInstanceIfNotDifferent_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + assertThat(spec.withShiftBeginOffset(0)).isSameInstanceAs(spec); + } + + @Test + public void withShiftBeginOffset_sameInstanceIfNotDifferent_leftClosed() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + assertThat(spec.withShiftBeginOffset(0)).isSameInstanceAs(spec); + } + + @Test + public void withShiftBeginOffset_sameInstanceIfNotDifferent_leftClosedRightOpen() { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 10L); + assertThat(spec.withShiftBeginOffset(0)).isSameInstanceAs(spec); + } + + @Test + public void withShiftBeginOffset_sameInstanceIfNotDifferent_leftClosedRightClosed() { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + assertThat(spec.withShiftBeginOffset(0)).isSameInstanceAs(spec); + } + + @Test + public void withRelativeLength_sameInstanceIfNotDifferent_relative() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + 
assertThat(spec.withNewRelativeLength(10L)).isSameInstanceAs(spec); + } + + @Test + public void withNewEndOffset_sameInstanceIfNotDifferent_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + assertThat(spec.withNewEndOffset(EFFECTIVE_INFINITY)).isSameInstanceAs(spec); + } + + @Test + public void withNewEndOffsetClosed_sameInstanceIfNotDifferent_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + assertThat(spec.withNewEndOffsetClosed(EFFECTIVE_INFINITY)).isSameInstanceAs(spec); + } + + @Test + public void withNewRelativeLength_sameInstanceIfNotDifferent_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + assertThat(spec.withNewRelativeLength(EFFECTIVE_INFINITY)).isSameInstanceAs(spec); + } + + @Test + public void withNewEndOffset_sameInstanceIfNotDifferent_leftClosedRightOpen() { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 41L); + assertThat(spec.withNewEndOffset(41L)).isSameInstanceAs(spec); + } + + @Test + public void withNewEndOffsetClosed_sameInstanceIfNotDifferent_leftClosedRightClosed() { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 41L); + assertThat(spec.withNewEndOffsetClosed(41L)).isSameInstanceAs(spec); + } + + @Test + public void withNewBeginOffset_relative() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + ByteRangeSpec actual = spec.withNewBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(4); + } + + @Test + public void withNewBeginOffset_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + ByteRangeSpec actual = spec.withNewBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(4); + } + + @Test + public void withNewBeginOffset_leftClosed() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + ByteRangeSpec actual = spec.withNewBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(4); + } + + @Test + public void withNewBeginOffset_leftClosedRightOpen() { + ByteRangeSpec spec = 
ByteRangeSpec.explicit(3L, 10L); + ByteRangeSpec actual = spec.withNewBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(4); + } + + @Test + public void withNewBeginOffset_leftClosedRightClosed() { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + ByteRangeSpec actual = spec.withNewBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(4); + } + + @Test + public void withShiftBeginOffset_relative() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + ByteRangeSpec actual = spec.withShiftBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(7); + } + + @Test + public void withShiftBeginOffset_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + ByteRangeSpec actual = spec.withShiftBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(4); + } + + @Test + public void withShiftBeginOffset_leftClosed() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + ByteRangeSpec actual = spec.withShiftBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(7); + } + + @Test + public void withShiftBeginOffset_leftClosedRightOpen() { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 10L); + ByteRangeSpec actual = spec.withShiftBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(7); + } + + @Test + public void withShiftBeginOffset_leftClosedRightClosed() { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + ByteRangeSpec actual = spec.withShiftBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(7); + } + + @Test + public void withNewEndOffset_relative() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + ByteRangeSpec actual = spec.withNewEndOffset(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffset_null() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + 
ByteRangeSpec actual = spec.withNewEndOffset(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(0), + () -> assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffset_leftClosed() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + ByteRangeSpec actual = spec.withNewEndOffset(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffset_leftClosedRightOpen() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 10L); + ByteRangeSpec actual = spec.withNewEndOffset(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffset_leftClosedRightClosed() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + ByteRangeSpec actual = spec.withNewEndOffset(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffsetClosed_relative() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + ByteRangeSpec actual = spec.withNewEndOffsetClosed(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffsetInclusive()).isEqualTo(4)); + } + + @Test + public void withNewEndOffsetClosed_null() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + ByteRangeSpec actual = spec.withNewEndOffsetClosed(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(0), + () -> assertThat(actual.endOffsetInclusive()).isEqualTo(4)); + } + + @Test + public void withNewEndOffsetClosed_leftClosed() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + ByteRangeSpec actual = spec.withNewEndOffsetClosed(4L); + 
assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffsetInclusive()).isEqualTo(4)); + } + + @Test + public void withNewEndOffsetClosed_leftClosedRightOpen() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 10L); + ByteRangeSpec actual = spec.withNewEndOffsetClosed(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffsetInclusive()).isEqualTo(4)); + } + + @Test + public void withNewEndOffsetClosed_leftClosedRightClosed() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + ByteRangeSpec actual = spec.withNewEndOffsetClosed(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffsetInclusive()).isEqualTo(4)); + } + + @Test + public void withNewRelativeLength_relative() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + ByteRangeSpec actual = spec.withNewRelativeLength(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.length()).isEqualTo(4)); + } + + @Test + public void withNewRelativeLength_null() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + ByteRangeSpec actual = spec.withNewRelativeLength(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(0), + () -> assertThat(actual.length()).isEqualTo(4)); + } + + @Test + public void withNewRelativeLength_leftClosed() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + ByteRangeSpec actual = spec.withNewRelativeLength(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.length()).isEqualTo(4)); + } + + @Test + public void withNewRelativeLength_leftClosedRightOpen() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 10L); + ByteRangeSpec actual = spec.withNewRelativeLength(4L); + assertAll( + () -> 
assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.length()).isEqualTo(4)); + } + + @Test + public void withNewRelativeLength_leftClosedRightClosed() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + ByteRangeSpec actual = spec.withNewRelativeLength(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.length()).isEqualTo(4)); + } + + @SuppressWarnings("EqualsBetweenInconvertibleTypes") + @Test + public void negativeEquals() { + assertThat(ByteRangeSpec.nullRange().equals("")).isFalse(); + } + + @Test + public void nullRangeShouldBeASingletonAcrossJavaSerialization() + throws IOException, ClassNotFoundException { + ByteRangeSpec orig = ByteRangeSpec.nullRange(); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (ObjectOutputStream oos = new ObjectOutputStream(baos)) { + oos.writeObject(orig); + } + + byte[] serializedBytes = baos.toByteArray(); + ByteRangeSpec deserialized; + try (ByteArrayInputStream bais = new ByteArrayInputStream(serializedBytes); + ObjectInputStream ois = new ObjectInputStream(bais)) { + deserialized = (ByteRangeSpec) ois.readObject(); + } + assertThat(deserialized).isSameInstanceAs(orig); + } + } + + private static void threeWayEqual( + ByteRangeSpec explicitO, ByteRangeSpec explicitC, ByteRangeSpec relative) throws Exception { + + assertAll( + () -> assertThat(explicitO).isEqualTo(relative), + () -> assertThat(explicitO).isEqualTo(explicitC), + () -> assertThat(explicitC).isEqualTo(relative)); + } + + @RunWith(Parameterized.class) + public static final class RangeScenarios { + + private final RangeScenario rs; + private final RangeScenario.Expectations expect; + + public RangeScenarios(RangeScenario rs) { + this.rs = rs; + this.expect = rs.getExpectations(); + } + + @Test + public void httpRangeHeader() { + assertThat(rs.getSpec().getHttpRangeHeader()).isEqualTo(expect.getHttpRange()); + } + + @Test + public void 
seekReadObjectRequest() { + ByteRangeSpec spec = rs.getSpec(); + ReadObjectRequest actual = spec.seekReadObjectRequest(ReadObjectRequest.newBuilder()).build(); + assertThat(actual).isEqualTo(expect.getReadObjectRequest()); + } + + @Test + public void beginOffset() { + assertThat(rs.getSpec().beginOffset()).isEqualTo(expect.getBeginOffset()); + } + + @Test + public void endOffset() { + assertThat(rs.getSpec().endOffset()).isEqualTo(expect.getEndOffset()); + } + + @Test + public void endOffsetInclusive() { + assertThat(rs.getSpec().endOffsetInclusive()).isEqualTo(expect.getEndOffsetInclusive()); + } + + @Test + public void length() { + assertThat(rs.getSpec().length()).isEqualTo(expect.getLength()); + } + + @Parameters(name = "{0}") + public static Iterable testCases() { + // expect that by default, a range should be from zero to infinity + Stream bothNullOrEmpty = + RangeScenario.expectThat() + .beginOffset(0L) + .endOffset(EFFECTIVE_INFINITY) + .endOffsetInclusive(EFFECTIVE_INFINITY) + .length(EFFECTIVE_INFINITY) + .httpRange(null) + .readObjectRequest(reqId()) + .isApplicableTo( + ByteRangeSpec.relativeLength(null, null), + ByteRangeSpec.explicit(null, null), + ByteRangeSpec.explicitClosed(null, null), + ByteRangeSpec.relativeLength(0L, null), + ByteRangeSpec.explicit(0L, null), + ByteRangeSpec.explicitClosed(0L, null), + ByteRangeSpec.relativeLength(null, EFFECTIVE_INFINITY), + ByteRangeSpec.explicit(null, EFFECTIVE_INFINITY), + ByteRangeSpec.explicitClosed(null, EFFECTIVE_INFINITY), + ByteRangeSpec.relativeLength(0L, EFFECTIVE_INFINITY), + ByteRangeSpec.explicit(0L, EFFECTIVE_INFINITY), + ByteRangeSpec.explicitClosed(0L, EFFECTIVE_INFINITY)); + // expect that, if the range is effectively LeftClosed only, all upper bounds should + // be EFFECTIVE_INFINITY and the requests should be open-ended + Stream effectivelyOnlyBegin = + RangeScenario.expectThat() + .beginOffset(3L) + .endOffset(EFFECTIVE_INFINITY) + .endOffsetInclusive(EFFECTIVE_INFINITY) + 
.length(EFFECTIVE_INFINITY) + .httpRange(headerRangeOpen(3)) + .readObjectRequest(reqOpen(3)) + .isApplicableTo( + ByteRangeSpec.relativeLength(3L, null), + ByteRangeSpec.explicit(3L, null), + ByteRangeSpec.explicitClosed(3L, null), + // effective infinity means it should not impact things + ByteRangeSpec.relativeLength(3L, EFFECTIVE_INFINITY), + ByteRangeSpec.explicit(3L, EFFECTIVE_INFINITY), + ByteRangeSpec.explicitClosed(3L, EFFECTIVE_INFINITY)); + // expect that, if the range is effectively Right{Closed,Open} only, lower bounds should + // be zero and the requests should be bounded from 0-N + Stream effectivelyOnlyEnd = + Stream.concat( + RangeScenario.expectThat() + .beginOffset(0L) + .endOffset(131L) + .endOffsetInclusive(130L) + .length(131L) + .bounded() + .isApplicableTo( + ByteRangeSpec.relativeLength(null, 131L), + ByteRangeSpec.explicit(null, 131L), + ByteRangeSpec.explicitClosed(null, 130L)), + RangeScenario.expectThat() + .beginOffset(0L) + .endOffset(251L) + .endOffsetInclusive(250L) + .length(251L) + .bounded() + .isApplicableTo( + ByteRangeSpec.relativeLength(0L, 251L), + ByteRangeSpec.explicit(0L, 251L), + ByteRangeSpec.explicitClosed(0L, 250L))); + + // expect that, when a range has both a lower and upper bound, all values match along with + // requests being bounded + Stream bothSpecified = + RangeScenario.expectThat() + .beginOffset(4L) + .endOffset(10L) + .endOffsetInclusive(9L) + .length(6L) + .bounded() + .isApplicableTo( + ByteRangeSpec.relativeLength(4L, 6L), + ByteRangeSpec.explicit(4L, 10L), + ByteRangeSpec.explicitClosed(4L, 9L)); + + // variable name should be read as "effective max minus zero" + long effectiveMax_0 = EFFECTIVE_INFINITY - 1; + // variable name should be read as "effective max minus one" + long effectiveMax_1 = effectiveMax_0 - 1; + // edge cases near default values + Stream edgeCases = + Streams.concat( + // expect that, if the range is effectively LeftClosed only, all upper bounds should + // be EFFECTIVE_INFINITY and 
the requests should be open-ended + RangeScenario.expectThat() + .beginOffset(1L) + .endOffset(EFFECTIVE_INFINITY) + .endOffsetInclusive(EFFECTIVE_INFINITY) + .length(EFFECTIVE_INFINITY) + .httpRange(headerRangeOpen(1L)) + .readObjectRequest(reqOpen(1L)) + .isApplicableTo( + ByteRangeSpec.relativeLength(1L, null), + ByteRangeSpec.explicit(1L, null), + ByteRangeSpec.explicitClosed(1L, null)), + // expect that, we can start reading from effective_infinity with not upper bound + RangeScenario.expectThat() + .beginOffset(EFFECTIVE_INFINITY) + .endOffset(EFFECTIVE_INFINITY) + .endOffsetInclusive(EFFECTIVE_INFINITY) + .length(EFFECTIVE_INFINITY) + .httpRange(headerRangeOpen(EFFECTIVE_INFINITY)) + .readObjectRequest(reqOpen(EFFECTIVE_INFINITY)) + .isApplicableTo( + ByteRangeSpec.relativeLength(EFFECTIVE_INFINITY, null), + ByteRangeSpec.explicit(EFFECTIVE_INFINITY, null), + ByteRangeSpec.explicitClosed(EFFECTIVE_INFINITY, null)), + // expect that, we can read up to Long.MAX_VALUE - 1 + RangeScenario.expectThat() + .beginOffset(0L) + .endOffset(effectiveMax_0) + .endOffsetInclusive(effectiveMax_1) + .length(effectiveMax_0) + .bounded() + .isApplicableTo( + ByteRangeSpec.relativeLength(null, effectiveMax_0), + ByteRangeSpec.explicit(null, effectiveMax_0), + ByteRangeSpec.explicitClosed(null, effectiveMax_1)), + // expect that, we can read from 1 up to Long.MAX_VALUE - 1 + RangeScenario.expectThat() + .beginOffset(1L) + .endOffset(effectiveMax_0) + .endOffsetInclusive(effectiveMax_1) + .length(effectiveMax_1) + .bounded() + .isApplicableTo( + ByteRangeSpec.relativeLength(1L, effectiveMax_1), + ByteRangeSpec.explicit(1L, effectiveMax_0), + ByteRangeSpec.explicitClosed(1L, effectiveMax_1))); + + return Streams.concat( + bothNullOrEmpty, effectivelyOnlyBegin, effectivelyOnlyEnd, bothSpecified, edgeCases) + .map(rs -> new Object[] {rs}) + .collect(ImmutableList.toImmutableList()); + } + } + + private static String headerRangeOpen(long min) { + return String.format(Locale.US, 
"bytes=%d-", min); + } + + private static String headerRangeClosed(long min, long max) { + return String.format(Locale.US, "bytes=%d-%d", min, max); + } + + private static ReadObjectRequest reqOpen(long offset) { + return ReadObjectRequest.newBuilder().setReadOffset(offset).build(); + } + + private static ReadObjectRequest reqBounded(long offset, long length) { + return ReadObjectRequest.newBuilder().setReadOffset(offset).setReadLimit(length).build(); + } + + private static ReadObjectRequest reqId() { + return ReadObjectRequest.getDefaultInstance(); + } + + private static final class RangeScenario { + private final ByteRangeSpec spec; + private final Expectations expectations; + + private RangeScenario(ByteRangeSpec spec, Expectations expectations) { + this.spec = spec; + this.expectations = expectations; + } + + public ByteRangeSpec getSpec() { + return spec; + } + + public Expectations getExpectations() { + return expectations; + } + + @Override + public String toString() { + return String.format( + Locale.US, "Expect that %s is applicable to %s", expectations.testNameFormat(), spec); + } + + static ExpectationsBuilder expectThat() { + return new ExpectationsBuilder(); + } + + private static final class Expectations { + private final long beginOffset; + private final long endOffset; + private final long endOffsetInclusive; + private final long length; + @Nullable private final String httpRange; + @NonNull private final ReadObjectRequest readObjectRequest; + + private Expectations( + long beginOffset, + long endOffset, + long endOffsetInclusive, + long length, + @Nullable String httpRange, + @NonNull ReadObjectRequest readObjectRequest) { + this.beginOffset = beginOffset; + this.endOffset = endOffset; + this.endOffsetInclusive = endOffsetInclusive; + this.length = length; + this.httpRange = httpRange; + this.readObjectRequest = readObjectRequest; + } + + public long getBeginOffset() { + return beginOffset; + } + + public long getEndOffset() { + return endOffset; + 
} + + public long getEndOffsetInclusive() { + return endOffsetInclusive; + } + + public long getLength() { + return length; + } + + public @Nullable String getHttpRange() { + return httpRange; + } + + public @NonNull ReadObjectRequest getReadObjectRequest() { + return readObjectRequest; + } + + String testNameFormat() { + return MoreObjects.toStringHelper("") + .add("bo", fmt(beginOffset)) + .add("eo", fmt(endOffset)) + .add("eoi", fmt(endOffsetInclusive)) + .add("l", fmt(length)) + .toString(); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("beginOffset", fmt(beginOffset)) + .add("endOffset", fmt(endOffset)) + .add("endOffsetInclusive", fmt(endOffsetInclusive)) + .add("length", fmt(length)) + .add("httpRange", httpRange) + .add("readObjectRequest", StorageV2ProtoUtils.fmtProto(readObjectRequest)) + .toString(); + } + + private static String fmt(@Nullable Long l) { + if (l == null) { + return null; + } else if (l == Long.MAX_VALUE) { + return "Long.MAX_VALUE"; + } else { + long diff = Long.MAX_VALUE - l; + // When testing near the upperbound of Long it can be challenging to read how close we are + // to the max value at a glance. In an effort to help this, for any value that is within + // 20 of Long.MAX_VALUE format it as a difference. 
+ if (diff <= 20) { + return String.format(Locale.US, "(Long.MAX_VALUE - %d)", diff); + } else { + return l.toString(); + } + } + } + } + + private static final class ExpectationsBuilder { + private Long beginOffset; + private Long endOffset; + private Long endOffsetInclusive; + private Long length; + @Nullable private String httpRange; + @NonNull private ReadObjectRequest readObjectRequest; + + public ExpectationsBuilder beginOffset(Long beginOffset) { + this.beginOffset = beginOffset; + return this; + } + + public ExpectationsBuilder endOffset(Long endOffset) { + this.endOffset = endOffset; + return this; + } + + public ExpectationsBuilder endOffsetInclusive(Long endOffsetInclusive) { + this.endOffsetInclusive = endOffsetInclusive; + return this; + } + + public ExpectationsBuilder length(Long length) { + this.length = length; + return this; + } + + public ExpectationsBuilder httpRange(String httpRange) { + this.httpRange = httpRange; + return this; + } + + public ExpectationsBuilder readObjectRequest(ReadObjectRequest readObjectRequest) { + this.readObjectRequest = readObjectRequest; + return this; + } + + public ExpectationsBuilder bounded() { + return this.httpRange(headerRangeClosed(beginOffset, endOffsetInclusive)) + .readObjectRequest(reqBounded(beginOffset, length)); + } + + public Stream isApplicableTo(ByteRangeSpec... 
brss) { + Expectations expectations = this.build(); + return Arrays.stream(brss).map(brs -> new RangeScenario(brs, expectations)); + } + + private Expectations build() { + return new Expectations( + requireNonNull(beginOffset, "beginOffset must be non null"), + requireNonNull(endOffset, "endOffset must be non null"), + requireNonNull(endOffsetInclusive, "endOffsetInclusive must be non null"), + requireNonNull(length, "length must be non null"), + httpRange, + requireNonNull(readObjectRequest, "readObjectRequest must be non null")); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/CIUtils.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/CIUtils.java new file mode 100644 index 000000000000..1c4b96322223 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/CIUtils.java @@ -0,0 +1,52 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +public final class CIUtils { + + private static final String CI_VERBOSE_RUN_KEY = "CI_VERBOSE_RUN"; + + private CIUtils() {} + + public static boolean verbose() { + String ciVerboseRun = System.getenv(CI_VERBOSE_RUN_KEY); + if (ciVerboseRun == null) { + ciVerboseRun = System.getProperty(CI_VERBOSE_RUN_KEY); + } + return Boolean.parseBoolean(ciVerboseRun); + } + + public static boolean isRunningInCI() { + return isJobTypeUnit() || isJobTypeIntegration(); + } + + public static boolean isJobTypeUnit() { + return isJobTypeEq("test"); + } + + public static boolean isJobTypeIntegration() { + return isJobTypeEq("integration"); + } + + public static boolean isRunningOnGitHubActions() { + return System.getenv("GITHUB_JOB") != null; + } + + private static boolean isJobTypeEq(String integration) { + return integration.equals(System.getenv("JOB_TYPE")); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/CanonicalExtensionHeadersSerializerTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/CanonicalExtensionHeadersSerializerTest.java new file mode 100644 index 000000000000..5df61aa22e35 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/CanonicalExtensionHeadersSerializerTest.java @@ -0,0 +1,97 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static org.junit.Assert.assertEquals; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; + +public class CanonicalExtensionHeadersSerializerTest { + + private CanonicalExtensionHeadersSerializer v2Serializer; + private CanonicalExtensionHeadersSerializer v4Serializer; + + @Before + public void setUp() { + v2Serializer = + new CanonicalExtensionHeadersSerializer(Storage.SignUrlOption.SignatureVersion.V2); + v4Serializer = + new CanonicalExtensionHeadersSerializer(Storage.SignUrlOption.SignatureVersion.V4); + } + + @Test + public void givenNoHeadersWhenSerializeThenProduceNothing() { + + StringBuilder sb = v2Serializer.serialize(Collections.emptyMap()); + + assertEquals(sb.toString(), ""); + } + + @Test + public void givenNullHeadersWhenSerializeThenProduceNothing() { + + StringBuilder sb = v2Serializer.serialize(null); + + assertEquals(sb.toString(), ""); + } + + @Test + public void givenEncryptionHeadersWhenSerializeThenAreRemvoed() { + + Map encryptionHeaders = new HashMap<>(); + encryptionHeaders.put("x-goog-encryption-key", ""); + encryptionHeaders.put("x-goog-encryption-key-sha256", ""); + + StringBuilder sb = v2Serializer.serialize(encryptionHeaders); + + assertEquals(sb.toString(), ""); + } + + @Test + public void givenHeadersWhenSerializeThenSuccess() { + + Map encryptionHeaders = new HashMap<>(); + encryptionHeaders.put("x-goog-encryption-key", ""); + encryptionHeaders.put("x-GOOg-acl", " \n public-read "); + encryptionHeaders.put("x-goog-encryption-key-sha256", ""); + encryptionHeaders.put("X-goog-meta-OWNER", " myself and others \n"); + + StringBuilder sb = v2Serializer.serialize(encryptionHeaders); + + assertEquals(sb.toString(), "x-goog-acl:public-read\nx-goog-meta-owner:myself and others\n"); + } + + @Test + public void testV4Serialization() { + Map encryptionHeaders = new HashMap<>(); + 
encryptionHeaders.put("x-goog-encryption-key", "key"); + encryptionHeaders.put("x-GOOg-acl", " \n public-read "); + encryptionHeaders.put("x-goog-encryption-key-sha256", "sha"); + encryptionHeaders.put("X-goog-meta-OWNER", " myself and others \n"); + + StringBuilder sb = v4Serializer.serialize(encryptionHeaders); + + assertEquals( + "x-goog-acl:public-read\nx-goog-encryption-key:key\nx-goog-encryption-key-sha256:sha" + + "\nx-goog-meta-owner:myself and others\n", + sb.toString()); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ChecksumResponseParserTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ChecksumResponseParserTest.java new file mode 100644 index 000000000000..00d5733bc1af --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ChecksumResponseParserTest.java @@ -0,0 +1,124 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.client.http.GenericUrl; +import com.google.api.client.http.HttpRequest; +import com.google.api.client.http.HttpResponse; +import com.google.api.client.http.HttpTransport; +import com.google.api.client.testing.http.MockHttpTransport; +import com.google.api.client.testing.http.MockLowLevelHttpResponse; +import com.google.cloud.storage.multipartupload.model.UploadPartResponse; +import java.io.IOException; +import java.util.Map; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public final class ChecksumResponseParserTest { + + @Test + public void testParse() throws IOException { + HttpResponse response = + createHttpResponse("\"test-etag\"", "crc32c=AAAAAA==,md5=rL0Y20zC+Fzt72VPzMSk2A=="); + + UploadPartResponse uploadPartResponse = ChecksumResponseParser.parseUploadResponse(response); + + assertThat(uploadPartResponse.eTag()).isEqualTo("\"test-etag\""); + assertThat(uploadPartResponse.md5()).isEqualTo("rL0Y20zC+Fzt72VPzMSk2A=="); + assertThat(uploadPartResponse.crc32c()).isEqualTo("AAAAAA=="); + } + + @Test + public void testExtractHashesFromHeader() throws IOException { + HttpResponse response = + createHttpResponse(null, "crc32c=AAAAAA==,md5=rL0Y20zC+Fzt72VPzMSk2A=="); + Map hashes = ChecksumResponseParser.extractHashesFromHeader(response); + assertThat(hashes).containsEntry("crc32c", "AAAAAA=="); + assertThat(hashes).containsEntry("md5", "rL0Y20zC+Fzt72VPzMSk2A=="); + } + + @Test + public void testExtractHashesFromHeader_singleHash() throws IOException { + HttpResponse response = createHttpResponse(null, "crc32c=AAAAAA=="); + Map hashes = ChecksumResponseParser.extractHashesFromHeader(response); + assertThat(hashes).containsEntry("crc32c", "AAAAAA=="); + assertThat(hashes).doesNotContainKey("md5"); + } + + @Test + public void testExtractHashesFromHeader_unknownHash() 
throws IOException { + HttpResponse response = + createHttpResponse(null, "crc32c=AAAAAA==,sha256=rL0Y20zC+Fzt72VPzMSk2A=="); + Map hashes = ChecksumResponseParser.extractHashesFromHeader(response); + assertThat(hashes).containsEntry("crc32c", "AAAAAA=="); + assertThat(hashes).doesNotContainKey("sha256"); + } + + @Test + public void testExtractHashesFromHeader_nullHeader() throws IOException { + HttpResponse response = createHttpResponse(null, null); + Map hashes = ChecksumResponseParser.extractHashesFromHeader(response); + assertThat(hashes).isEmpty(); + } + + @Test + public void testExtractHashesFromHeader_emptyHeader() throws IOException { + HttpResponse response = createHttpResponse(null, ""); + Map hashes = ChecksumResponseParser.extractHashesFromHeader(response); + assertThat(hashes).isEmpty(); + } + + @Test + public void testExtractHashesFromHeader_multipleHeaders() throws IOException { + HttpResponse response = + createHttpResponse(null, "crc32c=AAAAAA==", "md5=rL0Y20zC+Fzt72VPzMSk2A=="); + Map hashes = ChecksumResponseParser.extractHashesFromHeader(response); + assertThat(hashes).containsEntry("crc32c", "AAAAAA=="); + assertThat(hashes).containsEntry("md5", "rL0Y20zC+Fzt72VPzMSk2A=="); + } + + @Test + public void testExtractHashesFromHeader_multipleHeadersAndCsv() throws IOException { + HttpResponse response = + createHttpResponse(null, "crc32c=AAAAAA==", "md5=rL0Y20zC+Fzt72VPzMSk2A==,extra=value"); + Map hashes = ChecksumResponseParser.extractHashesFromHeader(response); + assertThat(hashes).containsEntry("crc32c", "AAAAAA=="); + assertThat(hashes).containsEntry("md5", "rL0Y20zC+Fzt72VPzMSk2A=="); + assertThat(hashes).hasSize(2); + } + + private HttpResponse createHttpResponse(String etag, String... 
googHash) throws IOException { + MockLowLevelHttpResponse lowLevelResponse = new MockLowLevelHttpResponse(); + if (etag != null) { + lowLevelResponse.addHeader("ETag", etag); + } + if (googHash != null) { + for (String hash : googHash) { + lowLevelResponse.addHeader("x-goog-hash", hash); + } + } + HttpTransport transport = + new MockHttpTransport.Builder().setLowLevelHttpResponse(lowLevelResponse).build(); + HttpRequest request = + transport.createRequestFactory().buildGetRequest(new GenericUrl("http://example.com")); + return request.execute(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ChunkSegmenterTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ChunkSegmenterTest.java new file mode 100644 index 000000000000..5f523c874789 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ChunkSegmenterTest.java @@ -0,0 +1,337 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.storage;

import static com.google.cloud.storage.TestUtils.assertAll;
import static com.google.common.truth.Truth.assertThat;

import com.google.cloud.storage.ChunkSegmenter.ChunkSegment;
import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown;
import com.google.cloud.storage.it.ChecksummedTestContent;
import com.google.common.collect.ImmutableList;
import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import com.google.protobuf.ByteString;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import net.jqwik.api.Arbitraries;
import net.jqwik.api.Arbitrary;
import net.jqwik.api.Example;
import net.jqwik.api.ForAll;
import net.jqwik.api.Property;
import net.jqwik.api.Provide;
import net.jqwik.api.RandomDistribution;

/**
 * Tests for {@link ChunkSegmenter}: property-based coverage over arbitrary buffer shapes, plus
 * targeted examples for block alignment and {@code maxBytesToConsume} handling.
 *
 * <p>NOTE(review): generic type parameters in this file were reconstructed after being stripped by
 * a formatting mangle; the runtime logic is unchanged.
 */
final class ChunkSegmenterTest {
  private static final int _2MiB = 2 * 1024 * 1024;

  /**
   * Segment an arbitrary set of buffers and verify: total emitted bytes, expected chunk count, and
   * that concatenating per-chunk crc32c values (when present) equals the crc32c of all the data.
   */
  @Property
  void chunkIt(@ForAll("TestData") TestData td) {
    if (CIUtils.verbose()) {
      System.out.println("td = " + td);
    }

    ChunkSegment[] data =
        new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.noCopy(), td.chunkSize)
            .segmentBuffers(td.buffers);

    long dataTotalSize = Arrays.stream(data).mapToLong(d -> d.getB().size()).sum();
    // With Hasher.noop() the per-chunk crc32c values may be absent, hence the isAnyOf below.
    Optional<Crc32cLengthKnown> reduce =
        Arrays.stream(data)
            .map(ChunkSegment::getCrc32c)
            .filter(Objects::nonNull)
            .reduce(Crc32cValue::concat);

    assertThat(dataTotalSize).isEqualTo(td.totalSize);
    assertThat(data).hasLength(td.expectedChunkCount);
    assertThat(reduce).isAnyOf(Optional.empty(), Optional.of(Crc32cValue.of(td.allCrc32c.asInt())));
  }

  /**
   * <pre>
   * Given 64 bytes, maxSegmentSize: 10, blockSize: 5
   * 0                                                              64
   * |---------------------------------------------------------------|
   *   Produce 6 10-byte segments
   * |---------|---------|---------|---------|---------|---------|
   * </pre>
   */
  @Example
  void allowUnalignedBlocks_false_1() {
    ChunkSegmenter segmenter =
        new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.noCopy(), 10, 5);

    byte[] bytes = DataGenerator.base64Characters().genBytes(64);
    List<ByteString> expected =
        ImmutableList.of(
            ByteString.copyFrom(bytes, 0, 10),
            ByteString.copyFrom(bytes, 10, 10),
            ByteString.copyFrom(bytes, 20, 10),
            ByteString.copyFrom(bytes, 30, 10),
            ByteString.copyFrom(bytes, 40, 10),
            ByteString.copyFrom(bytes, 50, 10));

    ByteBuffer buf = ByteBuffer.wrap(bytes);

    ChunkSegment[] segments = segmenter.segmentBuffers(new ByteBuffer[] {buf}, 0, 1, false);
    // The trailing 4 bytes do not fill a 5-byte block, so they remain unconsumed.
    assertThat(buf.remaining()).isEqualTo(4);
    List<ByteString> actual =
        Arrays.stream(segments).map(ChunkSegment::getB).collect(Collectors.toList());
    assertThat(actual).isEqualTo(expected);
  }

  /**
   * <pre>
   * Given 64 bytes, maxSegmentSize: 14, blockSize: 7
   * 0                                                              64
   * |---------------------------------------------------------------|
   *   Produce 4 14-byte segments, and one 7 byte segment
   * |-------------|-------------|-------------|-------------|------|
   * </pre>
   */
  @Example
  void allowUnalignedBlocks_false_2() throws Exception {
    ChunkSegmenter segmenter =
        new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.noCopy(), 14, 7);

    byte[] bytes = DataGenerator.base64Characters().genBytes(64);
    List<ByteString> expected =
        ImmutableList.of(
            ByteString.copyFrom(bytes, 0, 14),
            ByteString.copyFrom(bytes, 14, 14),
            ByteString.copyFrom(bytes, 28, 14),
            ByteString.copyFrom(bytes, 42, 14),
            ByteString.copyFrom(bytes, 56, 7));

    ByteBuffer buf = ByteBuffer.wrap(bytes);

    ChunkSegment[] segments = segmenter.segmentBuffers(new ByteBuffer[] {buf}, 0, 1, false);
    List<ByteString> actual =
        Arrays.stream(segments).map(ChunkSegment::getB).collect(Collectors.toList());
    assertAll(
        // 63 of 64 bytes are block-aligned; the final byte stays behind.
        () -> assertThat(buf.remaining()).isEqualTo(1),
        () -> assertThat(actual).isEqualTo(expected));
  }

  /**
   * <pre>
   * Given 60 bytes in one buffer and 4 bytes in a second buffer, maxSegmentSize: 14, blockSize: 7
   * 0                                                          60   4
   * |-----------------------------------------------------------|---|
   *   Produce 4 14-byte segments, and one 7 byte segment
   * |-------------|-------------|-------------|-------------|------|
   * </pre>
   */
  @Example
  void allowUnalignedBlocks_false_3() throws Exception {
    ChunkSegmenter segmenter =
        new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.noCopy(), 14, 7);

    byte[] bytes = DataGenerator.base64Characters().genBytes(64);
    List<ByteString> expected =
        ImmutableList.of(
            ByteString.copyFrom(bytes, 0, 14),
            ByteString.copyFrom(bytes, 14, 14),
            ByteString.copyFrom(bytes, 28, 14),
            ByteString.copyFrom(bytes, 42, 14),
            ByteString.copyFrom(bytes, 56, 7));

    ByteBuffer buf1 = ByteBuffer.wrap(bytes, 0, 60);
    ByteBuffer buf2 = ByteBuffer.wrap(bytes, 60, 4);

    ChunkSegment[] segments = segmenter.segmentBuffers(new ByteBuffer[] {buf1, buf2}, 0, 2, false);
    List<ByteString> actual =
        Arrays.stream(segments).map(ChunkSegment::getB).collect(Collectors.toList());
    assertAll(
        () -> assertThat(buf1.remaining()).isEqualTo(0),
        () -> assertThat(buf2.remaining()).isEqualTo(1),
        () -> assertThat(actual).isEqualTo(expected));
  }

  /** With allowUnaligned=true, exactly {@code maxBytesToConsume} bytes are consumed. */
  @Example
  void maxBytesToConsume_unaligned() throws Exception {

    ChecksummedTestContent ctc = ChecksummedTestContent.gen(64);

    ChunkSegmenter segmenter = new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.noCopy(), 6, 3);

    List<ChecksummedTestContent> chunks = ctc.chunkup(4);
    ByteBuffer[] buffers =
        chunks.stream().map(ChecksummedTestContent::asByteBuffer).toArray(ByteBuffer[]::new);
    buffers[1].position(1);

    // Consumption starts at overall offset 5 (buffers[0] skipped, buffers[1] at position 1).
    ChecksummedTestContent slice = ctc.slice(5, 37);
    List<ByteString> expected =
        slice.chunkup(6).stream()
            .map(ChecksummedTestContent::asByteBuffer)
            .map(ByteStringStrategy.noCopy())
            .collect(Collectors.toList());

    ChunkSegment[] segments = segmenter.segmentBuffers(buffers, 1, buffers.length - 2, true, 37);
    List<ByteString> actual =
        Arrays.stream(segments).map(ChunkSegment::getB).collect(Collectors.toList());
    assertThat(actual).isEqualTo(expected);
  }

  /** With allowUnaligned=false, consumption stops at the last block boundary under the cap. */
  @Example
  void maxBytesToConsume_aligned() throws Exception {

    ChecksummedTestContent ctc = ChecksummedTestContent.gen(64);

    ChunkSegmenter segmenter = new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.noCopy(), 6, 3);

    List<ChecksummedTestContent> chunks = ctc.chunkup(4);
    ByteBuffer[] buffers =
        chunks.stream().map(ChecksummedTestContent::asByteBuffer).toArray(ByteBuffer[]::new);
    buffers[1].position(1);

    // 37 requested, but only 36 bytes are block-aligned (blockSize 3).
    ChecksummedTestContent slice = ctc.slice(5, 36);
    List<ByteString> expected =
        slice.chunkup(6).stream()
            .map(ChecksummedTestContent::asByteBuffer)
            .map(ByteStringStrategy.noCopy())
            .collect(Collectors.toList());

    ChunkSegment[] segments = segmenter.segmentBuffers(buffers, 1, buffers.length - 2, false, 37);
    List<ByteString> actual =
        Arrays.stream(segments).map(ChunkSegment::getB).collect(Collectors.toList());
    assertThat(actual).isEqualTo(expected);
  }

  /** A buffer larger than one segment must only be consumed up to the aligned portion. */
  @Example
  void alignedConsumeForLargeBuffersOnlyConsumesAligned() throws Exception {

    ChecksummedTestContent ctc = ChecksummedTestContent.gen(2048 + 13);

    ChunkSegmenter segmenter =
        new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.noCopy(), 2048, 256);

    ChecksummedTestContent slice = ctc.slice(0, 2048);
    List<ByteString> expected =
        slice.chunkup(2048).stream()
            .map(ChecksummedTestContent::asByteBuffer)
            .map(ByteStringStrategy.noCopy())
            .collect(Collectors.toList());

    ByteBuffer buf = ctc.asByteBuffer();
    ChunkSegment[] segments = segmenter.segmentBuffers(new ByteBuffer[] {buf}, 0, 1, false);
    List<ByteString> actual =
        Arrays.stream(segments).map(ChunkSegment::getB).collect(Collectors.toList());
    assertThat(actual).isEqualTo(expected);
  }

  /** Generates 0-10 buffers, each 1 byte to 8MiB, with uniform size distribution. */
  @Provide("TestData")
  static Arbitrary<TestData> arbitraryTestData() {
    return Arbitraries.lazyOf(
            () ->
                Arbitraries.lazyOf(
                        () ->
                            Arbitraries.integers()
                                .greaterOrEqual(1)
                                .lessOrEqual(8 * 1024 * 1024)
                                .withDistribution(RandomDistribution.uniform()))
                    .map(DataGenerator.base64Characters()::genBytes)
                    .array(byte[][].class)
                    .ofMinSize(0)
                    .ofMaxSize(10)
                    .withSizeDistribution(RandomDistribution.uniform()))
        .map(TestData::create);
  }

  /** Immutable fixture: generated buffers plus the expected aggregate properties. */
  static final class TestData {
    private final int chunkSize;
    private final long totalSize;
    private final int expectedChunkCount;
    private final byte[][] originalData;
    private final ByteBuffer[] buffers;
    private final HashCode allCrc32c;

    private TestData(
        long totalSize,
        int expectedChunkCount,
        byte[][] originalData,
        ByteBuffer[] buffers,
        HashCode allCrc32c,
        int chunkSize) {
      this.totalSize = totalSize;
      this.expectedChunkCount = expectedChunkCount;
      this.originalData = originalData;
      this.buffers = buffers;
      this.allCrc32c = allCrc32c;
      this.chunkSize = chunkSize;
    }

    @Override
    public String toString() {
      return "TestData{"
          + "chunkSize="
          + fmt(chunkSize)
          + ", totalSize="
          + fmt(totalSize)
          + ", expectedChunkCount="
          + fmt(expectedChunkCount)
          + ", allCrc32c="
          + allCrc32c
          + ", originalDataLengths="
          + Arrays.toString(
              Arrays.stream(originalData).mapToInt(x -> x.length).mapToObj(TestData::fmt).toArray())
          + '}';
    }

    @SuppressWarnings("UnstableApiUsage")
    static TestData create(byte[][] bs) {
      long totalSize = 0;
      HashCode allCrc32c;
      int expectedChunkCount;
      com.google.common.hash.Hasher hasher = Hashing.crc32c().newHasher();
      for (byte[] bb : bs) {
        totalSize += bb.length;
        hasher.putBytes(bb);
      }
      allCrc32c = hasher.hash();

      int chunkSize = _2MiB;
      // Ceiling division: a partial trailing chunk still counts as a chunk.
      expectedChunkCount = Math.toIntExact(totalSize / chunkSize);
      if (totalSize % chunkSize != 0) {
        expectedChunkCount++;
      }

      ByteBuffer[] bbs = Arrays.stream(bs).map(ByteBuffer::wrap).toArray(ByteBuffer[]::new);

      return new TestData(totalSize, expectedChunkCount, bs, bbs, allCrc32c, chunkSize);
    }

    static String fmt(int i) {
      return String.format(Locale.US, "0x%08x", i);
    }

    static String fmt(long i) {
      return String.format(Locale.US, "0x%016x", i);
    }
  }
}
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/CopyRequestTest.java @@ -0,0 +1,120 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.Storage.PredefinedAcl.PUBLIC_READ; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.common.collect.ImmutableList; +import org.junit.Test; + +public class CopyRequestTest { + + private static final String SOURCE_BUCKET_NAME = "b0"; + private static final String SOURCE_BLOB_NAME = "o0"; + private static final String TARGET_BUCKET_NAME = "b1"; + private static final String TARGET_BLOB_NAME = "o1"; + private static final String TARGET_BLOB_CONTENT_TYPE = "contentType"; + private static final BlobId SOURCE_BLOB_ID = BlobId.of(SOURCE_BUCKET_NAME, SOURCE_BLOB_NAME); + private static final BlobId TARGET_BLOB_ID = BlobId.of(TARGET_BUCKET_NAME, TARGET_BLOB_NAME); + private static final BlobInfo TARGET_BLOB_INFO = + BlobInfo.newBuilder(TARGET_BLOB_ID).setContentType(TARGET_BLOB_CONTENT_TYPE).build(); + + @Test + public void testCopyRequest() { + Storage.CopyRequest copyRequest1 = + Storage.CopyRequest.newBuilder() + .setSource(SOURCE_BLOB_ID) + 
.setSourceOptions(BlobSourceOption.generationMatch(1)) + .setTarget(TARGET_BLOB_INFO, BlobTargetOption.predefinedAcl(PUBLIC_READ)) + .build(); + assertEquals(SOURCE_BLOB_ID, copyRequest1.getSource()); + assertEquals(1, copyRequest1.getSourceOptions().size()); + assertEquals(BlobSourceOption.generationMatch(1), copyRequest1.getSourceOptions().get(0)); + assertEquals(TARGET_BLOB_INFO, copyRequest1.getTarget()); + assertTrue(copyRequest1.overrideInfo()); + assertEquals(1, copyRequest1.getTargetOptions().size()); + assertEquals( + BlobTargetOption.predefinedAcl(PUBLIC_READ), copyRequest1.getTargetOptions().get(0)); + + Storage.CopyRequest copyRequest2 = + Storage.CopyRequest.newBuilder() + .setSource(SOURCE_BUCKET_NAME, SOURCE_BLOB_NAME) + .setTarget(TARGET_BLOB_ID) + .build(); + assertEquals(SOURCE_BLOB_ID, copyRequest2.getSource()); + assertEquals(BlobInfo.newBuilder(TARGET_BLOB_ID).build(), copyRequest2.getTarget()); + assertFalse(copyRequest2.overrideInfo()); + + Storage.CopyRequest copyRequest3 = + Storage.CopyRequest.newBuilder() + .setSource(SOURCE_BLOB_ID) + .setTarget( + TARGET_BLOB_INFO, ImmutableList.of(BlobTargetOption.predefinedAcl(PUBLIC_READ))) + .build(); + assertEquals(SOURCE_BLOB_ID, copyRequest3.getSource()); + assertEquals(TARGET_BLOB_INFO, copyRequest3.getTarget()); + assertTrue(copyRequest3.overrideInfo()); + assertEquals( + ImmutableList.of(BlobTargetOption.predefinedAcl(PUBLIC_READ)), + copyRequest3.getTargetOptions()); + } + + @Test + public void testCopyRequestOf() { + Storage.CopyRequest copyRequest1 = Storage.CopyRequest.of(SOURCE_BLOB_ID, TARGET_BLOB_INFO); + assertEquals(SOURCE_BLOB_ID, copyRequest1.getSource()); + assertEquals(TARGET_BLOB_INFO, copyRequest1.getTarget()); + assertTrue(copyRequest1.overrideInfo()); + + Storage.CopyRequest copyRequest2 = Storage.CopyRequest.of(SOURCE_BLOB_ID, TARGET_BLOB_NAME); + assertEquals(SOURCE_BLOB_ID, copyRequest2.getSource()); + assertEquals( + BlobInfo.newBuilder(BlobId.of(SOURCE_BUCKET_NAME, 
TARGET_BLOB_NAME)).build(), + copyRequest2.getTarget()); + assertFalse(copyRequest2.overrideInfo()); + + Storage.CopyRequest copyRequest3 = + Storage.CopyRequest.of(SOURCE_BUCKET_NAME, SOURCE_BLOB_NAME, TARGET_BLOB_INFO); + assertEquals(SOURCE_BLOB_ID, copyRequest3.getSource()); + assertEquals(TARGET_BLOB_INFO, copyRequest3.getTarget()); + assertTrue(copyRequest3.overrideInfo()); + + Storage.CopyRequest copyRequest4 = + Storage.CopyRequest.of(SOURCE_BUCKET_NAME, SOURCE_BLOB_NAME, TARGET_BLOB_NAME); + assertEquals(SOURCE_BLOB_ID, copyRequest4.getSource()); + assertEquals( + BlobInfo.newBuilder(BlobId.of(SOURCE_BUCKET_NAME, TARGET_BLOB_NAME)).build(), + copyRequest4.getTarget()); + assertFalse(copyRequest4.overrideInfo()); + + Storage.CopyRequest copyRequest5 = Storage.CopyRequest.of(SOURCE_BLOB_ID, TARGET_BLOB_ID); + assertEquals(SOURCE_BLOB_ID, copyRequest5.getSource()); + assertEquals(BlobInfo.newBuilder(TARGET_BLOB_ID).build(), copyRequest5.getTarget()); + assertFalse(copyRequest5.overrideInfo()); + + Storage.CopyRequest copyRequest6 = + Storage.CopyRequest.of(SOURCE_BUCKET_NAME, SOURCE_BLOB_NAME, TARGET_BLOB_ID); + assertEquals(SOURCE_BLOB_ID, copyRequest6.getSource()); + assertEquals(BlobInfo.newBuilder(TARGET_BLOB_ID).build(), copyRequest6.getTarget()); + assertFalse(copyRequest6.overrideInfo()); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/CopyWriterTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/CopyWriterTest.java new file mode 100644 index 000000000000..dfcd8851ecfe --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/CopyWriterTest.java @@ -0,0 +1,213 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
package com.google.cloud.storage;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import com.google.cloud.RestorableState;
import com.google.cloud.ServiceOptions;
import com.google.cloud.storage.Retrying.Retrier;
import com.google.cloud.storage.spi.StorageRpcFactory;
import com.google.cloud.storage.spi.v1.StorageRpc;
import com.google.cloud.storage.spi.v1.StorageRpc.RewriteRequest;
import com.google.cloud.storage.spi.v1.StorageRpc.RewriteResponse;
import com.google.common.collect.ImmutableMap;
import java.util.Map;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;

/**
 * Unit tests for {@link CopyWriter}/{@link HttpCopyWriter} rewrite handling against a mocked
 * {@link StorageRpc}.
 */
public class CopyWriterTest {

  private static final String SOURCE_BUCKET_NAME = "b";
  private static final String SOURCE_BLOB_NAME = "n";
  private static final String DESTINATION_BUCKET_NAME = "b1";
  private static final String DESTINATION_BLOB_NAME = "n1";
  private static final BlobId BLOB_ID = BlobId.of(SOURCE_BUCKET_NAME, SOURCE_BLOB_NAME);
  private static final BlobInfo BLOB_INFO =
      BlobInfo.newBuilder(DESTINATION_BUCKET_NAME, DESTINATION_BLOB_NAME).build();
  private static final BlobInfo RESULT_INFO =
      BlobInfo.newBuilder(DESTINATION_BUCKET_NAME, DESTINATION_BLOB_NAME)
          .setContentType("type")
          .build();
  private static final Map<StorageRpc.Option, ?> EMPTY_OPTIONS = ImmutableMap.of();
  private static final RewriteRequest REQUEST_WITH_OBJECT =
      new StorageRpc.RewriteRequest(
          Conversions.json().blobId().encode(BLOB_ID),
          EMPTY_OPTIONS,
          true,
          Conversions.json().blobInfo().encode(BLOB_INFO),
          EMPTY_OPTIONS,
          null);
  private static final RewriteRequest REQUEST_WITHOUT_OBJECT =
      new StorageRpc.RewriteRequest(
          Conversions.json().blobId().encode(BLOB_ID),
          EMPTY_OPTIONS,
          false,
          Conversions.json().blobInfo().encode(BLOB_INFO),
          EMPTY_OPTIONS,
          null);
  // "in-progress" responses: 21 of 42 bytes copied, rewrite not done.
  private static final RewriteResponse RESPONSE_WITH_OBJECT =
      new RewriteResponse(REQUEST_WITH_OBJECT, null, 42L, false, "token", 21L);
  private static final RewriteResponse RESPONSE_WITHOUT_OBJECT =
      new RewriteResponse(REQUEST_WITHOUT_OBJECT, null, 42L, false, "token", 21L);
  // "done" responses: all 42 bytes copied and the result object is populated.
  private static final RewriteResponse RESPONSE_WITH_OBJECT_DONE =
      new RewriteResponse(
          REQUEST_WITH_OBJECT,
          Conversions.json().blobInfo().encode(RESULT_INFO),
          42L,
          true,
          "token",
          42L);
  private static final RewriteResponse RESPONSE_WITHOUT_OBJECT_DONE =
      new RewriteResponse(
          REQUEST_WITHOUT_OBJECT,
          Conversions.json().blobInfo().encode(RESULT_INFO),
          42L,
          true,
          "token",
          42L);

  private HttpStorageOptions options;
  private StorageRpcFactory rpcFactoryMock;
  private StorageRpc storageRpcMock;
  private CopyWriter copyWriter;
  private Blob result;

  @Before
  public void setUp() {
    rpcFactoryMock = Mockito.mock(StorageRpcFactory.class);
    storageRpcMock = Mockito.mock(StorageRpc.class);
    when(rpcFactoryMock.create(any(StorageOptions.class))).thenReturn(storageRpcMock);
    options =
        HttpStorageOptions.newBuilder()
            .setProjectId("projectid")
            .setServiceRpcFactory(rpcFactoryMock)
            .setRetrySettings(ServiceOptions.getNoRetrySettings())
            .build();
    result = new Blob(options.getService(), new BlobInfo.BuilderImpl(RESULT_INFO));
    verify(rpcFactoryMock).create(any(StorageOptions.class));
  }

  @Test
  public void testRewriteWithObject() {
    when(storageRpcMock.continueRewrite(RESPONSE_WITH_OBJECT))
        .thenReturn(RESPONSE_WITH_OBJECT_DONE);
    copyWriter = new HttpCopyWriter(options, RESPONSE_WITH_OBJECT, Retrier.attemptOnce());
    assertEquals(result, copyWriter.getResult());
    assertTrue(copyWriter.isDone());
    assertEquals(42L, copyWriter.getTotalBytesCopied());
    assertEquals(42L, copyWriter.getBlobSize());
    verify(storageRpcMock).continueRewrite(RESPONSE_WITH_OBJECT);
  }

  @Test
  public void testRewriteWithoutObject() {
    when(storageRpcMock.continueRewrite(RESPONSE_WITHOUT_OBJECT))
        .thenReturn(RESPONSE_WITHOUT_OBJECT_DONE);
    copyWriter = new HttpCopyWriter(options, RESPONSE_WITHOUT_OBJECT, Retrier.attemptOnce());
    assertEquals(result, copyWriter.getResult());
    assertTrue(copyWriter.isDone());
    assertEquals(42L, copyWriter.getTotalBytesCopied());
    assertEquals(42L, copyWriter.getBlobSize());
    verify(storageRpcMock).continueRewrite(RESPONSE_WITHOUT_OBJECT);
  }

  @Test
  public void testRewriteWithObjectMultipleRequests() {
    // First call returns an in-progress response, forcing a second continueRewrite call.
    // Fixed: the terminal stub previously used RESPONSE_WITHOUT_OBJECT_DONE, which is
    // inconsistent with the "with object" scenario (observable behavior is unchanged since
    // both DONE responses carry the same result blob).
    when(storageRpcMock.continueRewrite(RESPONSE_WITH_OBJECT))
        .thenReturn(RESPONSE_WITH_OBJECT, RESPONSE_WITH_OBJECT_DONE);
    copyWriter = new HttpCopyWriter(options, RESPONSE_WITH_OBJECT, Retrier.attemptOnce());
    assertEquals(result, copyWriter.getResult());
    assertTrue(copyWriter.isDone());
    assertEquals(42L, copyWriter.getTotalBytesCopied());
    assertEquals(42L, copyWriter.getBlobSize());
    verify(storageRpcMock, times(2)).continueRewrite(RESPONSE_WITH_OBJECT);
  }

  @Test
  public void testRewriteWithoutObjectMultipleRequests() {
    when(storageRpcMock.continueRewrite(RESPONSE_WITHOUT_OBJECT))
        .thenReturn(RESPONSE_WITHOUT_OBJECT, RESPONSE_WITHOUT_OBJECT_DONE);
    copyWriter = new HttpCopyWriter(options, RESPONSE_WITHOUT_OBJECT, Retrier.attemptOnce());
    assertEquals(result, copyWriter.getResult());
    assertTrue(copyWriter.isDone());
    assertEquals(42L, copyWriter.getTotalBytesCopied());
    assertEquals(42L, copyWriter.getBlobSize());
    verify(storageRpcMock, times(2)).continueRewrite(RESPONSE_WITHOUT_OBJECT);
  }

  @Test
  public void testSaveAndRestoreWithObject() {
    when(storageRpcMock.continueRewrite(RESPONSE_WITH_OBJECT))
        .thenReturn(RESPONSE_WITH_OBJECT, RESPONSE_WITH_OBJECT_DONE);
    copyWriter = new HttpCopyWriter(options, RESPONSE_WITH_OBJECT, Retrier.attemptOnce());
    copyWriter.copyChunk();
    // After one chunk, only half the bytes have been copied.
    assertFalse(copyWriter.isDone());
    assertEquals(21L, copyWriter.getTotalBytesCopied());
    assertEquals(42L, copyWriter.getBlobSize());
    // Capture mid-rewrite state and finish the copy from the restored writer.
    RestorableState<CopyWriter> rewriterState = copyWriter.capture();
    CopyWriter restoredRewriter = rewriterState.restore();
    assertEquals(result, restoredRewriter.getResult());
    assertTrue(restoredRewriter.isDone());
    assertEquals(42L, restoredRewriter.getTotalBytesCopied());
    assertEquals(42L, restoredRewriter.getBlobSize());
    verify(storageRpcMock, times(2)).continueRewrite(RESPONSE_WITH_OBJECT);
  }

  @Test
  public void testSaveAndRestoreWithoutObject() {
    when(storageRpcMock.continueRewrite(RESPONSE_WITHOUT_OBJECT))
        .thenReturn(RESPONSE_WITHOUT_OBJECT, RESPONSE_WITHOUT_OBJECT_DONE);
    copyWriter = new HttpCopyWriter(options, RESPONSE_WITHOUT_OBJECT, Retrier.attemptOnce());
    copyWriter.copyChunk();
    assertFalse(copyWriter.isDone());
    assertEquals(21L, copyWriter.getTotalBytesCopied());
    assertEquals(42L, copyWriter.getBlobSize());
    RestorableState<CopyWriter> rewriterState = copyWriter.capture();
    CopyWriter restoredRewriter = rewriterState.restore();
    assertEquals(result, restoredRewriter.getResult());
    assertTrue(restoredRewriter.isDone());
    assertEquals(42L, restoredRewriter.getTotalBytesCopied());
    assertEquals(42L, restoredRewriter.getBlobSize());
    verify(storageRpcMock, times(2)).continueRewrite(RESPONSE_WITHOUT_OBJECT);
  }

  @Test
  public void testSaveAndRestoreWithResult() {
    when(storageRpcMock.continueRewrite(RESPONSE_WITH_OBJECT))
        .thenReturn(RESPONSE_WITH_OBJECT_DONE);
    copyWriter = new HttpCopyWriter(options, RESPONSE_WITH_OBJECT, Retrier.attemptOnce());
    copyWriter.copyChunk();
    assertEquals(result, copyWriter.getResult());
    assertTrue(copyWriter.isDone());
    assertEquals(42L, copyWriter.getTotalBytesCopied());
    assertEquals(42L, copyWriter.getBlobSize());
    // Capturing a finished writer must round-trip the completed result.
    RestorableState<CopyWriter> rewriterState = copyWriter.capture();
    CopyWriter restoredRewriter = rewriterState.restore();
    assertEquals(result, restoredRewriter.getResult());
    assertTrue(restoredRewriter.isDone());
    assertEquals(42L, restoredRewriter.getTotalBytesCopied());
    assertEquals(42L, restoredRewriter.getBlobSize());
    verify(storageRpcMock).continueRewrite(RESPONSE_WITH_OBJECT);
  }
}
package com.google.cloud.storage;

import static org.junit.Assert.assertEquals;

import com.google.cloud.storage.Cors.Origin;
import com.google.common.collect.ImmutableList;
import java.util.List;
import org.junit.Test;

/** Unit tests for {@link Cors} and {@link Cors.Origin}. */
public class CorsTest {

  @Test
  public void testOrigin() {
    assertEquals("bla", Origin.of("bla").getValue());
    assertEquals("http://host:8080", Origin.of("http", "host", 8080).toString());
    // The wildcard origin is canonicalized to the singleton Origin.any().
    assertEquals(Origin.of("*"), Origin.any());
  }

  @Test
  public void corsTest() {
    // Build a Cors policy and verify every builder field round-trips through the getters.
    List<Origin> origins = ImmutableList.of(Origin.any(), Origin.of("o"));
    List<String> headers = ImmutableList.of("h1", "h2");
    List<HttpMethod> methods = ImmutableList.of(HttpMethod.GET);
    Cors cors =
        Cors.newBuilder()
            .setMaxAgeSeconds(100)
            .setOrigins(origins)
            .setResponseHeaders(headers)
            .setMethods(methods)
            .build();

    assertEquals(Integer.valueOf(100), cors.getMaxAgeSeconds());
    assertEquals(origins, cors.getOrigins());
    assertEquals(methods, cors.getMethods());
    assertEquals(headers, cors.getResponseHeaders());
  }
}
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.common.hash.Hashing; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; + +public class Crc32cUtilityPropertyTest { + @Property + public void testCrc32cCombinePropertyTest( + @ForAll byte[] firstObject, @ForAll byte[] secondObject) { + int firstPartHash = Hashing.crc32c().hashBytes(firstObject).asInt(); + int secondPartHash = Hashing.crc32c().hashBytes(secondObject).asInt(); + int expected = + Hashing.crc32c().newHasher().putBytes(firstObject).putBytes(secondObject).hash().asInt(); + int actual = Crc32cUtility.concatCrc32c(firstPartHash, secondPartHash, secondObject.length); + assertThat(actual).isEqualTo(expected); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/Crc32cUtilityTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/Crc32cUtilityTest.java new file mode 100644 index 000000000000..7906606e29ec --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/Crc32cUtilityTest.java @@ -0,0 +1,48 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.storage;

import com.google.common.hash.Hashing;
import java.nio.charset.StandardCharsets;
import org.junit.Assert;
import org.junit.Test;

/** Unit tests for {@link Crc32cUtility#concatCrc32c}. */
public class Crc32cUtilityTest {

  @Test
  public void testCrc32cCombine() {
    // Expected combined checksum for "hello" + "world".
    // NOTE(review): a previous inline comment claimed crc32c("helloworld") -> 0x3D082299, which
    // contradicts the asserted constant below; the constant is what this test pins.
    int expected = 0x56CBB480;
    // crc32c("hello")
    int object1Hash = 0x9A71BB4C;
    // crc32c("world")
    int object2Hash = 0x31AA814E;
    // length("world")
    int object2Size = 5;
    int combined = Crc32cUtility.concatCrc32c(object1Hash, object2Hash, object2Size);
    Assert.assertEquals(expected, combined);
  }

  @Test
  public void testCrc32cCombineGuavaValues() {
    final String hello = "hello";
    final String world = "world";
    final String helloWorld = hello + world;
    // Specify UTF-8 explicitly so the test does not depend on the platform default charset.
    int expected = Hashing.crc32c().hashBytes(helloWorld.getBytes(StandardCharsets.UTF_8)).asInt();
    int object1Hash = Hashing.crc32c().hashBytes(hello.getBytes(StandardCharsets.UTF_8)).asInt();
    int object2Hash = Hashing.crc32c().hashBytes(world.getBytes(StandardCharsets.UTF_8)).asInt();
    int combined = Crc32cUtility.concatCrc32c(object1Hash, object2Hash, world.length());
    Assert.assertEquals(expected, combined);
  }
}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthUnknown; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.hash.HashFunction; +import com.google.common.hash.Hashing; +import net.jqwik.api.Example; +import org.checkerframework.checker.nullness.qual.NonNull; + +final class Crc32cValueTest { + + @Example + public void ensureConcatenationOfTwoValuesOnlyWorksWhenTheLengthIsKnownForRightHandSide() { + Crc32cValue.of(1).concat(Crc32cValue.of(2, 1L)); + } + + @Example + public void ensureConcatSatisfiesTheLeftDistributedProperty() { + HashFunction f = Hashing.crc32c(); + + int expected = + f.hashBytes( + new byte[] { + 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f + }) + .asInt(); + int a = f.hashBytes(new byte[] {0x00, 0x01, 0x02, 0x03}).asInt(); + int b = f.hashBytes(new byte[] {0x04, 0x05, 0x06, 0x07}).asInt(); + int c = f.hashBytes(new byte[] {0x08, 0x09, 0x0a, 0x0b}).asInt(); + int d = f.hashBytes(new byte[] {0x0c, 0x0d, 0x0e, 0x0f}).asInt(); + + Crc32cLengthUnknown A = Crc32cValue.of(a); + Crc32cLengthKnown B = Crc32cValue.of(b, 4); + Crc32cLengthKnown C = Crc32cValue.of(c, 4); + Crc32cLengthKnown D = Crc32cValue.of(d, 4); + + Crc32cLengthKnown BC = B.concat(C); + Crc32cLengthKnown BCD = BC.concat(D); + Crc32cLengthUnknown ABCD = A.concat(BCD); + + Crc32cValue chain = 
A.concat(B).concat(C).concat(D); + Crc32cValue nesting = A.concat(B.concat(C.concat(D))); + Crc32cValue mixed = A.concat(B.concat(C)).concat(D); + + assertThat(ABCD.getValue()).isEqualTo(expected); + assertThat(chain.getValue()).isEqualTo(expected); + assertThat(nesting.getValue()).isEqualTo(expected); + assertThat(mixed.getValue()).isEqualTo(expected); + } + + @Example + void zeroDoesNotTransform() { + Crc32cLengthKnown base = + Hasher.enabled().hash(DataGenerator.base64Characters().genByteBuffer(64)); + + assertThat(base.concat(Crc32cValue.zero())).isSameInstanceAs(base); + assertThat(Crc32cValue.zero().concat(base)).isSameInstanceAs(base); + } + + @Example + void nullSafeConcat_isAlwaysNull() { + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(2 * 1024 * 1024)); + + Crc32cLengthKnown actual = + testContent.chunkup(373).stream() + .map(Crc32cValueTest::toCrc32cValue) + .reduce(null, Hasher.enabled()::nullSafeConcat); + + assertThat(actual).isNull(); + } + + private static @NonNull Crc32cLengthKnown toCrc32cValue(ChecksummedTestContent testContent) { + return Crc32cValue.of(testContent.getCrc32c(), testContent.getBytes().length); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DataChain.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DataChain.java new file mode 100644 index 000000000000..878aebe752ab --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DataChain.java @@ -0,0 +1,98 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
package com.google.cloud.storage;

import static com.google.common.base.Preconditions.checkState;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.UnmodifiableIterator;
import java.nio.ByteBuffer;
import java.util.Iterator;

/**
 * Test helper modeling a chain of data buffers that can be "spliced" into read-only views whose
 * cumulative size is capped at an object size.
 */
public abstract class DataChain implements Iterable<ByteBuffer> {

  private DataChain() {}

  /** Total capacity, in bytes, of all buffers in the chain. */
  public abstract long getTotalCapacity();

  /** Returns read-only views over the chain, limited to {@code objectSize} total bytes. */
  public abstract Iterator<ByteBuffer> splice(long objectSize);

  static DataChain ofByteBuffers(ImmutableList<ByteBuffer> buffers) {
    return new ByteBufferChain(buffers);
  }

  /** {@link DataChain} backed by a fixed list of {@link ByteBuffer}s. */
  static final class ByteBufferChain extends DataChain {
    private final long totalCapacity;
    private final ImmutableList<ByteBuffer> buffers;

    public ByteBufferChain(ImmutableList<ByteBuffer> buffers) {
      this.buffers = buffers;
      this.totalCapacity = buffers.stream().mapToLong(ByteBuffer::capacity).sum();
    }

    @Override
    public long getTotalCapacity() {
      return totalCapacity;
    }

    @Override
    public Iterator<ByteBuffer> splice(long objectSize) {
      return new BufferSplicer(objectSize, buffers.iterator());
    }

    @Override
    public Iterator<ByteBuffer> iterator() {
      // Iterating the chain directly is equivalent to splicing at full capacity.
      return splice(totalCapacity);
    }

    /** Emits read-only buffer views until {@code limit} total bytes have been accounted for. */
    static final class BufferSplicer implements Iterator<ByteBuffer> {
      private final Iterator<ByteBuffer> iter;
      // Maximum total bytes to emit across all returned views.
      private final long limit;

      // Buffer currently being viewed.
      private ByteBuffer now;
      // Bytes accounted for so far across all returned views.
      private long overallPosition;

      BufferSplicer(long limit, UnmodifiableIterator<ByteBuffer> iterator) {
        this.limit = limit;
        iter = iterator;
      }

      @Override
      public boolean hasNext() {
        return remaining() > 0 && iter.hasNext();
      }

      @Override
      public ByteBuffer next() {
        if (now == null) {
          checkState(hasNext());
          now = iter.next();
        }
        // NOTE(review): `now` is assigned only on the first call and its position never advances,
        // so successive calls return overlapping views of the same buffer until `limit` is
        // reached; limit(subLimit) also assumes `now` has position 0. Confirm against callers
        // whether this single-buffer behavior is intended — preserved as-is here.
        ByteBuffer sub = now.asReadOnlyBuffer();
        int subLimit =
            Math.toIntExact(Math.min(sub.remaining(), Math.min(Integer.MAX_VALUE, remaining())));
        sub.limit(subLimit);
        overallPosition += subLimit;
        return sub;
      }

      private long remaining() {
        return limit - overallPosition;
      }
    }
  }
}
+ */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkState; +import static java.util.Objects.requireNonNull; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.io.ByteStreams; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.SeekableByteChannel; +import java.nio.file.Path; +import java.util.Locale; +import java.util.Random; +import java.util.stream.IntStream; + +public abstract class DataGenerator { + + private DataGenerator() {} + + public final ByteBuffer genByteBuffer(int capacity) { + Preconditions.checkArgument(capacity > 0, "capacity must be > 0"); + ByteBuffer bb = ByteBuffer.allocate(capacity); + fill(bb); + bb.position(0); + return bb; + } + + public final ByteBuffer genByteBuffer(long capacity) { + return genByteBuffer(Math.toIntExact(capacity)); + } + + public final byte[] genBytes(int capacity) { + Preconditions.checkArgument(capacity > 0, "capacity must be > 0"); + byte[] bytes = new byte[capacity]; + fill(bytes, 0, capacity); + return bytes; + } + + public final byte[] genBytes(long capacity) { + return genBytes(Math.toIntExact(capacity)); + } + + public abstract void fill(ByteBuffer buf); + + public abstract void fill(byte[] b, int offset, int length); + + public final DataChain dataChainOfByteBuffers( + long totalCapacity, int segmentCapacity) { + final int segmentCount; + if (totalCapacity % segmentCapacity == 0) { + segmentCount = Math.toIntExact(totalCapacity / segmentCapacity); + } else { + segmentCount = Math.toIntExact(totalCapacity / segmentCapacity) + 1; + } + ImmutableList buffers; + if (segmentCount > 1) { + buffers = + IntStream.range(0, segmentCount) + .mapToObj(i -> genByteBuffer(segmentCapacity)) + .collect(ImmutableList.toImmutableList()); + } else { + buffers = ImmutableList.of(genByteBuffer(totalCapacity)); + } + return 
DataChain.ofByteBuffers(buffers); + } + + public final TmpFile tempFile(Path baseDir, long size) throws IOException { + requireNonNull(baseDir, "baseDir must be non null"); + checkState(size > 0, "size must be > 0"); + TmpFile bin = TmpFile.of(baseDir, String.format(Locale.US, "%015d-", size), ".bin"); + ReadableByteChannel src = + new ReadableByteChannel() { + long read = 0; + + @Override + public int read(ByteBuffer dst) throws IOException { + long remaining = size - read; + if (remaining <= 0) { + return -1; + } + int toRead = Math.toIntExact(Math.min(dst.remaining(), remaining)); + byte[] bytes = genBytes(toRead); + dst.put(bytes); + read += toRead; + return toRead; + } + + @Override + public boolean isOpen() { + return read < size; + } + + @Override + public void close() throws IOException {} + }; + + try (SeekableByteChannel dest = bin.writer()) { + ByteStreams.copy(src, dest); + } + + return bin; + } + + public static DataGenerator base64Characters() { + return new Base64CharactersDataGenerator(); + } + + public static DataGenerator rand(Random rand) { + return new RandomDataGenerator(rand); + } + + private static final class Base64CharactersDataGenerator extends DataGenerator { + private static final byte[] base64Characters = + new byte[] { + // A-Z + 0x41, + 0x42, + 0x43, + 0x44, + 0x45, + 0x46, + 0x47, + 0x48, + 0x49, + 0x4a, + 0x4b, + 0x4c, + 0x4d, + 0x4e, + 0x4f, + 0x50, + 0x51, + 0x52, + 0x53, + 0x54, + 0x55, + 0x56, + 0x57, + 0x58, + 0x59, + 0x5a, + // a-z + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0x73, + 0x74, + 0x75, + 0x76, + 0x77, + 0x78, + 0x79, + 0x7a, + // 0-9 + 0x30, + 0x31, + 0x32, + 0x33, + 0x34, + 0x35, + 0x36, + 0x37, + 0x38, + 0x39, + // +, / + 0x2b, + 0x2f, + }; + + @Override + public void fill(ByteBuffer buf) { + int rem; + while ((rem = buf.remaining()) > 0) { + buf.put(base64Characters, 0, Math.min(rem, base64Characters.length)); + 
} + } + + @Override + public void fill(byte[] b, int offset, int length) { + int curr = offset; + int rem = length; + do { + int min = Math.min(rem, base64Characters.length); + System.arraycopy(base64Characters, 0, b, curr, min); + curr += min; + rem -= min; + } while (rem > 0); + } + } + + private static final class RandomDataGenerator extends DataGenerator { + private final Random rand; + + private RandomDataGenerator(Random rand) { + this.rand = rand; + } + + @Override + public void fill(ByteBuffer b) { + while (b.position() < b.limit()) { + byte b1 = (byte) rand.nextInt(Byte.MAX_VALUE); + if (b1 == 0x00) { // exclude nul sequence + continue; + } + b.put(b1); + } + } + + @Override + public void fill(byte[] b, int offset, int length) { + int i = 0; + while (i < length) { + byte b1 = (byte) rand.nextInt(Byte.MAX_VALUE); + if (b1 == 0x00) { // exclude nul sequence + continue; + } + b[offset + i] = b1; + i++; + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DateTimeCodecPropertyTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DateTimeCodecPropertyTest.java new file mode 100644 index 000000000000..71ab929d4f6d --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DateTimeCodecPropertyTest.java @@ -0,0 +1,73 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.google.cloud.storage;
+
+import static com.google.cloud.storage.Utils.RFC_3339_DATE_TIME_FORMATTER;
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.api.client.util.DateTime;
+import com.google.cloud.storage.Conversions.Codec;
+import java.time.OffsetDateTime;
+import java.time.temporal.ChronoUnit;
+import net.jqwik.api.Arbitraries;
+import net.jqwik.api.Arbitrary;
+import net.jqwik.api.Combinators;
+import net.jqwik.api.Example;
+import net.jqwik.api.ForAll;
+import net.jqwik.api.Property;
+import net.jqwik.api.Provide;
+import net.jqwik.time.api.DateTimes;
+
+/**
+ * Property-based round-trip tests for the DateTime <-> OffsetDateTime codec: any RFC 3339 string
+ * should survive decode-then-encode byte-for-byte.
+ */
+final class DateTimeCodecPropertyTest {
+
+  // Codec under test; declared raw here -- presumably Codec<OffsetDateTime, DateTime>, TODO confirm
+  private static final Codec codec = Utils.dateTimeCodec;
+
+  // Fixed example: UTC ("Z") offset
+  @Example
+  void codecShouldRoundTrip_UTC() {
+    codecShouldRoundTrip("2019-08-23T07:23:51.396Z");
+  }
+
+  // Fixed example: negative UTC offset
+  @Example
+  void codecShouldRoundTrip_negative() {
+    codecShouldRoundTrip("2019-08-23T07:23:51.396-08:59");
+  }
+
+  // Fixed example: positive UTC offset
+  @Example
+  void codecShouldRoundTrip_positive() {
+    codecShouldRoundTrip("2019-08-23T07:23:51.396+00:13");
+  }
+
+  /**
+   * For an arbitrary RFC 3339 string: parse into the HTTP client's DateTime, decode to
+   * OffsetDateTime, encode back, and assert the re-rendered RFC 3339 string is unchanged.
+   */
+  @Property(tries = 10000)
+  void codecShouldRoundTrip(@ForAll("rfc3339") String rfc3339String) {
+    DateTime actual = new DateTime(rfc3339String);
+    OffsetDateTime odt = codec.decode(actual);
+    DateTime dt = codec.encode(odt);
+
+    assertThat(dt.toStringRfc3339()).isEqualTo(rfc3339String);
+  }
+
+  // Generator: arbitrary offset date-times with an extra 0-999ms added (DateTimes.offsetDateTimes
+  // appears to produce second-granularity values; the added millis exercise fractional seconds),
+  // rendered through the same RFC 3339 formatter the codec uses.
+  @Provide("rfc3339")
+  Arbitrary arbitraryRfc3339Strings() {
+    return Combinators.combine(DateTimes.offsetDateTimes(), Arbitraries.integers().between(0, 999))
+        .as((odt, millis) -> odt.plus(millis, ChronoUnit.MILLIS))
+        .map(DateTimeCodecPropertyTest::offsetDateTimeToString);
+  }
+
+  // Render via the shared RFC 3339 formatter so expected and actual use identical formatting rules
+  private static String offsetDateTimeToString(OffsetDateTime odt) {
+    return odt.format(RFC_3339_DATE_TIME_FORMATTER);
+  }
+}
diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedReadableByteChannelTest.java
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedReadableByteChannelTest.java
new file mode 100644
index 000000000000..6ecdf2072d9f
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedReadableByteChannelTest.java
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.storage;
+
+import static com.google.cloud.storage.ChunkSegmenterTest.TestData.fmt;
+import static com.google.common.truth.Truth.assertThat;
+import static com.google.common.truth.Truth.assertWithMessage;
+import static org.junit.Assert.assertThrows;
+
+import com.google.cloud.storage.BufferedReadableByteChannelSession.BufferedReadableByteChannel;
+import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Streams;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channels;
+import java.nio.channels.ClosedChannelException;
+import java.nio.channels.ReadableByteChannel;
+import java.util.Locale;
+import java.util.stream.IntStream;
+import net.jqwik.api.Arbitraries;
+import net.jqwik.api.Arbitrary;
+import net.jqwik.api.Combinators;
+import net.jqwik.api.Example;
+import net.jqwik.api.ForAll;
+import net.jqwik.api.Property;
+import net.jqwik.api.Provide;
+import net.jqwik.api.providers.TypeUsage;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+/**
+ * Tests for DefaultBufferedReadableByteChannel: verifies per-call read sizes and internal buffer
+ * occupancy as data of arbitrary size flows through arbitrary buffer/read-size combinations.
+ */
+public final class DefaultBufferedReadableByteChannelTest {
+
+  // Diagnostic: report the edge cases jqwik will generate for ReadOps (no assertions)
+  @Example
+  void edgeCases() {
+    JqwikTest.report(TypeUsage.of(ReadOps.class), arbitraryReadOps());
+  }
+
+  // Concrete case: 61 bytes read 16-at-a-time through a 3-byte internal buffer should yield
+  // reads of 16, 16, 16, 13; `buffer.remaining()` tracks leftover bytes parked in the buffer.
+  @Example
+  void _61bytes_should_read_16_16_16_13_whenRead16AtATime() throws IOException {
+    byte[] bytes = DataGenerator.base64Characters().genBytes(61);
+
+    ByteBuffer buf = ByteBuffer.allocate(16);
+    BufferHandle buffer = BufferHandle.allocate(3);
+    try (ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
+        UnbufferedReadableByteChannelAdapter adapter =
+            new UnbufferedReadableByteChannelAdapter(Channels.newChannel(bais));
+        BufferedReadableByteChannel c = new DefaultBufferedReadableByteChannel(buffer, adapter)) {
+      int read1 = c.read(buf);
+      assertThat(buffer.remaining()).isEqualTo(1);
+      assertThat(read1).isEqualTo(16);
+      buf.clear();
+      int read2 = c.read(buf);
+      assertThat(buffer.remaining()).isEqualTo(2);
+      assertThat(read2).isEqualTo(16);
+      buf.clear();
+      int read3 = c.read(buf);
+      assertThat(buffer.remaining()).isEqualTo(3);
+      assertThat(read3).isEqualTo(16);
+      buf.clear();
+      int read4 = c.read(buf);
+      assertThat(buffer.remaining()).isEqualTo(3);
+      assertThat(read4).isEqualTo(13);
+    }
+  }
+
+  /**
+   * Property: for any (data size, buffer size, read size) combination, each read returns exactly
+   * the expected number of bytes and no extra reads occur before EOF.
+   */
+  @Property
+  void bufferingOnlyRequiresExpectedReads(@ForAll("ReadOps") ReadOps readOps) throws IOException {
+    if (CIUtils.verbose()) {
+      System.out.println("readOps = " + readOps);
+    }
+    byte[] bytes = readOps.bytes;
+
+    ByteBuffer buf = ByteBuffer.allocate(readOps.readSize);
+    BufferHandle buffer = BufferHandle.allocate(readOps.bufferSize);
+    try (ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
+        UnbufferedReadableByteChannelAdapter adapter =
+            new UnbufferedReadableByteChannelAdapter(Channels.newChannel(bais));
+        BufferedReadableByteChannel c = new DefaultBufferedReadableByteChannel(buffer, adapter)) {
+      ImmutableList expectedReadSizes = readOps.expectedReadSizes;
+      int maxReadIdx = expectedReadSizes.size() - 1;
+
+      int i = 0;
+      int read;
+      // repeatedly read from `c` until we read EOF
+      while ((read = c.read(buf)) != -1) {
+        // ensure the read that just succeeded matches the expected size
+        if (i <= maxReadIdx) {
+          int expectedReadSize = expectedReadSizes.get(i);
+          assertThat(read).isEqualTo(expectedReadSize);
+          buf.clear();
+        } else {
+          assertWithMessage("More reads than expected").that(i).isAtMost(maxReadIdx);
+        }
+        i++;
+      }
+      // after EOF the channel must be closed; reading again should fail before touching the arg
+      assertThrows(ClosedChannelException.class, () -> c.read(null));
+    }
+  }
+
+  // Generator: data up to 256 KiB, internal buffer up to 16 KiB, read size up to 64 KiB
+  @Provide("ReadOps")
+  static Arbitrary arbitraryReadOps() {
+    return Combinators.combine(
+            Arbitraries.integers().between(1, 256 * 1024),
+            Arbitraries.integers().between(1, 16 * 1024),
+            Arbitraries.integers().between(1, 64 * 1024))
+        .as(ReadOps::of);
+  }
+
+  /**
+   * Test fixture: generated data plus the buffer/read sizes, with the precomputed per-call read
+   * sizes the channel is expected to report.
+   */
+  private static final class ReadOps {
+    private final byte[] bytes;
+    private final int bufferSize;
+    private final int readSize;
+
+    // expected size of each successive read: `fullReadCount` reads of `readSize`, then (if the
+    // data does not divide evenly) one final short read of the remainder
+    private final ImmutableList expectedReadSizes;
+    // human-readable form of expectedReadSizes for failure messages
+    private final String dbgExpectedReadSizes;
+
+    ReadOps(byte[] bytes, int bufferSize, int readSize) {
+      this.bytes = bytes;
+      this.bufferSize = bufferSize;
+      this.readSize = readSize;
+
+      int fullReadCount = bytes.length / readSize;
+      int remainingRead = (bytes.length - fullReadCount * readSize);
+      expectedReadSizes =
+          Streams.concat(
+                  IntStream.range(0, fullReadCount).map(i -> readSize),
+                  IntStream.of(remainingRead).filter(i -> i > 0))
+              .boxed()
+              .collect(ImmutableList.toImmutableList());
+
+      // render as "[readSize * count, remainder]" (omitting whichever part is absent)
+      if (fullReadCount > 0 && remainingRead > 0) {
+        dbgExpectedReadSizes =
+            String.format(Locale.US, "[%s * %d, %s]", readSize, fullReadCount, remainingRead);
+      } else if (remainingRead > 0) {
+        dbgExpectedReadSizes = String.format(Locale.US, "[%s]", remainingRead);
+      } else {
+        dbgExpectedReadSizes = String.format(Locale.US, "[%s * %d]", readSize, fullReadCount);
+      }
+    }
+
+    @Override
+    public String toString() {
+      return "ReadOps{"
+          + "bytes.length="
+          +
fmt(bytes.length) + + ", bufferSize=" + + fmt(bufferSize) + + ", readSize=" + + fmt(readSize) + + ", expectedReadSizes=" + + dbgExpectedReadSizes + + '}'; + } + + @NonNull + static ReadOps of(int byteSize, int bufferSize, int readSize) { + byte[] bytes = DataGenerator.base64Characters().genBytes(byteSize); + return new ReadOps(bytes, bufferSize, readSize); + } + } + + /** + * Adapter to make any {@link ReadableByteChannel} into an {@link UnbufferedReadableByteChannel} + */ + private static final class UnbufferedReadableByteChannelAdapter + implements UnbufferedReadableByteChannel { + + private final ReadableByteChannel c; + + private UnbufferedReadableByteChannelAdapter(ReadableByteChannel c) { + this.c = c; + } + + @Override + public int read(ByteBuffer dst) throws IOException { + return Math.toIntExact(read(new ByteBuffer[] {dst}, 0, 1)); + } + + @Override + public long read(ByteBuffer[] dsts) throws IOException { + return read(dsts, 0, dsts.length); + } + + @Override + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + if (!c.isOpen()) { + return -1; + } + + long totalBytesRead = 0; + for (int i = offset; i < length; i++) { + ByteBuffer dst = dsts[i]; + if (dst.hasRemaining()) { + int read = c.read(dst); + if (read == -1) { + if (totalBytesRead == 0) { + c.close(); + return -1; + } else { + break; + } + } + totalBytesRead += read; + } + } + return totalBytesRead; + } + + @Override + public boolean isOpen() { + return c.isOpen(); + } + + @Override + public void close() throws IOException { + c.close(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedWritableByteChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedWritableByteChannelTest.java new file mode 100644 index 000000000000..954d2917fd53 --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedWritableByteChannelTest.java @@ -0,0 +1,912 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ChunkSegmenterTest.TestData.fmt; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.fail; + +import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import com.google.cloud.storage.MinFlushBufferedWritableByteChannelTest.OnlyConsumeNBytes; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.collect.ImmutableList; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.WritableByteChannel; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Deque; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; +import 
net.jqwik.api.Arbitraries; +import net.jqwik.api.Arbitrary; +import net.jqwik.api.Combinators; +import net.jqwik.api.Example; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; +import net.jqwik.api.Provide; +import net.jqwik.api.providers.TypeUsage; +import org.checkerframework.checker.nullness.qual.NonNull; + +public final class DefaultBufferedWritableByteChannelTest { + + @Example + void edgeCases() { + JqwikTest.report(TypeUsage.of(WriteOps.class), arbitraryWriteOps()); + } + + @Property + void bufferingEagerlyFlushesWhenFull(@ForAll("WriteOps") WriteOps writeOps) throws IOException { + ByteBuffer buffer = ByteBuffer.allocate(writeOps.bufferSize); + AuditingBufferHandle handle = new AuditingBufferHandle(BufferHandle.handleOf(buffer)); + try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingWritableByteChannelAdapter adapter = + new CountingWritableByteChannelAdapter(Channels.newChannel(baos)); + BufferedWritableByteChannel c = new DefaultBufferedWritableByteChannel(handle, adapter)) { + + List actualWriteSizes = new ArrayList<>(); + + for (ByteBuffer buf : writeOps.writes) { + int write = c.write(buf); + actualWriteSizes.add(write); + } + + c.close(); + assertThrows(ClosedChannelException.class, () -> c.write(null)); + + assertWithMessage("Unexpected write size") + .that(actualWriteSizes) + .isEqualTo(writeOps.writeSizes); + assertWithMessage("Unexpected total flushed length") + .that(adapter.writeEndPoints) + .isEqualTo(writeOps.expectedFlushes); + assertThat(baos.toByteArray()).isEqualTo(writeOps.bytes); + } + } + + /** + * Scenario A: + * + *

Data size, and write size are smaller than buffer size + */ + @Example + void scenario_a() throws IOException { + bufferingEagerlyFlushesWhenFull(WriteOps.of(1, 2, 1)); + } + + /** Scenario B: Data size and buffer size are equal, while write size may be larger than both */ + @Example + void scenario_b() throws IOException { + bufferingEagerlyFlushesWhenFull(WriteOps.of(1, 1, 2)); + } + + /** + * Scenario C: + * + *

    + *
  • data size is evenly divisible by buffer size and write size + *
  • buffer size is larger than write size + *
  • buffer size is not evenly divisible by write size + *
+ */ + @Example + void scenario_c() throws IOException { + bufferingEagerlyFlushesWhenFull(WriteOps.of(105, 15, 7)); + } + + /** + * Scenario D: + * + *
    + *
  • write and buffer size are smaller than data + *
  • data size is not evenly divisible by either write size nor buffer size + *
  • buffer size is smaller than write size + *
  • write size is not evenly divisible by buffer size + *
+ */ + @Example + void scenario_d() throws IOException { + bufferingEagerlyFlushesWhenFull(WriteOps.of(61, 3, 16)); + } + + /** + * Scenario E: + * + *

Some flushes are only partially consumed. Ensure we proceed with consuming the buffer + * provided to {@code write} + * + *

+   *           0                        27
+   * data:    |--------------------------|
+   *               5       14 17        27
+   * writes:  |----|--------|--|---------|
+   *                   10
+   * flush 1: |---------|
+   *            2        12
+   * flush 2:   |---------|
+   *                     12        22
+   * flush 3:             |---------|
+   *                              19    27
+   * flush 4:                     |------|
+   * 
+ */ + @Example + void partialFlushOfEnqueuedBytesFlushesMultipleTimes() throws IOException { + ByteBuffer data1 = DataGenerator.base64Characters().genByteBuffer(5); + ByteBuffer data2 = DataGenerator.base64Characters().genByteBuffer(9); + ByteBuffer data3 = DataGenerator.base64Characters().genByteBuffer(3); + ByteBuffer data4 = DataGenerator.base64Characters().genByteBuffer(10); + + ImmutableList buffers = ImmutableList.of(data1, data2, data3, data4); + + int allDataSize = buffers.stream().mapToInt(ByteBuffer::remaining).sum(); + byte[] allData = + buffers.stream().reduce(ByteBuffer.allocate(allDataSize), ByteBuffer::put).array(); + buffers.forEach(b -> b.position(0)); + + AuditingBufferHandle handle = new AuditingBufferHandle(BufferHandle.allocate(10)); + try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingWritableByteChannelAdapter adapter = + new CountingWritableByteChannelAdapter(Channels.newChannel(baos)); + BufferedWritableByteChannel c = new DefaultBufferedWritableByteChannel(handle, adapter)) { + + c.write(data1); // write 5 bytes, which should enqueue in full + // before the next write, limit the number of bytes the underlying channel will consume to 2. + adapter.nextWriteMaxConsumptionLimit = 2L; + // write 9 bytes, which should trigger a flush - limited to 2 bytes, leaving 3 bytes in the + // buffer and not consuming any of the 9 bytes. Since 3 + 9 is still larger than our buffer + // attempt another flush of 10 bytes which will all be consumed. Enqueue the remaining 2 + // bytes from data2. + c.write(data2); + + // write 3 bytes, which should enqueue in full, leaving the buffer with 5 bytes enqueued + c.write(data3); + // before the next write, limit the number of bytes the underlying channel will consume to 7. + adapter.nextWriteMaxConsumptionLimit = 7L; + // write 10 bytes, which should trigger a flush - limited to 7 bytes, consuming all of the + // buffer, but only consuming 2 bytes written data. 
The remaining 8 bytes should be + // enqueued in full. + c.write(data4); + + // close the channel, causing a flush of the 8 outstanding bytes in buffer. + c.close(); + assertThrows(ClosedChannelException.class, () -> c.write(null)); + + assertWithMessage("Unexpected total flushed length") + .that(adapter.writeEndPoints) + .isEqualTo(ImmutableList.of(2L, 12L, 19L, 27L)); + assertThat(baos.toByteArray()).isEqualTo(allData); + } + } + + /** + * Ensure manually calling flush works. + * + *
+   *           0         12
+   * data:    |-----------|
+   *             3  6  9
+   * writes:  |--|--|--|--|
+   *             3
+   * flush 1: |--|
+   *             3  6
+   * flush 2:    |--|
+   *               5   10
+   * flush 3:      |----|
+   *                   10 12
+   * flush 4:           |-|
+   * 
+ */ + @Example + void manualFlushingIsAccurate() throws IOException { + ByteBuffer data1 = DataGenerator.base64Characters().genByteBuffer(3); + ByteBuffer data2 = DataGenerator.base64Characters().genByteBuffer(3); + ByteBuffer data3 = DataGenerator.base64Characters().genByteBuffer(3); + ByteBuffer data4 = DataGenerator.base64Characters().genByteBuffer(3); + + ImmutableList buffers = ImmutableList.of(data1, data2, data3, data4); + + int allDataSize = buffers.stream().mapToInt(ByteBuffer::remaining).sum(); + byte[] allData = + buffers.stream().reduce(ByteBuffer.allocate(allDataSize), ByteBuffer::put).array(); + buffers.forEach(b -> b.position(0)); + + AuditingBufferHandle handle = new AuditingBufferHandle(BufferHandle.allocate(5)); + try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingWritableByteChannelAdapter adapter = + new CountingWritableByteChannelAdapter(Channels.newChannel(baos)); + BufferedWritableByteChannel c = new DefaultBufferedWritableByteChannel(handle, adapter)) { + + c.write(data1); // write 3 bytes, which should enqueue in full + c.flush(); // flush all enqueued bytes + + c.write(data2); // write 3 bytes, which should enqueue in full + // before we call flush, limit how many bytes the underlying channel will consume to 2. + // This should leave 1 byte in the buffer + adapter.nextWriteMaxConsumptionLimit = 2L; + c.flush(); // attempt to flush all enqueued bytes, however only 2 of the 3 will be consumed + c.write(data3); // write 3 bytes, which should enqueue in full + // after this write, our buffer should contain 4 bytes of its 5 byte capacity + // on the next write, 5 bytes should be flushed. 
4 from the buffer, 1 from the written data + c.write(data4); // 1 of the 3 bytes will be flushed, leaving 2 bytes in the buffer + + c.close(); // close the channel, which should flush the 2 outstanding buffered bytes + assertThrows(ClosedChannelException.class, () -> c.write(null)); + + assertWithMessage("Unexpected total flushed length") + .that(adapter.writeEndPoints) + .isEqualTo(ImmutableList.of(3L, 5L, 6L, 11L, 12L)); + assertThat(baos.toByteArray()).isEqualTo(allData); + } + } + + @Provide("WriteOps") + static Arbitrary arbitraryWriteOps() { + return Combinators.combine( + Arbitraries.integers().between(1, 256 * 1024), + Arbitraries.integers().between(1, 16 * 1024), + Arbitraries.integers().between(1, 64 * 1024)) + .as(WriteOps::of); + } + + /** + * + * + *
+   *           0                                                                                                     105
+   * data:    |--------------------------------------------------------------------------------------------------------|
+   *                 7     14     21     28     35     42     49     56     63     70     77     84     91     98    105
+   * writes:  |------|------|------|------|------|------|------|------|------|------|------|------|------|------|------|
+   *                        15             30             45             60             75             90            105
+   * flushes: |--------------|--------------|--------------|--------------|--------------|--------------|--------------|
+   * 
+ */ + @Example + void writeOpsOfGeneratesAccurately_1() { + int dataSize = 105; + int bufferSize = 15; + int writeSize = 7; + + byte[] bytes = DataGenerator.base64Characters().genBytes(dataSize); + ImmutableList writes = + ImmutableList.of( + ByteBuffer.wrap(bytes, 0, writeSize), + ByteBuffer.wrap(bytes, 7, writeSize), + ByteBuffer.wrap(bytes, 14, writeSize), + ByteBuffer.wrap(bytes, 21, writeSize), + ByteBuffer.wrap(bytes, 28, writeSize), + ByteBuffer.wrap(bytes, 35, writeSize), + ByteBuffer.wrap(bytes, 42, writeSize), + ByteBuffer.wrap(bytes, 49, writeSize), + ByteBuffer.wrap(bytes, 56, writeSize), + ByteBuffer.wrap(bytes, 63, writeSize), + ByteBuffer.wrap(bytes, 70, writeSize), + ByteBuffer.wrap(bytes, 77, writeSize), + ByteBuffer.wrap(bytes, 84, writeSize), + ByteBuffer.wrap(bytes, 91, writeSize), + ByteBuffer.wrap(bytes, 98, writeSize)); + ImmutableList flushes = ImmutableList.of(15L, 30L, 45L, 60L, 75L, 90L, 105L); + String z = "[0x00000007 * 0x0000000f]"; + WriteOps expected = new WriteOps(bytes, bufferSize, writeSize, writes, flushes, z); + assertThat(WriteOps.of(dataSize, bufferSize, writeSize)).isEqualTo(expected); + } + + /** + * + * + *
+   *           0                                                          61
+   * data:    |------------------------------------------------------------|
+   *                         16         (16) 32         (16) 48      (13) 61
+   * writes:  |---------------|---------------|---------------|------------|
+   *             3  6  9 12 15 18 21 24 27 30 33 36 39 42 45 48 51 54 57 60
+   * flushes: |--|--|--|--|--|--|--|--|--|--|--|--|--|--|--|--|--|--|--|--||
+   * 
+ */ + @Example + void writeOpsOfGeneratesAccurately_2() { + int dataSize = 61; + int bufferSize = 3; + int writeSize = 16; + byte[] bytes = DataGenerator.base64Characters().genBytes(dataSize); + ImmutableList writes = + ImmutableList.of( + ByteBuffer.wrap(bytes, 0, writeSize), + ByteBuffer.wrap(bytes, 16, writeSize), + ByteBuffer.wrap(bytes, 32, writeSize), + ByteBuffer.wrap(bytes, 48, 13)); + ImmutableList flushes = + ImmutableList.of( + 3L, 6L, 9L, 12L, 15L, 18L, 21L, 24L, 27L, 30L, 33L, 36L, 39L, 42L, 45L, 48L, 51L, 54L, + 57L, 60L, 61L); + String z = "[0x00000010 * 0x00000003, 0x0000000d]"; + WriteOps expected = new WriteOps(bytes, bufferSize, writeSize, writes, flushes, z); + WriteOps actual = WriteOps.of(dataSize, bufferSize, writeSize); + assertThat(actual).isEqualTo(expected); + } + + @Example + @SuppressWarnings("JUnit5AssertionsConverter") + void callingCloseWithBufferedDataShouldCallWriteAndClose() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + + AtomicBoolean closed = new AtomicBoolean(false); + UnbufferedWritableByteChannel delegate = + new UnbufferedWritableByteChannel() { + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + fail("unexpected write(ByteBuffer[], int, int) call"); + return 0; + } + + @Override + public long writeAndClose(ByteBuffer[] srcs, int offset, int length) throws IOException { + long total = 0; + try (WritableByteChannel out = Channels.newChannel(baos)) { + for (ByteBuffer src : srcs) { + total += out.write(src); + } + } + closed.compareAndSet(false, true); + return total; + } + + @Override + public boolean isOpen() { + return !closed.get(); + } + + @Override + public void close() throws IOException { + fail("unexpected close() call"); + } + }; + DefaultBufferedWritableByteChannel test = + new DefaultBufferedWritableByteChannel(BufferHandle.allocate(20), delegate); + + byte[] bytes = DataGenerator.base64Characters().genBytes(10); + String expected = 
xxd(bytes); + + int write = test.write(ByteBuffer.wrap(bytes)); + assertThat(write).isEqualTo(10); + + assertThat(closed.get()).isFalse(); + + test.close(); + + String actual = xxd(baos.toByteArray()); + assertThat(actual).isEqualTo(expected); + assertThat(closed.get()).isTrue(); + } + + @Example + void nonBlockingWrite0DoesNotBlock() throws IOException { + BufferHandle handle = BufferHandle.allocate(5); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, new OnlyConsumeNBytes(0, 1), false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_8_3 = ByteBuffer.wrap(all.slice(0, 3).getBytes()); + int written1 = c.write(s_0_4); + assertThat(written1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(0); + assertThat(s_4_4.remaining()).isEqualTo(4); + + int written3 = c.write(s_8_3); + assertThat(written3).isEqualTo(0); + assertThat(s_8_3.remaining()).isEqualTo(3); + + assertThat(handle.remaining()).isEqualTo(1); + } + + @Example + void nonBlockingWritePartialDoesNotBlock_withoutBuffering() throws IOException { + BufferHandle handle = BufferHandle.allocate(4); + OnlyConsumeNBytes channel = new OnlyConsumeNBytes(4, 4); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, channel, false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(13); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(4, 4).getBytes()); + + // write all 4 bytes + int written1 = c.write(s_0_4); + assertThat(written1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(4); + assertThat(channel.getBytesConsumed()).isEqualTo(4); + + // Attempt to write 4 bytes, but 0 will 
be consumed, break out without consuming any + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(0); + assertThat(s_4_4.remaining()).isEqualTo(4); + assertThat(handle.remaining()).isEqualTo(4); + assertThat(channel.getBytesConsumed()).isEqualTo(4); + } + + @Example + void nonBlockingWritePartialDoesNotBlock_withoutBuffering_oversized() throws IOException { + BufferHandle handle = BufferHandle.allocate(2); + OnlyConsumeNBytes channel = new OnlyConsumeNBytes(4, 2); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, channel, false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(13); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(4, 4).getBytes()); + + // write all 4 bytes + int written1 = c.write(s_0_4); + assertThat(written1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(2); + assertThat(channel.getBytesConsumed()).isEqualTo(4); + + // Attempt to write 4 bytes, but 0 will be consumed, break out without consuming any + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(0); + assertThat(s_4_4.remaining()).isEqualTo(4); + assertThat(handle.remaining()).isEqualTo(2); + assertThat(channel.getBytesConsumed()).isEqualTo(4); + } + + @Example + void nonBlockingWritePartialDoesNotBlock_withBuffering() throws IOException { + BufferHandle handle = BufferHandle.allocate(5); + OnlyConsumeNBytes channel = new OnlyConsumeNBytes(5, 5); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, channel, false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(13); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(4, 4).getBytes()); + ByteBuffer s_8_12 = ByteBuffer.wrap(all.slice(8, 4).getBytes()); + + // write all 4 bytes + int written1 = c.write(s_0_4); + 
assertThat(written1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(1); + assertThat(channel.getBytesConsumed()).isEqualTo(0); + + // + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(4); + assertThat(s_4_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(2); + assertThat(channel.getBytesConsumed()).isEqualTo(5); + + int written3 = c.write(s_8_12); + assertThat(written3).isEqualTo(0); + assertThat(s_8_12.remaining()).isEqualTo(4); + assertThat(handle.remaining()).isEqualTo(2); + assertThat(channel.getBytesConsumed()).isEqualTo(5); + } + + @Example + void nonBlockingWritePartialDoesNotBlock_withBuffering_oversized() throws IOException { + BufferHandle handle = BufferHandle.allocate(3); + OnlyConsumeNBytes channel = new OnlyConsumeNBytes(6, 3); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, channel, false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(13); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(4, 4).getBytes()); + ByteBuffer s_8_12 = ByteBuffer.wrap(all.slice(8, 4).getBytes()); + + // slice 3 bytes and consume them, then enqueue the remaining 1 byte + int written1_1 = c.write(s_0_4); + assertThat(written1_1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(2); + assertThat(channel.getBytesConsumed()).isEqualTo(3); + + // write 1 buffered byte and 2 sliced bytes, enqueue 2 remaining + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(4); + assertThat(s_4_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(1); + assertThat(channel.getBytesConsumed()).isEqualTo(6); + + // attempt to write 4 bytes, non will be consumed and the buffer should remain the same + int written3 = c.write(s_8_12); + assertThat(written3).isEqualTo(0); + 
assertThat(s_8_12.remaining()).isEqualTo(4); + assertThat(handle.remaining()).isEqualTo(1); + assertThat(channel.getBytesConsumed()).isEqualTo(6); + } + + @Example + void illegalStateExceptionIfWrittenLt0_slice_eqBuffer() { + BufferHandle handle = BufferHandle.allocate(4); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, new NegativeOneWritableByteChannel(), false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + IllegalStateException ise = + assertThrows(IllegalStateException.class, () -> c.write(all.slice(0, 4).asByteBuffer())); + ise.printStackTrace(System.out); + } + + @Example + void illegalStateExceptionIfWrittenLt0_slice_gtBuffer() { + BufferHandle handle = BufferHandle.allocate(4); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, new NegativeOneWritableByteChannel(), false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + IllegalStateException ise = + assertThrows(IllegalStateException.class, () -> c.write(all.slice(0, 5).asByteBuffer())); + ise.printStackTrace(System.out); + } + + @Example + void illegalStateExceptionIfWrittenLt0_slice_ltBuffer() { + BufferHandle handle = BufferHandle.allocate(4); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, new NegativeOneWritableByteChannel(), false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + IllegalStateException ise = + assertThrows( + IllegalStateException.class, + () -> { + int written1 = c.write(all.slice(0, 3).asByteBuffer()); + assertThat(written1).isEqualTo(3); + c.write(all.slice(3, 3).asByteBuffer()); + fail("should have errored in previous write call"); + }); + ise.printStackTrace(System.out); + } + + @Example + void test() { + illegalStateExceptionIfWrittenLt0_slice_eqBuffer(); + illegalStateExceptionIfWrittenLt0_slice_gtBuffer(); + illegalStateExceptionIfWrittenLt0_slice_ltBuffer(); + } + + @Property + void 
bufferAllocationShouldOnlyHappenWhenNeeded(@ForAll("BufferSizes") WriteOps writeOps) + throws IOException { + AuditingBufferHandle handle = + new AuditingBufferHandle(BufferHandle.allocate(writeOps.bufferSize)); + try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingWritableByteChannelAdapter adapter = + new CountingWritableByteChannelAdapter(Channels.newChannel(baos)); + BufferedWritableByteChannel c = new DefaultBufferedWritableByteChannel(handle, adapter)) { + + for (ByteBuffer buf : writeOps.writes) { + c.write(buf); + } + } + + // if our write size is evenly divisible by our buffer size AND our buffer size is smaller + // than the total data size we expect to never allocate a buffer + if (writeOps.writeSize % writeOps.bufferSize == 0 + && writeOps.bufferSize <= writeOps.bytes.length) { + assertThat(handle.getCallCount).isEqualTo(0); + } else { + assertThat(handle.getCallCount).isGreaterThan(0); + } + } + + @Provide("BufferSizes") + static Arbitrary arbitraryBufferSizes() { + return Arbitraries.of( + // expect no allocation + WriteOps.of(32, 4, 16), + WriteOps.of(32, 16, 16), + WriteOps.of(32, 32, 32), + // expect allocation + WriteOps.of(32, 33, 32), + WriteOps.of(32, 64, 4)); + } + + private static final class WriteOps { + private final byte[] bytes; + private final int bufferSize; + private final int writeSize; + private final ImmutableList writeSizes; + private final ImmutableList writes; + private final ImmutableList expectedFlushes; + private final String dbgExpectedWriteSizes; + + public WriteOps( + byte[] bytes, + int bufferSize, + int writeSize, + ImmutableList writes, + ImmutableList expectedFlushes, + String dbgExpectedWriteSizes) { + this.bytes = bytes; + this.bufferSize = bufferSize; + this.writeSize = writeSize; + this.writeSizes = + writes.stream().map(ByteBuffer::remaining).collect(ImmutableList.toImmutableList()); + this.writes = writes; + this.expectedFlushes = expectedFlushes; + this.dbgExpectedWriteSizes = 
dbgExpectedWriteSizes; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof WriteOps)) { + return false; + } + WriteOps writeOps = (WriteOps) o; + return bufferSize == writeOps.bufferSize + && writeSize == writeOps.writeSize + && Arrays.equals(bytes, writeOps.bytes) + && Objects.equals(writes, writeOps.writes) + && Objects.equals(expectedFlushes, writeOps.expectedFlushes) + && Objects.equals(dbgExpectedWriteSizes, writeOps.dbgExpectedWriteSizes); + } + + @Override + public int hashCode() { + int result = + Objects.hash(bufferSize, writeSize, writes, expectedFlushes, dbgExpectedWriteSizes); + result = 31 * result + Arrays.hashCode(bytes); + return result; + } + + @Override + public String toString() { + return "[WriteOps.of(" + + fmt(bytes.length) + + ", " + + fmt(bufferSize) + + ", " + + fmt(writeSize) + + ")] WriteOps{" + + "bytes.length=" + + fmt(bytes.length) + + ", bufferSize=" + + fmt(bufferSize) + + ", writeSize=" + + fmt(writeSize) + + ", writes.size()=" + + fmt(writes.size()) + + ", expectedFlushes.size()=" + + fmt(expectedFlushes.size()) + + ", expectedWriteSizes=" + + dbgExpectedWriteSizes + + '}'; + } + + @NonNull + static WriteOps of(int byteSize, int bufferSize, int writeSize) { + byte[] bytes = DataGenerator.base64Characters().genBytes(byteSize); + + List writes = new ArrayList<>(); + Deque expectedFlushes = new ArrayDeque<>(); + + int length = bytes.length; + + int fullWriteCount = 0; + int remainingWrite = 0; + int prevWriteEndOffset = 0; + for (int i = 1; i <= length; i++) { + boolean flushBoundary = (i % bufferSize == 0) || bufferSize == 1; + boolean writeBoundary = (i % writeSize == 0) || writeSize == 1; + boolean eof = i == length; + + if (flushBoundary) { + expectedFlushes.addLast((long) i); + } + + if (writeBoundary) { + writes.add(ByteBuffer.wrap(bytes, prevWriteEndOffset, writeSize)); + fullWriteCount++; + prevWriteEndOffset += writeSize; + } + + if (eof) { + // We expect a flush 
during close in the following scenarios: + // the buffer size is larger than our data size (peekLast == null) + // data size is not evenly divisible by bufferSize + if (expectedFlushes.peekLast() == null || expectedFlushes.peekLast() != length) { + expectedFlushes.addLast((long) length); + } + + // If the data size is not evenly divisible by writeSize we will have an extra + // smaller write + if (prevWriteEndOffset != length) { + int writeLen = Math.min(length - prevWriteEndOffset, writeSize); + writes.add(ByteBuffer.wrap(bytes, prevWriteEndOffset, writeLen)); + remainingWrite = writeLen; + prevWriteEndOffset += writeLen; + } + } + } + + String dbgExpectedWriteSizes; + if (fullWriteCount > 0 && remainingWrite > 0) { + dbgExpectedWriteSizes = + String.format( + Locale.US, + "[%s * %s, %s]", + fmt(writeSize), + fmt(fullWriteCount), + fmt(remainingWrite)); + } else if (remainingWrite > 0) { + dbgExpectedWriteSizes = String.format(Locale.US, "[%s]", fmt(remainingWrite)); + } else { + dbgExpectedWriteSizes = + String.format(Locale.US, "[%s * %s]", fmt(writeSize), fmt(fullWriteCount)); + } + return new WriteOps( + bytes, + bufferSize, + writeSize, + ImmutableList.copyOf(writes), + ImmutableList.copyOf(expectedFlushes), + dbgExpectedWriteSizes); + } + } + + /** + * Adapter to make any {@link WritableByteChannel} into an {@link UnbufferedWritableByteChannel} + */ + static final class CountingWritableByteChannelAdapter implements UnbufferedWritableByteChannel { + + private final WritableByteChannel c; + + final List writeEndPoints; + long totalBytesWritten; + + long nextWriteMaxConsumptionLimit = Long.MAX_VALUE; + + CountingWritableByteChannelAdapter(WritableByteChannel c) { + this.c = c; + writeEndPoints = new ArrayList<>(); + } + + @Override + public int write(ByteBuffer src) throws IOException { + return Math.toIntExact(write(new ByteBuffer[] {src}, 0, 1)); + } + + @Override + public long write(ByteBuffer[] srcs) throws IOException { + return write(srcs, 0, 
srcs.length); + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + if (!c.isOpen()) { + return -1; + } + + long budgetRemaining = nextWriteMaxConsumptionLimit; + nextWriteMaxConsumptionLimit = Long.MAX_VALUE; + + long bytesWriten = 0; + for (int i = offset; i < length && budgetRemaining > 0; i++) { + ByteBuffer src = srcs[i]; + if (src.hasRemaining()) { + ByteBuffer slice = src.slice(); + int remaining = src.remaining(); + int newLimit = Math.toIntExact(Math.min(budgetRemaining, remaining)); + slice.limit(newLimit); + int write = c.write(slice); + if (write == -1) { + if (bytesWriten == 0) { + c.close(); + return -1; + } else { + break; + } + } else if (write == 0) { + break; + } else { + src.position(src.position() + write); + } + budgetRemaining -= write; + bytesWriten += write; + } + } + incr(bytesWriten); + return bytesWriten; + } + + @Override + public boolean isOpen() { + return c.isOpen(); + } + + @Override + public void close() throws IOException { + c.close(); + } + + private void incr(long bytesWritten) { + if (bytesWritten > 0) { + totalBytesWritten += bytesWritten; + writeEndPoints.add(totalBytesWritten); + } + } + } + + static final class AuditingBufferHandle extends BufferHandle { + private final BufferHandle delegate; + + int getCallCount = 0; + + AuditingBufferHandle(BufferHandle delegate) { + this.delegate = delegate; + } + + @Override + public int remaining() { + return delegate.remaining(); + } + + @Override + public int capacity() { + return delegate.capacity(); + } + + @Override + public int position() { + return delegate.position(); + } + + @Override + public ByteBuffer get() { + getCallCount++; + return delegate.get(); + } + } + + private static class NegativeOneWritableByteChannel implements UnbufferedWritableByteChannel { + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) { + return -1; + } + + @Override + public boolean isOpen() { + return true; + } + + @Override 
+ public void close() {} + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultRetryHandlingBehaviorTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultRetryHandlingBehaviorTest.java new file mode 100644 index 000000000000..63e8af728773 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultRetryHandlingBehaviorTest.java @@ -0,0 +1,1113 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static com.google.common.collect.ImmutableSet.toImmutableSet; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.fail; + +import com.fasterxml.jackson.core.JsonToken; +import com.fasterxml.jackson.core.io.JsonEOFException; +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpResponseException; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.auth.Retryable; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; +import com.google.gson.stream.MalformedJsonException; +import java.io.IOException; +import java.net.SocketException; +import java.net.SocketTimeoutException; +import java.net.UnknownHostException; +import java.security.cert.CertificateException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.function.Function; +import javax.net.ssl.SSLException; +import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.SSLProtocolException; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +/** + * Validate the behavior of our new "default" ResultRetryAlgorithms against that of the legacy retry + * handling behavior. 
+ */ +@RunWith(Parameterized.class) +public final class DefaultRetryHandlingBehaviorTest { + private static final String DEFAULT_MESSAGE = "err_default_message"; + + private final Case c; + private final DefaultStorageRetryStrategy defaultStrategy; + private final StorageRetryStrategy legacyStrategy; + + @SuppressWarnings("deprecation") + public DefaultRetryHandlingBehaviorTest(Case c) { + this.c = c; + defaultStrategy = DefaultStorageRetryStrategy.INSTANCE; + legacyStrategy = StorageRetryStrategy.getLegacyStorageRetryStrategy(); + } + + /** + * For a specified {@link Case} + * + *
    + *
  1. Resolve the ResultRetryAlgorithm for the specific {@link HandlerCategory} for both + * default and legacy + *
  2. Evaluate the retryability of the throwable {@link Case#getThrowable()} against both of + * the resolved {@code ResultRetryAlgorithm}s + *
  3. Resolve the {@link Behavior} change if any between the two evaluation results + *
  4. Assert the behavior change matches the {@link Case#expectedBehavior expected behavior} + *
+ */ + @SuppressWarnings("ConstantConditions") + @Test + public void validateBehavior() { + ResultRetryAlgorithm defaultAlgorithm = c.handlerCategory.apply(defaultStrategy); + ResultRetryAlgorithm legacyAlgorithm = c.handlerCategory.apply(legacyStrategy); + Throwable throwable = c.getThrowable(); + + boolean defaultShouldRetryResult = defaultAlgorithm.shouldRetry(throwable, null); + boolean legacyShouldRetryResult = legacyAlgorithm.shouldRetry(throwable, null); + + Behavior actualBehavior = null; + String message = null; + boolean shouldRetry = c.getExpectRetry().shouldRetry; + if (shouldRetry && !defaultShouldRetryResult && legacyShouldRetryResult) { + actualBehavior = Behavior.DEFAULT_MORE_STRICT; + message = "default is more strict"; + } else if (shouldRetry && !defaultShouldRetryResult && !legacyShouldRetryResult) { + actualBehavior = Behavior.SAME; + message = "both are rejecting when we want a retry"; + fail(message); + } else if (shouldRetry && defaultShouldRetryResult && legacyShouldRetryResult) { + actualBehavior = Behavior.SAME; + message = "both are allowing"; + } else if (shouldRetry && defaultShouldRetryResult && !legacyShouldRetryResult) { + actualBehavior = Behavior.DEFAULT_MORE_PERMISSIBLE; + message = "default is more permissive"; + } else if (!shouldRetry && !defaultShouldRetryResult && legacyShouldRetryResult) { + actualBehavior = Behavior.DEFAULT_MORE_STRICT; + message = "default is more strict"; + } else if (!shouldRetry && !defaultShouldRetryResult && !legacyShouldRetryResult) { + actualBehavior = Behavior.SAME; + message = "both are rejecting as expected"; + } else if (!shouldRetry && defaultShouldRetryResult && legacyShouldRetryResult) { + actualBehavior = Behavior.SAME; + message = "both are too permissive"; + fail(message); + } else if (!shouldRetry && defaultShouldRetryResult && !legacyShouldRetryResult) { + actualBehavior = Behavior.DEFAULT_MORE_PERMISSIBLE; + message = "default is too permissive"; + } + + 
assertWithMessage(message).that(actualBehavior).isEqualTo(c.expectedBehavior); + } + + /** Resolve all the test cases and assert all permutations have a case defined. */ + @Parameters(name = "{0}") + public static Collection testCases() { + + // define the list of cases to be validated + List cases = getAllCases(); + + /* perform validation of the defined list of cases to ensure all permutations are defined */ + + // calculate all the possible permutations + ImmutableSet expectedTokens = + Arrays.stream(HandlerCategory.values()) + .flatMap( + handlerCategory -> + Arrays.stream(ThrowableCategory.values()) + .map(throwableCategory -> token(throwableCategory, handlerCategory))) + .collect(toImmutableSet()); + + // calculate the actual defined permutations + ImmutableSet actualTokens = + cases.stream() + .map(c -> token(c.throwableCategory, c.handlerCategory)) + .collect(toImmutableSet()); + + // calculate the difference if any between expected and actual, then sort and listify + ImmutableList difference = + Sets.difference(expectedTokens, actualTokens).stream().sorted().collect(toImmutableList()); + + // ensure all permutations are accounted for, reporting any that haven't been and providing + // a stub which can be used to easily define them. + assertWithMessage("Missing mappings for tokens").that(difference).isEmpty(); + + // wrap our case in an array for ultimate passing to the constructor + return cases.stream().map(c -> new Object[] {c}).collect(toImmutableList()); + } + + /** + * Generate a token which represents a permutation for which a {@link Case} must be defined. + * + *

If a case is not defined, this value will be reported and functions as a stub to easily + * define a new {@code Case}. + */ + private static String token(ThrowableCategory t, HandlerCategory h) { + return String.format( + Locale.US, + "new Case(ThrowableCategory.%s, HandlerCategory.%s, /*TODO*/ null, /*TODO*/ null)", + t.name(), + h.name()); + } + + /** + * An individual case we want to validate. + * + *

Given a {@link HandlerCategory} and {@link ThrowableCategory} validate the retryability and + * behavior between default and legacy handlers. + */ + static final class Case { + + private final HandlerCategory handlerCategory; + private final ThrowableCategory throwableCategory; + private final ExpectRetry expectRetry; + private final Behavior expectedBehavior; + + Case( + ThrowableCategory throwableCategory, + HandlerCategory handlerCategory, + ExpectRetry expectRetry, + Behavior expectedBehavior) { + this.handlerCategory = handlerCategory; + this.throwableCategory = throwableCategory; + this.expectRetry = expectRetry; + this.expectedBehavior = expectedBehavior; + } + + Throwable getThrowable() { + return throwableCategory.throwable; + } + + public ExpectRetry getExpectRetry() { + return expectRetry; + } + + @Override + public String toString() { + return "Case{" + + "handlerCategory=" + + handlerCategory + + ", throwableCategory=" + + throwableCategory + + ", expectRetry=" + + expectRetry + + ", expectedBehavior=" + + expectedBehavior + + '}'; + } + } + + /** Whether to expect a retry to happen or not */ + enum ExpectRetry { + YES(true), + NO(false); + + private final boolean shouldRetry; + + ExpectRetry(boolean shouldRetry) { + this.shouldRetry = shouldRetry; + } + } + + /** + * A category of handler type, and the ability to resolve the {@link ResultRetryAlgorithm} given a + * {@link StorageRetryStrategy} + */ + enum HandlerCategory implements Function> { + IDEMPOTENT, + NONIDEMPOTENT; + + @Override + public ResultRetryAlgorithm apply(StorageRetryStrategy storageRetryStrategy) { + switch (this) { + case IDEMPOTENT: + return storageRetryStrategy.getIdempotentHandler(); + case NONIDEMPOTENT: + return storageRetryStrategy.getNonidempotentHandler(); + default: + throw new IllegalStateException("Unmappable HandlerCategory: " + this.name()); + } + } + } + + /** Some states comparing behavior between default and legacy */ + enum Behavior { + DEFAULT_MORE_PERMISSIBLE, + 
SAME, + DEFAULT_MORE_STRICT + } + + /** + * A set of exceptions we want to validate behavior for. + * + *

This class is an enum for convenience of specifying a closed set, along with providing easy + * to read names in code thereby forgoing the need to maintain a separate set of strings. + */ + enum ThrowableCategory { + SOCKET_TIMEOUT_EXCEPTION(C.SOCKET_TIMEOUT_EXCEPTION), + SOCKET_EXCEPTION(C.SOCKET_EXCEPTION), + SSL_EXCEPTION(C.SSL_EXCEPTION), + SSL_EXCEPTION_CONNECTION_SHUTDOWN(C.SSL_EXCEPTION_CONNECTION_SHUTDOWN), + SSL_EXCEPTION_CONNECTION_RESET(C.SSL_EXCEPTION_CONNECTION_RESET), + SSL_HANDSHAKE_EXCEPTION(C.SSL_HANDSHAKE_EXCEPTION), + SSL_HANDSHAKE_EXCEPTION_CAUSED_BY_CERTIFICATE_EXCEPTION( + C.SSL_HANDSHAKE_EXCEPTION_CERTIFICATE_EXCEPTION), + INSUFFICIENT_DATA(C.INSUFFICIENT_DATA_WRITTEN), + ERROR_WRITING_REQUEST_BODY(C.ERROR_WRITING_REQUEST_BODY), + HTTP_RESPONSE_EXCEPTION_401(C.HTTP_401), + HTTP_RESPONSE_EXCEPTION_403(C.HTTP_403), + HTTP_RESPONSE_EXCEPTION_404(C.HTTP_404), + HTTP_RESPONSE_EXCEPTION_408(C.HTTP_409), + HTTP_RESPONSE_EXCEPTION_429(C.HTTP_429), + HTTP_RESPONSE_EXCEPTION_500(C.HTTP_500), + HTTP_RESPONSE_EXCEPTION_502(C.HTTP_502), + HTTP_RESPONSE_EXCEPTION_503(C.HTTP_503), + HTTP_RESPONSE_EXCEPTION_504(C.HTTP_504), + STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_401(new StorageException(C.HTTP_401)), + STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_403(new StorageException(C.HTTP_403)), + STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_404(new StorageException(C.HTTP_404)), + STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_408(new StorageException(C.HTTP_409)), + STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_429(new StorageException(C.HTTP_429)), + STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_500(new StorageException(C.HTTP_500)), + STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_502(new StorageException(C.HTTP_502)), + STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_503(new StorageException(C.HTTP_503)), + STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_504(new StorageException(C.HTTP_504)), + STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_401(new StorageException(C.JSON_401)), + 
STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_403(new StorageException(C.JSON_403)), + STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_404(new StorageException(C.JSON_404)), + STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_408(new StorageException(C.JSON_408)), + STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_429(new StorageException(C.JSON_429)), + STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_500(new StorageException(C.JSON_500)), + STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_502(new StorageException(C.JSON_502)), + STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_503(new StorageException(C.JSON_503)), + STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_504(new StorageException(C.JSON_504)), + STORAGE_EXCEPTION_SOCKET_TIMEOUT_EXCEPTION(new StorageException(C.SOCKET_TIMEOUT_EXCEPTION)), + STORAGE_EXCEPTION_SOCKET_EXCEPTION(StorageException.translate(C.SOCKET_EXCEPTION)), + STORAGE_EXCEPTION_SSL_EXCEPTION(new StorageException(C.SSL_EXCEPTION)), + STORAGE_EXCEPTION_SSL_EXCEPTION_CONNECTION_SHUTDOWN( + new StorageException(C.SSL_EXCEPTION_CONNECTION_SHUTDOWN)), + STORAGE_EXCEPTION_SSL_EXCEPTION_CONNECTION_RESET( + new StorageException(C.SSL_EXCEPTION_CONNECTION_RESET)), + STORAGE_EXCEPTION_SSL_HANDSHAKE_EXCEPTION(new StorageException(C.SSL_HANDSHAKE_EXCEPTION)), + STORAGE_EXCEPTION_SSL_HANDSHAKE_EXCEPTION_CAUSED_BY_CERTIFICATE_EXCEPTION( + new StorageException(C.SSL_HANDSHAKE_EXCEPTION_CERTIFICATE_EXCEPTION)), + STORAGE_EXCEPTION_INSUFFICIENT_DATA(new StorageException(C.INSUFFICIENT_DATA_WRITTEN)), + STORAGE_EXCEPTION_ERROR_WRITING_REQUEST_BODY( + new StorageException(C.ERROR_WRITING_REQUEST_BODY)), + ILLEGAL_ARGUMENT_EXCEPTION(C.ILLEGAL_ARGUMENT_EXCEPTION), + STORAGE_EXCEPTION_ILLEGAL_ARGUMENT_EXCEPTION( + StorageException.coalesce(C.ILLEGAL_ARGUMENT_EXCEPTION)), + STORAGE_EXCEPTION_0_INTERNAL_ERROR( + new StorageException(0, "internalError", "internalError", null)), + STORAGE_EXCEPTION_0_CONNECTION_CLOSED_PREMATURELY( + new StorageException( + 0, "connectionClosedPrematurely", "connectionClosedPrematurely", null)), + 
STORAGE_EXCEPTION_0_CONNECTION_CLOSED_PREMATURELY_IO_CAUSE( + new StorageException( + 0, + "connectionClosedPrematurely", + "connectionClosedPrematurely", + C.CONNECTION_CLOSED_PREMATURELY)), + STORAGE_EXCEPTION_0_CONNECTION_CLOSED_PREMATURELY_IO_CAUSE_NO_REASON( + StorageException.translate(C.CONNECTION_CLOSED_PREMATURELY)), + STORAGE_EXCEPTION_0_IO_PREMATURE_EOF(StorageException.translate(C.IO_PREMATURE_EOF)), + EMPTY_JSON_PARSE_ERROR(new IllegalArgumentException("no JSON input found")), + JACKSON_EOF_EXCEPTION(C.JACKSON_EOF_EXCEPTION), + STORAGE_EXCEPTION_0_JACKSON_EOF_EXCEPTION( + new StorageException(0, "parse error", C.JACKSON_EOF_EXCEPTION)), + GSON_MALFORMED_EXCEPTION(C.GSON_MALFORMED_EXCEPTION), + STORAGE_EXCEPTION_0_GSON_MALFORMED_EXCEPTION( + new StorageException(0, "parse error", C.GSON_MALFORMED_EXCEPTION)), + IO_EXCEPTION(new IOException("no retry")), + AUTH_RETRYABLE_TRUE(new RetryableException(true)), + AUTH_RETRYABLE_FALSE(new RetryableException(false)), + UNKNOWN_HOST_EXCEPTION(C.UNKNOWN_HOST_EXCEPTION), + ; + + private final Throwable throwable; + + ThrowableCategory(Throwable throwable) { + this.throwable = throwable; + } + + public Throwable getThrowable() { + return throwable; + } + + /** + * A class of constants for use by the containing enum. + * + *

Enums can't have static fields, so we use this class to hold constants which are used by
+   * the enum values.
+   */
+  private static final class C {
+    // Shared throwable instances referenced by the enclosing enum's values. Building each one a
+    // single time here keeps every enum-value declaration down to one constant reference.
+    private static final SocketTimeoutException SOCKET_TIMEOUT_EXCEPTION =
+        new SocketTimeoutException();
+    private static final SocketException SOCKET_EXCEPTION = new SocketException();
+    private static final SSLException SSL_EXCEPTION = new SSLException("unknown");
+    private static final SSLException SSL_EXCEPTION_CONNECTION_SHUTDOWN =
+        new SSLException("Connection has been shutdown: asdf");
+    private static final SSLException SSL_EXCEPTION_CONNECTION_RESET =
+        new SSLException("Connection reset", new SocketException("Connection reset"));
+    private static final SSLHandshakeException SSL_HANDSHAKE_EXCEPTION =
+        newSslHandshakeExceptionWithCause(new SSLProtocolException(DEFAULT_MESSAGE));
+    private static final SSLHandshakeException SSL_HANDSHAKE_EXCEPTION_CERTIFICATE_EXCEPTION =
+        newSslHandshakeExceptionWithCause(new CertificateException());
+    private static final IOException INSUFFICIENT_DATA_WRITTEN =
+        new IOException("insufficient data written");
+    private static final IOException ERROR_WRITING_REQUEST_BODY =
+        new IOException("Error writing request body to server");
+    private static final HttpResponseException HTTP_401 =
+        newHttpResponseException(401, "Unauthorized");
+    private static final HttpResponseException HTTP_403 =
+        newHttpResponseException(403, "Forbidden");
+    private static final HttpResponseException HTTP_404 =
+        newHttpResponseException(404, "Not Found");
+    // NOTE(review): this constant is named HTTP_409 but is constructed with status 408
+    // "Request Timeout" (409 would be "Conflict"). It looks like a typo for HTTP_408 — confirm
+    // against the ThrowableCategory values that reference it before renaming.
+    private static final HttpResponseException HTTP_409 =
+        newHttpResponseException(408, "Request Timeout");
+    private static final HttpResponseException HTTP_429 =
+        newHttpResponseException(429, "Too Many Requests");
+    private static final HttpResponseException HTTP_500 =
+        newHttpResponseException(500, "Internal Server Error");
+    private static final HttpResponseException HTTP_502 =
+        newHttpResponseException(502, "Bad Gateway");
+    private static final HttpResponseException HTTP_503 =
+        newHttpResponseException(503, "Service Unavailable");
+    private static final HttpResponseException HTTP_504 =
+        newHttpResponseException(504, "Gateway Timeout");
+    private static final GoogleJsonError JSON_401 = newGoogleJsonError(401, "Unauthorized");
+    private static final GoogleJsonError JSON_403 = newGoogleJsonError(403, "Forbidden");
+    private static final GoogleJsonError JSON_404 = newGoogleJsonError(404, "Not Found");
+    private static final GoogleJsonError JSON_408 = newGoogleJsonError(408, "Request Timeout");
+    private static final GoogleJsonError JSON_429 = newGoogleJsonError(429, "Too Many Requests");
+    private static final GoogleJsonError JSON_500 =
+        newGoogleJsonError(500, "Internal Server Error");
+    private static final GoogleJsonError JSON_502 = newGoogleJsonError(502, "Bad Gateway");
+    private static final GoogleJsonError JSON_503 =
+        newGoogleJsonError(503, "Service Unavailable");
+    private static final GoogleJsonError JSON_504 = newGoogleJsonError(504, "Gateway Timeout");
+    private static final IllegalArgumentException ILLEGAL_ARGUMENT_EXCEPTION =
+        new IllegalArgumentException("illegal argument");
+    private static final IOException CONNECTION_CLOSED_PREMATURELY =
+        new IOException("simulated Connection closed prematurely");
+    private static final JsonEOFException JACKSON_EOF_EXCEPTION =
+        new JsonEOFException(null, JsonToken.VALUE_STRING, "parse-exception");
+    private static final MalformedJsonException GSON_MALFORMED_EXCEPTION =
+        new MalformedJsonException("parse-exception");
+    private static final IOException IO_PREMATURE_EOF = new IOException("Premature EOF");
+    private static final UnknownHostException UNKNOWN_HOST_EXCEPTION =
+        new UnknownHostException("fake.fake");
+
+    // Builds an HttpResponseException with the given status code and reason phrase, and empty
+    // headers/body.
+    private static HttpResponseException newHttpResponseException(
+        int httpStatusCode, String name) {
+      return new HttpResponseException.Builder(httpStatusCode, name, new HttpHeaders()).build();
+    }
+
+    // Builds a GoogleJsonError carrying only a code and message.
+    private static GoogleJsonError newGoogleJsonError(int code, String message) {
+      GoogleJsonError error = new GoogleJsonError();
+      error.setCode(code);
+      error.setMessage(message);
+      return error;
+    }
+
+    // SSLHandshakeException has no (message, cause) constructor, so the cause is attached via
+    // initCause. Throwable#initCause returns `this`, which makes the cast back to
+    // SSLHandshakeException safe.
+    private static SSLHandshakeException newSslHandshakeExceptionWithCause(Throwable cause) {
+      SSLHandshakeException sslHandshakeException = new SSLHandshakeException(DEFAULT_MESSAGE);
+      Throwable throwable = sslHandshakeException.initCause(cause);
+      return (SSLHandshakeException) throwable;
+    }
+  }
+ }
+
+ /**
+ * A corralled method which allows us to move the individual cases away from the rest of the code
+ * since our code formatter is very strict and seems to feel 475 lines of code at 100 columns is
+ * better than 90 lines at 200 columns.
+ *

this method returns a list that essentially is a table where each row is an individual
+ * test case:
+ *
+ * <table>
+ *   <tr>
+ *     <th>{@link ThrowableCategory throwable category}</th>
+ *     <th>{@link HandlerCategory handler category}</th>
+ *     <th>{@link ExpectRetry whether retry is expected}</th>
+ *     <th>{@link Behavior what the expected behavior comparison is}</th>
+ *   </tr>
+ *   <tr>
+ *     <td>{@link ThrowableCategory#STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_500 STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_500}</td>
+ *     <td>{@link HandlerCategory#NONIDEMPOTENT NONIDEMPOTENT}</td>
+ *     <td>{@link ExpectRetry#NO NO}</td>
+ *     <td>{@link Behavior#DEFAULT_MORE_STRICT DEFAULT_MORE_STRICT}</td>
+ *   </tr>
+ * </table>
+ */
+ private static ImmutableList getAllCases() {
+   // One row per test case: (throwable category, handler category, whether a retry is expected,
+   // and how the custom handler's decision compares to the default handler's decision).
+   // Every ThrowableCategory appears exactly twice — once with the idempotent handler and once
+   // with the non-idempotent handler. Non-idempotent handlers never retry.
+   return ImmutableList.builder()
+       .add(
+           new Case(ThrowableCategory.ERROR_WRITING_REQUEST_BODY, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.ERROR_WRITING_REQUEST_BODY, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_401, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_401, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_403, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_403, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_404, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_404, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_408, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_408, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_429, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_429, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_500, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_500, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_502, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_502, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_503, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_503, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_504, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.HTTP_RESPONSE_EXCEPTION_504, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.ILLEGAL_ARGUMENT_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.ILLEGAL_ARGUMENT_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.INSUFFICIENT_DATA, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.INSUFFICIENT_DATA, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.SOCKET_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.SOCKET_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.SOCKET_TIMEOUT_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.SOCKET_TIMEOUT_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.SSL_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.SSL_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.SSL_EXCEPTION_CONNECTION_SHUTDOWN, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.SSL_EXCEPTION_CONNECTION_SHUTDOWN, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.SSL_EXCEPTION_CONNECTION_RESET, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.SSL_EXCEPTION_CONNECTION_RESET, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.SSL_HANDSHAKE_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.SSL_HANDSHAKE_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.SSL_HANDSHAKE_EXCEPTION_CAUSED_BY_CERTIFICATE_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.SSL_HANDSHAKE_EXCEPTION_CAUSED_BY_CERTIFICATE_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_ERROR_WRITING_REQUEST_BODY, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_ERROR_WRITING_REQUEST_BODY, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_401, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_401, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_403, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_403, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_404, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_404, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_408, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_408, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_429, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_429, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_500, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_500, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_502, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_502, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_503, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_503, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_504, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_GOOGLE_JSON_ERROR_504, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_401, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_401, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_403, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_403, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_404, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_404, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_408, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_408, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_429, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_429, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_500, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_500, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_502, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_502, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_503, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_503, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_504, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_HTTP_RESPONSE_EXCEPTION_504, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_ILLEGAL_ARGUMENT_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_ILLEGAL_ARGUMENT_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_INSUFFICIENT_DATA, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_INSUFFICIENT_DATA, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SOCKET_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SOCKET_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SOCKET_TIMEOUT_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SOCKET_TIMEOUT_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SSL_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SSL_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SSL_EXCEPTION_CONNECTION_SHUTDOWN, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SSL_EXCEPTION_CONNECTION_SHUTDOWN, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SSL_EXCEPTION_CONNECTION_RESET, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SSL_EXCEPTION_CONNECTION_RESET, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SSL_HANDSHAKE_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SSL_HANDSHAKE_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SSL_HANDSHAKE_EXCEPTION_CAUSED_BY_CERTIFICATE_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_SSL_HANDSHAKE_EXCEPTION_CAUSED_BY_CERTIFICATE_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_CONNECTION_CLOSED_PREMATURELY, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_CONNECTION_CLOSED_PREMATURELY, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_CONNECTION_CLOSED_PREMATURELY_IO_CAUSE, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_CONNECTION_CLOSED_PREMATURELY_IO_CAUSE, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_CONNECTION_CLOSED_PREMATURELY_IO_CAUSE_NO_REASON, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_CONNECTION_CLOSED_PREMATURELY_IO_CAUSE_NO_REASON, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_IO_PREMATURE_EOF, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_IO_PREMATURE_EOF, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_INTERNAL_ERROR, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_INTERNAL_ERROR, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.DEFAULT_MORE_STRICT),
+           new Case(ThrowableCategory.EMPTY_JSON_PARSE_ERROR, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.EMPTY_JSON_PARSE_ERROR, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.IO_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.IO_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.JACKSON_EOF_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.JACKSON_EOF_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_JACKSON_EOF_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_JACKSON_EOF_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.GSON_MALFORMED_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.GSON_MALFORMED_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_GSON_MALFORMED_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.STORAGE_EXCEPTION_0_GSON_MALFORMED_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.AUTH_RETRYABLE_TRUE, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.AUTH_RETRYABLE_TRUE, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.AUTH_RETRYABLE_FALSE, HandlerCategory.IDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.AUTH_RETRYABLE_FALSE, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME),
+           new Case(ThrowableCategory.UNKNOWN_HOST_EXCEPTION, HandlerCategory.IDEMPOTENT, ExpectRetry.YES, Behavior.DEFAULT_MORE_PERMISSIBLE),
+           new Case(ThrowableCategory.UNKNOWN_HOST_EXCEPTION, HandlerCategory.NONIDEMPOTENT, ExpectRetry.NO, Behavior.SAME))
+       .build();
+ }
+
+ /**
+ * The auth library provides the interface {@link Retryable} to annotate an exception as
+ * retryable. Add a definition here.
Explicitly extend IOException to ensure our handling of this + * type is sooner than IOExceptions + */ + private static final class RetryableException extends IOException implements Retryable { + + private final boolean isRetryable; + + private RetryableException(boolean isRetryable) { + super(String.format(Locale.US, "RetryableException{isRetryable=%s}", isRetryable)); + this.isRetryable = isRetryable; + } + + @Override + public boolean isRetryable() { + return isRetryable; + } + + @Override + public int getRetryCount() { + return 0; + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DetectContentTypeTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DetectContentTypeTest.java new file mode 100644 index 000000000000..69d6ad639dc1 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DetectContentTypeTest.java @@ -0,0 +1,121 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.google.cloud.storage;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt;
+import com.google.cloud.storage.UnifiedOpts.SetContentType;
+import com.google.storage.v2.Object;
+import com.google.storage.v2.UpdateObjectRequest;
+import com.google.storage.v2.WriteObjectRequest;
+import com.google.storage.v2.WriteObjectSpec;
+import java.util.Arrays;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+/**
+ * Parameterized test for {@code UnifiedOpts.detectContentType()}: for each (object name,
+ * expected content type) row in {@link #data()}, the detected opt must equal
+ * {@code UnifiedOpts.setContentType(expectedContentType)} and must apply that content type to
+ * BlobInfo builders, WriteObjectRequests and UpdateObjectRequests.
+ */
+@RunWith(Parameterized.class)
+public final class DetectContentTypeTest {
+
+  // Object name the content type is detected from.
+  private final String objectName;
+  // Content type expected for that name (see data()).
+  private final String expectedContentType;
+  // Opt the detection is expected to be equal to.
+  private final SetContentType expected;
+
+  public DetectContentTypeTest(String objectName, String expectedContentType) {
+    this.objectName = objectName;
+    this.expectedContentType = expectedContentType;
+    this.expected = UnifiedOpts.setContentType(expectedContentType);
+  }
+
+  /** Detection from a BlobId yields the expected SetContentType opt. */
+  @Test
+  public void blobId() {
+    ObjectTargetOpt actual =
+        UnifiedOpts.detectContentType().extractFromBlobId(BlobId.of("bucket", objectName));
+    assertThat(actual).isEqualTo(expected);
+  }
+
+  /** Detection from a BlobInfo yields the expected SetContentType opt. */
+  @Test
+  public void blobInfo() {
+    ObjectTargetOpt actual =
+        UnifiedOpts.detectContentType()
+            .extractFromBlobInfo(BlobInfo.newBuilder("bucket", objectName).build());
+    assertThat(actual).isEqualTo(expected);
+  }
+
+  /** Applying the detected opt to a BlobInfo builder sets the expected content type. */
+  @Test
+  public void updateBlobInfo() {
+    BlobInfo base = BlobInfo.newBuilder("bucket", objectName).build();
+    BlobInfo actual =
+        UnifiedOpts.detectContentType()
+            .extractFromBlobInfo(base)
+            .blobInfo()
+            .apply(base.toBuilder())
+            .build();
+    BlobInfo expected = base.toBuilder().setContentType(expectedContentType).build();
+    assertThat(actual).isEqualTo(expected);
+  }
+
+  /** Applying the detected opt to a WriteObjectRequest sets the resource's content type. */
+  @Test
+  public void writeObjectRequest() {
+    WriteObjectRequest base =
+        WriteObjectRequest.newBuilder()
+            .setWriteObjectSpec(
+                WriteObjectSpec.newBuilder()
+                    .setResource(
+                        Object.newBuilder().setBucket("bucket").setName(objectName).build())
+                    .build())
+            .build();
+    WriteObjectRequest actual =
+        UnifiedOpts.detectContentType()
+            .extractFromBlobInfo(BlobInfo.newBuilder("bucket", objectName).build())
+            .writeObject()
+            .apply(base.toBuilder())
+            .build();
+    WriteObjectRequest.Builder b2 = base.toBuilder();
+    b2.getWriteObjectSpecBuilder().getResourceBuilder().setContentType(expectedContentType);
+    WriteObjectRequest expected = b2.build();
+    assertThat(actual).isEqualTo(expected);
+  }
+
+  /** Applying the detected opt to an UpdateObjectRequest sets the object's content type. */
+  @Test
+  public void updateObjectRequest() {
+    UpdateObjectRequest base =
+        UpdateObjectRequest.newBuilder()
+            .setObject(Object.newBuilder().setBucket("bucket").setName(objectName).build())
+            .build();
+    UpdateObjectRequest actual =
+        UnifiedOpts.detectContentType()
+            .extractFromBlobInfo(BlobInfo.newBuilder("bucket", objectName).build())
+            .updateObject()
+            .apply(base.toBuilder())
+            .build();
+    UpdateObjectRequest.Builder b2 = base.toBuilder();
+    b2.getObjectBuilder().setContentType(expectedContentType);
+    UpdateObjectRequest expected = b2.build();
+    assertThat(actual).isEqualTo(expected);
+  }
+
+  // Rows: object name -> expected content type. A name with no recognized extension falls back
+  // to application/octet-stream per the expectations encoded here.
+  @Parameters(name = "{0}")
+  public static Iterable data() {
+    return Arrays.asList(
+        new java.lang.Object[] {"file1.txt", "text/plain"},
+        new java.lang.Object[] {"file.Jpg", "image/jpeg"},
+        new java.lang.Object[] {"file", "application/octet-stream"});
+  }
+}
diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DurationCodecPropertyTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DurationCodecPropertyTest.java
new file mode 100644
index 000000000000..f65cc966f8ed
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DurationCodecPropertyTest.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.Conversions.Codec; +import com.google.cloud.storage.jqwik.StorageArbitraries; +import com.google.protobuf.Duration; +import net.jqwik.api.Arbitrary; +import net.jqwik.api.ArbitrarySupplier; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; + +final class DurationCodecPropertyTest { + + @Property + void timestampCodecShouldRoundTrip(@ForAll(supplier = Supp.class) Duration ts) { + Codec codec = GrpcConversions.INSTANCE.durationCodec; + java.time.Duration decode = codec.decode(ts); + Duration encode = codec.encode(decode); + + assertThat(encode).isEqualTo(ts); + } + + private static final class Supp implements ArbitrarySupplier { + @Override + public Arbitrary get() { + return StorageArbitraries.duration(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DurationsTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DurationsTest.java new file mode 100644 index 000000000000..f2c075853026 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/DurationsTest.java @@ -0,0 +1,76 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.storage;
+
+import static com.google.cloud.storage.TestUtils.assertAll;
+import static com.google.common.truth.Truth.assertThat;
+import static java.time.Duration.ZERO;
+
+import java.time.Duration;
+import org.junit.Test;
+
+/**
+ * Unit tests for the {@code Durations} comparison helpers ({@code eq}, {@code ltEq},
+ * {@code gtEq}, {@code gt}, {@code min}) over {@link java.time.Duration} values.
+ */
+public final class DurationsTest {
+
+  private static final Duration ONE_SECOND = Duration.ofSeconds(1);
+  private static final Duration TWO_SECONDS = Duration.ofSeconds(2);
+
+  // eq: true exactly when both durations are equal.
+  @Test
+  public void eq() throws Exception {
+    assertAll(
+        () -> assertThat(Durations.eq(ZERO, ZERO)).isTrue(),
+        () -> assertThat(Durations.eq(ONE_SECOND, ONE_SECOND)).isTrue(),
+        () -> assertThat(Durations.eq(ZERO, ONE_SECOND)).isFalse(),
+        () -> assertThat(Durations.eq(ONE_SECOND, ZERO)).isFalse());
+  }
+
+  // ltEq: first argument less than or equal to second.
+  @Test
+  public void ltEq() throws Exception {
+    assertAll(
+        () -> assertThat(Durations.ltEq(ZERO, ZERO)).isTrue(),
+        () -> assertThat(Durations.ltEq(ONE_SECOND, ONE_SECOND)).isTrue(),
+        () -> assertThat(Durations.ltEq(ZERO, ONE_SECOND)).isTrue(),
+        () -> assertThat(Durations.ltEq(ONE_SECOND, ZERO)).isFalse());
+  }
+
+  // gtEq: first argument greater than or equal to second.
+  @Test
+  public void gtEq() throws Exception {
+    assertAll(
+        () -> assertThat(Durations.gtEq(ZERO, ZERO)).isTrue(),
+        () -> assertThat(Durations.gtEq(ONE_SECOND, ONE_SECOND)).isTrue(),
+        () -> assertThat(Durations.gtEq(ZERO, ONE_SECOND)).isFalse(),
+        () -> assertThat(Durations.gtEq(ONE_SECOND, ZERO)).isTrue());
+  }
+
+  // gt: strictly greater than — equal values return false.
+  @Test
+  public void gt() throws Exception {
+    assertAll(
+        () -> assertThat(Durations.gt(ZERO, ZERO)).isFalse(),
+        () -> assertThat(Durations.gt(ONE_SECOND, ONE_SECOND)).isFalse(),
+        () -> assertThat(Durations.gt(ZERO, ONE_SECOND)).isFalse(),
+        () -> assertThat(Durations.gt(ONE_SECOND, ZERO)).isTrue());
+  }
+
+  // min: smallest of the provided durations; the last assertion shows it accepts more than two.
+  @Test
+  public void min() throws Exception {
+    assertAll(
+        () -> assertThat(Durations.min(ZERO, ZERO)).isEqualTo(ZERO),
+        () -> assertThat(Durations.min(ONE_SECOND, ONE_SECOND)).isEqualTo(ONE_SECOND),
+        () -> assertThat(Durations.min(ZERO, ONE_SECOND)).isEqualTo(ZERO),
+        () -> assertThat(Durations.min(ONE_SECOND, ZERO)).isEqualTo(ZERO),
+        () -> assertThat(Durations.min(ONE_SECOND, TWO_SECONDS, ZERO)).isEqualTo(ZERO));
+  }
+}
diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/FakeHttpServer.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/FakeHttpServer.java
new file mode 100644
index 000000000000..566a7765ebd5
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/FakeHttpServer.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.storage; + +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaderNames.CONNECTION; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaderValues.CLOSE; + +import com.google.api.client.http.UriTemplate; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.NoCredentials; +import com.google.cloud.storage.it.runner.registry.Registry; +import io.grpc.netty.shaded.io.netty.bootstrap.ServerBootstrap; +import io.grpc.netty.shaded.io.netty.buffer.ByteBuf; +import io.grpc.netty.shaded.io.netty.channel.Channel; +import io.grpc.netty.shaded.io.netty.channel.ChannelFuture; +import io.grpc.netty.shaded.io.netty.channel.ChannelFutureListener; +import io.grpc.netty.shaded.io.netty.channel.ChannelHandlerContext; +import io.grpc.netty.shaded.io.netty.channel.ChannelInitializer; +import io.grpc.netty.shaded.io.netty.channel.ChannelOption; +import io.grpc.netty.shaded.io.netty.channel.ChannelPipeline; +import io.grpc.netty.shaded.io.netty.channel.EventLoopGroup; +import io.grpc.netty.shaded.io.netty.channel.SimpleChannelInboundHandler; +import io.grpc.netty.shaded.io.netty.channel.nio.NioEventLoopGroup; +import io.grpc.netty.shaded.io.netty.channel.socket.SocketChannel; +import io.grpc.netty.shaded.io.netty.channel.socket.nio.NioServerSocketChannel; +import io.grpc.netty.shaded.io.netty.handler.codec.http.FullHttpResponse; +import io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaders; +import io.grpc.netty.shaded.io.netty.handler.codec.http.HttpObjectAggregator; +import io.grpc.netty.shaded.io.netty.handler.codec.http.HttpRequest; +import io.grpc.netty.shaded.io.netty.handler.codec.http.HttpServerCodec; +import io.grpc.netty.shaded.io.netty.handler.codec.http.HttpServerExpectContinueHandler; +import io.grpc.netty.shaded.io.netty.handler.logging.LogLevel; +import 
io.grpc.netty.shaded.io.netty.handler.logging.LoggingHandler; +import java.net.InetSocketAddress; +import java.net.URI; +import java.time.Duration; +import java.util.Map; + +final class FakeHttpServer implements AutoCloseable { + + private final URI endpoint; + private final Channel channel; + private final Runnable shutdown; + private final HttpStorageOptions httpStorageOptions; + + private FakeHttpServer( + URI endpoint, Channel channel, Runnable shutdown, HttpStorageOptions httpStorageOptions) { + this.endpoint = endpoint; + this.channel = channel; + this.shutdown = shutdown; + this.httpStorageOptions = httpStorageOptions; + } + + /** + * overload which calls {@link #createUri(String, Map, boolean)} with {@code createUri(template, + * params, false)} + */ + public URI createUri(String template, Map params) { + return createUri(template, params, false); + } + + /** Decorator for {@link UriTemplate#expand(String, String, Object, boolean)} */ + public URI createUri( + String template, Map params, boolean addUnusedParamsAsQueryParams) { + String expand = + UriTemplate.expand(endpoint.toString(), template, params, addUnusedParamsAsQueryParams); + return URI.create(expand); + } + + public HttpStorageOptions getHttpStorageOptions() { + return httpStorageOptions; + } + + @Override + public void close() throws Exception { + shutdown.run(); + channel.closeFuture().syncUninterruptibly(); + } + + static FakeHttpServer of(HttpRequestHandler server) { + return of(server, true); + } + + static FakeHttpServer of(HttpRequestHandler server, boolean trailingSlash) { + // based on + // https://github.com/netty/netty/blob/59aa6e635b9996cf21cd946e64353270679adc73/example/src/main/java/io/netty/example/http/helloworld/HttpHelloWorldServer.java + InetSocketAddress address = new InetSocketAddress("localhost", 0); + // Configure the server. 
+ EventLoopGroup bossGroup = new NioEventLoopGroup(1); + EventLoopGroup workerGroup = new NioEventLoopGroup(); + ServerBootstrap b = new ServerBootstrap(); + b.option(ChannelOption.SO_BACKLOG, 1024); + b.group(bossGroup, workerGroup) + .channel(NioServerSocketChannel.class) + .handler(new LoggingHandler(LogLevel.DEBUG)) + .childHandler( + new ChannelInitializer() { + @Override + protected void initChannel(SocketChannel ch) { + ChannelPipeline p = ch.pipeline(); + p.addLast(new HttpServerCodec()); + // Accept a request and content up to 100 MiB + // If we don't do this, sometimes the ordering on the wire will result in the server + // rejecting the request before the client has finished sending. + // While our client can handle this scenario and retry, it makes assertions more + // difficult due to the variability of request counts. + p.addLast(new HttpObjectAggregator(100 * 1024 * 1024)); + p.addLast(new HttpServerExpectContinueHandler()); + p.addLast(new Handler(server)); + } + }); + + Channel channel = b.bind(address).syncUninterruptibly().channel(); + + InetSocketAddress socketAddress = (InetSocketAddress) channel.localAddress(); + String suffix = trailingSlash ? "/" : ""; + URI endpoint = URI.create("http://localhost:" + socketAddress.getPort() + suffix); + HttpStorageOptions httpStorageOptions = + HttpStorageOptions.http() + .setHost(endpoint.toString()) + .setProjectId("test-proj") + .setCredentials(NoCredentials.getInstance()) + .setOpenTelemetry(Registry.getInstance().otelSdk.get().get()) + // cut most retry settings by half. we're hitting an in process server. 
+ .setRetrySettings( + RetrySettings.newBuilder() + .setTotalTimeoutDuration(Duration.ofSeconds(25)) + .setInitialRetryDelayDuration(Duration.ofMillis(250)) + .setRetryDelayMultiplier(1.2) + .setMaxRetryDelayDuration(Duration.ofSeconds(16)) + .setMaxAttempts(6) + .setInitialRpcTimeoutDuration(Duration.ofSeconds(25)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofSeconds(25)) + .build()) + .build(); + return new FakeHttpServer( + endpoint, + channel, + () -> { + bossGroup.shutdownGracefully(); + workerGroup.shutdownGracefully(); + }, + httpStorageOptions); + } + + interface HttpRequestHandler { + FullHttpResponse apply(HttpRequest req) throws Exception; + } + + /** + * Based on + * https://github.com/netty/netty/blob/59aa6e635b9996cf21cd946e64353270679adc73/example/src/main/java/io/netty/example/http/helloworld/HttpHelloWorldServerHandler.java + */ + private static final class Handler extends SimpleChannelInboundHandler { + + private final HttpRequestHandler server; + + private Handler(HttpRequestHandler server) { + this.server = server; + } + + @Override + public void channelReadComplete(ChannelHandlerContext ctx) { + ctx.flush(); + } + + @Override + protected void channelRead0(ChannelHandlerContext ctx, HttpRequest req) throws Exception { + FullHttpResponse resp = server.apply(req); + HttpHeaders headers = resp.headers(); + if (!headers.contains(CONTENT_LENGTH)) { + ByteBuf content = resp.content(); + headers.setInt(CONTENT_LENGTH, content.readableBytes()); + } + headers.set(CONNECTION, CLOSE); + ChannelFuture future = ctx.writeAndFlush(resp); + future.addListener(ChannelFutureListener.CLOSE); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + cause.printStackTrace(); + ctx.close(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/FakeServer.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/FakeServer.java new file mode 
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import com.google.api.gax.retrying.RetrySettings;
import com.google.cloud.NoCredentials;
import com.google.cloud.storage.it.GrpcPlainRequestLoggingInterceptor;
import com.google.cloud.storage.it.runner.registry.Registry;
import com.google.storage.v2.StorageGrpc;
import com.google.storage.v2.StorageSettings;
import io.grpc.Server;
import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.time.Duration;
import java.util.Locale;
import java.util.concurrent.TimeUnit;

/**
 * In-process gRPC server hosting a scripted {@link StorageGrpc.StorageImplBase}, paired with
 * {@link GrpcStorageOptions} already pointed at its ephemeral local port.
 */
final class FakeServer implements AutoCloseable {

  private final Server server;
  private final GrpcStorageOptions grpcStorageOptions;

  FakeServer(Server server, GrpcStorageOptions grpcStorageOptions) {
    this.server = server;
    this.grpcStorageOptions = grpcStorageOptions;
  }

  GrpcStorageOptions getGrpcStorageOptions() {
    return grpcStorageOptions;
  }

  StorageSettings storageSettings() throws IOException {
    return grpcStorageOptions.getStorageSettings();
  }

  @Override
  public void close() throws InterruptedException {
    // force shutdown; give in-flight calls up to 10s to drain
    server.shutdownNow().awaitTermination(10, TimeUnit.SECONDS);
  }

  /** Bind {@code service} on an ephemeral localhost port and start it. */
  static FakeServer of(StorageGrpc.StorageImplBase service) throws IOException {
    InetSocketAddress bindAddress = new InetSocketAddress("localhost", 0);
    Server grpcServer = NettyServerBuilder.forAddress(bindAddress).addService(service).build();
    grpcServer.start();
    String endpoint =
        String.format(Locale.US, "%s:%d", bindAddress.getHostString(), grpcServer.getPort());
    GrpcStorageOptions options =
        StorageOptions.grpc()
            .setHost("http://" + endpoint)
            .setProjectId("test-proj")
            .setCredentials(NoCredentials.getInstance())
            .setGrpcInterceptorProvider(GrpcPlainRequestLoggingInterceptor.getInterceptorProvider())
            .setEnableGrpcClientMetrics(false)
            .setAttemptDirectPath(false)
            .setOpenTelemetry(Registry.getInstance().otelSdk.get().get())
            // cut most retry settings by half. we're hitting an in process server.
            .setRetrySettings(
                RetrySettings.newBuilder()
                    .setTotalTimeoutDuration(Duration.ofSeconds(25))
                    .setInitialRetryDelayDuration(Duration.ofMillis(250))
                    .setRetryDelayMultiplier(1.2)
                    .setMaxRetryDelayDuration(Duration.ofSeconds(16))
                    .setMaxAttempts(6)
                    .setInitialRpcTimeoutDuration(Duration.ofSeconds(1))
                    .setRpcTimeoutMultiplier(1.0)
                    .setMaxRpcTimeoutDuration(Duration.ofSeconds(25))
                    .build())
            .build();
    return new FakeServer(grpcServer, options);
  }
}
/*
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.cloud.storage.TestUtils.xxd;
import static com.google.common.truth.Truth.assertThat;

import com.google.api.core.SettableApiFuture;
import com.google.api.gax.rpc.ApiCallContext;
import com.google.api.gax.rpc.ResponseObserver;
import com.google.api.gax.rpc.ServerStreamingCallable;
import com.google.cloud.storage.GrpcUtils.ZeroCopyServerStreamingCallable;
import com.google.cloud.storage.Retrying.Retrier;
import com.google.cloud.storage.it.ChecksummedTestContent;
import com.google.storage.v2.ReadObjectRequest;
import com.google.storage.v2.ReadObjectResponse;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicBoolean;
import org.junit.Test;

public final class GapicUnbufferedReadableByteChannelTest {

  /**
   * The channel must release every {@link ReadObjectResponse} it consumes; verify the lifecycle
   * manager's close callback fired after the content was fully read.
   */
  @Test
  public void ensureResponseAreClosed() throws IOException {
    ChecksummedTestContent testContent =
        ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(10));

    // flips to true exactly once, when the response handle is closed
    AtomicBoolean close = new AtomicBoolean(false);

    ResponseContentLifecycleManager<ReadObjectResponse> manager =
        resp -> ResponseContentLifecycleHandle.create(resp, () -> close.compareAndSet(false, true));

    try (GapicUnbufferedReadableByteChannel c =
        new GapicUnbufferedReadableByteChannel(
            SettableApiFuture.create(),
            new ZeroCopyServerStreamingCallable<>(
                // canned callable: emit a single response carrying testContent, then complete
                new ServerStreamingCallable<ReadObjectRequest, ReadObjectResponse>() {
                  @Override
                  public void call(
                      ReadObjectRequest request,
                      ResponseObserver<ReadObjectResponse> respond,
                      ApiCallContext context) {
                    respond.onStart(TestUtils.nullStreamController());
                    respond.onResponse(
                        ReadObjectResponse.newBuilder()
                            .setChecksummedData(testContent.asChecksummedData())
                            .build());
                    respond.onComplete();
                  }
                },
                manager),
            ReadObjectRequest.getDefaultInstance(),
            Hasher.noop(),
            Retrier.attemptOnce(),
            Retrying.neverRetry())) {

      // buffer intentionally larger than the 10 bytes of content
      ByteBuffer buffer = ByteBuffer.allocate(15);
      c.read(buffer);
      assertThat(xxd(buffer)).isEqualTo(xxd(testContent.getBytes()));
      assertThat(close.get()).isTrue();
    }
  }
}
+ */ + +package com.google.cloud.storage; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.storage.v2.StartResumableWriteRequest; +import com.google.storage.v2.StartResumableWriteResponse; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +/** + * For our {@link GapicUploadSessionBuilder} a number of items must be provided to ultimately + * construct an instance. This class enumerates some supported usage patterns whether using the + * fluent api from beginning to build or if intermediate variables must be defined along the way. + * + *

These "tests" are primarily validated at compilation time - ensuring source compatibility of + * the builders. Each builder is built which runs some minor validation - such as null checks - but + * otherwise does not have any other logic. + */ +@RunWith(MockitoJUnitRunner.class) +public final class GapicUploadSessionBuilderSyntaxTest { + + private final WriteObjectRequest req = WriteObjectRequest.getDefaultInstance(); + + // The following fields are "mocks" for simplicity’s sake. + // We need them to be non-null, but otherwise they do not matter. + // They have many dependencies which would need to be constructed in order to instantiate a + // literal instance. + @Mock private ClientStreamingCallable write; + + @Mock + private UnaryCallable + startResumableWrite; + + @Before + public void setUp() throws Exception { + when(startResumableWrite.call(any())) + .thenReturn(StartResumableWriteResponse.getDefaultInstance()); + } + + @Test + public void syntax_directUnbuffered_fluent() { + UnbufferedWritableByteChannelSession session = + ResumableMedia.gapic() + .write() + .byteChannel(write) + .setHasher(Hasher.noop()) + .setByteStringStrategy(ByteStringStrategy.noCopy()) + .direct() + .unbuffered() + .setRequest(req) + .build(); + } + + @Test + public void syntax_directBuffered_fluent() { + BufferedWritableByteChannelSession session = + ResumableMedia.gapic() + .write() + .byteChannel(write) + .setHasher(Hasher.noop()) + .setByteStringStrategy(ByteStringStrategy.noCopy()) + .direct() + .buffered() + .setRequest(req) + .build(); + } + + @Test + public void syntax_resumableUnbuffered_fluent() { + ApiFuture startAsync = + ResumableMedia.gapic() + .write() + .resumableWrite(startResumableWrite, req, Opts.empty(), RetrierWithAlg.attemptOnce()); + UnbufferedWritableByteChannelSession session = + ResumableMedia.gapic() + .write() + .byteChannel(write) + .setHasher(Hasher.noop()) + .setByteStringStrategy(ByteStringStrategy.noCopy()) + .resumable() + .unbuffered() + 
.setStartAsync(startAsync) + .build(); + } + + @Test + public void syntax_resumableBuffered_fluent() { + ApiFuture startAsync = + ResumableMedia.gapic() + .write() + .resumableWrite(startResumableWrite, req, Opts.empty(), RetrierWithAlg.attemptOnce()); + BufferedWritableByteChannelSession session = + ResumableMedia.gapic() + .write() + .byteChannel(write) + .setHasher(Hasher.noop()) + .setByteStringStrategy(ByteStringStrategy.noCopy()) + .resumable() + .buffered() + .setStartAsync(startAsync) + .build(); + } + + @Test + public void syntax_directUnbuffered_incremental() { + GapicWritableByteChannelSessionBuilder b1 = + ResumableMedia.gapic() + .write() + .byteChannel(write) + .setHasher(Hasher.noop()) + .setByteStringStrategy(ByteStringStrategy.noCopy()); + UnbufferedWritableByteChannelSession session = + b1.direct().unbuffered().setRequest(req).build(); + } + + @Test + public void syntax_directBuffered_incremental() { + GapicWritableByteChannelSessionBuilder b1 = + ResumableMedia.gapic() + .write() + .byteChannel(write) + .setHasher(Hasher.noop()) + .setByteStringStrategy(ByteStringStrategy.noCopy()); + BufferedWritableByteChannelSession session = + b1.direct().buffered().setRequest(req).build(); + } + + @Test + public void syntax_resumableUnbuffered_incremental() { + ApiFuture startAsync = + ResumableMedia.gapic() + .write() + .resumableWrite(startResumableWrite, req, Opts.empty(), RetrierWithAlg.attemptOnce()); + GapicWritableByteChannelSessionBuilder b1 = + ResumableMedia.gapic() + .write() + .byteChannel(write) + .setHasher(Hasher.noop()) + .setByteStringStrategy(ByteStringStrategy.noCopy()); + UnbufferedWritableByteChannelSession session = + b1.resumable().unbuffered().setStartAsync(startAsync).build(); + } + + @Test + public void syntax_resumableBuffered_incremental() { + ApiFuture startAsync = + ResumableMedia.gapic() + .write() + .resumableWrite(startResumableWrite, req, Opts.empty(), RetrierWithAlg.attemptOnce()); + GapicWritableByteChannelSessionBuilder 
b1 = + ResumableMedia.gapic() + .write() + .byteChannel(write) + .setHasher(Hasher.noop()) + .setByteStringStrategy(ByteStringStrategy.noCopy()); + BufferedWritableByteChannelSession session = + b1.resumable().buffered().setStartAsync(startAsync).build(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/GenerateGrpcProtobufReflectConfig.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/GenerateGrpcProtobufReflectConfig.java new file mode 100644 index 000000000000..35d5bd888b70 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/GenerateGrpcProtobufReflectConfig.java @@ -0,0 +1,89 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import com.google.protobuf.AbstractMessage;
import com.google.protobuf.ProtocolMessageEnum;
import io.github.classgraph.ClassGraph;
import io.github.classgraph.ClassInfo;
import io.github.classgraph.ScanResult;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Locale;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Generator (run manually via {@code main}) that scans the {@code io.grpc} packages for protobuf
 * message, builder, and enum classes and writes a GraalVM native-image {@code reflect-config.json}
 * into {@code src/test/resources}.
 */
public final class GenerateGrpcProtobufReflectConfig {

  public static void main(String[] args) throws IOException {
    try (ScanResult scanResult =
        new ClassGraph().enableAllInfo().acceptPackages("io.grpc").scan()) {
      String json =
          Stream.of(
                  // hand-maintained entries for commons-logging, which grpc touches reflectively;
                  // the "<init>" method name is GraalVM's convention for constructors
                  Stream.of(
                      "{\n"
                          + "    \"name\":\"org.apache.commons.logging.LogFactory\",\n"
                          + "    \"allDeclaredFields\":true,\n"
                          + "    \"allDeclaredMethods\":true,\n"
                          + "    \"allDeclaredConstructors\": true\n"
                          + "  }",
                      "{\n"
                          + "    \"name\":\"org.apache.commons.logging.impl.Jdk14Logger\",\n"
                          + "    \"methods\":[{\"name\":\"<init>\",\"parameterTypes\":[\"java.lang.String\"]"
                          + " }]\n"
                          + "  }",
                      "{\n"
                          + "    \"name\":\"org.apache.commons.logging.impl.LogFactoryImpl\",\n"
                          + "    \"allDeclaredFields\":true,\n"
                          + "    \"allDeclaredMethods\":true,\n"
                          + "    \"methods\":[{\"name\":\"<init>\",\"parameterTypes\":[] }]\n"
                          + "  }"),
                  // discovered entries: every message, builder, and proto enum under io.grpc
                  Stream.of(
                          scanResult.getSubclasses(AbstractMessage.class).stream(),
                          scanResult.getSubclasses(AbstractMessage.Builder.class).stream(),
                          scanResult
                              .getAllEnums()
                              .filter(ci -> ci.implementsInterface(ProtocolMessageEnum.class))
                              .stream())
                      .flatMap(s -> s)
                      .map(ClassInfo::getName)
                      .sorted()
                      .map(
                          name ->
                              String.format(
                                  Locale.US,
                                  "{ \"name\": \"%s\", \"queryAllDeclaredConstructors\": true,"
                                      + " \"queryAllPublicConstructors\": true,"
                                      + " \"queryAllDeclaredMethods\": true, \"allPublicMethods\":"
                                      + " true, \"allDeclaredClasses\": true, \"allPublicClasses\":"
                                      + " true }",
                                  name)))
              .flatMap(s -> s)
              .collect(Collectors.joining(",\n  ", "[\n  ", "\n]\n"));
      String workingDirectory = System.getProperty("user.dir"); // should be google-cloud-storage
      String testResourcesPath = "src/test/resources";
      String reflectConfigResourcePath =
          "META-INF/native-image/com/google/cloud/storage/reflect-config.json";
      Path path = Paths.get(workingDirectory, testResourcesPath, reflectConfigResourcePath);
      System.err.println("Writing reflect-config.json at path: " + path);
      Files.write(path, json.getBytes(StandardCharsets.UTF_8));
    }
  }
}
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.common.truth.Truth.assertThat;

import com.google.api.gax.rpc.StatusCode;
import com.google.common.collect.ImmutableSet;
import io.grpc.Status.Code;
import java.util.EnumMap;
import java.util.Set;
import org.junit.Test;

public final class GrpcToHttpStatusCodeTranslationTest {

  /** Pin the full gRPC-code -> HTTP-status mapping so any change is a deliberate one. */
  @Test
  public void grpcCodeToHttpStatusCode_expectedMapping() {
    EnumMap<Code, Integer> expected = new EnumMap<>(Code.class);
    expected.put(Code.OK, 200);
    expected.put(Code.INVALID_ARGUMENT, 400);
    expected.put(Code.OUT_OF_RANGE, 400);
    expected.put(Code.UNAUTHENTICATED, 401);
    expected.put(Code.PERMISSION_DENIED, 403);
    expected.put(Code.NOT_FOUND, 404);
    expected.put(Code.FAILED_PRECONDITION, 412);
    expected.put(Code.ALREADY_EXISTS, 409);
    expected.put(Code.RESOURCE_EXHAUSTED, 429);
    expected.put(Code.INTERNAL, 500);
    expected.put(Code.UNIMPLEMENTED, 501);
    expected.put(Code.UNAVAILABLE, 503);
    expected.put(Code.ABORTED, 409);
    expected.put(Code.CANCELLED, 0);
    expected.put(Code.UNKNOWN, 0);
    expected.put(Code.DEADLINE_EXCEEDED, 504);
    expected.put(Code.DATA_LOSS, 400);

    // iterate Code.values() so a newly added gRPC code fails the comparison too
    EnumMap<Code, Integer> actual = new EnumMap<>(Code.class);
    for (Code c : Code.values()) {
      actual.put(c, GrpcToHttpStatusCodeTranslation.grpcCodeToHttpStatusCode(c));
    }

    assertThat(actual).isEqualTo(expected);
  }

  @Test
  public void resultRetryAlgorithmToCodes_idempotent() {
    StorageRetryStrategy strategy = StorageRetryStrategy.getDefaultStorageRetryStrategy();

    Set<StatusCode.Code> actual =
        GrpcToHttpStatusCodeTranslation.resultRetryAlgorithmToCodes(
            strategy.getIdempotentHandler());

    ImmutableSet<StatusCode.Code> expected =
        ImmutableSet.of(
            StatusCode.Code.UNAVAILABLE,
            StatusCode.Code.DEADLINE_EXCEEDED,
            StatusCode.Code.INTERNAL,
            StatusCode.Code.RESOURCE_EXHAUSTED);
    assertThat(actual).isEqualTo(expected);
  }

  @Test
  public void resultRetryAlgorithmToCodes_nonIdempotent() {
    StorageRetryStrategy strategy = StorageRetryStrategy.getDefaultStorageRetryStrategy();

    Set<StatusCode.Code> actual =
        GrpcToHttpStatusCodeTranslation.resultRetryAlgorithmToCodes(
            strategy.getNonidempotentHandler());

    // non-idempotent operations must never be retried on a status code alone
    assertThat(actual).isEqualTo(ImmutableSet.of());
  }
}
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.common.truth.Truth.assertThat;

import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.api.gax.paging.AbstractPage;
import com.google.api.gax.retrying.ResultRetryAlgorithm;
import com.google.api.gax.retrying.TimedAttemptSettings;
import com.google.api.gax.rpc.ApiCallContext;
import com.google.api.gax.rpc.PageContext;
import com.google.api.gax.rpc.PagedListDescriptor;
import com.google.api.gax.rpc.UnaryCallable;
import com.google.cloud.storage.GrpcStorageImpl.TransformingPageDecorator;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Streams;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.CancellationException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import org.junit.Test;
import org.mockito.Mockito;

public class GrpcTransformPageDecoratorTest {

  // define some common data used across our tests
  private final ApiCallContext apiCallContext = Mockito.mock(ApiCallContext.class);

  // Initial values for the first page
  private final ImmutableList<String> page1 = ImmutableList.of("string1", "string2", "string3");
  // Resources which will be returned for the second page
  private final ImmutableList<String> page2 = ImmutableList.of("string4");
  // Resources which will be returned for the third page
  private final ImmutableList<String> page3 = ImmutableList.of("string5");

  // Expected values after the translation
  private final List<String> expectedValues =
      Streams.concat(page1.stream(), page2.stream(), page3.stream())
          .map(String::toUpperCase)
          .collect(ImmutableList.toImmutableList());

  // request/response pairs forming a three-page chain: req1 -> resp1(token req2) -> ... -> resp3
  private final Req req1 = new Req("req1");
  private final Resp resp1 = new Resp("req2", page1);
  private final Req req2 = new Req("req2");
  private final Resp resp2 = new Resp("req3", page2);
  private final Req req3 = new Req("req3");
  private final Resp resp3 = new Resp(null, page3);
  private final ImmutableMap<Req, Resp> data =
      ImmutableMap.of(
          req1, resp1,
          req2, resp2,
          req3, resp3);

  @Test
  public void valueTranslationTest() {
    UnaryCallable<Req, Resp> callable = new DataDrivenCallable(data);
    ReqRespDescriptor descriptor = new ReqRespDescriptor();
    PageContext<Req, Resp, String> context =
        PageContext.create(callable, descriptor, req1, apiCallContext);
    ReqRespPage page = new ReqRespPage(context, resp1);
    TransformingPageDecorator<Req, Resp, String, ReqRespPage, String> decorator =
        new TransformingPageDecorator<>(
            page,
            String::toUpperCase,
            TestUtils.defaultRetrier(),
            StorageRetryStrategy.getUniformStorageRetryStrategy().getIdempotentHandler());

    assertThat(ImmutableList.copyOf(decorator.getValues().iterator()))
        .containsExactlyElementsIn(
            page1.stream().map(String::toUpperCase).collect(Collectors.toList()));
    assertThat(ImmutableList.copyOf(decorator.iterateAll().iterator()))
        .containsExactlyElementsIn(expectedValues);
  }

  @Test
  public void retryWorks() {
    FailureInducingCallable callable = new FailureInducingCallable(new DataDrivenCallable(data));

    ShouldRetryExceptionAlgorithm alg = new ShouldRetryExceptionAlgorithm();

    ReqRespDescriptor descriptor = new ReqRespDescriptor();
    PageContext<Req, Resp, String> context =
        PageContext.create(callable, descriptor, req1, apiCallContext);
    ReqRespPage page = new ReqRespPage(context, resp1);
    TransformingPageDecorator<Req, Resp, String, ReqRespPage, String> decorator =
        new TransformingPageDecorator<>(page, String::toUpperCase, TestUtils.defaultRetrier(), alg);

    ImmutableList<String> actual = ImmutableList.copyOf(decorator.iterateAll().iterator());
    assertThat(actual).containsExactlyElementsIn(expectedValues);
    // sometimes shouldRetry is called multiple times, not totally sure why
    assertThat(alg.shouldRetryCallCount.get()).isAtLeast(1);
    // three RPC attempts total: the first fails and is retried, the following two succeed
    assertThat(callable.callableCallCount.get()).isEqualTo(3);
  }

  private static class ReqRespPage extends AbstractPage<Req, Resp, String, ReqRespPage> {

    private final Resp response;

    public ReqRespPage(PageContext<Req, Resp, String> context, Resp response) {
      super(context, response);
      this.response = response;
    }

    @Override
    public ReqRespPage createPage(PageContext<Req, Resp, String> context, Resp response) {
      return new ReqRespPage(context, response);
    }

    @Override
    public String toString() {
      return MoreObjects.toStringHelper(this).add("response", response).toString();
    }
  }

  /** Teaches gax how to thread page tokens and page sizes through {@link Req}/{@link Resp}. */
  private static class ReqRespDescriptor implements PagedListDescriptor<Req, Resp, String> {

    @Override
    public String emptyToken() {
      return "";
    }

    @Override
    public Req injectToken(Req payload, String token) {
      return payload.withToken(token);
    }

    @Override
    public Req injectPageSize(Req payload, int pageSize) {
      return payload.withPageSize(pageSize);
    }

    @Override
    public Integer extractPageSize(Req payload) {
      return payload.pageSize;
    }

    @Override
    public String extractNextToken(Resp payload) {
      return payload.nextToken != null ? payload.nextToken : emptyToken();
    }

    @Override
    public Iterable<String> extractResources(Resp payload) {
      return payload.resources;
    }
  }

  /**
   * Provide an immutable map of {@link Req} to {@link Resp}, when {@link #futureCall(Req,
   * ApiCallContext)} is invoked attempt to retrieve the Resp from the provided map, if it's present
   * return it, if not return an exception.
   */
  private static class DataDrivenCallable extends UnaryCallable<Req, Resp> {
    private final ImmutableMap<Req, Resp> data;

    private DataDrivenCallable(ImmutableMap<Req, Resp> data) {
      this.data = data;
    }

    @Override
    public ApiFuture<Resp> futureCall(Req request, ApiCallContext context) {
      if (data.containsKey(request)) {
        return ApiFutures.immediateFuture(data.get(request));
      }
      return ApiFutures.immediateFailedFuture(
          new IllegalStateException("No matching response for request: " + request));
    }
  }

  /** Value-type request: a page token plus a page size; equality drives the data map lookup. */
  private static final class Req {
    private final String token;
    private final int pageSize;

    Req(String token) {
      this(token, 5);
    }

    Req(String token, int pageSize) {
      this.token = token;
      this.pageSize = pageSize;
    }

    Req withToken(String t) {
      return new Req(t, pageSize);
    }

    Req withPageSize(int i) {
      return new Req(token, i);
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (!(o instanceof Req)) {
        return false;
      }
      Req req = (Req) o;
      return pageSize == req.pageSize && Objects.equals(token, req.token);
    }

    @Override
    public int hashCode() {
      return Objects.hash(token, pageSize);
    }

    @Override
    public String toString() {
      return MoreObjects.toStringHelper(this)
          .add("token", token)
          .add("pageSize", pageSize)
          .toString();
    }
  }

  /** Response: the resources for one page plus the token of the next page (null = last page). */
  private static final class Resp {
    private final String nextToken;
    private final ImmutableList<String> resources;

    public Resp(String nextToken, ImmutableList<String> resources) {
      this.resources = resources;
      this.nextToken = nextToken;
    }

    @Override
    public String toString() {
      return MoreObjects.toStringHelper(this)
          .add("nextToken", nextToken)
          .add("resources", resources)
          .toString();
    }
  }

  private static final class ShouldRetryException extends RuntimeException {}

  /** Retries only on {@link ShouldRetryException}, counting how often it is consulted. */
  private static final class ShouldRetryExceptionAlgorithm implements ResultRetryAlgorithm<Object> {
    private final AtomicInteger shouldRetryCallCount;

    public ShouldRetryExceptionAlgorithm() {
      shouldRetryCallCount = new AtomicInteger(0);
    }

    @Override
    public TimedAttemptSettings createNextAttempt(
        Throwable prevThrowable, Object prevResponse, TimedAttemptSettings prevSettings) {
      return null;
    }

    @Override
    public boolean shouldRetry(Throwable prevThrowable, Object prevResponse)
        throws CancellationException {
      shouldRetryCallCount.incrementAndGet();
      return TestUtils.findThrowable(ShouldRetryException.class, prevThrowable) != null;
    }
  }

  /** Delegating callable that throws {@link ShouldRetryException} on the first invocation only. */
  private static final class FailureInducingCallable extends UnaryCallable<Req, Resp> {

    private final AtomicInteger callableCallCount;
    private final UnaryCallable<Req, Resp> delegate;

    FailureInducingCallable(UnaryCallable<Req, Resp> delegate) {
      this.delegate = delegate;
      this.callableCallCount = new AtomicInteger(0);
    }

    @Override
    public ApiFuture<Resp> futureCall(Req request, ApiCallContext context) {
      int callCount = callableCallCount.getAndIncrement();
      if (callCount == 0) {
        throw new ShouldRetryException();
      }
      return delegate.futureCall(request, context);
    }
  }
}
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.storage.Conversions.Codec; +import java.io.IOException; +import java.math.BigInteger; +import java.util.Collections; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.junit.Test; + +public final class GrpcUtilsTest { + + @Test + public void closeAll_noNpeIfNullStream() throws IOException { + GrpcUtils.closeAll(Collections.singletonList(null)); + } + + @Test + public void projectNumberResourceCodec_simple() { + Codec<@NonNull BigInteger, @NonNull String> codec = Utils.projectNumberResourceCodec; + + String encode = codec.encode(new BigInteger("34567892123")); + assertThat(encode).isEqualTo("projects/34567892123"); + + BigInteger decode = codec.decode(encode); + assertThat(decode).isEqualTo(new BigInteger("34567892123")); + } + + @Test + public void projectNumberResourceCodec_decode_illegalArgumentException_whenUnParsable() { + String bad = "not-a-projects/123081892932"; + IllegalArgumentException iae = + assertThrows( + IllegalArgumentException.class, () -> Utils.projectNumberResourceCodec.decode(bad)); + + assertThat(iae).hasMessageThat().contains(bad); + } + + @Test + public void projectNumberResourceCodec_decode_nonNull() { + assertThrows(NullPointerException.class, () -> Utils.projectNumberResourceCodec.decode(null)); + } + + @Test + public void projectNumberResourceCodec_encode_nonNull() { + assertThrows(NullPointerException.class, () -> Utils.projectNumberResourceCodec.encode(null)); + } + + @Test + public void projectNumberResourceCodec_decode_notProjectNumber() { + String bad = "projects/not-a-number"; + assertThrows(NumberFormatException.class, () -> Utils.projectNumberResourceCodec.decode(bad)); + } +} diff --git 
a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/HttpContentRangeTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/HttpContentRangeTest.java new file mode 100644 index 000000000000..3e4764acb6fd --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/HttpContentRangeTest.java @@ -0,0 +1,48 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.HttpContentRange.parse; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import java.util.Locale; +import java.util.Locale.Category; +import org.junit.Test; + +public final class HttpContentRangeTest { + + @Test + public void localeDoesNotImpactThings() throws Exception { + // can fail on java11+ + // https://docs.oracle.com/javase/tutorial/i18n/locale/scope.html + Locale before = Locale.getDefault(Category.FORMAT); + try { + // arabic local, also RTL instead of LTR + Locale ar = Locale.forLanguageTag("ar"); + Locale.setDefault(Category.FORMAT, ar); + assertAll( + () -> assertThat(parse("bytes 0-9/9").getHeaderValue()).isEqualTo("bytes 0-9/9"), + () -> assertThat(parse("bytes 0-9/*").getHeaderValue()).isEqualTo("bytes 0-9/*"), + () -> assertThat(parse("bytes */9").getHeaderValue()).isEqualTo("bytes */9"), + () -> assertThat(parse("bytes 
*/*").getHeaderValue()).isEqualTo("bytes */*")); + } finally { + Locale.setDefault(Category.FORMAT, before); + assertThat(Locale.getDefault(Category.FORMAT)).isEqualTo(before); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/IOAutoCloseableTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/IOAutoCloseableTest.java new file mode 100644 index 000000000000..f82e1da975d7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/IOAutoCloseableTest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertWithMessage; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Test; + +public class IOAutoCloseableTest { + + @Test + public void andThenCorrectlyOrdered() throws Exception { + + AtomicInteger counter = new AtomicInteger(1); + + TestIOAutoClosable t1 = TestIOAutoClosable.of(counter); + TestIOAutoClosable t2 = TestIOAutoClosable.of(counter); + TestIOAutoClosable t3 = TestIOAutoClosable.of(counter); + + final IOAutoCloseable then = t1.andThen(t2).andThen(t3); + + then.close(); + + assertAll( + () -> assertWithMessage("t1.closeValue").that(t1.closeValue).isEqualTo(1), + () -> assertWithMessage("t2.closeValue").that(t2.closeValue).isEqualTo(2), + () -> assertWithMessage("t3.closeValue").that(t3.closeValue).isEqualTo(3)); + } + + static final class TestIOAutoClosable implements IOAutoCloseable { + private final AtomicInteger counter; + private long closeValue; + + private TestIOAutoClosable(AtomicInteger counter) { + this.counter = counter; + } + + @Override + public void close() throws IOException { + if (closeValue == 0) { + closeValue = counter.getAndIncrement(); + } + } + + private static TestIOAutoClosable of(AtomicInteger counter) { + return new TestIOAutoClosable(counter); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadFakeTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadFakeTest.java new file mode 100644 index 000000000000..78d07c4d9425 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadFakeTest.java @@ -0,0 +1,1232 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with 
the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.BidiUploadTestUtils.makeRedirect; +import static com.google.cloud.storage.BidiUploadTestUtils.packRedirectIntoAbortedException; +import static com.google.cloud.storage.BidiUploadTestUtils.timestampNow; +import static com.google.cloud.storage.ByteSizeConstants._2MiB; +import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.AbortedException; +import com.google.cloud.storage.BidiUploadState.AppendableUploadState; +import com.google.cloud.storage.BlobAppendableUpload.AppendableUploadWriteableByteChannel; +import com.google.cloud.storage.BlobAppendableUploadConfig.CloseAction; +import com.google.cloud.storage.Storage.BlobField; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Message; +import com.google.protobuf.TextFormat; +import com.google.rpc.Code; +import com.google.rpc.DebugInfo; +import com.google.storage.v2.AppendObjectSpec; +import com.google.storage.v2.BidiWriteHandle; +import 
com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.BucketName; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.GetObjectRequest; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.StorageGrpc; +import com.google.storage.v2.WriteObjectSpec; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.StreamObserver; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Collection; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.Ignore; +import org.junit.Test; + +public class ITAppendableUploadFakeTest { + private static final byte[] ALL_OBJECT_BYTES = DataGenerator.base64Characters().genBytes(64); + + private static final Object METADATA = + Object.newBuilder() + .setBucket(BucketName.format("_", "b")) + .setName("o") + .setGeneration(1) + .setSize(_2MiB) + .build(); + private static final BidiWriteObjectRequest REQ_OPEN = + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource( + Object.newBuilder() + .setBucket(METADATA.getBucket()) + .setName(METADATA.getName())) + .setAppendable(true) + .build()) + .setChecksummedData( + ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("ABCDE")).build()) + .build(); + + private static final BlobAppendableUploadConfig UPLOAD_CONFIG = + 
BlobAppendableUploadConfig.of() + .withFlushPolicy(FlushPolicy.maxFlushSize(3)) + .withCloseAction(CloseAction.FINALIZE_WHEN_CLOSING); + + private static final ChecksummedTestContent content = + ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 10); + private static final ObjectChecksums checksums = + ObjectChecksums.newBuilder().setCrc32C(content.getCrc32c()).build(); + private static final BidiWriteObjectRequest flushLookup = + BidiWriteObjectRequest.newBuilder().setFlush(true).setStateLookup(true).build(); + private static final BidiWriteObjectRequest abc = incrementalRequest(0, "ABC"); + private static final BidiWriteObjectRequest def = incrementalRequest(3, "DEF"); + private static final BidiWriteObjectRequest ghi = incrementalRequest(6, "GHI"); + private static final BidiWriteObjectRequest j = incrementalRequest(9, "J"); + private static final BidiWriteObjectRequest j_flush = + j.toBuilder().mergeFrom(flushLookup).build(); + private static final BidiWriteObjectRequest j_finish = + j.toBuilder().setFinishWrite(true).setObjectChecksums(checksums).build(); + private static final BidiWriteObjectRequest finish_10 = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(10) + .setFinishWrite(true) + .setObjectChecksums(checksums) + .build(); + + private static final BidiWriteObjectRequest open_abc = + REQ_OPEN.toBuilder().mergeFrom(abc).build(); + private static final BidiWriteObjectResponse res_abc = + BidiWriteObjectResponse.newBuilder() + .setResource( + Object.newBuilder() + .setName(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setGeneration(METADATA.getGeneration()) + .setSize(3) + .setChecksums( + ObjectChecksums.newBuilder() + .setCrc32C(content.slice(0, 3).getCrc32c()) + .build()) + // real object would have some extra fields like metageneration and storage + // class + .build()) + .build(); + private static final BidiWriteObjectRequest reconnect = + BidiWriteObjectRequest.newBuilder() + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + 
.setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .setGeneration(METADATA.getGeneration()) + .build()) + .setStateLookup(true) + .build(); + private static final BidiWriteObjectResponse resource_10 = + BidiWriteObjectResponse.newBuilder() + .setResource( + Object.newBuilder() + .setName(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setGeneration(METADATA.getGeneration()) + .setSize(10) + .setChecksums(checksums) + .setFinalizeTime(timestampNow()) + // real object would have some extra fields like metageneration and storage + // class + .build()) + .build(); + public static final GetObjectRequest get_generation_mask = + GetObjectRequest.newBuilder() + .setObject(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setReadMask(FieldMask.newBuilder().addPaths(BlobField.GENERATION.getGrpcName()).build()) + .build(); + + private static final ChunkSegmenter smallSegmenter = + new ChunkSegmenter(Hasher.enabled(), ByteStringStrategy.copy(), 3, 3); + + @Test + public void bidiWriteObjectRedirectedError_maxAttempts() throws Exception { + String routingToken1 = "routingToken1"; + String routingToken2 = "routingToken2"; + String routingToken3 = "routingToken3"; + String routingToken4 = "routingToken4"; + String routingToken5 = "routingToken5"; + + BidiWriteHandle writeHandle = + BidiWriteHandle.newBuilder() + .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) + .build(); + + BidiWriteObjectRequest redirectReconcile = + BidiWriteObjectRequest.newBuilder() + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .setGeneration(METADATA.getGeneration()) + .setWriteHandle(writeHandle) + .build()) + .setStateLookup(true) + .build(); + + BidiWriteObjectRequest redirectRequest1 = + BidiUploadTestUtils.withRedirectToken(redirectReconcile, routingToken1); + BidiWriteObjectRequest redirectRequest2 = + BidiUploadTestUtils.withRedirectToken(redirectReconcile, 
routingToken2); + BidiWriteObjectRequest redirectRequest3 = + BidiUploadTestUtils.withRedirectToken(redirectReconcile, routingToken3); + BidiWriteObjectRequest redirectRequest4 = + BidiUploadTestUtils.withRedirectToken(redirectReconcile, routingToken4); + + AtomicInteger redirectCounter = new AtomicInteger(); + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + BidiUploadTestUtils.withFlushAndStateLookup(open_abc), + respond -> { + BidiWriteObjectResponse.Builder b = res_abc.toBuilder(); + b.setWriteHandle(writeHandle); + BidiWriteObjectResponse resAbcWithHandle = b.build(); + respond.onNext(resAbcWithHandle); + }, + BidiUploadTestUtils.withFlushAndStateLookup(def), + respond -> { + redirectCounter.getAndIncrement(); + respond.onError(packRedirectIntoAbortedException(makeRedirect(routingToken1))); + }, + redirectRequest1, + respond -> { + redirectCounter.getAndIncrement(); + respond.onError(packRedirectIntoAbortedException(makeRedirect(routingToken2))); + }, + redirectRequest2, + respond -> { + redirectCounter.getAndIncrement(); + respond.onError(packRedirectIntoAbortedException(makeRedirect(routingToken3))); + }, + redirectRequest3, + respond -> { + redirectCounter.getAndIncrement(); + respond.onError(packRedirectIntoAbortedException(makeRedirect(routingToken4))); + }, + redirectRequest4, + respond -> { + redirectCounter.getAndIncrement(); + respond.onError(packRedirectIntoAbortedException(makeRedirect(routingToken5))); + })); + + try (FakeServer fakeServer = FakeServer.of(fake); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setRetrySettings( + fakeServer.getGrpcStorageOptions().getRetrySettings().toBuilder() + .setRetryDelayMultiplier(1.0) + .setInitialRetryDelayDuration(Duration.ofMillis(10)) + .build()) + .build() + .getService()) { + + BlobId id = BlobId.of("b", "o"); + BlobAppendableUploadConfig config = + BlobAppendableUploadConfig.of() + .withFlushPolicy(FlushPolicy.maxFlushSize(3)) + 
.withCloseAction(CloseAction.CLOSE_WITHOUT_FINALIZING); + BlobAppendableUpload b = + storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), config); + IOException ioe = + assertThrows( + IOException.class, + () -> { + AppendableUploadWriteableByteChannel channel = b.open(); + ByteBuffer wrap = ByteBuffer.wrap(content.getBytes()); + Buffers.emptyTo(wrap, channel); + channel.close(); + }); + + assertAll( + () -> assertThat(redirectCounter.get()).isEqualTo(4), + () -> { + ExecutionException ee = + assertThrows( + ExecutionException.class, () -> b.getResult().get(3, TimeUnit.SECONDS)); + assertThat(ee).hasCauseThat().isInstanceOf(StorageException.class); + assertThat(ee).hasCauseThat().hasCauseThat().isInstanceOf(AbortedException.class); + }, + () -> assertThat(ioe).hasCauseThat().isInstanceOf(StorageException.class), + () -> assertThat(ioe).hasCauseThat().hasCauseThat().isInstanceOf(AbortedException.class)); + } + } + + /** + * We use a small segmenter (3 byte segments) and flush "ABCDEFGHIJ". We make sure that this + * resolves to segments of "ABC"/"DEF"/"GHI"/"J". + */ + @Test + public void testFlushMultipleSegments() throws Exception { + + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + open_abc, + respond -> {}, + def, + respond -> {}, + ghi, + respond -> {}, + j_flush, + respond -> respond.onNext(incrementalResponse(10)), + finish_10, + respond -> { + respond.onNext(resource_10); + respond.onCompleted(); + })); + + runTestFlushMultipleSegments(fake); + } + + /** + * We use a small segmenter and flush "ABCDEFGHIJ", which will resolve to "ABC"/"DEF"/"GHI"/"J". + * While flushing "GHI" we get a retryable error. We make sure that the retry loop handles + * skipping the already-ack'd messages (i.e. "ABC" and "DEF") by using a map to count how many + * times the fake server sees those messages, and throwing an error if it sees them more than + * once. 
+ */ + @Test + public void testFlushMultipleSegments_failsHalfway() throws Exception { + Map map = new ConcurrentHashMap<>(); + Consumer> finish10Respond = + maxRetries(j_finish, resource_10, map, 1); + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + open_abc, + maxRetries(open_abc, res_abc, map, 1), + def, + maxRetries(def, map, 1), + ghi, + retryableErrorOnce(ghi, map, 2), + reconnect, + maxRetries(reconnect, incrementalResponse(6), map, 1), + j_finish, + respond -> { + finish10Respond.accept(respond); + respond.onCompleted(); + }), + ImmutableMap.of( + get_generation_mask, + Object.newBuilder().setGeneration(METADATA.getGeneration()).build())); + + runTestFlushMultipleSegments(fake); + + assertThat(map) + .isEqualTo( + ImmutableMap.of( + open_abc, 1, + def, 1, + ghi, 2, + reconnect, 1, + j_finish, 1)); + } + + /** + * We use a small segmenter and flush "ABCDEFGHIJ", which will resolve to "ABC"/"DEF"/"GHI"/"J" + * While flushing "GHI" we get a retryable error, and the response on the reconnect indicates that + * there was a partial flush (i.e. only "G" got flushed). The retry loop handles skipping the "G" + * and only sending "HI", and updating the offsets accordingly. 
+ */ + @Test + @Ignore("messages splitting") + public void testFlushMultipleSegments_failsHalfway_partialFlush() throws Exception { + BidiWriteHandle writeHandle = + BidiWriteHandle.newBuilder() + .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) + .build(); + + ChunkSegmenter smallSegmenter = + new ChunkSegmenter(Hasher.enabled(), ByteStringStrategy.copy(), 3, 3); + + BidiWriteObjectRequest req1 = + REQ_OPEN.toBuilder() + .setChecksummedData( + ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("ABC"))) + .build(); + + BidiWriteObjectResponse res1 = + BidiWriteObjectResponse.newBuilder() + .setResource( + Object.newBuilder() + .setName(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setGeneration(METADATA.getGeneration()) + .setSize(3) + // real object would have some extra fields like metageneration and storage + // class + .build()) + .setWriteHandle(writeHandle) + .build(); + + BidiWriteObjectRequest req2 = incrementalRequest(3, "DEF"); + BidiWriteObjectRequest req3 = incrementalRequest(6, "GHI"); + + BidiWriteObjectRequest reconnect = + BidiWriteObjectRequest.newBuilder() + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .setGeneration(METADATA.getGeneration()) + .build()) + .setFlush(true) + .setStateLookup(true) + .build(); + + BidiWriteObjectRequest req4 = incrementalRequest(7, "HI"); + + BidiWriteObjectRequest req5 = incrementalRequest(9, "J", true); + BidiWriteObjectRequest req6 = finishMessage(10); + + BidiWriteObjectResponse last = + BidiWriteObjectResponse.newBuilder() + .setResource( + Object.newBuilder() + .setName(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setGeneration(METADATA.getGeneration()) + .setSize(10) + .setFinalizeTime(timestampNow()) + // real object would have some extra fields like metageneration and storage + // class + .build()) + .build(); + Map map = new HashMap<>(); + + FakeStorage fake = + 
FakeStorage.of( + ImmutableMap.of( + req1, + maxRetries(req1, null, map, 1), + req2, + maxRetries(req2, null, map, 1), + req3, + retryableErrorOnce(req3, null, map, 1), + reconnect, + maxRetries(reconnect, incrementalResponse(7), map, 1), + req4, + maxRetries(req4, null, map, 1), + req5, + maxRetries(req5, incrementalResponse(10), map, 1), + req6, + maxRetries(req6, last, map, 1)), + ImmutableMap.of( + GetObjectRequest.newBuilder() + .setObject(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setReadMask( + (FieldMask.newBuilder() + .addPaths(Storage.BlobField.GENERATION.getGrpcName()) + .build())) + .build(), + Object.newBuilder().setGeneration(METADATA.getGeneration()).build())); + + try (FakeServer fakeServer = FakeServer.of(fake); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { + SettableApiFuture done = SettableApiFuture.create(); + BidiAppendableUnbufferedWritableByteChannel channel = + new BidiAppendableUnbufferedWritableByteChannel( + new BidiUploadStreamingStream( + BidiUploadState.appendableNew( + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource( + Object.newBuilder() + .setBucket(METADATA.getBucket()) + .setName(METADATA.getName())) + .setAppendable(true) + .build()) + .build(), + GrpcCallContext::createDefault, + 32, + SettableApiFuture.create(), + Crc32cValue.zero()), + storage.storageDataClient.executor, + storage.storageClient.bidiWriteObjectCallable(), + 3, + storage.storageDataClient.retryContextProvider.create()), + smallSegmenter, + 3, + 0); + ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 10); + StorageChannelUtils.blockingEmptyTo(ByteBuffer.wrap(content.getBytes()), channel); + channel.nextWriteShouldFinalize(); + channel.close(); + assertThat(done.get(777, TimeUnit.MILLISECONDS).getResource().getSize()).isEqualTo(10); + + assertThat(map.get(req1)).isEqualTo(1); + 
assertThat(map.get(req2)).isEqualTo(1); + assertThat(map.get(req3)).isEqualTo(1); + assertThat(map.get(req4)).isEqualTo(1); + assertThat(map.get(req5)).isEqualTo(1); + } + } + + /** + * In this test, we use a small chunk segmenter that makes 3 byte segments, and do two flushes of + * multiple segments (one with "ABC"/"DEF"/"GHI"/"J" and one with "KLM"/"NOP"/"QRS"/T"). The first + * one flushes normally, but the second one gets a retryable error halfway through, and the result + * of that retryable error indicates that a partial flush occurred. The retry loop handles + * skipping the partially ack'd bytes. This test is just to assure that the {@code begin} variable + * in the channel works properly + */ + @Test + @Ignore("partial message eviction") + public void testFlushMultipleSegmentsTwice_firstSucceeds_secondFailsHalfway_partialFlush() + throws Exception { + BidiWriteHandle writeHandle = + BidiWriteHandle.newBuilder() + .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) + .build(); + + ChunkSegmenter smallSegmenter = + new ChunkSegmenter(Hasher.enabled(), ByteStringStrategy.copy(), 3, 3); + + BidiWriteObjectRequest req1 = + REQ_OPEN.toBuilder() + .setChecksummedData( + ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("ABC"))) + .build(); + + BidiWriteObjectResponse res1 = + BidiWriteObjectResponse.newBuilder() + .setResource( + Object.newBuilder() + .setName(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setGeneration(METADATA.getGeneration()) + .setSize(10) + // real object would have some extra fields like metageneration and storage + // class + .build()) + .setWriteHandle(writeHandle) + .build(); + + BidiWriteObjectRequest req2 = incrementalRequest(10, "KLM"); + BidiWriteObjectRequest req3 = incrementalRequest(13, "NOP"); + + BidiWriteObjectRequest reconnect = + BidiWriteObjectRequest.newBuilder() + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) 
+ .setGeneration(METADATA.getGeneration()) + .setWriteHandle(writeHandle) + .build()) + .setFlush(true) + .setStateLookup(true) + .build(); + + BidiWriteObjectRequest req4 = incrementalRequest(14, "OP"); + + BidiWriteObjectRequest req5 = incrementalRequest(16, "QRS"); + BidiWriteObjectRequest req6 = incrementalRequest(19, "T", true); + BidiWriteObjectRequest req7 = finishMessage(20); + + BidiWriteObjectResponse last = + BidiWriteObjectResponse.newBuilder() + .setResource( + Object.newBuilder() + .setName(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setGeneration(METADATA.getGeneration()) + .setSize(20) + .setFinalizeTime(timestampNow()) + // real object would have some extra fields like metageneration and storage + // class + .build()) + .build(); + Map map = new HashMap<>(); + + FakeStorage fake = + FakeStorage.of( + ImmutableMap + .>> + builder() + .putAll( + ImmutableMap.of( + req1, + respond -> {}, + incrementalRequest(3, "DEF"), + respond -> {}, + incrementalRequest(6, "GHI"), + respond -> {}, + incrementalRequest(9, "J", true), + respond -> respond.onNext(res1), + req2, + maxRetries(req2, null, map, 1), + req3, + retryableErrorOnce(req3, null, map, 1), + reconnect, + maxRetries(reconnect, incrementalResponse(14), map, 1), + req4, + maxRetries(req4, null, map, 1), + req5, + maxRetries(req5, null, map, 1), + req6, + maxRetries(req6, incrementalResponse(20), map, 1))) + .putAll(ImmutableMap.of(req7, maxRetries(req7, last, map, 1))) + .build()); + + try (FakeServer fakeServer = FakeServer.of(fake); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { + SettableApiFuture done = SettableApiFuture.create(); + BidiAppendableUnbufferedWritableByteChannel channel = + new BidiAppendableUnbufferedWritableByteChannel( + new BidiUploadStreamingStream( + BidiUploadState.appendableNew( + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource( + 
Object.newBuilder() + .setBucket(METADATA.getBucket()) + .setName(METADATA.getName())) + .setAppendable(true) + .build()) + .build(), + GrpcCallContext::createDefault, + 32, + SettableApiFuture.create(), + Crc32cValue.zero()), + storage.storageDataClient.executor, + storage.storageClient.bidiWriteObjectCallable(), + 3, + storage.storageDataClient.retryContextProvider.create()), + smallSegmenter, + 3, + 0); + ChecksummedTestContent content1 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 10); + ChecksummedTestContent content2 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 10); + StorageChannelUtils.blockingEmptyTo(ByteBuffer.wrap(content1.getBytes()), channel); + StorageChannelUtils.blockingEmptyTo(ByteBuffer.wrap(content2.getBytes()), channel); + channel.nextWriteShouldFinalize(); + channel.close(); + assertThat(done.get(777, TimeUnit.MILLISECONDS).getResource().getSize()).isEqualTo(20); + + assertThat(map.get(reconnect)).isEqualTo(1); + assertThat(map.get(req2)).isEqualTo(1); + assertThat(map.get(req3)).isEqualTo(1); + assertThat(map.get(req4)).isEqualTo(1); + assertThat(map.get(req5)).isEqualTo(1); + assertThat(map.get(req6)).isEqualTo(1); + assertThat(map.get(req7)).isEqualTo(1); + } + } + + /** + * If we get a 200 response with a partial success halfway through a flush of multiple segments, + * the next segment after the partial success will hit a server-side error due to having a larger + * write offset than the current persisted size. We retry this error and the retry loop handles + * skipping the partially ack'd bytes + */ + @Test + /* + @Ignore("Ignore until the new implementation handles partial message consumption. 
\n" + + "[0:3] + [3:3] + [6:3] -> 8\n" + + "Today we only replay whole messages") + */ + @Ignore("messages splitting") + public void testFlushMultipleSegments_200ResponsePartialFlushHalfway() throws Exception { + BidiWriteHandle writeHandle = + BidiWriteHandle.newBuilder() + .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) + .build(); + + ChunkSegmenter smallSegmenter = + new ChunkSegmenter(Hasher.enabled(), ByteStringStrategy.copy(), 3, 3); + + BidiWriteObjectRequest req1 = + REQ_OPEN.toBuilder() + .setChecksummedData( + ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("ABC"))) + .build(); + + BidiWriteObjectResponse res1 = + BidiWriteObjectResponse.newBuilder() + .setResource( + Object.newBuilder() + .setName(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setGeneration(METADATA.getGeneration()) + .setSize(8) + // real object would have some extra fields like metageneration and storage + // class + .build()) + .setWriteHandle(writeHandle) + .build(); + + BidiWriteObjectRequest req2 = incrementalRequest(3, "DEF"); + BidiWriteObjectRequest req3 = incrementalRequest(6, "GHI"); + + BidiWriteObjectRequest reconnect = + BidiWriteObjectRequest.newBuilder() + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .setGeneration(METADATA.getGeneration()) + .setWriteHandle(writeHandle) + .build()) + .setFlush(true) + .setStateLookup(true) + .build(); + + BidiWriteObjectRequest req4 = incrementalRequest(9, "J", true); + + BidiWriteObjectRequest req5 = incrementalRequest(8, "I"); + BidiWriteObjectRequest req6 = finishMessage(10); + + BidiWriteObjectResponse last = + BidiWriteObjectResponse.newBuilder() + .setResource( + Object.newBuilder() + .setName(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setGeneration(METADATA.getGeneration()) + .setSize(10) + .setFinalizeTime(timestampNow()) + // real object would have some extra fields like metageneration and 
storage + // class + .build()) + .build(); + Map map = new HashMap<>(); + + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + req1, + maxRetries(req1, null, map, 1), + req2, + maxRetries(req2, null, map, 1), + req3, + maxRetries(req3, null, map, 1), + req4, + respond -> { + map.putIfAbsent(req4, 0); + int attempts = map.get(req4) + 1; + map.put(req4, attempts); + if (attempts == 1) { + respond.onNext(res1); + } else if (attempts == 2) { + respond.onNext(incrementalResponse(10)); + } + }, + reconnect, + maxRetries(reconnect, incrementalResponse(8), map, 1), + req5, + maxRetries(req5, null, map, 1), + req6, + maxRetries(req6, last, map, 1))); + + try (FakeServer fakeServer = FakeServer.of(fake); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + BidiWriteObjectRequest initial = + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource( + Object.newBuilder() + .setBucket(METADATA.getBucket()) + .setName(METADATA.getName())) + .setAppendable(true) + .build()) + .build(); + AppendableUploadState uploadState = + BidiUploadState.appendableNew( + initial, + GrpcCallContext::createDefault, + 32, + SettableApiFuture.create(), + Crc32cValue.zero()); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + uploadState, + storage.storageDataClient.executor, + storage.storageClient.bidiWriteObjectCallable(), + 3, + storage.storageDataClient.retryContextProvider.create()); + BidiAppendableUnbufferedWritableByteChannel channel = + new BidiAppendableUnbufferedWritableByteChannel(stream, smallSegmenter, 3, 0); + ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 10); + StorageChannelUtils.blockingEmptyTo(ByteBuffer.wrap(content.getBytes()), channel); + channel.nextWriteShouldFinalize(); + channel.close(); + assertThat(stream.getResultFuture().get(777, TimeUnit.MILLISECONDS).getResource().getSize()) + .isEqualTo(10); + + 
assertThat(map.get(req1)).isEqualTo(1); + assertThat(map.get(req2)).isEqualTo(1); + assertThat(map.get(req3)).isEqualTo(1); + assertThat(map.get(req4)).isEqualTo(2); + assertThat(map.get(req5)).isEqualTo(1); + assertThat(map.get(req6)).isEqualTo(1); + assertThat(map.get(reconnect)).isEqualTo(1); + } + } + + @Test + public void crc32cWorks() throws Exception { + byte[] b = new byte[25]; + DataGenerator.base64Characters().fill(b, 0, 20); + DataGenerator.base64Characters().fill(b, 20, 5); + ChecksummedTestContent abcde = ChecksummedTestContent.of(b, 0, 5); + ChecksummedTestContent fghij = ChecksummedTestContent.of(b, 5, 5); + ChecksummedTestContent klmno = ChecksummedTestContent.of(b, 10, 5); + ChecksummedTestContent pqrst = ChecksummedTestContent.of(b, 15, 5); + ChecksummedTestContent all = ChecksummedTestContent.of(b); + + BidiWriteObjectRequest req1 = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(0) + .setWriteObjectSpec(REQ_OPEN.getWriteObjectSpec()) + .setChecksummedData(abcde.asChecksummedData()) + .setFlush(true) + .setStateLookup(true) + .build(); + BidiWriteObjectResponse res1 = incrementalResponse(5); + + BidiWriteObjectRequest req2 = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(5) + .setChecksummedData(fghij.asChecksummedData()) + .setFlush(true) + .setStateLookup(true) + .build(); + BidiWriteObjectResponse res2 = incrementalResponse(10); + BidiWriteObjectRequest req3 = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(10) + .setChecksummedData(klmno.asChecksummedData()) + .setFlush(true) + .setStateLookup(true) + .build(); + BidiWriteObjectResponse res3 = incrementalResponse(15); + BidiWriteObjectRequest req4 = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(15) + .setChecksummedData(pqrst.asChecksummedData()) + .setFlush(true) + .setStateLookup(true) + .build(); + BidiWriteObjectResponse res4 = incrementalResponse(20); + BidiWriteObjectRequest req5 = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(20) + 
.setChecksummedData(abcde.asChecksummedData()) + .setFlush(true) + .setStateLookup(true) + .build(); + BidiWriteObjectResponse res5 = incrementalResponse(25); + BidiWriteObjectRequest req6 = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(25) + .setFinishWrite(true) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(all.getCrc32c()).build()) + .build(); + BidiWriteObjectResponse res6 = + BidiWriteObjectResponse.newBuilder() + .setResource( + Object.newBuilder() + .setName(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setGeneration(METADATA.getGeneration()) + .setSize(25) + .setFinalizeTime(timestampNow()) + .setChecksums(ObjectChecksums.newBuilder().setCrc32C(all.getCrc32c()).build()) + // real object would have some extra fields like metageneration and storage + // class + .build()) + .build(); + + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + req1, + respond -> respond.onNext(res1), + req2, + respond -> respond.onNext(res2), + req3, + respond -> respond.onNext(res3), + req4, + respond -> respond.onNext(res4), + req5, + respond -> respond.onNext(res5), + req6, + respond -> { + respond.onNext(res6); + respond.onCompleted(); + })); + try (FakeServer fakeServer = FakeServer.of(fake); + Storage storage = fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { + BlobId id = BlobId.of("b", "o"); + + BlobAppendableUploadConfig config = + BlobAppendableUploadConfig.of() + .withFlushPolicy(FlushPolicy.maxFlushSize(5)) + .withCloseAction(CloseAction.FINALIZE_WHEN_CLOSING); + BlobAppendableUpload upload = + storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), config); + try (AppendableUploadWriteableByteChannel channel = upload.open()) { + StorageChannelUtils.blockingEmptyTo(ByteBuffer.wrap(b), channel); + } + ApiFuture result = upload.getResult(); + result.get(5, TimeUnit.SECONDS); + } + } + + /** + * If a stream is held open for an extended period (i.e. 
longer than the configured retry timeout) + * and the server returns an error, we want to make sure the currently pending request is able to + * be retried. To accomplish this, the retry context needs to reset it's attempt elapsed timer + * each time a successful response from the server is received. + * + *

This test simulates (using our {@link TestApiClock} the server pausing 60 seconds before + * delivering an ACK. After the ACK, we raise an Unavailable error, the client's retries should be + * able to handle this and pick up where it left off. + */ + @Test + public void + receivingASuccessfulMessageOnTheStreamShouldResetTheElapsedTimerForRetryBudgetCalculation() + throws Exception { + + TestApiClock testClock = TestApiClock.of(0, TestApiClock.addExact(Duration.ofSeconds(1))); + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + flush(open_abc), + respond -> respond.onNext(res_abc), + flush(def), + respond -> { + // when receiving the second message, simulate it taking one minute to process + testClock.advance(Duration.ofMinutes(1)); + // then return the incremental response before erroring with a retryable error + respond.onNext(incrementalResponse(6)); + respond.onError(TestUtils.apiException(Status.Code.UNAVAILABLE, "Unavailable")); + }, + reconnect, + respond -> { + BidiWriteObjectResponse.Builder b = res_abc.toBuilder(); + b.getResourceBuilder() + .setSize(6) + .setChecksums( + ObjectChecksums.newBuilder() + .setCrc32C(content.slice(0, 6).getCrc32c()) + .build()); + respond.onNext(b.build()); + }, + flush(ghi), + respond -> respond.onNext(incrementalResponse(9)), + j_finish, + respond -> respond.onNext(resource_10))); + try (FakeServer fakeServer = FakeServer.of(fake); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setClock(testClock) + .build() + .getService()) { + BlobId id = BlobId.of("b", "o"); + + BlobAppendableUpload upload = + storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), UPLOAD_CONFIG); + try (AppendableUploadWriteableByteChannel channel = upload.open()) { + StorageChannelUtils.blockingEmptyTo(content.asByteBuffer(), channel); + } + ApiFuture result = upload.getResult(); + result.get(5, TimeUnit.SECONDS); + } + } + + private static Consumer> maxRetries( + @NonNull BidiWriteObjectRequest req, + Map<@NonNull 
BidiWriteObjectRequest, Integer> retryMap, + int maxAttempts) { + return maxRetries(req, null, retryMap, maxAttempts); + } + + private static Consumer> maxRetries( + @NonNull BidiWriteObjectRequest req, + @Nullable BidiWriteObjectResponse res, + @NonNull Map<@NonNull BidiWriteObjectRequest, Integer> retryMap, + int maxAttempts) { + return respond -> { + int attempts = retryMap.compute(req, (r, count) -> count == null ? 1 : count + 1); + if (attempts > maxAttempts) { + DebugInfo details = + DebugInfo.newBuilder().setDetail(TextFormat.printer().shortDebugString(req)).build(); + + com.google.rpc.Status grpcStatusDetails = + com.google.rpc.Status.newBuilder() + .setCode(com.google.rpc.Code.ABORTED_VALUE) + .setMessage("details") + .addDetails(Any.pack(details)) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(TestUtils.GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + StatusRuntimeException t = + Status.ABORTED + .withDescription( + String.format( + Locale.US, + "request received %d times, but only allowed %d times", + attempts, + maxAttempts)) + .asRuntimeException(trailers); + respond.onError(t); + } else { + if (res != null) { + respond.onNext(res); + } + } + }; + } + + private static Consumer> retryableErrorOnce( + @NonNull BidiWriteObjectRequest req, + @NonNull Map<@NonNull BidiWriteObjectRequest, Integer> retryMap, + int maxAttempts) { + return retryableErrorOnce(req, null, retryMap, maxAttempts); + } + + private static Consumer> retryableErrorOnce( + @NonNull BidiWriteObjectRequest req, + @Nullable BidiWriteObjectResponse res, + @NonNull Map<@NonNull BidiWriteObjectRequest, Integer> retryMap, + int maxAttempts) { + return respond -> { + int attempts = retryMap.compute(req, (r, count) -> count == null ? 
1 : count + 1); + if (attempts == 1) { + respond.onError(Status.INTERNAL.asRuntimeException()); + } else if (attempts > maxAttempts) { + respond.onError( + Status.ABORTED + .withDescription("retryableErrorOnce method exceeded max retries in fake") + .asRuntimeException()); + } else { + if (res != null) { + respond.onNext(res); + } + } + }; + } + + private static BidiWriteObjectRequest incrementalRequest( + long offset, String content, boolean flush) { + BidiWriteObjectRequest.Builder builder = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(offset) + .setChecksummedData(ChecksummedTestContent.of(content).asChecksummedData()); + + if (flush) { + builder.setFlush(true).setStateLookup(true); + } + return builder.build(); + } + + private static BidiWriteObjectRequest incrementalRequest(long offset, String content) { + return incrementalRequest(offset, content, false); + } + + private static BidiWriteObjectResponse incrementalResponse(long perSize) { + return BidiWriteObjectResponse.newBuilder().setPersistedSize(perSize).build(); + } + + private static BidiWriteObjectRequest finishMessage(long offset) { + return BidiWriteObjectRequest.newBuilder().setWriteOffset(offset).setFinishWrite(true).build(); + } + + private static BidiWriteObjectRequest flush(BidiWriteObjectRequest req) { + return req.toBuilder().setStateLookup(true).setFlush(true).build(); + } + + private static void runTestFlushMultipleSegments(FakeStorage fake) throws Exception { + try (FakeServer fakeServer = FakeServer.of(fake); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + + BidiWriteObjectRequest initialRequest = + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource( + Object.newBuilder() + .setBucket(METADATA.getBucket()) + .setName(METADATA.getName())) + .setAppendable(true) + .build()) + .build(); + AppendableUploadState state = + BidiUploadState.appendableNew( + initialRequest, + 
GrpcCallContext::createDefault, + 32, + SettableApiFuture.create(), + Crc32cValue.zero()); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + storage.storageDataClient.executor, + storage.storageClient.bidiWriteObjectCallable(), + 3, + storage.storageDataClient.retryContextProvider.create()); + BidiAppendableUnbufferedWritableByteChannel channel = + new BidiAppendableUnbufferedWritableByteChannel(stream, smallSegmenter, 32, 0); + StorageChannelUtils.blockingEmptyTo(ByteBuffer.wrap(content.getBytes()), channel); + channel.nextWriteShouldFinalize(); + channel.close(); + BidiWriteObjectResponse response = stream.getResultFuture().get(777, TimeUnit.MILLISECONDS); + assertThat(response.getResource().getSize()).isEqualTo(10); + assertThat(response.getResource().getChecksums().getCrc32C()).isEqualTo(content.getCrc32c()); + } + } + + static final class FakeStorage extends StorageGrpc.StorageImplBase { + + private final Map>> db; + private final Map getdb; + + private FakeStorage( + Map>> db) { + this(db, ImmutableMap.of()); + } + + private FakeStorage( + Map>> db, + Map getdb) { + this.db = db; + this.getdb = getdb; + } + + @Override + public void getObject(GetObjectRequest request, StreamObserver responseObserver) { + if (getdb.containsKey(request)) { + Object resp = getdb.get(request); + if (resp.getGeneration() == 0) { + responseObserver.onError(TestUtils.apiException(Status.Code.NOT_FOUND, "not found")); + } else { + responseObserver.onNext(getdb.get(request)); + responseObserver.onCompleted(); + } + } else { + responseObserver.onError(unexpectedRequest(request, getdb.keySet())); + } + } + + @Override + public StreamObserver bidiWriteObject( + StreamObserver respond) { + return new AbstractObserver(respond) { + @Override + public void onNext(BidiWriteObjectRequest req) { + if (db.containsKey(req)) { + db.get(req).accept(respond); + } else { + respond.onError(unexpectedRequest(req, db.keySet())); + } + } + }; + } + + static @NonNull 
StatusRuntimeException unexpectedRequest( + Message req, Collection messages) { + DebugInfo details = + DebugInfo.newBuilder().setDetail(TextFormat.printer().shortDebugString(req)).build(); + + com.google.rpc.Status grpcStatusDetails = + com.google.rpc.Status.newBuilder() + .setCode(Code.UNIMPLEMENTED_VALUE) + .setMessage("details") + .addDetails(Any.pack(details)) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(TestUtils.GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + StringBuilder sb = new StringBuilder(); + sb.append("Unexpected request.").append("\n"); + sb.append(" actual: ").append("\n ").append(fmtProto(req)).append("\n"); + sb.append(" expected one of: "); + sb.append( + messages.stream() + .map(StorageV2ProtoUtils::fmtProto) + .collect(Collectors.joining(",\n ", "[\n ", "\n ]"))); + return Status.UNIMPLEMENTED.withDescription(sb.toString()).asRuntimeException(trailers); + } + + static FakeStorage of( + Map>> db) { + return new FakeStorage(db); + } + + static FakeStorage of( + Map>> db, + Map getdb) { + return new FakeStorage(db, getdb); + } + } + + abstract static class AbstractObserver implements StreamObserver { + + protected final StreamObserver respond; + + private AbstractObserver(StreamObserver respond) { + this.respond = respond; + } + + @Override + public void onError(Throwable t) { + respond.onError(t); + } + + @Override + public void onCompleted() { + respond.onCompleted(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadTest.java new file mode 100644 index 000000000000..9e3ae209fbbe --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadTest.java @@ -0,0 +1,344 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except 
in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeTrue; + +import com.google.api.core.ApiFuture; +import com.google.cloud.storage.BlobAppendableUpload.AppendableUploadWriteableByteChannel; +import com.google.cloud.storage.BlobAppendableUploadConfig.CloseAction; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.FlushPolicy.MaxFlushSizeFlushPolicy; +import com.google.cloud.storage.FlushPolicy.MinFlushSizeFlushPolicy; +import com.google.cloud.storage.ITAppendableUploadTest.UploadConfigParameters; +import com.google.cloud.storage.MetadataField.PartRange; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.BucketFixture; +import com.google.cloud.storage.it.runner.annotations.BucketType; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.Parameterized; +import com.google.cloud.storage.it.runner.annotations.Parameterized.Parameter; +import 
com.google.cloud.storage.it.runner.annotations.Parameterized.ParametersProvider; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.io.ByteStreams; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SeekableByteChannel; +import java.nio.file.Paths; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.TEST_BENCH}, + transports = Transport.GRPC) +@Parameterized(UploadConfigParameters.class) +public final class ITAppendableUploadTest { + + @Inject public Generator generator; + + @Inject public Storage storage; + + @Inject + @BucketFixture(BucketType.RAPID) + public BucketInfo bucket; + + @Inject public Backend backend; + + @Parameter public Param p; + + @Test + public void appendableUpload_emptyObject() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + assumeTrue( + "only run once", + p.content.length() == UploadConfigParameters.objectSizes.get(0) + && p.uploadConfig.getCloseAction() == UploadConfigParameters.closeActions.get(0) + && p.uploadConfig.getFlushPolicy().equals(UploadConfigParameters.flushPolicies.get(0))); + + BlobAppendableUpload upload = + storage.blobAppendableUpload( + BlobInfo.newBuilder(bucket, UUID.randomUUID().toString()).build(), p.uploadConfig); + + upload.open().close(); + + BlobInfo actual = upload.getResult().get(5, TimeUnit.SECONDS); + assertThat(actual.getSize()).isEqualTo(0); + assertThat(actual.getCrc32c()) + .isEqualTo(Utils.crc32cCodec.encode(Crc32cValue.zero().getValue())); + + assumeFalse( + "Testbench doesn't handle {read_id: 1, read_offset: 0} for a 0 byte object", + backend == 
Backend.TEST_BENCH); + byte[] actualBytes = readAllBytes(actual); + assertThat(xxd(actualBytes)).isEqualTo(xxd(new byte[0])); + } + + @Test + public void appendableUpload_bytes() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + checkTestbenchIssue733(); + + BlobAppendableUpload upload = + storage.blobAppendableUpload( + BlobInfo.newBuilder(bucket, UUID.randomUUID().toString()).build(), p.uploadConfig); + + // cut out the middle + 1 byte + int length = p.content.length(); + int mid = length / 2; + ChecksummedTestContent a1 = p.content.slice(0, mid); + ChecksummedTestContent a2 = p.content.slice(mid + 1, length - mid - 1); + ChecksummedTestContent a1_a2 = a1.concat(a2); + Crc32cLengthKnown c1_c2 = Crc32cValue.of(a1_a2.getCrc32c(), a1_a2.length()); + + try (AppendableUploadWriteableByteChannel channel = upload.open()) { + int written1 = Buffers.emptyTo(ByteBuffer.wrap(a1.getBytes()), channel); + assertThat(written1).isEqualTo(a1.length()); + int written2 = Buffers.emptyTo(ByteBuffer.wrap(a2.getBytes()), channel); + assertThat(written2).isEqualTo(a2.length()); + } + + BlobInfo actual = upload.getResult().get(5, TimeUnit.SECONDS); + assertThat(actual.getSize()).isEqualTo(c1_c2.getLength()); + assertThat(actual.getCrc32c()).isEqualTo(Utils.crc32cCodec.encode(c1_c2.getValue())); + + byte[] actualBytes = readAllBytes(actual); + assertThat(xxd(actualBytes)).isEqualTo(xxd(a1_a2.getBytes())); + } + + @Test + public void explicitFlush() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + checkTestbenchIssue733(); + + BlobAppendableUpload upload = + storage.blobAppendableUpload( + BlobInfo.newBuilder(bucket, UUID.randomUUID().toString()).build(), p.uploadConfig); + + try (AppendableUploadWriteableByteChannel channel = upload.open()) { + ByteBuffer src = p.content.asByteBuffer(); + ByteBuffer zed = src.slice(); + zed.limit(zed.position() + 1); + src.position(src.position() + 1); + + int written = 
channel.write(zed); + assertThat(written).isEqualTo(1); + channel.flush(); + + written = StorageChannelUtils.blockingEmptyTo(src, channel); + assertThat(written).isEqualTo(p.content.length() - 1); + } + + BlobInfo gen1 = upload.getResult().get(3, TimeUnit.SECONDS); + assertThat(gen1.getSize()).isEqualTo(p.content.length()); + assertThat(gen1.getCrc32c()).isEqualTo(Utils.crc32cCodec.encode(p.content.getCrc32c())); + } + + @Test + // Pending work in testbench: https://github.com/googleapis/storage-testbench/issues/723 + // manually verified internally on 2025-03-25 + @CrossRun.Ignore(backends = {Backend.TEST_BENCH}) + public void appendableBlobUploadTakeover() throws Exception { + + List chunks = p.content.chunkup((p.content.length() / 2) + 1); + assertThat(chunks).hasSize(2); + + ChecksummedTestContent c1 = chunks.get(0); + ChecksummedTestContent c2 = chunks.get(1); + + BlobId id = BlobId.of(bucket.getName(), UUID.randomUUID().toString()); + BlobAppendableUploadConfig doNotFinalizeConfig = + p.uploadConfig.withCloseAction(CloseAction.CLOSE_WITHOUT_FINALIZING); + + BlobAppendableUpload upload = + storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), doNotFinalizeConfig); + try (AppendableUploadWriteableByteChannel channel = upload.open()) { + int written = Buffers.emptyTo(ByteBuffer.wrap(c1.getBytes()), channel); + assertThat(written).isEqualTo(c1.length()); + } + BlobInfo done1 = upload.getResult().get(5, TimeUnit.SECONDS); + assertThat(done1.getSize()).isEqualTo(c1.length()); + assertThat(done1.getCrc32c()).isEqualTo(Utils.crc32cCodec.encode(c1.getCrc32c())); + + BlobAppendableUpload takeOver = + storage.blobAppendableUpload( + BlobInfo.newBuilder(done1.getBlobId()).build(), p.uploadConfig); + try (AppendableUploadWriteableByteChannel channel = takeOver.open()) { + int written = Buffers.emptyTo(ByteBuffer.wrap(c2.getBytes()), channel); + assertThat(written).isEqualTo(c2.length()); + } + BlobInfo done2 = takeOver.getResult().get(5, TimeUnit.SECONDS); + + 
assertThat(done2.getSize()).isEqualTo(p.content.length()); + assertThat(done2.getCrc32c()).isAnyOf(Utils.crc32cCodec.encode(p.content.getCrc32c()), null); + } + + @Test + public void testUploadFileUsingAppendable() throws Exception { + checkTestbenchIssue733(); + + String objectName = UUID.randomUUID().toString(); + String fileName = + ParallelCompositeUploadBlobWriteSessionConfig.PartNamingStrategy.noPrefix() + .fmtName(objectName, PartRange.of(1)); + BlobId bid = BlobId.of(bucket.getName(), objectName); + int fileSize = p.content.length(); + try (TmpFile tmpFile = + TmpFile.of(Paths.get(System.getProperty("java.io.tmpdir")), fileName + ".", ".bin")) { + try (SeekableByteChannel w = tmpFile.writer()) { + int written = Buffers.emptyTo(ByteBuffer.wrap(p.content.getBytes()), w); + assertThat(written).isEqualTo(p.content.length()); + } + + BlobAppendableUpload appendable = + storage.blobAppendableUpload(BlobInfo.newBuilder(bid).build(), p.uploadConfig); + try (SeekableByteChannel r = tmpFile.reader(); + AppendableUploadWriteableByteChannel w = appendable.open()) { + long copied = ByteStreams.copy(r, w); + assertThat(copied).isEqualTo(fileSize); + } + BlobInfo bi = appendable.getResult().get(5, TimeUnit.SECONDS); + assertThat(bi.getSize()).isEqualTo(fileSize); + } + } + + @Test + // Pending work in testbench: https://github.com/googleapis/storage-testbench/issues/723 + // manually verified internally on 2025-03-25 + @CrossRun.Ignore(backends = {Backend.TEST_BENCH}) + public void takeoverJustToFinalizeWorks() throws Exception { + BlobId bid = BlobId.of(bucket.getName(), UUID.randomUUID().toString()); + assumeTrue( + "manually finalizing", + p.uploadConfig.getCloseAction() != CloseAction.FINALIZE_WHEN_CLOSING); + + BlobAppendableUpload upload = + storage.blobAppendableUpload(BlobInfo.newBuilder(bid).build(), p.uploadConfig); + try (AppendableUploadWriteableByteChannel channel = upload.open()) { + int written = Buffers.emptyTo(ByteBuffer.wrap(p.content.getBytes()), 
channel); + assertThat(written).isEqualTo(p.content.length()); + } + BlobInfo done1 = upload.getResult().get(5, TimeUnit.SECONDS); + assertThat(done1.getSize()).isEqualTo(p.content.length()); + assertThat(done1.getCrc32c()).isEqualTo(Utils.crc32cCodec.encode(p.content.getCrc32c())); + + BlobAppendableUpload takeOver = + storage.blobAppendableUpload( + BlobInfo.newBuilder(done1.getBlobId()).build(), p.uploadConfig); + takeOver.open().finalizeAndClose(); + + BlobInfo done2 = takeOver.getResult().get(5, TimeUnit.SECONDS); + assertAll( + () -> assertThat(done2).isNotNull(), + () -> assertThat(done2.getSize()).isEqualTo(p.content.length()), + () -> assertThat(done2.getCrc32c()).isNotNull()); + } + + private void checkTestbenchIssue733() { + if (backend == Backend.TEST_BENCH + && p.uploadConfig.getCloseAction() == CloseAction.FINALIZE_WHEN_CLOSING) { + int estimatedMessageCount = 0; + FlushPolicy flushPolicy = p.uploadConfig.getFlushPolicy(); + if (flushPolicy instanceof MinFlushSizeFlushPolicy) { + MinFlushSizeFlushPolicy min = (MinFlushSizeFlushPolicy) flushPolicy; + estimatedMessageCount = p.content.length() / min.getMinFlushSize(); + } else if (flushPolicy instanceof MaxFlushSizeFlushPolicy) { + MaxFlushSizeFlushPolicy max = (MaxFlushSizeFlushPolicy) flushPolicy; + estimatedMessageCount = p.content.length() / max.getMaxFlushSize(); + } + // if our int division results in a partial message, ensure we are counting at least one + // message. We have a separate test specifically for empty objects. 
+ estimatedMessageCount = Math.max(estimatedMessageCount, 1); + assumeTrue( + "testbench broken https://github.com/googleapis/storage-testbench/issues/733", + estimatedMessageCount > 1); + } + } + + private byte[] readAllBytes(BlobInfo actual) + throws IOException, InterruptedException, ExecutionException, TimeoutException { + ApiFuture blobReadSessionFuture = storage.blobReadSession(actual.getBlobId()); + try (BlobReadSession read = blobReadSessionFuture.get(2_372, TimeUnit.MILLISECONDS)) { + ApiFuture futureBytes = read.readAs(ReadProjectionConfigs.asFutureBytes()); + return futureBytes.get(2_273, TimeUnit.MILLISECONDS); + } + } + + public static final class UploadConfigParameters implements ParametersProvider { + + private static final ImmutableList flushPolicies = + ImmutableList.of( + FlushPolicy.minFlushSize(1_000), + FlushPolicy.minFlushSize(1_000).withMaxPendingBytes(5_000), + FlushPolicy.maxFlushSize(500_000), + FlushPolicy.minFlushSize(), + FlushPolicy.maxFlushSize()); + private static final ImmutableList closeActions = + ImmutableList.copyOf(CloseAction.values()); + public static final ImmutableList objectSizes = + ImmutableList.of(5, 500, 5_000, 500_000, 5_000_000); + + @Override + public ImmutableList parameters() { + ImmutableList.Builder builder = ImmutableList.builder(); + for (FlushPolicy fp : flushPolicies) { + for (CloseAction ca : closeActions) { + for (int size : objectSizes) { + Param param = + new Param( + ChecksummedTestContent.gen(size), + BlobAppendableUploadConfig.of().withFlushPolicy(fp).withCloseAction(ca)); + builder.add(param); + } + } + } + return builder.build(); + } + } + + public static final class Param { + private final ChecksummedTestContent content; + private final BlobAppendableUploadConfig uploadConfig; + + private Param(ChecksummedTestContent content, BlobAppendableUploadConfig uploadConfig) { + this.content = content; + this.uploadConfig = uploadConfig; + } + + @Override + public String toString() { + return 
MoreObjects.toStringHelper(this) + .add("byteCount", content) + .add("uploadConfig", uploadConfig) + .toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITBidiAppendableUnbufferedWritableByteChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITBidiAppendableUnbufferedWritableByteChannelTest.java new file mode 100644 index 000000000000..8742482733dd --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITBidiAppendableUnbufferedWritableByteChannelTest.java @@ -0,0 +1,90 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.BlobAppendableUpload.AppendableUploadWriteableByteChannel; +import com.google.cloud.storage.BlobAppendableUploadConfig.CloseAction; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.BucketFixture; +import com.google.cloud.storage.it.runner.annotations.BucketType; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import java.nio.ByteBuffer; +import java.nio.channels.WritableByteChannel; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.TEST_BENCH}, + transports = Transport.GRPC) +public final class ITBidiAppendableUnbufferedWritableByteChannelTest { + + @Inject public Generator generator; + + @Inject public Storage storage; + + @Inject + @BucketFixture(BucketType.RAPID) + public BucketInfo bucket; + + @Inject public Backend backend; + + @Test + public void nonBufferAlignedWritesLeaveBuffersInTheCorrectState() throws Exception { + BlobId bid = BlobId.of(bucket.getName(), UUID.randomUUID().toString()); + BlobAppendableUploadConfig config = + BlobAppendableUploadConfig.of() + .withFlushPolicy(FlushPolicy.minFlushSize(8 * 1024).withMaxPendingBytes(16 * 1024)) + .withCloseAction(CloseAction.CLOSE_WITHOUT_FINALIZING); + ChecksummedTestContent ctc = ChecksummedTestContent.gen(16 * 1024 + 5); + BlobAppendableUpload upload = + 
storage.blobAppendableUpload(BlobInfo.newBuilder(bid).build(), config); + try (AppendableUploadWriteableByteChannel channel = upload.open()) { + // enqueue 4 bytes, this makes it so the following 8K writes don't evenly fit + checkedEmptyTo(ctc.slice(0, 4).asByteBuffer(), channel); + checkedEmptyTo(ctc.slice(4, 8192).asByteBuffer(), channel); + checkedEmptyTo(ctc.slice(4 + 8192, 8192).asByteBuffer(), channel); + checkedEmptyTo(ctc.slice(4 + 8192 + 8192, 1).asByteBuffer(), channel); + } + BlobInfo done1 = upload.getResult().get(5, TimeUnit.SECONDS); + assertThat(done1.getSize()).isEqualTo(ctc.length()); + assertThat(done1.getCrc32c()).isEqualTo(Utils.crc32cCodec.encode(ctc.getCrc32c())); + } + + private static int checkedEmptyTo(ByteBuffer buf, WritableByteChannel c) throws Exception { + int remaining = buf.remaining(); + int position = buf.position(); + int remaining1 = buf.remaining(); + int written = StorageChannelUtils.blockingEmptyTo(buf, c); + assertAll( + () -> assertThat(written).isEqualTo(position + remaining1), + () -> assertThat(buf.position()).isEqualTo(position + written), + () -> assertThat(buf.remaining()).isEqualTo(remaining1 - written)); + assertThat(written).isEqualTo(remaining); + return written; + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITExtraHeadersOptionTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITExtraHeadersOptionTest.java new file mode 100644 index 000000000000..05e61e2be6d3 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITExtraHeadersOptionTest.java @@ -0,0 +1,379 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.api.gax.paging.Page; +import com.google.cloud.ReadChannel; +import com.google.cloud.ServiceOptions; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Storage.BlobGetOption; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.Storage.BucketGetOption; +import com.google.cloud.storage.Storage.BucketListOption; +import com.google.cloud.storage.Storage.ComposeRequest; +import com.google.cloud.storage.Storage.CopyRequest; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.AssertRequestHeaders; +import com.google.cloud.storage.it.AssertRequestHeaders.FilteringPolicy; +import com.google.cloud.storage.it.GrpcRequestAuditing; +import com.google.cloud.storage.it.RequestAuditing; +import com.google.cloud.storage.it.TemporaryBucket; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import 
com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.io.ByteStreams; +import com.google.common.truth.IterableSubject; +import io.grpc.ClientInterceptor; +import java.io.ByteArrayOutputStream; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.time.Clock; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.time.temporal.ChronoUnit; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.PROD}, + transports = {Transport.HTTP, Transport.GRPC}) +public final class ITExtraHeadersOptionTest { + + private static final String HEADER_NAME = "x-my-header"; + private static final String HEADER_VALUE = "value"; + private static final ImmutableMap EXTRA_HEADERS = + ImmutableMap.of(HEADER_NAME, HEADER_VALUE); + @Inject public BucketInfo bucket; + @Inject public Generator generator; + @Inject public Storage baseStorage; + @Inject public Transport transport; + + private Storage storage; + private AssertRequestHeaders headers; + + @Before + public void setUp() throws Exception { + switch (transport) { + case HTTP: + RequestAuditing requestAuditing = new RequestAuditing(); + headers = requestAuditing; + storage = + ((HttpStorageOptions) baseStorage.getOptions()) + .toBuilder() + .setTransportOptions(requestAuditing) + // we're counting requests, disable retries so that if a request fails it won't + // show up as a bad assertion of the test itself + .setRetrySettings(ServiceOptions.getNoRetrySettings()) + .build() + .getService(); + break; + case GRPC: + GrpcRequestAuditing grpcRequestAuditing = new GrpcRequestAuditing(); + headers = grpcRequestAuditing; + GrpcStorageOptions grpcStorageOptions = 
(GrpcStorageOptions) baseStorage.getOptions(); + GrpcInterceptorProvider grpcInterceptorProvider = + grpcStorageOptions.getGrpcInterceptorProvider(); + storage = + grpcStorageOptions.toBuilder() + // we're counting requests, disable retries so that if a request fails it won't + // show up as a bad assertion of the test itself + .setRetrySettings(ServiceOptions.getNoRetrySettings()) + .setGrpcInterceptorProvider( + () -> { + List interceptors = + grpcInterceptorProvider.getInterceptors(); + return ImmutableList.builder() + .addAll(interceptors) + .add(grpcRequestAuditing) + .build(); + }) + .build() + .getService(); + break; + } + } + + @After + public void tearDown() throws Exception { + if (storage != null) { + storage.close(); + } + } + + @Test + public void simpleUnary() throws Exception { + Bucket gen1 = storage.get(bucket.getName(), BucketGetOption.extraHeaders(EXTRA_HEADERS)); + + IterableSubject subject = headers.assertRequestHeader(HEADER_NAME, FilteringPolicy.NO_FILTER); + assertAll(() -> subject.containsExactly(HEADER_VALUE)); + } + + @Test + public void pageObjects() throws Exception { + String baseName = generator.randomObjectName(); + Blob blob1 = storage.create(BlobInfo.newBuilder(bucket, baseName + "1").build()); + Blob blob2 = storage.create(BlobInfo.newBuilder(bucket, baseName + "2").build()); + + headers.clear(); + ImmutableList expectedNames = ImmutableList.of(blob1.getName(), blob2.getName()); + Page page = + storage.list( + bucket.getName(), + BlobListOption.prefix(baseName), + BlobListOption.pageSize(1), + BlobListOption.extraHeaders(EXTRA_HEADERS)); + + List collect = page.streamAll().map(BlobInfo::getName).collect(Collectors.toList()); + IterableSubject subject = headers.assertRequestHeader(HEADER_NAME, FilteringPolicy.NO_FILTER); + assertAll( + () -> assertThat(collect).hasSize(2), + () -> assertThat(collect).containsExactlyElementsIn(expectedNames), + () -> subject.containsExactly(HEADER_VALUE, HEADER_VALUE)); + } + + @Test + public void 
pageBucket() throws Exception { + String baseName = generator.randomBucketName(); + BucketInfo info1 = BucketInfo.of(baseName + "1"); + BucketInfo info2 = BucketInfo.of(baseName + "2"); + try (TemporaryBucket tmp1 = + TemporaryBucket.newBuilder().setBucketInfo(info1).setStorage(storage).build(); + TemporaryBucket tmp2 = + TemporaryBucket.newBuilder().setBucketInfo(info2).setStorage(storage).build()) { + headers.clear(); + Page page = + storage.list( + BucketListOption.prefix(baseName), + BucketListOption.pageSize(1), + BucketListOption.extraHeaders(EXTRA_HEADERS)); + + List collect = page.streamAll().map(BucketInfo::getName).collect(Collectors.toList()); + IterableSubject subject = headers.assertRequestHeader(HEADER_NAME, FilteringPolicy.NO_FILTER); + assertAll( + () -> assertThat(collect).containsExactly(info1.getName(), info2.getName()), + () -> subject.containsExactly(HEADER_VALUE, HEADER_VALUE)); + } + } + + @Test + public void readAllBytes() throws Exception { + byte[] expected = DataGenerator.base64Characters().genBytes(512 * 1024 + 45); + String expectedXxd = xxd(expected); + + Blob gen1 = + storage.create( + BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(), + expected, + BlobTargetOption.doesNotExist()); + + headers.clear(); + byte[] actual = + storage.readAllBytes( + gen1.getBlobId(), + BlobSourceOption.generationMatch(), + BlobSourceOption.extraHeaders(EXTRA_HEADERS)); + IterableSubject subject = headers.assertRequestHeader(HEADER_NAME, FilteringPolicy.NO_FILTER); + String actualXxd = xxd(actual); + + assertAll( + () -> subject.containsExactly(HEADER_VALUE), + () -> assertThat(actualXxd).isEqualTo(expectedXxd)); + } + + @Test + public void reader() throws Exception { + byte[] expected = DataGenerator.base64Characters().genBytes(512 * 1024 + 45); + String expectedXxd = xxd(expected); + + Blob gen1 = + storage.create( + BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(), + expected, + BlobTargetOption.doesNotExist()); + + 
headers.clear(); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (ReadChannel reader = + storage.reader( + gen1.getBlobId(), + BlobSourceOption.generationMatch(), + BlobSourceOption.extraHeaders(EXTRA_HEADERS))) { + ByteStreams.copy(reader, Channels.newChannel(baos)); + } + byte[] actual = baos.toByteArray(); + IterableSubject subject = headers.assertRequestHeader(HEADER_NAME, FilteringPolicy.NO_FILTER); + String actualXxd = xxd(actual); + + assertAll( + () -> subject.containsExactly(HEADER_VALUE), + () -> assertThat(actualXxd).isEqualTo(expectedXxd)); + } + + @Test + public void directUpload() throws Exception { + byte[] expected = DataGenerator.base64Characters().genBytes(512 * 1024 + 45); + String expectedXxd = xxd(expected); + + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + headers.clear(); + Blob gen1 = + storage.create( + info, + expected, + BlobTargetOption.doesNotExist(), + BlobTargetOption.extraHeaders(EXTRA_HEADERS)); + IterableSubject subject = headers.assertRequestHeader(HEADER_NAME, FilteringPolicy.NO_FILTER); + + byte[] actual = storage.readAllBytes(gen1.getBlobId(), BlobSourceOption.generationMatch()); + String actualXxd = xxd(actual); + + assertAll( + () -> subject.containsExactly(HEADER_VALUE), + () -> assertThat(actualXxd).isEqualTo(expectedXxd)); + } + + @Test + public void resumableUpload() throws Exception { + byte[] expected = DataGenerator.base64Characters().genBytes(512 * 1024 + 45); + String expectedXxd = xxd(expected); + + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + try (WriteChannel writer = + storage.writer( + info, BlobWriteOption.doesNotExist(), BlobWriteOption.extraHeaders(EXTRA_HEADERS))) { + writer.setChunkSize(256 * 1024); + writer.write(ByteBuffer.wrap(Arrays.copyOfRange(expected, 0, 256 * 1024))); + writer.write(ByteBuffer.wrap(Arrays.copyOfRange(expected, 256 * 1024, 512 * 1024))); + 
writer.write(ByteBuffer.wrap(Arrays.copyOfRange(expected, 512 * 1024, expected.length))); + } + IterableSubject subject = headers.assertRequestHeader(HEADER_NAME, FilteringPolicy.NO_FILTER); + + byte[] actual = storage.readAllBytes(info.getBlobId()); + String actualXxd = xxd(actual); + assertAll( + () -> subject.containsExactly(HEADER_VALUE, HEADER_VALUE, HEADER_VALUE, HEADER_VALUE), + () -> assertThat(actualXxd).isEqualTo(expectedXxd)); + } + + @Test + @CrossRun.Exclude(transports = Transport.GRPC) + public void batch() throws Exception { + BlobInfo info1 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobInfo info2 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobInfo info3 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + storage.create(info1, BlobTargetOption.doesNotExist()); + storage.create(info2, BlobTargetOption.doesNotExist()); + storage.create(info3, BlobTargetOption.doesNotExist()); + + headers.clear(); + OffsetDateTime now = Clock.systemUTC().instant().atOffset(ZoneOffset.UTC); + + StorageBatch batch = storage.batch(); + String batchHeaderKey = "x-batch-key"; + StorageBatchResult r1 = + batch.get( + info1.getBlobId(), BlobGetOption.extraHeaders(ImmutableMap.of(batchHeaderKey, "v1"))); + StorageBatchResult r2 = + batch.update( + info2.toBuilder().setCustomTimeOffsetDateTime(now).build(), + BlobTargetOption.extraHeaders(ImmutableMap.of(batchHeaderKey, "v2"))); + StorageBatchResult r3 = + batch.delete( + info3.getBlobId(), + BlobSourceOption.extraHeaders(ImmutableMap.of(batchHeaderKey, "v3"))); + + batch.submit(); + assertAll( + () -> assertThat(r1).isNotNull(), + () -> + assertThat(r2.get().getCustomTimeOffsetDateTime().truncatedTo(ChronoUnit.MILLIS)) + .isEqualTo(now.truncatedTo(ChronoUnit.MILLIS)), + () -> assertThat(r3.get()).isTrue(), + () -> { + IterableSubject subject = headers.assertRequestHeader(batchHeaderKey); + subject.containsExactly("v1", "v2", "v3"); + }); + } + + @Test + 
public void rewrite() throws Exception { + BlobInfo info1 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobInfo info2 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobInfo gen1 = storage.create(info1, BlobTargetOption.doesNotExist()); + + CopyRequest copyRequest = + CopyRequest.newBuilder() + .setSource(gen1.getBlobId()) + .setSourceOptions( + BlobSourceOption.extraHeaders(ImmutableMap.of("x-header-source", HEADER_VALUE))) + .setTarget( + info2, + BlobTargetOption.doesNotExist(), + BlobTargetOption.extraHeaders(ImmutableMap.of("x-header-target", HEADER_VALUE))) + .build(); + headers.clear(); + CopyWriter copyWriter = storage.copy(copyRequest); + copyWriter.getResult(); + assertAll( + () -> { + IterableSubject subject = headers.assertRequestHeader("x-header-source"); + subject.containsExactly(HEADER_VALUE); + }, + () -> { + IterableSubject subject = headers.assertRequestHeader("x-header-target"); + subject.containsExactly(HEADER_VALUE); + }); + } + + @Test + public void compose() throws Exception { + BlobInfo info1 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobInfo info2 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobInfo gen1 = storage.create(info1, BlobTargetOption.doesNotExist()); + + ComposeRequest composeRequest = + ComposeRequest.newBuilder() + .addSource(gen1.getBlobId().getName()) + .setTarget(info2) + .setTargetOptions( + BlobTargetOption.doesNotExist(), BlobTargetOption.extraHeaders(EXTRA_HEADERS)) + .build(); + headers.clear(); + Blob blob2 = storage.compose(composeRequest); + assertThat(blob2).isNotNull(); + IterableSubject subject = headers.assertRequestHeader(HEADER_NAME, FilteringPolicy.NO_FILTER); + subject.containsExactly(HEADER_VALUE); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicBidiUnbufferedWritableByteChannelTest.java 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicBidiUnbufferedWritableByteChannelTest.java new file mode 100644 index 000000000000..475ba6b35c9f --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicBidiUnbufferedWritableByteChannelTest.java @@ -0,0 +1,932 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._256KiB; +import static com.google.cloud.storage.ByteSizeConstants._512KiB; +import static com.google.cloud.storage.ByteSizeConstants._768KiB; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.ByteString; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.Object; +import com.google.storage.v2.StartResumableWriteRequest; +import 
com.google.storage.v2.StartResumableWriteResponse; +import com.google.storage.v2.StorageClient; +import com.google.storage.v2.StorageGrpc.StorageImplBase; +import io.grpc.Status.Code; +import io.grpc.stub.CallStreamObserver; +import io.grpc.stub.StreamObserver; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Locale; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import java.util.stream.Collector; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public final class ITGapicBidiUnbufferedWritableByteChannelTest { + + private static final ChunkSegmenter CHUNK_SEGMENTER = + new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.copy(), _256KiB, _256KiB); + + /** + * + * + *

S.1

+ * + * Attempting to append to a session which has already been finalized should raise an error + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = { name = obj, size = 524288 }
+   *     
client state
+   * write_offset = 0, data = [0:262144]
+   *     
request
+   * BidiWriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset= 0, checksummed_data.content.length = 262144 }
+   *     
response
+   * onNext(BidiWriteObjectResponse{ resources = {name = obj, size = 524288 } })
+   *     
+ */ + @Test + public void scenario1() throws Exception { + + String uploadId = "uploadId"; + BidiWriteObjectRequest req1 = + BidiWriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setChecksummedData( + ChecksummedData.newBuilder() + .setContent( + ByteString.copyFrom(DataGenerator.base64Characters().genBytes(_256KiB))) + .build()) + .setStateLookup(true) + .setFlush(true) + .build(); + BidiWriteObjectResponse resp1 = + BidiWriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("obj").setSize(_512KiB).build()) + .build(); + + ImmutableMap, BidiWriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + BidiWriteService service1 = new BidiWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + BidiResumableWrite resumableWrite = getResumableWrite(uploadId); + + BidiWriteCtx writeCtx = new BidiWriteCtx<>(resumableWrite); + SettableApiFuture done = SettableApiFuture.create(); + //noinspection resource + GapicBidiUnbufferedWritableByteChannel channel = + new GapicBidiUnbufferedWritableByteChannel( + storageClient.bidiWriteObjectCallable(), + RetrierWithAlg.attemptOnce(), + done, + CHUNK_SEGMENTER, + writeCtx, + GrpcCallContext::createDefault); + + ByteBuffer bb = DataGenerator.base64Characters().genByteBuffer(_256KiB); + StorageException se = assertThrows(StorageException.class, () -> channel.write(bb)); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("invalid"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(0), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.2

+ * + * Attempting to finalize a session with fewer bytes than GCS acknowledges. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * persisted_size = 524288
+   *     
client state
+   * write_offset = 262144, finish = true
+   *     
request
+   * BidiWriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 262144, finish_write = true}
+   *     
response
+   * onNext(BidiWriteObjectResponse{ persisted_size = 524288 })
+   *     
+ */ + @Test + public void scenario2() throws Exception { + String uploadId = "uploadId"; + BidiWriteObjectRequest req1 = + BidiWriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_256KiB) + .setFinishWrite(true) + .build(); + BidiWriteObjectResponse resp1 = + BidiWriteObjectResponse.newBuilder().setPersistedSize(_512KiB).build(); + + ImmutableMap, BidiWriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + BidiWriteService service1 = new BidiWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + BidiResumableWrite resumableWrite = getResumableWrite(uploadId); + BidiWriteCtx writeCtx = new BidiWriteCtx<>(resumableWrite); + writeCtx.getTotalSentBytes().set(_256KiB); + writeCtx.getConfirmedBytes().set(_256KiB); + + //noinspection resource + GapicBidiUnbufferedWritableByteChannel channel = + new GapicBidiUnbufferedWritableByteChannel( + storageClient.bidiWriteObjectCallable(), + RetrierWithAlg.attemptOnce(), + done, + CHUNK_SEGMENTER, + writeCtx, + GrpcCallContext::createDefault); + + StorageException se = assertThrows(StorageException.class, channel::close); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("invalid"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_256KiB), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.3

+ * + * Attempting to finalize a session with more bytes than GCS acknowledges. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * persisted_size = 262144
+   *     
client state
+   * write_offset = 524288, finish = true
+   *     
request
+   * BidiWriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 524288, finish_write = true}
+   *     
response
+   * onNext(BidiWriteObjectResponse{ persisted_size = 262144 })
+   *     
+ */ + @Test + public void scenario3() throws Exception { + String uploadId = "uploadId"; + BidiWriteObjectRequest req1 = + BidiWriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_512KiB) + .setFinishWrite(true) + .build(); + BidiWriteObjectResponse resp1 = + BidiWriteObjectResponse.newBuilder().setPersistedSize(_256KiB).build(); + + ImmutableMap, BidiWriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + BidiWriteService service1 = new BidiWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + BidiResumableWrite resumableWrite = getResumableWrite(uploadId); + BidiWriteCtx writeCtx = new BidiWriteCtx<>(resumableWrite); + writeCtx.getTotalSentBytes().set(_512KiB); + writeCtx.getConfirmedBytes().set(_512KiB); + + //noinspection resource + GapicBidiUnbufferedWritableByteChannel channel = + new GapicBidiUnbufferedWritableByteChannel( + storageClient.bidiWriteObjectCallable(), + RetrierWithAlg.attemptOnce(), + done, + CHUNK_SEGMENTER, + writeCtx, + GrpcCallContext::createDefault); + + StorageException se = assertThrows(StorageException.class, channel::close); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_512KiB), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.4

+ * + * Attempting to finalize an already finalized session + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = {name = obj1, size = 262144}
+   *     
client state
+   * write_offset = 262144, finish = true
+   *     
request
+   * BidiWriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 262144, finish_write = true}
+   *     
response
+   * onNext(BidiWriteObjectResponse{ resources = {name = obj, size = 262144 } })
+   *     
+ */ + @Test + public void scenario4() throws Exception { + String uploadId = "uploadId"; + BidiWriteObjectRequest req1 = + BidiWriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_256KiB) + .setFinishWrite(true) + .build(); + BidiWriteObjectResponse resp1 = + BidiWriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("name").setSize(_256KiB).build()) + .build(); + + ImmutableMap, BidiWriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + BidiWriteService service1 = new BidiWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + BidiResumableWrite resumableWrite = getResumableWrite(uploadId); + BidiWriteCtx writeCtx = new BidiWriteCtx<>(resumableWrite); + writeCtx.getTotalSentBytes().set(_256KiB); + writeCtx.getConfirmedBytes().set(_256KiB); + + GapicBidiUnbufferedWritableByteChannel channel = + new GapicBidiUnbufferedWritableByteChannel( + storageClient.bidiWriteObjectCallable(), + RetrierWithAlg.attemptOnce(), + done, + CHUNK_SEGMENTER, + writeCtx, + GrpcCallContext::createDefault); + + channel.close(); + + BidiWriteObjectResponse BidiWriteObjectResponse = done.get(2, TimeUnit.SECONDS); + assertThat(BidiWriteObjectResponse).isEqualTo(resp1); + } + } + + /** + * + * + *

S.4.1

+ * + * Attempting to finalize an already finalized session (ack < expected) + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = {name = obj1, size = 262144}
+   *     
client state
+   * write_offset = 524288, finish = true
+   *     
request
+   * BidiWriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 524288, finish_write = true}
+   *     
response
+   * onNext(BidiWriteObjectResponse{ resources = {name = obj, size = 262144 } })
+   *     
+ */ + @Test + public void scenario4_1() throws Exception { + String uploadId = "uploadId"; + BidiWriteObjectRequest req1 = + BidiWriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_512KiB) + .setFinishWrite(true) + .build(); + BidiWriteObjectResponse resp1 = + BidiWriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("name").setSize(_256KiB).build()) + .build(); + + ImmutableMap, BidiWriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + BidiWriteService service1 = new BidiWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + BidiResumableWrite resumableWrite = getResumableWrite(uploadId); + BidiWriteCtx writeCtx = new BidiWriteCtx<>(resumableWrite); + writeCtx.getTotalSentBytes().set(_512KiB); + writeCtx.getConfirmedBytes().set(_512KiB); + + //noinspection resource + GapicBidiUnbufferedWritableByteChannel channel = + new GapicBidiUnbufferedWritableByteChannel( + storageClient.bidiWriteObjectCallable(), + RetrierWithAlg.attemptOnce(), + done, + CHUNK_SEGMENTER, + writeCtx, + GrpcCallContext::createDefault); + + StorageException se = assertThrows(StorageException.class, channel::close); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_512KiB), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.4.2

+ * + * Attempting to finalize an already finalized session (ack > expected) + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = {name = obj1, size = 786432}
+   *     
client state
+   * write_offset = 524288, finish = true
+   *     
request
+   * BidiWriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 524288, finish_write = true}
+   *     
response
+   * onNext(BidiWriteObjectResponse{ resources = {name = obj, size = 786432 } })
+   *     
+ */ + @Test + public void scenario4_2() throws Exception { + String uploadId = "uploadId"; + BidiWriteObjectRequest req1 = + BidiWriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_512KiB) + .setFinishWrite(true) + .build(); + BidiWriteObjectResponse resp1 = + BidiWriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("name").setSize(_768KiB).build()) + .build(); + + ImmutableMap, BidiWriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + BidiWriteService service1 = new BidiWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + BidiResumableWrite resumableWrite = getResumableWrite(uploadId); + BidiWriteCtx writeCtx = new BidiWriteCtx<>(resumableWrite); + writeCtx.getTotalSentBytes().set(_512KiB); + writeCtx.getConfirmedBytes().set(_512KiB); + + //noinspection resource + GapicBidiUnbufferedWritableByteChannel channel = + new GapicBidiUnbufferedWritableByteChannel( + storageClient.bidiWriteObjectCallable(), + RetrierWithAlg.attemptOnce(), + done, + CHUNK_SEGMENTER, + writeCtx, + GrpcCallContext::createDefault); + + StorageException se = assertThrows(StorageException.class, channel::close); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_512KiB), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.5

+ * + * Attempt to append to a resumable session with an offset higher than GCS expects + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * persisted_size = 0
+   *     
client state
+   * write_offset = 262144, data = [262144:524288]
+   *     
request
+   * BidiWriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 262144, checksummed_data.content.length = 262144}
+   *     
response
+   * onError(Status{code=OUT_OF_RANGE, description="Upload request started at offset '262144', which is past expected offset '0'."})
+   *     
+ */ + @Test + public void scenario5() throws Exception { + String uploadId = "uploadId"; + BidiWriteObjectRequest req1 = + BidiWriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_256KiB) + .setChecksummedData( + ChecksummedData.newBuilder() + .setContent( + ByteString.copyFrom(DataGenerator.base64Characters().genBytes(_256KiB)))) + .setStateLookup(true) + .setFlush(true) + .build(); + StorageImplBase service1 = + new BidiWriteService( + (obs, requests) -> { + if (requests.equals(ImmutableList.of(req1))) { + obs.onError( + TestUtils.apiException( + Code.OUT_OF_RANGE, + "Upload request started at offset '262144', which is past expected offset" + + " '0'.")); + } else { + obs.onError( + TestUtils.apiException(Code.PERMISSION_DENIED, "Unexpected request chain.")); + } + }); + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + BidiResumableWrite resumableWrite = getResumableWrite(uploadId); + BidiWriteCtx writeCtx = new BidiWriteCtx<>(resumableWrite); + writeCtx.getTotalSentBytes().set(_256KiB); + writeCtx.getConfirmedBytes().set(_256KiB); + + //noinspection resource + GapicBidiUnbufferedWritableByteChannel channel = + new GapicBidiUnbufferedWritableByteChannel( + storageClient.bidiWriteObjectCallable(), + RetrierWithAlg.attemptOnce(), + done, + CHUNK_SEGMENTER, + writeCtx, + GrpcCallContext::createDefault); + + ByteBuffer bb = DataGenerator.base64Characters().genByteBuffer(_256KiB); + StorageException se = assertThrows(StorageException.class, () -> channel.write(bb)); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_256KiB), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.7

+ * + * GCS Acknowledges more bytes than were sent in the PUT + * + *

The client believes the server offset is N, it sends K bytes and the server responds that N + * + 2K bytes are now committed. + * + *

The client has detected data loss and should raise an error and prevent sending of more + * bytes. + */ + @Test + public void scenario7() throws Exception { + + String uploadId = "uploadId"; + BidiWriteObjectRequest req1 = + BidiWriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setChecksummedData( + ChecksummedData.newBuilder() + .setContent( + ByteString.copyFrom(DataGenerator.base64Characters().genBytes(_256KiB))) + .build()) + .setStateLookup(true) + .setFlush(true) + .build(); + BidiWriteObjectResponse resp1 = + BidiWriteObjectResponse.newBuilder().setPersistedSize(_512KiB).build(); + + ImmutableMap, BidiWriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + BidiWriteService service1 = new BidiWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + BidiResumableWrite resumableWrite = getResumableWrite(uploadId); + BidiWriteCtx writeCtx = new BidiWriteCtx<>(resumableWrite); + + //noinspection resource + GapicBidiUnbufferedWritableByteChannel channel = + new GapicBidiUnbufferedWritableByteChannel( + storageClient.bidiWriteObjectCallable(), + RetrierWithAlg.attemptOnce(), + done, + CHUNK_SEGMENTER, + writeCtx, + GrpcCallContext::createDefault); + + ByteBuffer buf = DataGenerator.base64Characters().genByteBuffer(_256KiB); + StorageException se = assertThrows(StorageException.class, () -> channel.write(buf)); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(0), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + @Test + public void incremental_success() throws Exception { + String uploadId = "uploadId"; + BidiWriteObjectRequest req1 = + 
BidiWriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setChecksummedData( + ChecksummedData.newBuilder() + .setContent( + ByteString.copyFrom(DataGenerator.base64Characters().genBytes(_256KiB))) + .build()) + .setStateLookup(true) + .setFlush(true) + .build(); + BidiWriteObjectResponse resp1 = + BidiWriteObjectResponse.newBuilder().setPersistedSize(_256KiB).build(); + + ImmutableMap, BidiWriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + BidiWriteService service1 = new BidiWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + BidiResumableWrite resumableWrite = getResumableWrite(uploadId); + BidiWriteCtx writeCtx = new BidiWriteCtx<>(resumableWrite); + + //noinspection resource + GapicBidiUnbufferedWritableByteChannel channel = + new GapicBidiUnbufferedWritableByteChannel( + storageClient.bidiWriteObjectCallable(), + RetrierWithAlg.attemptOnce(), + done, + CHUNK_SEGMENTER, + writeCtx, + GrpcCallContext::createDefault); + + ByteBuffer buf = DataGenerator.base64Characters().genByteBuffer(_256KiB); + int written = channel.write(buf); + assertAll( + () -> assertThat(buf.remaining()).isEqualTo(0), + () -> assertThat(written).isEqualTo(_256KiB), + () -> assertThat(writeCtx.getTotalSentBytes().get()).isEqualTo(_256KiB), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_256KiB)); + } + } + + @Test + public void incremental_partialSuccess() throws Exception { + String uploadId = "uploadId"; + BidiWriteObjectRequest req1 = + BidiWriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setChecksummedData( + ChecksummedData.newBuilder() + .setContent( + ByteString.copyFrom(DataGenerator.base64Characters().genBytes(_512KiB))) + .build()) + .setStateLookup(true) + .setFlush(true) + .build(); 
+ BidiWriteObjectResponse resp1 = + BidiWriteObjectResponse.newBuilder().setPersistedSize(_256KiB).build(); + + ImmutableMap, BidiWriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + BidiWriteService service1 = new BidiWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + BidiResumableWrite resumableWrite = getResumableWrite(uploadId); + BidiWriteCtx writeCtx = new BidiWriteCtx<>(resumableWrite); + + ChunkSegmenter chunkSegmenter = + new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.copy(), _512KiB, _256KiB); + //noinspection resource + GapicBidiUnbufferedWritableByteChannel channel = + new GapicBidiUnbufferedWritableByteChannel( + storageClient.bidiWriteObjectCallable(), + RetrierWithAlg.attemptOnce(), + done, + chunkSegmenter, + writeCtx, + GrpcCallContext::createDefault); + + ByteBuffer buf = DataGenerator.base64Characters().genByteBuffer(_512KiB); + int written = channel.write(buf); + assertAll( + () -> assertThat(buf.remaining()).isEqualTo(_256KiB), + () -> assertThat(written).isEqualTo(_256KiB), + () -> + assertWithMessage("totalSentBytes") + .that(writeCtx.getTotalSentBytes().get()) + .isEqualTo(_256KiB), + () -> + assertWithMessage("confirmedBytes") + .that(writeCtx.getConfirmedBytes().get()) + .isEqualTo(_256KiB)); + } + } + + private static @NonNull BidiResumableWrite getResumableWrite(String uploadId) { + StartResumableWriteRequest req = StartResumableWriteRequest.getDefaultInstance(); + StartResumableWriteResponse resp = + StartResumableWriteResponse.newBuilder().setUploadId(uploadId).build(); + return new BidiResumableWrite( + req, resp, id -> BidiWriteObjectRequest.newBuilder().setUploadId(id).build()); + } + + static class BidiWriteService extends StorageImplBase { + private static final 
Logger LOGGER = LoggerFactory.getLogger(BidiWriteService.class); + private final BiConsumer, List> + c; + + private ImmutableList.Builder requests; + + BidiWriteService( + BiConsumer, List> c) { + this.c = c; + this.requests = new ImmutableList.Builder<>(); + } + + BidiWriteService(ImmutableMap, BidiWriteObjectResponse> writes) { + this( + (obs, build) -> { + if (writes.containsKey(build)) { + obs.onNext(writes.get(build)); + last(build) + .filter(BidiWriteObjectRequest::getFinishWrite) + .ifPresent(ignore -> obs.onCompleted()); + } else { + logUnexpectedRequest(writes.keySet(), build); + obs.onError( + TestUtils.apiException(Code.PERMISSION_DENIED, "Unexpected request chain.")); + } + }); + } + + private static Optional last(List l) { + if (l.isEmpty()) { + return Optional.empty(); + } else { + return Optional.of(l.get(l.size() - 1)); + } + } + + private static void logUnexpectedRequest( + Set> writes, List build) { + Collector joining = Collectors.joining(",\n\t", "[\n\t", "\n]"); + Collector oneLine = Collectors.joining(",", "[", "]"); + String msg = + String.format( + Locale.US, + "Unexpected Request Chain.%nexpected one of: %s%n but was: %s", + writes.stream() + .map(l -> l.stream().map(StorageV2ProtoUtils::fmtProto).collect(oneLine)) + .collect(joining), + build.stream().map(StorageV2ProtoUtils::fmtProto).collect(oneLine)); + LOGGER.warn(msg); + } + + @Override + public StreamObserver bidiWriteObject( + StreamObserver obs) { + return new Adapter() { + @Override + public void onNext(BidiWriteObjectRequest value) { + requests.add(value); + if ((value.getFlush() && value.getStateLookup()) || value.getFinishWrite()) { + ImmutableList build = requests.build(); + c.accept(obs, build); + } + } + + @Override + public void onError(Throwable t) { + requests = new ImmutableList.Builder<>(); + } + + @Override + public void onCompleted() { + requests = new ImmutableList.Builder<>(); + } + }; + } + } + + private abstract static class Adapter extends CallStreamObserver { + 
+ private Adapter() {} + + @Override + public boolean isReady() { + return true; + } + + @Override + public void setOnReadyHandler(Runnable onReadyHandler) {} + + @Override + public void disableAutoInboundFlowControl() {} + + @Override + public void request(int count) {} + + @Override + public void setMessageCompression(boolean enable) {} + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicReadTimeoutTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicReadTimeoutTest.java new file mode 100644 index 000000000000..7f9c186d8ff1 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicReadTimeoutTest.java @@ -0,0 +1,218 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.apiException; +import static com.google.cloud.storage.TestUtils.getChecksummedData; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ServerStream; +import com.google.api.gax.rpc.WatchdogTimeoutException; +import com.google.common.base.Stopwatch; +import com.google.protobuf.ByteString; +import com.google.storage.v2.ContentRange; +import com.google.storage.v2.Object; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; +import com.google.storage.v2.StorageClient; +import com.google.storage.v2.StorageGrpc; +import com.google.storage.v2.StorageSettings; +import io.grpc.Status.Code; +import io.grpc.stub.StreamObserver; +import java.io.IOException; +import java.time.Duration; +import java.util.Iterator; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +/** + * ReadObject leverages gRPC ServerStream to read a stream of ReadObjectResponse messages spanning + * the size of the object. If a large object is downloaded and it would take longer than the + * totalTimeout from {@link StorageOptions#getRetrySettings()}{@link RetrySettings#getTotalTimeout() + * #getTotalTimeout()}, gax will interrupt the stream with a DEADLINE_EXCEEDED error. + * + *

Instead of relying on total stream timeout, we rely on idleTimeout for the stream via {@link + * com.google.api.gax.rpc.ServerStreamingCallSettings.Builder#setIdleTimeoutDuration(Duration)}. + * + *

These tests force specific timeout scenarios to happen against an in-process grpc server to + * ensure our configuration of the StorageClient properly translates to the behavior we want. + * + *

NOTE:Unfortunately, these tests are slow as they are waiting on wall clock time in + * several circumstances. + */ +public final class ITGapicReadTimeoutTest { + + private final String objectName = "name"; + private final Object expectedResult = + Object.newBuilder() + .setName(objectName) + .setGeneration(3L) + .setContentType("application/octet-stream") + .build(); + + @Test + public void readObjectStreamTimeoutIsLongerThanDefaultTotalTimeout() + throws IOException, InterruptedException { + byte[] bytes = DataGenerator.base64Characters().genBytes(40); + ByteString data1 = ByteString.copyFrom(bytes, 0, 20); + ByteString data2 = ByteString.copyFrom(bytes, 20, 20); + + ReadObjectRequest req1 = + ReadObjectRequest.newBuilder().setObject(objectName).setReadOffset(0).build(); + ReadObjectResponse resp1 = + ReadObjectResponse.newBuilder() + .setMetadata(expectedResult) + .setContentRange(ContentRange.newBuilder().setStart(0).build()) + .setChecksummedData(getChecksummedData(data1)) + .build(); + ReadObjectResponse resp2 = + ReadObjectResponse.newBuilder() + .setContentRange(ContentRange.newBuilder().setStart(20).build()) + .setChecksummedData(getChecksummedData(data2)) + .build(); + + int totalTimeoutMillis = 10_000; + int sleepDurationMillis = totalTimeoutMillis + 5_000; + StorageGrpc.StorageImplBase fakeStorage = + new StorageGrpc.StorageImplBase() { + @Override + public void readObject( + ReadObjectRequest request, StreamObserver out) { + if (request.equals(req1)) { + out.onNext(resp1); + out.onNext(resp2); + try { + Thread.sleep(sleepDurationMillis); + } catch (InterruptedException e) { + out.onError(e); + } + out.onCompleted(); + } else { + out.onError(apiException(Code.PERMISSION_DENIED)); + } + } + }; + + try (FakeServer server = FakeServer.of(fakeStorage)) { + StorageSettings settings = + server.getGrpcStorageOptions().toBuilder() + .setRetrySettings( + RetrySettings.newBuilder() + .setMaxAttempts(3) + 
.setTotalTimeoutDuration(Duration.ofMillis(totalTimeoutMillis)) + .build()) + .build() + .getStorageSettings(); + try (StorageClient sc = StorageClient.create(settings)) { + ServerStream ss = sc.readObjectCallable().call(req1); + Iterator iter = ss.iterator(); + + Stopwatch started = Stopwatch.createStarted(); + ReadObjectResponse actualResponse1 = iter.next(); + ReadObjectResponse actualResponse2 = iter.next(); + boolean hasNext = iter.hasNext(); + Stopwatch stop = started.stop(); + // reduce our expectation by 1% to allow for the fact that sleep can sometimes be slightly + // less than the stated amount. + long minimumElapsedTime = sleepDurationMillis - (long) (sleepDurationMillis * 0.01); + assertThat(stop.elapsed(TimeUnit.MILLISECONDS)).isAtLeast(minimumElapsedTime); + assertThat(actualResponse1).isEqualTo(resp1); + assertThat(actualResponse2).isEqualTo(resp2); + assertThat(hasNext).isFalse(); + } + } + } + + @Test + public void ifTheStreamIsIdleItWillBeCanceled() throws IOException, InterruptedException { + byte[] bytes = DataGenerator.base64Characters().genBytes(40); + ReadObjectRequest req1 = + ReadObjectRequest.newBuilder().setObject(objectName).setReadOffset(0).build(); + ReadObjectResponse resp1 = + ReadObjectResponse.newBuilder() + .setMetadata(expectedResult) + .setContentRange(ContentRange.newBuilder().setStart(0).build()) + .setChecksummedData(getChecksummedData(ByteString.copyFrom(bytes, 0, 10))) + .build(); + + ReadObjectResponse resp2 = + ReadObjectResponse.newBuilder() + .setContentRange(ContentRange.newBuilder().setStart(10).build()) + .setChecksummedData(getChecksummedData(ByteString.copyFrom(bytes, 10, 10))) + .build(); + ReadObjectResponse resp3 = + ReadObjectResponse.newBuilder() + .setContentRange(ContentRange.newBuilder().setStart(20).build()) + .setChecksummedData(getChecksummedData(ByteString.copyFrom(bytes, 20, 10))) + .build(); + ReadObjectResponse resp4 = + ReadObjectResponse.newBuilder() + 
.setContentRange(ContentRange.newBuilder().setStart(30).build()) + .setChecksummedData(getChecksummedData(ByteString.copyFrom(bytes, 30, 10))) + .build(); + + int totalTimeoutMillis = 10_000; + int sleepDurationMillis = totalTimeoutMillis + 5_000; + StorageGrpc.StorageImplBase fakeStorage = + new StorageGrpc.StorageImplBase() { + @Override + public void readObject( + ReadObjectRequest request, StreamObserver out) { + if (request.equals(req1)) { + out.onNext(resp1); + out.onNext(resp2); + out.onNext(resp3); + out.onNext(resp4); + out.onCompleted(); + } else { + out.onError(apiException(Code.PERMISSION_DENIED)); + } + } + }; + + try (FakeServer server = FakeServer.of(fakeStorage)) { + StorageSettings settings = + server.getGrpcStorageOptions().toBuilder() + .setRetrySettings( + RetrySettings.newBuilder() + .setMaxAttempts(3) + .setTotalTimeoutDuration(Duration.ofMillis(totalTimeoutMillis)) + .build()) + .build() + .getStorageSettings(); + try (StorageClient sc = StorageClient.create(settings)) { + ServerStream ss = sc.readObjectCallable().call(req1); + Iterator iter = ss.iterator(); + + Stopwatch started = Stopwatch.createStarted(); + assertThrows( + WatchdogTimeoutException.class, + () -> { + iter.next(); + Thread.sleep(20_000); + iter.next(); + iter.next(); + iter.next(); + }); + Stopwatch stop = started.stop(); + assertThat(stop.elapsed(TimeUnit.MILLISECONDS)).isAtLeast(sleepDurationMillis); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedChunkedResumableWritableByteChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedChunkedResumableWritableByteChannelTest.java new file mode 100644 index 000000000000..473e73a9c1e5 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedChunkedResumableWritableByteChannelTest.java @@ -0,0 +1,810 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under 
the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._256KiB; +import static com.google.cloud.storage.ByteSizeConstants._512KiB; +import static com.google.cloud.storage.ByteSizeConstants._768KiB; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.cloud.storage.Hasher.GuavaHasher; +import com.google.cloud.storage.ITGapicUnbufferedWritableByteChannelTest.DirectWriteService; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.ByteString; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.StartResumableWriteRequest; +import com.google.storage.v2.StartResumableWriteResponse; +import com.google.storage.v2.StorageClient; +import com.google.storage.v2.StorageGrpc.StorageImplBase; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import io.grpc.Status.Code; 
+import java.nio.ByteBuffer; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.junit.Test; + +public final class ITGapicUnbufferedChunkedResumableWritableByteChannelTest { + + public static final GuavaHasher HASHER = Hasher.enabled(); + private static final ChunkSegmenter CHUNK_SEGMENTER = + new ChunkSegmenter(HASHER, ByteStringStrategy.copy(), _256KiB, _256KiB); + + /** + * + * + *

S.1

+ * + * Attempting to append to a session which has already been finalized should raise an error + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = { name = obj, size = 524288 }
+   *     
client state
+   * write_offset = 0, data = [0:262144]
+   *     
request
+   * WriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset= 0, checksummed_data.content.length = 262144 }
+   *     
response
+   * onNext(WriteObjectResponse{ resources = {name = obj, size = 524288 } })
+   *     
+ */ + @Test + public void scenario1() throws Exception { + + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setChecksummedData( + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(_256KiB)) + .asChecksummedData()) + .build(); + WriteObjectResponse resp1 = + WriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("obj").setSize(_512KiB).build()) + .build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + ResumableWrite resumableWrite = getResumableWrite(uploadId); + + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + SettableApiFuture done = SettableApiFuture.create(); + //noinspection resource + GapicUnbufferedChunkedResumableWritableByteChannel channel = + new GapicUnbufferedChunkedResumableWritableByteChannel( + done, + CHUNK_SEGMENTER, + storageClient.writeObjectCallable(), + writeCtx, + RetrierWithAlg.attemptOnce(), + GrpcCallContext::createDefault); + + ByteBuffer bb = DataGenerator.base64Characters().genByteBuffer(_256KiB); + StorageException se = assertThrows(StorageException.class, () -> channel.write(bb)); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("invalid"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(0), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.2

+ * + * Attempting to finalize a session with fewer bytes than GCS acknowledges. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * persisted_size = 524288
+   *     
client state
+   * write_offset = 262144, finish = true
+   *     
request
+   * WriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 262144, finish_write = true}
+   *     
response
+   * onNext(WriteObjectResponse{ persisted_size = 524288 })
+   *     
+ */ + @Test + public void scenario2() throws Exception { + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_256KiB) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(0).build()) + .setFinishWrite(true) + .build(); + WriteObjectResponse resp1 = WriteObjectResponse.newBuilder().setPersistedSize(_512KiB).build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + ResumableWrite resumableWrite = getResumableWrite(uploadId); + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + writeCtx.getTotalSentBytes().set(_256KiB); + writeCtx.getConfirmedBytes().set(_256KiB); + + //noinspection resource + GapicUnbufferedChunkedResumableWritableByteChannel channel = + new GapicUnbufferedChunkedResumableWritableByteChannel( + done, + CHUNK_SEGMENTER, + storageClient.writeObjectCallable(), + writeCtx, + RetrierWithAlg.attemptOnce(), + GrpcCallContext::createDefault); + + StorageException se = assertThrows(StorageException.class, channel::close); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("invalid"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_256KiB), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.3

+ * + * Attempting to finalize a session with more bytes than GCS acknowledges. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * persisted_size = 262144
+   *     
client state
+   * write_offset = 524288, finish = true
+   *     
request
+   * WriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 524288, finish_write = true}
+   *     
response
+   * onNext(WriteObjectResponse{ persisted_size = 262144 })
+   *     
+ */ + @Test + public void scenario3() throws Exception { + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_512KiB) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(0).build()) + .setFinishWrite(true) + .build(); + WriteObjectResponse resp1 = WriteObjectResponse.newBuilder().setPersistedSize(_256KiB).build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + ResumableWrite resumableWrite = getResumableWrite(uploadId); + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + writeCtx.getTotalSentBytes().set(_512KiB); + writeCtx.getConfirmedBytes().set(_512KiB); + + //noinspection resource + GapicUnbufferedChunkedResumableWritableByteChannel channel = + new GapicUnbufferedChunkedResumableWritableByteChannel( + done, + CHUNK_SEGMENTER, + storageClient.writeObjectCallable(), + writeCtx, + RetrierWithAlg.attemptOnce(), + GrpcCallContext::createDefault); + + StorageException se = assertThrows(StorageException.class, channel::close); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_512KiB), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.4

+ * + * Attempting to finalize an already finalized session + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = {name = obj1, size = 262144}
+   *     
client state
+   * write_offset = 262144, finish = true
+   *     
request
+   * WriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 262144, finish_write = true}
+   *     
response
+   * onNext(WriteObjectResponse{ resources = {name = obj, size = 262144 } })
+   *     
+ */ + @Test + public void scenario4() throws Exception { + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_256KiB) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(0).build()) + .setFinishWrite(true) + .build(); + WriteObjectResponse resp1 = + WriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("name").setSize(_256KiB).build()) + .build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + ResumableWrite resumableWrite = getResumableWrite(uploadId); + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + writeCtx.getTotalSentBytes().set(_256KiB); + writeCtx.getConfirmedBytes().set(_256KiB); + + GapicUnbufferedChunkedResumableWritableByteChannel channel = + new GapicUnbufferedChunkedResumableWritableByteChannel( + done, + CHUNK_SEGMENTER, + storageClient.writeObjectCallable(), + writeCtx, + RetrierWithAlg.attemptOnce(), + GrpcCallContext::createDefault); + + channel.close(); + + WriteObjectResponse writeObjectResponse = done.get(2, TimeUnit.SECONDS); + assertThat(writeObjectResponse).isEqualTo(resp1); + } + } + + /** + * + * + *

S.4.1

+ * + * Attempting to finalize an already finalized session (ack < expected) + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = {name = obj1, size = 262144}
+   *     
client state
+   * write_offset = 524288, finish = true
+   *     
request
+   * WriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 524288, finish_write = true}
+   *     
response
+   * onNext(WriteObjectResponse{ resources = {name = obj, size = 262144 } })
+   *     
+ */ + @Test + public void scenario4_1() throws Exception { + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_512KiB) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(0).build()) + .setFinishWrite(true) + .build(); + WriteObjectResponse resp1 = + WriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("name").setSize(_256KiB).build()) + .build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + ResumableWrite resumableWrite = getResumableWrite(uploadId); + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + writeCtx.getTotalSentBytes().set(_512KiB); + writeCtx.getConfirmedBytes().set(_512KiB); + + //noinspection resource + GapicUnbufferedChunkedResumableWritableByteChannel channel = + new GapicUnbufferedChunkedResumableWritableByteChannel( + done, + CHUNK_SEGMENTER, + storageClient.writeObjectCallable(), + writeCtx, + RetrierWithAlg.attemptOnce(), + GrpcCallContext::createDefault); + + StorageException se = assertThrows(StorageException.class, channel::close); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_512KiB), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.4.2

+ * + * Attempting to finalize an already finalized session (ack > expected) + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = {name = obj1, size = 786432}
+   *     
client state
+   * write_offset = 524288, finish = true
+   *     
request
+   * WriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 524288, finish_write = true}
+   *     
response
+   * onNext(WriteObjectResponse{ resources = {name = obj, size = 786432 } })
+   *     
+ */ + @Test + public void scenario4_2() throws Exception { + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_512KiB) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(0).build()) + .setFinishWrite(true) + .build(); + WriteObjectResponse resp1 = + WriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("name").setSize(_768KiB).build()) + .build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + ResumableWrite resumableWrite = getResumableWrite(uploadId); + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + writeCtx.getTotalSentBytes().set(_512KiB); + writeCtx.getConfirmedBytes().set(_512KiB); + + //noinspection resource + GapicUnbufferedChunkedResumableWritableByteChannel channel = + new GapicUnbufferedChunkedResumableWritableByteChannel( + done, + CHUNK_SEGMENTER, + storageClient.writeObjectCallable(), + writeCtx, + RetrierWithAlg.attemptOnce(), + GrpcCallContext::createDefault); + + StorageException se = assertThrows(StorageException.class, channel::close); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_512KiB), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.5

+ * + * Attempt to append to a resumable session with an offset higher than GCS expects + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * persisted_size = 0
+   *     
client state
+   * write_offset = 262144, data = [262144:524288]
+   *     
request
+   * WriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 262144, checksummed_data.content.length = 262144}
+   *     
response
+   * onError(Status{code=OUT_OF_RANGE, description="Upload request started at offset '262144', which is past expected offset '0'."})
+   *     
+ */ + @Test + public void scenario5() throws Exception { + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_256KiB) + .setChecksummedData( + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(_256KiB)) + .asChecksummedData()) + .build(); + StorageImplBase service1 = + new DirectWriteService( + (obs, requests) -> { + if (requests.equals(ImmutableList.of(req1))) { + obs.onError( + TestUtils.apiException( + Code.OUT_OF_RANGE, + "Upload request started at offset '262144', which is past expected offset" + + " '0'.")); + } else { + obs.onError( + TestUtils.apiException(Code.PERMISSION_DENIED, "Unexpected request chain.")); + } + }); + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + ResumableWrite resumableWrite = getResumableWrite(uploadId); + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + writeCtx.getTotalSentBytes().set(_256KiB); + writeCtx.getConfirmedBytes().set(_256KiB); + + //noinspection resource + GapicUnbufferedChunkedResumableWritableByteChannel channel = + new GapicUnbufferedChunkedResumableWritableByteChannel( + done, + CHUNK_SEGMENTER, + storageClient.writeObjectCallable(), + writeCtx, + RetrierWithAlg.attemptOnce(), + GrpcCallContext::createDefault); + + ByteBuffer bb = DataGenerator.base64Characters().genByteBuffer(_256KiB); + StorageException se = assertThrows(StorageException.class, () -> channel.write(bb)); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_256KiB), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.7

+ * + * GCS Acknowledges more bytes than were sent in the PUT + * + *

The client believes the server offset is N, it sends K bytes and the server responds that N + * + 2K bytes are now committed. + * + *

The client has detected data loss and should raise an error and prevent sending of more + * bytes. + */ + @Test + public void scenario7() throws Exception { + + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setChecksummedData( + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(_256KiB)) + .asChecksummedData()) + .build(); + WriteObjectResponse resp1 = WriteObjectResponse.newBuilder().setPersistedSize(_512KiB).build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + ResumableWrite resumableWrite = getResumableWrite(uploadId); + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + + //noinspection resource + GapicUnbufferedChunkedResumableWritableByteChannel channel = + new GapicUnbufferedChunkedResumableWritableByteChannel( + done, + CHUNK_SEGMENTER, + storageClient.writeObjectCallable(), + writeCtx, + RetrierWithAlg.attemptOnce(), + GrpcCallContext::createDefault); + + ByteBuffer buf = DataGenerator.base64Characters().genByteBuffer(_256KiB); + StorageException se = assertThrows(StorageException.class, () -> channel.write(buf)); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(0), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + @Test + public void incremental_success() throws Exception { + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setChecksummedData( + 
ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(_256KiB)) + .asChecksummedData()) + .build(); + WriteObjectResponse resp1 = WriteObjectResponse.newBuilder().setPersistedSize(_256KiB).build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + ResumableWrite resumableWrite = getResumableWrite(uploadId); + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + + //noinspection resource + GapicUnbufferedChunkedResumableWritableByteChannel channel = + new GapicUnbufferedChunkedResumableWritableByteChannel( + done, + CHUNK_SEGMENTER, + storageClient.writeObjectCallable(), + writeCtx, + RetrierWithAlg.attemptOnce(), + GrpcCallContext::createDefault); + + ByteBuffer buf = DataGenerator.base64Characters().genByteBuffer(_256KiB); + int written = channel.write(buf); + assertAll( + () -> assertThat(buf.remaining()).isEqualTo(0), + () -> assertThat(written).isEqualTo(_256KiB), + () -> assertThat(writeCtx.getTotalSentBytes().get()).isEqualTo(_256KiB), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_256KiB)); + } + } + + @Test + public void incremental_partialSuccess() throws Exception { + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setChecksummedData( + ChecksummedData.newBuilder() + .setContent( + ByteString.copyFrom(DataGenerator.base64Characters().genBytes(_512KiB))) + .build()) + .build(); + WriteObjectResponse resp1 = WriteObjectResponse.newBuilder().setPersistedSize(_256KiB).build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + 
DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + ResumableWrite resumableWrite = getResumableWrite(uploadId); + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + + ChunkSegmenter chunkSegmenter = + new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.copy(), _512KiB, _256KiB); + //noinspection resource + GapicUnbufferedChunkedResumableWritableByteChannel channel = + new GapicUnbufferedChunkedResumableWritableByteChannel( + done, + chunkSegmenter, + storageClient.writeObjectCallable(), + writeCtx, + RetrierWithAlg.attemptOnce(), + GrpcCallContext::createDefault); + + ByteBuffer buf = DataGenerator.base64Characters().genByteBuffer(_512KiB); + int written = channel.write(buf); + assertAll( + () -> assertThat(buf.remaining()).isEqualTo(_256KiB), + () -> assertThat(written).isEqualTo(_256KiB), + () -> + assertWithMessage("totalSentBytes") + .that(writeCtx.getTotalSentBytes().get()) + .isEqualTo(_256KiB), + () -> + assertWithMessage("confirmedBytes") + .that(writeCtx.getConfirmedBytes().get()) + .isEqualTo(_256KiB)); + } + } + + private static @NonNull ResumableWrite getResumableWrite(String uploadId) { + StartResumableWriteRequest req = StartResumableWriteRequest.getDefaultInstance(); + StartResumableWriteResponse resp = + StartResumableWriteResponse.newBuilder().setUploadId(uploadId).build(); + return new ResumableWrite( + req, resp, id -> WriteObjectRequest.newBuilder().setUploadId(id).build()); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedDirectWritableByteChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedDirectWritableByteChannelTest.java new file mode 100644 
index 000000000000..e8b9cd7d3b11 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedDirectWritableByteChannelTest.java @@ -0,0 +1,176 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._256KiB; +import static com.google.cloud.storage.ByteSizeConstants._512KiB; +import static com.google.cloud.storage.ByteSizeConstants._768KiB; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.SettableApiFuture; +import com.google.cloud.storage.Hasher.GuavaHasher; +import com.google.cloud.storage.ITGapicUnbufferedWritableByteChannelTest.DirectWriteService; +import com.google.cloud.storage.WriteCtx.SimpleWriteObjectRequestBuilderFactory; +import com.google.cloud.storage.WriteCtx.WriteObjectRequestBuilderFactory; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.StorageClient; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public final class 
ITGapicUnbufferedDirectWritableByteChannelTest { + + public static final GuavaHasher HASHER = Hasher.enabled(); + private static final ChunkSegmenter CHUNK_SEGMENTER = + new ChunkSegmenter(HASHER, ByteStringStrategy.copy(), _256KiB, _256KiB); + + /** Attempting to finalize, ack equals expected */ + @Test + public void ack_eq() throws Exception { + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setWriteOffset(_256KiB) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(0).build()) + .setFinishWrite(true) + .build(); + WriteObjectResponse resp1 = + WriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("name").setSize(_256KiB).build()) + .build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + WriteCtx writeCtx = + WriteCtx.of(WriteObjectRequestBuilderFactory.simple(req1), HASHER); + writeCtx.getTotalSentBytes().set(_256KiB); + writeCtx.getConfirmedBytes().set(0); + + GapicUnbufferedDirectWritableByteChannel channel = + new GapicUnbufferedDirectWritableByteChannel( + done, CHUNK_SEGMENTER, storageClient.writeObjectCallable(), writeCtx); + + channel.close(); + + WriteObjectResponse writeObjectResponse = done.get(2, TimeUnit.SECONDS); + assertAll( + () -> assertThat(writeObjectResponse).isEqualTo(resp1), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_256KiB), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** Attempting to finalize, ack < expected */ + @Test + public void ack_lt() throws Exception { + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setWriteOffset(_512KiB) + 
.setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(0).build()) + .setFinishWrite(true) + .build(); + WriteObjectResponse resp1 = + WriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("name").setSize(_256KiB).build()) + .build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + WriteCtx writeCtx = + WriteCtx.of(WriteObjectRequestBuilderFactory.simple(req1), HASHER); + writeCtx.getTotalSentBytes().set(_512KiB); + writeCtx.getConfirmedBytes().set(0); + + //noinspection resource + GapicUnbufferedDirectWritableByteChannel channel = + new GapicUnbufferedDirectWritableByteChannel( + done, CHUNK_SEGMENTER, storageClient.writeObjectCallable(), writeCtx); + + StorageException se = assertThrows(StorageException.class, channel::close); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(0), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** Attempting to finalize, ack > expected */ + @Test + public void ack_gt() throws Exception { + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setWriteOffset(_512KiB) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(0).build()) + .setFinishWrite(true) + .build(); + WriteObjectResponse resp1 = + WriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("name").setSize(_768KiB).build()) + .build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + 
try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + WriteCtx writeCtx = + WriteCtx.of(WriteObjectRequestBuilderFactory.simple(req1), HASHER); + writeCtx.getTotalSentBytes().set(_512KiB); + writeCtx.getConfirmedBytes().set(0); + + //noinspection resource + GapicUnbufferedDirectWritableByteChannel channel = + new GapicUnbufferedDirectWritableByteChannel( + done, CHUNK_SEGMENTER, storageClient.writeObjectCallable(), writeCtx); + + StorageException se = assertThrows(StorageException.class, channel::close); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(0), + () -> assertThat(channel.isOpen()).isFalse()); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedFinalizeOnCloseResumableWritableByteChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedFinalizeOnCloseResumableWritableByteChannelTest.java new file mode 100644 index 000000000000..e9dd60f7fec6 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedFinalizeOnCloseResumableWritableByteChannelTest.java @@ -0,0 +1,333 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._256KiB; +import static com.google.cloud.storage.ByteSizeConstants._512KiB; +import static com.google.cloud.storage.ByteSizeConstants._768KiB; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.SettableApiFuture; +import com.google.cloud.storage.Hasher.GuavaHasher; +import com.google.cloud.storage.ITGapicUnbufferedWritableByteChannelTest.DirectWriteService; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.StartResumableWriteRequest; +import com.google.storage.v2.StartResumableWriteResponse; +import com.google.storage.v2.StorageClient; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.junit.Test; + +public final class ITGapicUnbufferedFinalizeOnCloseResumableWritableByteChannelTest { + + public static final GuavaHasher HASHER = Hasher.enabled(); + private static final ChunkSegmenter CHUNK_SEGMENTER = + new ChunkSegmenter(HASHER, ByteStringStrategy.copy(), _256KiB, _256KiB); + + @Test + public void incrementalResponseForFinalizingRequest() throws Exception { + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_512KiB) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(0).build()) + .setFinishWrite(true) + .build(); + WriteObjectResponse resp1 = 
WriteObjectResponse.newBuilder().setPersistedSize(_256KiB).build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + ResumableWrite resumableWrite = getResumableWrite(uploadId); + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + writeCtx.getTotalSentBytes().set(_512KiB); + writeCtx.getConfirmedBytes().set(0); + + //noinspection resource + GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel channel = + new GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel( + done, CHUNK_SEGMENTER, storageClient.writeObjectCallable(), writeCtx); + + StorageException se = assertThrows(StorageException.class, channel::close); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("invalid"), + () -> assertThat(writeCtx.getTotalSentBytes().get()).isEqualTo(_512KiB), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(0), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.4

+ * + * Attempting to finalize an already finalized session + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = {name = obj1, size = 262144}
+   *     
client state
+   * write_offset = 262144, finish = true
+   *     
request
+   * WriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 262144, finish_write = true}
+   *     
response
+   * onNext(WriteObjectResponse{ resources = {name = obj, size = 262144 } })
+   *     
+ */ + @Test + public void scenario4() throws Exception { + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_256KiB) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(0).build()) + .setFinishWrite(true) + .build(); + WriteObjectResponse resp1 = + WriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("name").setSize(_256KiB).build()) + .build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + ResumableWrite resumableWrite = getResumableWrite(uploadId); + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + writeCtx.getTotalSentBytes().set(_256KiB); + writeCtx.getConfirmedBytes().set(0); + + GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel channel = + new GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel( + done, CHUNK_SEGMENTER, storageClient.writeObjectCallable(), writeCtx); + + channel.close(); + + WriteObjectResponse writeObjectResponse = done.get(2, TimeUnit.SECONDS); + assertThat(writeObjectResponse).isEqualTo(resp1); + assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(_256KiB); + } + } + + /** + * + * + *

S.4.1

+ * + * Attempting to finalize an already finalized session (ack < expected) + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = {name = obj1, size = 262144}
+   *     
client state
+   * write_offset = 524288, finish = true
+   *     
request
+   * WriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 524288, finish_write = true}
+   *     
response
+   * onNext(WriteObjectResponse{ resources = {name = obj, size = 262144 } })
+   *     
+ */ + @Test + public void scenario4_1() throws Exception { + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_512KiB) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(0).build()) + .setFinishWrite(true) + .build(); + WriteObjectResponse resp1 = + WriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("name").setSize(_256KiB).build()) + .build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + ResumableWrite resumableWrite = getResumableWrite(uploadId); + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + writeCtx.getTotalSentBytes().set(_512KiB); + writeCtx.getConfirmedBytes().set(0); + + //noinspection resource + GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel channel = + new GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel( + done, CHUNK_SEGMENTER, storageClient.writeObjectCallable(), writeCtx); + + StorageException se = assertThrows(StorageException.class, channel::close); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(0), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + /** + * + * + *

S.4.2

+ * + * Attempting to finalize an already finalized session (ack > expected) + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = {name = obj1, size = 786432}
+   *     
client state
+   * write_offset = 524288, finish = true
+   *     
request
+   * WriteObjectRequest{ upload_id = $UPLOAD_ID, write_offset = 524288, finish_write = true}
+   *     
response
+   * onNext(WriteObjectResponse{ resources = {name = obj, size = 786432 } })
+   *     
+ */ + @Test + public void scenario4_2() throws Exception { + String uploadId = "uploadId"; + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(_512KiB) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(0).build()) + .setFinishWrite(true) + .build(); + WriteObjectResponse resp1 = + WriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("name").setSize(_768KiB).build()) + .build(); + + ImmutableMap, WriteObjectResponse> map = + ImmutableMap.of(ImmutableList.of(req1), resp1); + DirectWriteService service1 = new DirectWriteService(map); + + try (FakeServer fakeServer = FakeServer.of(service1); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageClient storageClient = storage.storageClient; + + SettableApiFuture done = SettableApiFuture.create(); + ResumableWrite resumableWrite = getResumableWrite(uploadId); + WriteCtx writeCtx = WriteCtx.of(resumableWrite, HASHER); + writeCtx.getTotalSentBytes().set(_512KiB); + writeCtx.getConfirmedBytes().set(0); + + //noinspection resource + GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel channel = + new GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel( + done, CHUNK_SEGMENTER, storageClient.writeObjectCallable(), writeCtx); + + StorageException se = assertThrows(StorageException.class, channel::close); + assertAll( + () -> assertThat(se.getCode()).isEqualTo(0), + () -> assertThat(se.getReason()).isEqualTo("dataLoss"), + () -> assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(0), + () -> assertThat(channel.isOpen()).isFalse()); + } + } + + private static @NonNull ResumableWrite getResumableWrite(String uploadId) { + StartResumableWriteRequest req = StartResumableWriteRequest.getDefaultInstance(); + StartResumableWriteResponse resp = + StartResumableWriteResponse.newBuilder().setUploadId(uploadId).build(); + return new ResumableWrite( + req, resp, id -> 
WriteObjectRequest.newBuilder().setUploadId(id).build()); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedReadableByteChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedReadableByteChannelTest.java new file mode 100644 index 000000000000..1e1c05915ab5 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedReadableByteChannelTest.java @@ -0,0 +1,342 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.apiException; +import static com.google.cloud.storage.TestUtils.getChecksummedData; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFutures; +import com.google.api.gax.retrying.BasicResultRetryAlgorithm; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.DataLossException; +import com.google.cloud.storage.ChannelSession.UnbufferedReadSession; +import com.google.cloud.storage.GrpcUtils.ZeroCopyServerStreamingCallable; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import com.google.common.collect.ImmutableList; +import com.google.common.hash.Hashing; +import com.google.protobuf.ByteString; +import com.google.storage.v2.ContentRange; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; +import com.google.storage.v2.StorageClient; +import com.google.storage.v2.StorageGrpc; +import io.grpc.Status.Code; +import io.grpc.stub.StreamObserver; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Test; + +public final class ITGapicUnbufferedReadableByteChannelTest { + private final byte[] bytes = DataGenerator.base64Characters().genBytes(40); + private final ByteString data1 = ByteString.copyFrom(bytes, 0, 10); + private final ByteString data2 = ByteString.copyFrom(bytes, 
10, 10); + private final ByteString data3 = ByteString.copyFrom(bytes, 20, 10); + private final ByteString data4 = ByteString.copyFrom(bytes, 30, 10); + + private final String objectName = "name"; + private final Object expectedResult = + Object.newBuilder() + .setName(objectName) + .setGeneration(3L) + .setContentType("application/octet-stream") + .build(); + + private final ReadObjectRequest req1 = + ReadObjectRequest.newBuilder().setObject(objectName).setReadOffset(0).build(); + private final ReadObjectRequest req2 = + req1.toBuilder().setGeneration(3L).setReadOffset(20).build(); + private final ReadObjectResponse resp1 = + ReadObjectResponse.newBuilder() + .setMetadata(expectedResult) + .setContentRange(ContentRange.newBuilder().setStart(0).build()) + .setObjectChecksums( + ObjectChecksums.newBuilder().setCrc32C(Hashing.crc32c().hashBytes(bytes).asInt())) + .setChecksummedData(getChecksummedData(data1, Hasher.enabled())) + .build(); + private final ReadObjectResponse resp2 = + ReadObjectResponse.newBuilder() + .setChecksummedData(getChecksummedData(data2, Hasher.enabled())) + .build(); + private final ReadObjectResponse resp3 = + ReadObjectResponse.newBuilder() + .setMetadata(expectedResult) + .setContentRange(ContentRange.newBuilder().setStart(20).build()) + .setObjectChecksums( + ObjectChecksums.newBuilder().setCrc32C(Hashing.crc32c().hashBytes(bytes).asInt())) + .setChecksummedData(getChecksummedData(data3, Hasher.enabled())) + .build(); + private final ReadObjectResponse resp4 = + ReadObjectResponse.newBuilder() + .setChecksummedData(getChecksummedData(data4, Hasher.enabled())) + .build(); + + /** Define a Storage service that will always return an error during the first readObject */ + private final StorageGrpc.StorageImplBase fakeStorage = + new StorageGrpc.StorageImplBase() { + @Override + public void readObject( + ReadObjectRequest request, StreamObserver responseObserver) { + if (request.equals(req1)) { + responseObserver.onNext(resp1); + 
responseObserver.onNext(resp2); + responseObserver.onError(apiException(Code.DATA_LOSS)); + } else if (request.equals(req2)) { + responseObserver.onNext(resp3); + responseObserver.onNext(resp4); + responseObserver.onCompleted(); + } else { + responseObserver.onError(apiException(Code.PERMISSION_DENIED)); + } + } + }; + + @Test + public void readRetriesAreProperlyOrdered_readLargerThanMessageSize() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + try (FakeServer server = FakeServer.of(fakeStorage); + StorageClient storageClient = StorageClient.create(server.storageSettings())) { + Retrier retrier = TestUtils.retrierFromStorageOptions(server.getGrpcStorageOptions()); + + UnbufferedReadableByteChannelSession session = + new UnbufferedReadSession<>( + ApiFutures.immediateFuture(req1), + (start, resultFuture) -> + new GapicUnbufferedReadableByteChannel( + resultFuture, + new ZeroCopyServerStreamingCallable<>( + storageClient.readObjectCallable(), + ResponseContentLifecycleManager.noop()), + start, + Hasher.noop(), + retrier, + retryOnly(DataLossException.class))); + byte[] actualBytes = new byte[40]; + try (UnbufferedReadableByteChannel c = session.open()) { + c.read(ByteBuffer.wrap(actualBytes)); + } + Object actualResult = session.getResult().get(1000, TimeUnit.MILLISECONDS); + assertThat(actualResult).isEqualTo(expectedResult); + assertThat(actualBytes).isEqualTo(bytes); + } + } + + @Test + public void readRetriesAreProperlyOrdered_readSmallerThanMessageSize() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + try (FakeServer server = FakeServer.of(fakeStorage); + StorageClient storageClient = StorageClient.create(server.storageSettings())) { + Retrier retrier = TestUtils.retrierFromStorageOptions(server.getGrpcStorageOptions()); + + UnbufferedReadableByteChannelSession session = + new UnbufferedReadSession<>( + ApiFutures.immediateFuture(req1), + (start, resultFuture) -> + new 
GapicUnbufferedReadableByteChannel( + resultFuture, + new ZeroCopyServerStreamingCallable<>( + storageClient.readObjectCallable(), + ResponseContentLifecycleManager.noop()), + start, + Hasher.noop(), + retrier, + retryOnly(DataLossException.class))); + byte[] actualBytes = new byte[40]; + ImmutableList buffers = TestUtils.subDivide(actualBytes, 2); + try (UnbufferedReadableByteChannel c = session.open()) { + for (ByteBuffer buf : buffers) { + c.read(buf); + } + } + Object actualResult = session.getResult().get(1000, TimeUnit.MILLISECONDS); + assertThat(actualResult).isEqualTo(expectedResult); + assertThat(actualBytes).isEqualTo(bytes); + } + } + + @Test + public void ioException_if_generation_changes() throws IOException, InterruptedException { + StorageGrpc.StorageImplBase fakeStorage = + new StorageGrpc.StorageImplBase() { + + final AtomicInteger invocationCount = new AtomicInteger(0); + + @Override + public void readObject( + ReadObjectRequest request, StreamObserver responseObserver) { + int count = invocationCount.getAndIncrement(); + if (request.equals(req1)) { + if (count == 0) { + responseObserver.onNext(resp1); + responseObserver.onNext(resp2); + responseObserver.onError(apiException(Code.DATA_LOSS)); + } + } else if (request.equals(req2)) { + ReadObjectResponse.Builder builder = resp3.toBuilder(); + // increment the generation, as if it had been updated between initial read and retry + builder.getMetadataBuilder().setGeneration(expectedResult.getGeneration() + 1); + responseObserver.onNext(builder.build()); + responseObserver.onNext(resp4); + responseObserver.onCompleted(); + } else { + responseObserver.onError(apiException(Code.PERMISSION_DENIED)); + } + } + }; + + try (FakeServer server = FakeServer.of(fakeStorage); + StorageClient storageClient = StorageClient.create(server.storageSettings())) { + Retrier retrier = TestUtils.retrierFromStorageOptions(server.getGrpcStorageOptions()); + + UnbufferedReadableByteChannelSession session = + new 
UnbufferedReadSession<>( + ApiFutures.immediateFuture(req1), + (start, resultFuture) -> + new GapicUnbufferedReadableByteChannel( + resultFuture, + new ZeroCopyServerStreamingCallable<>( + storageClient.readObjectCallable(), + ResponseContentLifecycleManager.noop()), + start, + Hasher.noop(), + retrier, + retryOnly(DataLossException.class))); + byte[] actualBytes = new byte[40]; + try (UnbufferedReadableByteChannel c = session.open()) { + IOException ioException = + assertThrows(IOException.class, () -> c.read(ByteBuffer.wrap(actualBytes))); + + assertThat(ioException).hasMessageThat().containsMatch(".*Generation.*3.*4.*"); + } + } + } + + @Test + public void ifCrc32cMismatchIndividualMessage_restartFromCorrectOffset() + throws IOException, InterruptedException { + StorageGrpc.StorageImplBase fakeStorage = + new StorageGrpc.StorageImplBase() { + @Override + public void readObject( + ReadObjectRequest request, StreamObserver responseObserver) { + if (request.equals(req1)) { + responseObserver.onNext(resp1); + responseObserver.onNext(resp2); + ReadObjectResponse.Builder b = resp3.toBuilder(); + // set a bad checksum value + b.getChecksummedDataBuilder().setCrc32C(1); + responseObserver.onNext(b.build()); + } else if (request.equals(req2)) { + responseObserver.onNext(resp3); + responseObserver.onNext(resp4); + responseObserver.onCompleted(); + } else { + responseObserver.onError(apiException(Code.PERMISSION_DENIED)); + } + } + }; + try (FakeServer server = FakeServer.of(fakeStorage); + StorageClient storageClient = StorageClient.create(server.storageSettings())) { + Retrier retrier = TestUtils.retrierFromStorageOptions(server.getGrpcStorageOptions()); + + UnbufferedReadableByteChannelSession session = + new UnbufferedReadSession<>( + ApiFutures.immediateFuture(req1), + (start, resultFuture) -> + new GapicUnbufferedReadableByteChannel( + resultFuture, + new ZeroCopyServerStreamingCallable<>( + storageClient.readObjectCallable(), + 
ResponseContentLifecycleManager.noop()), + start, + Hasher.enabled(), + retrier, + retryOnly(DataLossException.class))); + byte[] actualBytes = new byte[40]; + try (UnbufferedReadableByteChannel c = session.open()) { + int read = c.read(ByteBuffer.wrap(actualBytes)); + + assertThat(read).isEqualTo(40); + assertThat(xxd(actualBytes)).isEqualTo(xxd(bytes)); + } + } + } + + @Test + public void overRead_handledProperly() throws IOException, InterruptedException { + StorageGrpc.StorageImplBase fakeStorage = + new StorageGrpc.StorageImplBase() { + @Override + public void readObject( + ReadObjectRequest request, StreamObserver responseObserver) { + responseObserver.onNext(resp1); + responseObserver.onNext(resp2); + responseObserver.onNext(resp3); + responseObserver.onNext(resp4); + responseObserver.onCompleted(); + } + }; + try (FakeServer server = FakeServer.of(fakeStorage); + StorageClient storageClient = StorageClient.create(server.storageSettings())) { + Retrier retrier = TestUtils.retrierFromStorageOptions(server.getGrpcStorageOptions()); + + UnbufferedReadableByteChannelSession session = + new UnbufferedReadSession<>( + ApiFutures.immediateFuture(req1), + (start, resultFuture) -> + new GapicUnbufferedReadableByteChannel( + resultFuture, + new ZeroCopyServerStreamingCallable<>( + storageClient.readObjectCallable(), + ResponseContentLifecycleManager.noop()), + start, + Hasher.noop(), + retrier, + retryOnly(DataLossException.class))); + byte[] actualBytes = new byte[41]; + //noinspection resource + UnbufferedReadableByteChannel c = session.open(); + ByteBuffer buf = ByteBuffer.wrap(actualBytes); + int read1 = c.read(buf); + assertThat(read1).isAtLeast(1); + int read2 = c.read(buf); + assertThat(read2).isEqualTo(-1); + assertThrows(ClosedChannelException.class, () -> c.read(buf)); + } + } + + private static ResultRetryAlgorithm retryOnly(Class c) { + return new BasicResultRetryAlgorithm() { + @Override + public boolean shouldRetry(Throwable previousThrowable, 
java.lang.Object previousResponse) { + return previousThrowable instanceof StorageException + && c.isAssignableFrom(previousThrowable.getCause().getClass()); + } + }; + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedWritableByteChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedWritableByteChannelTest.java new file mode 100644 index 000000000000..3aa567a4d6a0 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGapicUnbufferedWritableByteChannelTest.java @@ -0,0 +1,462 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.apiException; +import static com.google.cloud.storage.TestUtils.getChecksummedData; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.PermissionDeniedException; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.cloud.storage.WriteCtx.SimpleWriteObjectRequestBuilderFactory; +import com.google.cloud.storage.WriteCtx.WriteObjectRequestBuilderFactory; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.protobuf.ByteString; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.StartResumableWriteRequest; +import com.google.storage.v2.StartResumableWriteResponse; +import com.google.storage.v2.StorageClient; +import com.google.storage.v2.StorageGrpc.StorageImplBase; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import com.google.storage.v2.WriteObjectSpec; +import io.grpc.Status.Code; +import io.grpc.stub.CallStreamObserver; +import io.grpc.stub.StreamObserver; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiConsumer; +import java.util.stream.Collector; +import java.util.stream.Collectors; +import org.junit.Test; 
+import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public final class ITGapicUnbufferedWritableByteChannelTest { + private static final Logger LOGGER = + LoggerFactory.getLogger(ITGapicUnbufferedWritableByteChannelTest.class); + + private static final Hasher HASHER = Hasher.enabled(); + private static final ChunkSegmenter segmenter = + new ChunkSegmenter(HASHER, ByteStringStrategy.copy(), 10, 5); + + private static final String uploadId = "upload-id"; + + private static final Object obj = Object.newBuilder().setBucket("buck").setName("obj").build(); + private static final WriteObjectSpec spec = WriteObjectSpec.newBuilder().setResource(obj).build(); + + private static final StartResumableWriteRequest startReq = + StartResumableWriteRequest.newBuilder().setWriteObjectSpec(spec).build(); + private static final StartResumableWriteResponse startResp = + StartResumableWriteResponse.newBuilder().setUploadId(uploadId).build(); + + private static final byte[] bytes = DataGenerator.base64Characters().genBytes(40); + private static final WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setChecksummedData(getChecksummedData(ByteString.copyFrom(bytes, 0, 10), HASHER)) + .build(); + private static final WriteObjectRequest req2 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(10) + .setChecksummedData(getChecksummedData(ByteString.copyFrom(bytes, 10, 10), HASHER)) + .build(); + private static final WriteObjectRequest req3 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(20) + .setChecksummedData(getChecksummedData(ByteString.copyFrom(bytes, 20, 10), HASHER)) + .build(); + private static final WriteObjectRequest req4 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(30) + .setChecksummedData(getChecksummedData(ByteString.copyFrom(bytes, 30, 10), HASHER)) + .build(); + private static final WriteObjectRequest req5 = + 
WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(40) + .setFinishWrite(true) + .setObjectChecksums( + ObjectChecksums.newBuilder() + .setCrc32C(HASHER.hash(ByteBuffer.wrap(bytes)).getValue()) + .build()) + .build(); + + private static final WriteObjectResponse resp1 = + WriteObjectResponse.newBuilder().setPersistedSize(10).build(); + private static final WriteObjectResponse resp2 = + WriteObjectResponse.newBuilder().setPersistedSize(20).build(); + private static final WriteObjectResponse resp3 = + WriteObjectResponse.newBuilder().setPersistedSize(30).build(); + private static final WriteObjectResponse resp4 = + WriteObjectResponse.newBuilder().setPersistedSize(40).build(); + private static final WriteObjectResponse resp5 = + WriteObjectResponse.newBuilder().setResource(obj.toBuilder().setSize(40)).build(); + + private static final ResumableWrite reqFactory = + new ResumableWrite(startReq, startResp, TestUtils.onlyUploadId()); + + @Test + public void directUpload() throws IOException, InterruptedException, ExecutionException { + + byte[] bytes = DataGenerator.base64Characters().genBytes(40); + WriteObjectRequest req1 = + ITGapicUnbufferedWritableByteChannelTest.req1.toBuilder() + .clearUploadId() + .setWriteObjectSpec(spec) + .build(); + WriteObjectRequest req2 = + ITGapicUnbufferedWritableByteChannelTest.req2.toBuilder().clearUploadId().build(); + WriteObjectRequest req3 = + ITGapicUnbufferedWritableByteChannelTest.req3.toBuilder().clearUploadId().build(); + WriteObjectRequest req4 = + ITGapicUnbufferedWritableByteChannelTest.req4.toBuilder().clearUploadId().build(); + WriteObjectRequest req5 = + ITGapicUnbufferedWritableByteChannelTest.req5.toBuilder().clearUploadId().build(); + + WriteObjectResponse resp = resp5; + + WriteObjectRequest base = WriteObjectRequest.newBuilder().setWriteObjectSpec(spec).build(); + SimpleWriteObjectRequestBuilderFactory reqFactory = + WriteObjectRequestBuilderFactory.simple(base); + + StorageImplBase service = 
+ new DirectWriteService( + ImmutableMap.of(ImmutableList.of(req1, req2, req3, req4, req5), resp)); + try (FakeServer fake = FakeServer.of(service); + StorageClient sc = + PackagePrivateMethodWorkarounds.maybeGetStorageClient( + fake.getGrpcStorageOptions().getService())) { + assertThat(sc).isNotNull(); + SettableApiFuture result = SettableApiFuture.create(); + try (GapicUnbufferedDirectWritableByteChannel c = + new GapicUnbufferedDirectWritableByteChannel( + result, segmenter, sc.writeObjectCallable(), WriteCtx.of(reqFactory, HASHER))) { + c.write(ByteBuffer.wrap(bytes)); + } + assertThat(result.get()).isEqualTo(resp); + } + } + + @Test + public void resumableUpload() throws IOException, InterruptedException, ExecutionException { + ImmutableMap, WriteObjectResponse> writes = + ImmutableMap., WriteObjectResponse>builder() + .put(ImmutableList.of(req1), resp1) + .put(ImmutableList.of(req2), resp2) + .put(ImmutableList.of(req3), resp3) + .put(ImmutableList.of(req4), resp4) + .put(ImmutableList.of(req5), resp5) + .build(); + StorageImplBase service = new DirectWriteService(writes); + try (FakeServer fake = FakeServer.of(service); + StorageClient sc = StorageClient.create(fake.storageSettings())) { + SettableApiFuture result = SettableApiFuture.create(); + GapicUnbufferedChunkedResumableWritableByteChannel c = + new GapicUnbufferedChunkedResumableWritableByteChannel( + result, + segmenter, + sc.writeObjectCallable(), + WriteCtx.of(reqFactory, HASHER), + RetrierWithAlg.attemptOnce(), + Retrying::newCallContext); + ArrayList debugMessages = new ArrayList<>(); + try { + ImmutableList buffers = TestUtils.subDivide(bytes, 10); + for (ByteBuffer buf : buffers) { + debugMessages.add(String.format(Locale.US, "Writing buffer. buf = %s", buf)); + int written = c.write(buf); + debugMessages.add(String.format(Locale.US, "Wrote bytes. 
written = %2d", written)); + } + // explicitly only close on success so we can trap the original error that maybe have + // happened before we reach here. + // Realistically, calling close here isn't strictly necessary because once we leave the + // try block for FakeServer the server will shut down. + c.close(); + } catch (PermissionDeniedException ignore) { + for (String debugMessage : debugMessages) { + LOGGER.warn(debugMessage); + } + } + assertThat(result.get()).isEqualTo(resp5); + } + } + + @Test + public void startResumableUpload_deadlineExceeded_isRetried() + throws IOException, InterruptedException, ExecutionException, TimeoutException { + + String uploadId = UUID.randomUUID().toString(); + AtomicInteger callCount = new AtomicInteger(0); + StorageImplBase service = + new StorageImplBase() { + @Override + public void startResumableWrite( + StartResumableWriteRequest req, StreamObserver respond) { + if (callCount.getAndIncrement() > 0) { + respond.onNext( + StartResumableWriteResponse.newBuilder().setUploadId(uploadId).build()); + respond.onCompleted(); + } + } + }; + try (FakeServer fake = FakeServer.of(service)) { + GrpcStorageImpl gsi = (GrpcStorageImpl) fake.getGrpcStorageOptions().getService(); + ApiFuture f = + gsi.startResumableWrite( + GrpcCallContext.createDefault(), + WriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource( + Object.newBuilder().setBucket("bucket").setName("name").build()) + .setIfGenerationMatch(0) + .build()) + .build(), + Opts.empty()); + + ResumableWrite resumableWrite = f.get(2, TimeUnit.MINUTES); + assertThat(callCount.get()).isEqualTo(2); + assertThat(resumableWrite.newBuilder().build().getUploadId()).isEqualTo(uploadId); + } + } + + @Test + public void resumableUpload_chunkAutomaticRetry() + throws IOException, InterruptedException, ExecutionException { + AtomicBoolean req2SendErr = new AtomicBoolean(true); + AtomicBoolean req4SendErr = new AtomicBoolean(true); + AtomicInteger 
writeCount = new AtomicInteger(0); + StorageImplBase service = + new DirectWriteService( + (obs, requests) -> { + writeCount.getAndIncrement(); + if (requests.equals(ImmutableList.of(req1))) { + obs.onNext(resp1); + obs.onCompleted(); + } else if (requests.equals(ImmutableList.of(req2))) { + obs.onNext(resp2); + if (req2SendErr.get()) { + req2SendErr.set(false); + obs.onError(apiException(Code.DATA_LOSS)); + } else { + obs.onCompleted(); + } + } else if (requests.equals(ImmutableList.of(req3))) { + obs.onNext(resp3); + obs.onCompleted(); + } else if (requests.equals(ImmutableList.of(req4))) { + obs.onNext(resp4); + if (req4SendErr.get()) { + req4SendErr.set(false); + obs.onError(apiException(Code.DATA_LOSS)); + } else { + obs.onCompleted(); + } + } else if (requests.equals(ImmutableList.of(req5))) { + obs.onNext(resp5); + obs.onCompleted(); + } else { + DirectWriteService.logUnexpectedRequest( + ImmutableSet.of( + ImmutableList.of(req1), + ImmutableList.of(req2), + ImmutableList.of(req3), + ImmutableList.of(req4), + ImmutableList.of(req5)), + requests); + obs.onError( + TestUtils.apiException(Code.PERMISSION_DENIED, "Unexpected request chain.")); + } + }); + WriteCtx writeCtx; + try (FakeServer fake = FakeServer.of(service); + StorageClient sc = StorageClient.create(fake.storageSettings())) { + SettableApiFuture result = SettableApiFuture.create(); + try (GapicUnbufferedChunkedResumableWritableByteChannel c = + new GapicUnbufferedChunkedResumableWritableByteChannel( + result, + segmenter, + sc.writeObjectCallable(), + WriteCtx.of(reqFactory, HASHER), + TestUtils.retrierFromStorageOptions(fake.getGrpcStorageOptions()) + .withAlg(Retrying.alwaysRetry()), + Retrying::newCallContext)) { + writeCtx = c.getWriteCtx(); + ImmutableList buffers = TestUtils.subDivide(bytes, 10); + c.write(buffers.get(0)); + assertThat(writeCtx.getTotalSentBytes().get()).isEqualTo(10); + assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(10); + c.write(buffers.get(1)); + 
assertThat(writeCtx.getTotalSentBytes().get()).isEqualTo(20); + assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(20); + c.write(buffers.get(2)); + assertThat(writeCtx.getTotalSentBytes().get()).isEqualTo(30); + assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(30); + c.write(buffers.get(3)); + assertThat(writeCtx.getTotalSentBytes().get()).isEqualTo(40); + assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(40); + } + assertThat(result.get()).isEqualTo(resp5); + } + assertThat(req2SendErr.get()).isFalse(); + assertThat(req4SendErr.get()).isFalse(); + assertThat(writeCount.get()).isEqualTo(7); + + assertThat(writeCtx.getTotalSentBytes().get()).isEqualTo(40); + assertThat(writeCtx.getConfirmedBytes().get()).isEqualTo(40); + } + + @Test + public void resumableUpload_finalizeWhenWriteAndCloseCalledEvenWhenQuantumAligned() + throws IOException, InterruptedException, ExecutionException { + ImmutableMap, WriteObjectResponse> writes = + ImmutableMap., WriteObjectResponse>builder() + .put( + ImmutableList.of( + req1, + req2.toBuilder().clearUploadId().build(), + req3.toBuilder().clearUploadId().build(), + req4.toBuilder().clearUploadId().build(), + req5.toBuilder().clearUploadId().build()), + resp5) + .build(); + StorageImplBase service = new DirectWriteService(writes); + try (FakeServer fake = FakeServer.of(service); + StorageClient sc = StorageClient.create(fake.storageSettings())) { + SettableApiFuture result = SettableApiFuture.create(); + GapicUnbufferedChunkedResumableWritableByteChannel c = + new GapicUnbufferedChunkedResumableWritableByteChannel( + result, + segmenter, + sc.writeObjectCallable(), + WriteCtx.of(reqFactory, HASHER), + RetrierWithAlg.attemptOnce(), + Retrying::newCallContext); + try { + int written = c.writeAndClose(ByteBuffer.wrap(bytes)); + assertThat(written).isEqualTo(bytes.length); + } catch (PermissionDeniedException ignore) { + } + assertThat(result.get()).isEqualTo(resp5); + } + } + + static class DirectWriteService extends 
StorageImplBase { + private static final Logger LOGGER = LoggerFactory.getLogger(DirectWriteService.class); + private final BiConsumer, List> c; + + private ImmutableList.Builder requests; + + DirectWriteService( + BiConsumer, List> c) { + this.c = c; + this.requests = new ImmutableList.Builder<>(); + } + + DirectWriteService(ImmutableMap, WriteObjectResponse> writes) { + this( + (obs, build) -> { + if (writes.containsKey(build)) { + obs.onNext(writes.get(build)); + obs.onCompleted(); + } else { + logUnexpectedRequest(writes.keySet(), build); + obs.onError( + TestUtils.apiException(Code.PERMISSION_DENIED, "Unexpected request chain.")); + } + }); + } + + private static void logUnexpectedRequest( + Set> writes, List build) { + Collector joining = Collectors.joining(",\n\t", "[\n\t", "\n]"); + Collector oneLine = Collectors.joining(",", "[", "]"); + String msg = + String.format( + Locale.US, + "Unexpected Request Chain.%nexpected one of: %s%n but was: %s", + writes.stream() + .map(l -> l.stream().map(StorageV2ProtoUtils::fmtProto).collect(oneLine)) + .collect(joining), + build.stream().map(StorageV2ProtoUtils::fmtProto).collect(oneLine)); + LOGGER.warn(msg); + } + + @Override + public StreamObserver writeObject(StreamObserver obs) { + return new Adapter() { + @Override + public void onNext(WriteObjectRequest value) { + requests.add(value); + } + + @Override + public void onError(Throwable t) {} + + @Override + public void onCompleted() { + ImmutableList build = requests.build(); + c.accept(obs, build); + requests = new ImmutableList.Builder<>(); + } + }; + } + } + + private abstract static class Adapter extends CallStreamObserver { + + private Adapter() {} + + @Override + public boolean isReady() { + return true; + } + + @Override + public void setOnReadyHandler(Runnable onReadyHandler) {} + + @Override + public void disableAutoInboundFlowControl() {} + + @Override + public void request(int count) {} + + @Override + public void setMessageCompression(boolean enable) {} 
+ } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGrpcMetricsTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGrpcMetricsTest.java new file mode 100644 index 000000000000..e71b9077dc4a --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGrpcMetricsTest.java @@ -0,0 +1,103 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.contrib.gcp.resource.GCPResourceProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +public class ITGrpcMetricsTest { + @Test + public void testGrpcMetrics() { + GrpcStorageOptions grpcStorageOptions = StorageOptions.grpc().build(); + assertThat( + OpenTelemetryBootstrappingUtils.getCloudMonitoringEndpoint( + "storage.googleapis.com:443", "storage.googleapis.com")) + .isEqualTo("monitoring.googleapis.com:443"); + + GCPResourceProvider resourceProvider = new GCPResourceProvider(); + Attributes detectedAttributes = resourceProvider.getAttributes(); + SdkMeterProvider provider = + OpenTelemetryBootstrappingUtils.createMeterProvider( + "monitoring.googleapis.com:443", + grpcStorageOptions.getProjectId(), + detectedAttributes, + false); + + /* + * SDKMeterProvider doesn't expose the relevant fields we want to test, but they are present in + * the String representation, so we'll check that instead. Most of the resources are auto-set, + * and will depend on environment, which could cause flakes to check. 
We're only responsible for + * setting the project ID, endpoint, and Histogram boundaries, so we'll just check those + */ + String result = provider.toString(); + + // What the project ID will be will depend on the environment, so we just make sure it's present + // and not null/empty + assertThat(result).doesNotContain("project_id=\"\""); + assertThat(result).doesNotContain("project_id=null"); + assertThat(result).contains("project_id"); + assertThat(result).contains("host_id"); + assertThat(result).contains("cloud_platform"); + assertThat(result).contains("location"); + assertThat(result).contains("instance_id"); + assertThat(result).contains("gcp.resource_type"); + assertThat(result).contains("api"); + + // This is the check for the Seconds histogram boundary. We can't practically check for every + // boundary, + // but if *any* are present, that means they're different from the results and we successfully + // set them + assertThat(result).contains("1.2"); + + // This is the check for the Size boundary + assertThat(result).contains("131072"); + } + + @Test + public void testGrpcMetrics_universeDomain() { + assertThat("monitoring.my-universe-domain.com:443") + .isEqualTo( + OpenTelemetryBootstrappingUtils.getCloudMonitoringEndpoint( + "storage.my-universe-domain.com:443", "my-universe-domain.com")); + } + + @Test + public void testGrpcMetrics_private() { + assertThat("private.googleapis.com:443") + .isEqualTo( + OpenTelemetryBootstrappingUtils.getCloudMonitoringEndpoint( + "private.googleapis.com:443", null)); + } + + @Test + public void testGrpcMetrics_restricted() { + assertThat("restricted.googleapis.com:443") + .isEqualTo( + OpenTelemetryBootstrappingUtils.getCloudMonitoringEndpoint( + "restricted.googleapis.com:443", null)); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGzipReadableByteChannelTest.java 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGzipReadableByteChannelTest.java new file mode 100644 index 000000000000..2bf6f9ea47ae --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGzipReadableByteChannelTest.java @@ -0,0 +1,370 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.getChecksummedData; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.ReadChannel; +import com.google.cloud.storage.GrpcUtils.ZeroCopyServerStreamingCallable; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import com.google.common.io.ByteStreams; +import com.google.protobuf.ByteString; +import com.google.storage.v2.Object; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; +import com.google.storage.v2.StorageClient; +import com.google.storage.v2.StorageGrpc; +import io.grpc.Status; +import io.grpc.stub.StreamObserver; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.security.SecureRandom; +import java.util.concurrent.ExecutionException; +import 
org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.runners.Enclosed; +import org.junit.runner.RunWith; + +@RunWith(Enclosed.class) +public class ITGzipReadableByteChannelTest { + + @SuppressWarnings("PointlessArithmeticExpression") + private static final int _1KiB = 1 * 1024; + + private static final int _2KiB = 2 * 1024; + private static final int _3KiB = _1KiB + _2KiB; + private static final SecureRandom rand = new SecureRandom(); + + private static final byte[] dataUncompressed = DataGenerator.rand(rand).genBytes(_3KiB); + private static final byte[] dataCompressed = TestUtils.gzipBytes(dataUncompressed); + private static final ByteString contentUncompressed1 = + ByteString.copyFrom(dataUncompressed, 0, _2KiB); + private static final ByteString contentUncompressed2 = + ByteString.copyFrom(dataUncompressed, _2KiB, _1KiB); + private static final ByteString contentCompressed1 = + ByteString.copyFrom(dataCompressed, 0, _2KiB); + private static final ByteString contentCompressed2 = + ByteString.copyFrom(dataCompressed, _2KiB, dataCompressed.length - _2KiB); + private static final ReadObjectRequest reqUncompressed = + ReadObjectRequest.newBuilder() + .setBucket("projects/_/buckets/buck") + .setObject("obj-uncompressed") + .build(); + private static final ReadObjectRequest reqCompressed = + ReadObjectRequest.newBuilder() + .setBucket("projects/_/buckets/buck") + .setObject("obj-compressed") + .build(); + + private static final ReadObjectResponse respUncompressed1 = + ReadObjectResponse.newBuilder() + .setMetadata(Object.newBuilder().setContentEncoding("identity").build()) + .setChecksummedData(getChecksummedData(contentUncompressed1)) + .build(); + private static final ReadObjectResponse respUncompressed2 = + ReadObjectResponse.newBuilder() + .setChecksummedData(getChecksummedData(contentUncompressed2)) + .build(); + + private static final ReadObjectResponse respCompressed1 = + ReadObjectResponse.newBuilder() + 
.setMetadata(Object.newBuilder().setContentEncoding("gzip").build()) + .setChecksummedData(getChecksummedData(contentCompressed1)) + .build(); + private static final ReadObjectResponse respCompressed2 = + ReadObjectResponse.newBuilder() + .setChecksummedData(getChecksummedData(contentCompressed2)) + .build(); + + public static final class Uncompressed { + private static final StorageGrpc.StorageImplBase fakeStorage = + new StorageGrpc.StorageImplBase() { + @Override + public void readObject( + ReadObjectRequest request, StreamObserver responseObserver) { + if (request.equals(reqUncompressed)) { + responseObserver.onNext(respUncompressed1); + responseObserver.onNext(respUncompressed2); + responseObserver.onCompleted(); + } else { + responseObserver.onError(TestUtils.apiException(Status.Code.UNIMPLEMENTED)); + } + } + }; + + @ClassRule(order = 1) + public static final AutoClosableFixture fakeServer = + AutoClosableFixture.of(() -> FakeServer.of(fakeStorage)); + + @ClassRule(order = 2) + public static final AutoClosableFixture storageClient = + AutoClosableFixture.of( + () -> StorageClient.create(fakeServer.getInstance().storageSettings())); + + @Test + public void autoGzipDecompress_true() throws IOException { + UnbufferedReadableByteChannelSession session = + ResumableMedia.gapic() + .read() + .byteChannel( + new ZeroCopyServerStreamingCallable<>( + storageClient.getInstance().readObjectCallable(), + ResponseContentLifecycleManager.noop()), + TestUtils.retrierFromStorageOptions( + fakeServer.getInstance().getGrpcStorageOptions()), + StorageRetryStrategy.getDefaultStorageRetryStrategy().getIdempotentHandler()) + .setHasher(Hasher.noop()) + .setAutoGzipDecompression(true) + .unbuffered() + .setReadObjectRequest(reqUncompressed) + .build(); + + byte[] actualBytes = new byte[dataUncompressed.length]; + try (UnbufferedReadableByteChannel c = session.open()) { + c.read(ByteBuffer.wrap(actualBytes)); + } + assertThat(actualBytes).isEqualTo(dataUncompressed); + } + + @Test 
+ public void autoGzipDecompress_false() throws IOException { + UnbufferedReadableByteChannelSession session = + ResumableMedia.gapic() + .read() + .byteChannel( + new ZeroCopyServerStreamingCallable<>( + storageClient.getInstance().readObjectCallable(), + ResponseContentLifecycleManager.noop()), + TestUtils.retrierFromStorageOptions( + fakeServer.getInstance().getGrpcStorageOptions()), + StorageRetryStrategy.getDefaultStorageRetryStrategy().getIdempotentHandler()) + .setHasher(Hasher.noop()) + .setAutoGzipDecompression(false) + .unbuffered() + .setReadObjectRequest(reqUncompressed) + .build(); + + byte[] actualBytes = new byte[dataUncompressed.length]; + try (UnbufferedReadableByteChannel c = session.open()) { + c.read(ByteBuffer.wrap(actualBytes)); + } + assertThat(actualBytes).isEqualTo(dataUncompressed); + } + } + + public static final class Compressed { + + private static final StorageGrpc.StorageImplBase fakeStorage = + new StorageGrpc.StorageImplBase() { + @Override + public void readObject( + ReadObjectRequest request, StreamObserver responseObserver) { + if (request.equals(reqCompressed)) { + responseObserver.onNext(respCompressed1); + responseObserver.onNext(respCompressed2); + responseObserver.onCompleted(); + } else { + responseObserver.onError(TestUtils.apiException(Status.Code.UNIMPLEMENTED)); + } + } + }; + + @ClassRule(order = 1) + public static final AutoClosableFixture fakeServer = + AutoClosableFixture.of(() -> FakeServer.of(fakeStorage)); + + @ClassRule(order = 2) + public static final AutoClosableFixture storageClient = + AutoClosableFixture.of( + () -> StorageClient.create(fakeServer.getInstance().storageSettings())); + + @ClassRule(order = 3) + public static final AutoClosableFixture storageFixture = + AutoClosableFixture.of(() -> fakeServer.getInstance().getGrpcStorageOptions().getService()); + + @Test + public void autoGzipDecompress_true() throws IOException { + UnbufferedReadableByteChannelSession session = + ResumableMedia.gapic() + 
.read() + .byteChannel( + new ZeroCopyServerStreamingCallable<>( + storageClient.getInstance().readObjectCallable(), + ResponseContentLifecycleManager.noop()), + TestUtils.retrierFromStorageOptions( + fakeServer.getInstance().getGrpcStorageOptions()), + StorageRetryStrategy.getDefaultStorageRetryStrategy().getIdempotentHandler()) + .setHasher(Hasher.noop()) + .setAutoGzipDecompression(true) + .unbuffered() + .setReadObjectRequest(reqCompressed) + .build(); + + byte[] actualBytes = new byte[dataUncompressed.length]; + try (UnbufferedReadableByteChannel c = session.open()) { + c.read(ByteBuffer.wrap(actualBytes)); + } + assertThat(actualBytes).isEqualTo(dataUncompressed); + } + + @Test + public void autoGzipDecompress_false() throws IOException { + UnbufferedReadableByteChannelSession session = + ResumableMedia.gapic() + .read() + .byteChannel( + new ZeroCopyServerStreamingCallable<>( + storageClient.getInstance().readObjectCallable(), + ResponseContentLifecycleManager.noop()), + TestUtils.retrierFromStorageOptions( + fakeServer.getInstance().getGrpcStorageOptions()), + StorageRetryStrategy.getDefaultStorageRetryStrategy().getIdempotentHandler()) + .setHasher(Hasher.noop()) + .setAutoGzipDecompression(false) + .unbuffered() + .setReadObjectRequest(reqCompressed) + .build(); + + byte[] actualBytes = new byte[dataCompressed.length]; + try (UnbufferedReadableByteChannel c = session.open()) { + c.read(ByteBuffer.wrap(actualBytes)); + } + assertThat(actualBytes).isEqualTo(dataCompressed); + } + + @Test + public void autoGzipDecompress_default_disabled() throws IOException { + UnbufferedReadableByteChannelSession session = + ResumableMedia.gapic() + .read() + .byteChannel( + new ZeroCopyServerStreamingCallable<>( + storageClient.getInstance().readObjectCallable(), + ResponseContentLifecycleManager.noop()), + TestUtils.retrierFromStorageOptions( + fakeServer.getInstance().getGrpcStorageOptions()), + 
StorageRetryStrategy.getDefaultStorageRetryStrategy().getIdempotentHandler()) + .setHasher(Hasher.noop()) + .unbuffered() + .setReadObjectRequest(reqCompressed) + .build(); + + byte[] actualBytes = new byte[dataCompressed.length]; + try (UnbufferedReadableByteChannel c = session.open()) { + c.read(ByteBuffer.wrap(actualBytes)); + } + assertThat(actualBytes).isEqualTo(dataCompressed); + } + + @Test + public void storage_readAllBytes_defaultCompressed() { + Storage s = storageFixture.getInstance(); + byte[] actual = s.readAllBytes(BlobId.of("buck", "obj-compressed")); + assertThat(actual).isEqualTo(dataCompressed); + } + + @Test + public void storage_readAllBytes_returnRawInputStream_true() { + Storage s = storageFixture.getInstance(); + byte[] actual = + s.readAllBytes( + BlobId.of("buck", "obj-compressed"), + BlobSourceOption.shouldReturnRawInputStream(true)); + assertThat(actual).isEqualTo(dataCompressed); + } + + @Test + public void storage_reader_defaultCompressed() throws Exception { + Storage s = storageFixture.getInstance(); + byte[] actual = new byte[dataCompressed.length]; + try (ReadChannel c = s.reader(BlobId.of("buck", "obj-compressed"))) { + c.read(ByteBuffer.wrap(actual)); + } + assertThat(actual).isEqualTo(dataCompressed); + } + + @Test + public void storage_reader_returnRawInputStream_true() throws Exception { + Storage s = storageFixture.getInstance(); + byte[] actual = new byte[dataCompressed.length]; + try (ReadChannel c = + s.reader( + BlobId.of("buck", "obj-compressed"), + BlobSourceOption.shouldReturnRawInputStream(true))) { + c.read(ByteBuffer.wrap(actual)); + } + assertThat(actual).isEqualTo(dataCompressed); + } + } + + public static final class Behavior { + + @Test + public void properlyTracksEOF() throws IOException, InterruptedException, ExecutionException { + final StorageGrpc.StorageImplBase fakeStorage = + new StorageGrpc.StorageImplBase() { + int count = 0; + + @Override + public void readObject( + ReadObjectRequest request, 
StreamObserver responseObserver) { + if (count++ == 0) { + responseObserver.onNext( + ReadObjectResponse.newBuilder() + .setMetadata(Object.newBuilder().setSize(1).build()) + .setChecksummedData(getChecksummedData(ByteString.copyFromUtf8("a"))) + .build()); + responseObserver.onCompleted(); + } else { + responseObserver.onError(TestUtils.apiException(Status.Code.UNIMPLEMENTED)); + } + } + }; + + try (FakeServer fakeServer = FakeServer.of(fakeStorage); + StorageClient sc = StorageClient.create(fakeServer.storageSettings())) { + ReadableByteChannelSession session = + ResumableMedia.gapic() + .read() + .byteChannel( + new ZeroCopyServerStreamingCallable<>( + sc.readObjectCallable(), ResponseContentLifecycleManager.noop()), + TestUtils.retrierFromStorageOptions(fakeServer.getGrpcStorageOptions()), + StorageRetryStrategy.getDefaultStorageRetryStrategy().getIdempotentHandler()) + .setHasher(Hasher.noop()) + .setAutoGzipDecompression(true) + .unbuffered() + .setReadObjectRequest(reqUncompressed) + .build(); + + byte[] expected = new byte[] {(byte) 'a'}; + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (ReadableByteChannel c = session.open()) { + ByteStreams.copy(c, Channels.newChannel(baos)); + } + byte[] actual = baos.toByteArray(); + assertThat(actual).isEqualTo(expected); + assertThat(session.getResult().get()).isNotNull(); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITJsonResumableSessionPutTaskTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITJsonResumableSessionPutTaskTest.java new file mode 100644 index 000000000000..5401a1df0ed6 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITJsonResumableSessionPutTaskTest.java @@ -0,0 +1,882 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._128KiBL; +import static com.google.cloud.storage.ByteSizeConstants._256KiB; +import static com.google.cloud.storage.ByteSizeConstants._256KiBL; +import static com.google.cloud.storage.ByteSizeConstants._512KiBL; +import static com.google.cloud.storage.ByteSizeConstants._768KiBL; +import static com.google.common.truth.Truth.assertThat; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaderNames.CONTENT_RANGE; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpResponseStatus.OK; +import static org.junit.Assert.assertThrows; + +import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.api.client.json.JsonObjectParser; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.FakeHttpServer.HttpRequestHandler; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.ParallelFriendly; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.common.collect.ImmutableMap; +import io.grpc.netty.shaded.io.netty.buffer.ByteBuf; +import io.grpc.netty.shaded.io.netty.buffer.Unpooled; +import 
io.grpc.netty.shaded.io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.grpc.netty.shaded.io.netty.handler.codec.http.FullHttpResponse; +import io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaderNames; +import io.grpc.netty.shaded.io.netty.handler.codec.http.HttpResponseStatus; +import java.math.BigInteger; +import java.net.URI; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicLong; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +@ParallelFriendly +public final class ITJsonResumableSessionPutTaskTest { + private static final GsonFactory gson = GsonFactory.getDefaultInstance(); + private static final NetHttpTransport transport = new NetHttpTransport.Builder().build(); + private static final HttpResponseStatus RESUME_INCOMPLETE = + HttpResponseStatus.valueOf(308, "Resume Incomplete"); + private static final HttpResponseStatus APPEND_GREATER_THAN_CURRENT_SIZE = + HttpResponseStatus.valueOf(503, ""); + private HttpClientContext httpClientContext; + + @Rule public final TemporaryFolder temp = new TemporaryFolder(); + + @Before + public void setUp() throws Exception { + httpClientContext = + HttpClientContext.of(transport.createRequestFactory(), new JsonObjectParser(gson)); + } + + @Test + public void emptyObjectHappyPath() throws Exception { + + HttpRequestHandler handler = + req -> { + StorageObject so = new StorageObject(); + so.setName("object-name").setSize(BigInteger.ZERO); + ByteBuf buf = Unpooled.wrappedBuffer(gson.toByteArray(so)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, 
"application/json; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + httpClientContext, + jsonResumableWrite(uri), + RewindableContent.empty(), + HttpContentRange.of(ByteRangeSpec.explicitClosed(0L, 0L), 0)); + + ResumableOperationResult<@Nullable StorageObject> operationResult = task.call(); + StorageObject object = operationResult.getObject(); + assertThat(object).isNotNull(); + assertThat(operationResult.getPersistedSize()).isEqualTo(0L); + } + } + + /** + * + * + *

S.7

+ * + * GCS Acknowledges more bytes than were sent in the PUT + * + *

The client believes the server offset is N, it sends K bytes and the server responds that N + * + 2K bytes are now committed. + * + *

The client has detected data loss and should raise an error and prevent sending of more + * bytes. + */ + @Test + public void scenario7() throws Exception { + + HttpRequestHandler handler = + req -> { + String contentRangeString = req.headers().get(CONTENT_RANGE); + HttpContentRange parse = HttpContentRange.parse(contentRangeString); + long endInclusive = ((HttpContentRange.HasRange) parse).range().endOffsetInclusive(); + FullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + ByteRangeSpec range = ByteRangeSpec.explicitClosed(0L, endInclusive + 1); + resp.headers().set(HttpHeaderNames.RANGE, range.getHttpRangeHeader()); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + AtomicLong confirmedBytes = new AtomicLong(-1L); + + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + httpClientContext, + jsonResumableWrite(uri), + RewindableContent.empty(), + HttpContentRange.of(ByteRangeSpec.explicitClosed(0L, 10L))); + + StorageException se = assertThrows(StorageException.class, task::call); + assertThat(se.getCode()).isEqualTo(0); + assertThat(se.getReason()).isEqualTo("dataLoss"); + assertThat(confirmedBytes.get()).isEqualTo(-1L); + } + } + + /** + * + * + *

S.1

+ * + * Attempting to append to a session which has already been finalized should raise an error + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = { name = obj, persisted_size = 524288 }
+   *     
client state
+   * write_offset = 0, data = [0:262144]
+   *     
request
+   * PUT $UPLOAD_ID
+   * Content-Range: bytes 0-262143/*
+   *     
response
+   * 200 OK
+   * Content-Type: application/json; charset=utf-8
+   *
+   * {"name": "obj", "size": 524288}
+   *     
+ */ + @Test + public void scenario1() throws Exception { + HttpRequestHandler handler = + req -> { + StorageObject so = new StorageObject(); + URI uri = URI.create(req.uri()); + so.setName("object") + .setBucket("bucket") + .setGeneration(1L) + .setMetageneration(1L) + .setSize(BigInteger.valueOf(_512KiBL)) + .setMetadata(ImmutableMap.of("upload_id", uri.toString())); + + ByteBuf buf = Unpooled.wrappedBuffer(gson.toByteArray(so)); + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/json; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler); + TmpFile tmpFile = + DataGenerator.base64Characters().tempFile(temp.newFolder().toPath(), _256KiBL)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + AtomicLong confirmedBytes = new AtomicLong(-1L); + + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + httpClientContext, + jsonResumableWrite(uri), + RewindableContent.of(tmpFile.getPath()), + HttpContentRange.of(ByteRangeSpec.explicit(0L, _256KiBL))); + + StorageException se = assertThrows(StorageException.class, task::call); + assertThat(se.getCode()).isEqualTo(0); + assertThat(se.getReason()).isEqualTo("invalid"); + assertThat(confirmedBytes.get()).isEqualTo(-1L); + } + } + + /** + * + * + *

S.2

+ * + * Attempting to finalize a session with fewer bytes than GCS acknowledges. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * persisted_size = 524288
+   *     
client state
+   * write_offset = 262144, finish = true
+   *     
request
+   * PUT $UPLOAD_ID
+   * Content-Range: bytes */262144
+   *     
response
+   * 308 Resume Incomplete
+   * Range: bytes=0-524287
+   *     
+ */ + @Test + public void scenario2() throws Exception { + + HttpRequestHandler handler = + req -> { + FullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + ByteRangeSpec range = ByteRangeSpec.explicit(0L, _512KiBL); + resp.headers().set(HttpHeaderNames.RANGE, range.getHttpRangeHeader()); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + AtomicLong confirmedBytes = new AtomicLong(-1L); + + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + httpClientContext, + jsonResumableWrite(uri), + RewindableContent.empty(), + HttpContentRange.of(_256KiBL)); + + StorageException se = assertThrows(StorageException.class, task::call); + assertThat(se.getCode()).isEqualTo(0); + assertThat(se.getReason()).isEqualTo("invalid"); + assertThat(confirmedBytes.get()).isEqualTo(-1L); + } + } + + /** + * + * + *

S.3

+ * + * Attempting to finalize a session with more bytes than GCS acknowledges. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * persisted_size = 262144
+   *     
client state
+   * write_offset = 524288, finish = true
+   *     
request
+   * PUT $UPLOAD_ID
+   * Content-Range: bytes */524288
+   *     
response
+   * 308 Resume Incomplete
+   * Range: bytes=0-262143
+   *     
+ */ + @Test + public void scenario3() throws Exception { + + HttpRequestHandler handler = + req -> { + FullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + ByteRangeSpec range = ByteRangeSpec.explicit(0L, _256KiBL); + resp.headers().set(HttpHeaderNames.RANGE, range.getHttpRangeHeader()); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + AtomicLong confirmedBytes = new AtomicLong(-1L); + + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + httpClientContext, + jsonResumableWrite(uri), + RewindableContent.empty(), + HttpContentRange.of(_512KiBL)); + + StorageException se = assertThrows(StorageException.class, task::call); + assertThat(se.getCode()).isEqualTo(0); + assertThat(se.getReason()).isEqualTo("dataLoss"); + assertThat(confirmedBytes.get()).isEqualTo(-1L); + } + } + + /** + * + * + *

S.4

+ * + * Attempting to finalize an already finalized session + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = {name = obj1, size = 262144}
+   *     
client state
+   * write_offset = 262144, finish = true
+   *     
request
+   * PUT $UPLOAD_ID
+   * Content-Range: bytes */262144
+   *     
response
+   * 200 Ok
+   * Content-Type: application/json; charset=utf-8
+   *
+   * {"name": "obj", "size": 262114}
+   *     
+ */ + @Test + public void scenario4() throws Exception { + + HttpRequestHandler handler = + req -> { + StorageObject so = new StorageObject(); + URI uri = URI.create(req.uri()); + so.setName("object") + .setBucket("bucket") + .setGeneration(1L) + .setMetageneration(1L) + .setSize(BigInteger.valueOf(_256KiBL)) + .setMetadata(ImmutableMap.of("upload_id", uri.toString())); + + ByteBuf buf = Unpooled.wrappedBuffer(gson.toByteArray(so)); + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/json; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + httpClientContext, + jsonResumableWrite(uri), + RewindableContent.empty(), + HttpContentRange.of(_256KiBL)); + + ResumableOperationResult<@Nullable StorageObject> operationResult = task.call(); + StorageObject call = operationResult.getObject(); + assertThat(call).isNotNull(); + assertThat(call.getMetadata()).containsEntry("upload_id", uri.getPath()); + assertThat(operationResult.getPersistedSize()).isEqualTo(_256KiBL); + } + } + + /** + * + * + *

S.4.1

+ * + * Attempting to finalize an already finalized session (ack < expected) + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = {name = obj1, size = 262144}
+   *     
client state
+   * write_offset = 524288, finish = true
+   *     
request
+   * PUT $UPLOAD_ID
+   * Content-Range: bytes */524288
+   *     
response
+   * 200 Ok
+   * Content-Type: application/json; charset=utf-8
+   *
+   * {"name": "obj", "size": 262114}
+   *     
+ */ + @Test + public void scenario4_1() throws Exception { + + HttpRequestHandler handler = + req -> { + StorageObject so = new StorageObject(); + URI uri = URI.create(req.uri()); + so.setName("object") + .setBucket("bucket") + .setGeneration(1L) + .setMetageneration(1L) + .setSize(BigInteger.valueOf(_256KiBL)) + .setMetadata(ImmutableMap.of("upload_id", uri.toString())); + + ByteBuf buf = Unpooled.wrappedBuffer(gson.toByteArray(so)); + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/json; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + AtomicLong confirmedBytes = new AtomicLong(-1L); + + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + httpClientContext, + jsonResumableWrite(uri), + RewindableContent.empty(), + HttpContentRange.of(_512KiBL)); + + StorageException se = assertThrows(StorageException.class, task::call); + assertThat(se.getCode()).isEqualTo(0); + assertThat(se.getReason()).isEqualTo("dataLoss"); + assertThat(confirmedBytes.get()).isEqualTo(-1); + } + } + + /** + * + * + *

S.4.2

+ * + * Attempting to finalize an already finalized session (ack > expected) + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * resource = {name = obj1, size = 262144}
+   *     
client state
+   * write_offset = 131072, finish = true
+   *     
request
+   * PUT $UPLOAD_ID
+   * Content-Range: bytes */131072
+   *     
response
+   * 200 Ok
+   * Content-Type: application/json; charset=utf-8
+   *
+   * {"name": "obj", "size": 262114}
+   *     
+ */ + @Test + public void scenario4_2() throws Exception { + + HttpRequestHandler handler = + req -> { + StorageObject so = new StorageObject(); + URI uri = URI.create(req.uri()); + so.setName("object") + .setBucket("bucket") + .setGeneration(1L) + .setMetageneration(1L) + .setSize(BigInteger.valueOf(_256KiBL)) + .setMetadata(ImmutableMap.of("upload_id", uri.toString())); + + ByteBuf buf = Unpooled.wrappedBuffer(gson.toByteArray(so)); + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/json; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + AtomicLong confirmedBytes = new AtomicLong(-1L); + + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + httpClientContext, + jsonResumableWrite(uri), + RewindableContent.empty(), + HttpContentRange.of(_128KiBL)); + + StorageException se = assertThrows(StorageException.class, task::call); + assertThat(se.getCode()).isEqualTo(0); + assertThat(se.getReason()).isEqualTo("dataLoss"); + assertThat(confirmedBytes.get()).isEqualTo(-1); + } + } + + /** + * + * + *

S.5

+ * + * Attempt to append to a resumable session with an offset higher than GCS expects + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
server state
+   * persisted_size = 262144
+   *     
client state
+   * write_offset = 524288, data = [524288:786432]
+   *     
request
+   * PUT $UPLOAD_ID
+   * Content-Range: bytes 524288-786431/*
+   *     
response
+   * 503
+   * Content-Type: text/plain; charset=utf-8
+   *
+   * Invalid request. According to the Content-Range header, the upload offset is 524288 byte(s), which exceeds already uploaded size of 262144 byte(s).
+   *     
+ */ + @Test + public void scenario5() throws Exception { + + HttpRequestHandler handler = + req -> { + // error message from GCS circa 2023-02 + ByteBuf buf = + Unpooled.wrappedBuffer( + "Invalid request. According to the Content-Range header, the upload offset is 524288 byte(s), which exceeds already uploaded size of 262144 byte(s)." + .getBytes(StandardCharsets.UTF_8)); + FullHttpResponse resp = + new DefaultFullHttpResponse( + req.protocolVersion(), APPEND_GREATER_THAN_CURRENT_SIZE, buf); + resp.headers().set(CONTENT_TYPE, "text/plain; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler); + TmpFile tmpFile = + DataGenerator.base64Characters().tempFile(temp.newFolder().toPath(), _256KiBL)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + AtomicLong confirmedBytes = new AtomicLong(-1L); + + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + httpClientContext, + jsonResumableWrite(uri), + RewindableContent.of(tmpFile.getPath()), + HttpContentRange.of(ByteRangeSpec.explicit(_512KiBL, _768KiBL))); + + StorageException se = assertThrows(StorageException.class, task::call); + assertThat(se.getCode()).isEqualTo(0); + assertThat(se.getReason()).isEqualTo("dataLoss"); + assertThat(confirmedBytes.get()).isEqualTo(-1); + } + } + + @Test + public void _503_emptyBody() throws Exception { + HttpRequestHandler handler = + req -> { + FullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), APPEND_GREATER_THAN_CURRENT_SIZE); + resp.headers().set(CONTENT_TYPE, "text/plain; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler); + TmpFile tmpFile = + DataGenerator.base64Characters().tempFile(temp.newFolder().toPath(), _256KiBL)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + 
AtomicLong confirmedBytes = new AtomicLong(-1L); + + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + httpClientContext, + jsonResumableWrite(uri), + RewindableContent.of(tmpFile.getPath()), + HttpContentRange.of(ByteRangeSpec.explicit(_512KiBL, _768KiBL))); + + StorageException se = assertThrows(StorageException.class, task::call); + assertThat(se.getCode()).isEqualTo(503); + assertThat(confirmedBytes.get()).isEqualTo(-1); + } + } + + @Test + public void jsonParseFailure() throws Exception { + + HttpRequestHandler handler = + req -> { + StorageObject so = new StorageObject(); + URI uri = URI.create(req.uri()); + so.setName("object") + .setBucket("bucket") + .setGeneration(1L) + .setMetageneration(1L) + .setSize(BigInteger.ZERO) + .setMetadata(ImmutableMap.of("upload_id", uri.toString())); + + byte[] bytes = gson.toByteArray(so); + ByteBuf buf = Unpooled.wrappedBuffer(bytes, 0, bytes.length / 2); + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/json; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + AtomicLong confirmedBytes = new AtomicLong(-1L); + + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + httpClientContext, + jsonResumableWrite(uri), + RewindableContent.empty(), + HttpContentRange.of(0)); + + StorageException se = assertThrows(StorageException.class, task::call); + // the parse error happens while trying to read the success object, make sure we raise it as + // a client side retryable exception + assertThat(se.getCode()).isEqualTo(0); + assertThat(se.getReason()).isEqualTo(null); + // Finalization was successful, but we can't confirm the number of bytes due to the parse + // error + assertThat(confirmedBytes.get()).isEqualTo(-1); + + 
ResultRetryAlgorithm idempotentHandler = + StorageRetryStrategy.getDefaultStorageRetryStrategy().getIdempotentHandler(); + boolean shouldRetry = idempotentHandler.shouldRetry(se, null); + assertThat(shouldRetry).isTrue(); + } + } + + @Test + public void jsonDeserializationOnlyAttemptedWhenContentPresent() throws Exception { + + HttpRequestHandler handler = + req -> { + DefaultFullHttpResponse resp = new DefaultFullHttpResponse(req.protocolVersion(), OK); + resp.headers().set(CONTENT_TYPE, "text/html; charset=UTF-8"); + resp.headers().set("x-goog-stored-content-length", "0"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + httpClientContext, + jsonResumableWrite(uri), + RewindableContent.empty(), + HttpContentRange.of(0)); + + ResumableOperationResult<@Nullable StorageObject> operationResult = task.call(); + StorageObject call = operationResult.getObject(); + assertThat(call).isNull(); + assertThat(operationResult.getPersistedSize()).isEqualTo(0L); + } + } + + @Test + public void attemptToRewindOutOfBoundsThrows_lower() { + RewindableContent content = RewindableContent.of(); + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + null, null, content, HttpContentRange.of(ByteRangeSpec.relativeLength(10L, 10L))); + + IllegalArgumentException iae = + assertThrows(IllegalArgumentException.class, () -> task.rewindTo(9)); + assertThat(iae).hasMessageThat().isEqualTo("Rewind offset is out of bounds. 
(10 <= 9 < 20)"); + } + + @Test + public void attemptToRewindOutOfBoundsThrows_upper() { + RewindableContent content = RewindableContent.of(); + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + null, null, content, HttpContentRange.of(ByteRangeSpec.relativeLength(10L, 10L))); + + IllegalArgumentException iae = + assertThrows(IllegalArgumentException.class, () -> task.rewindTo(20)); + assertThat(iae).hasMessageThat().isEqualTo("Rewind offset is out of bounds. (10 <= 20 < 20)"); + } + + @Test + public void repeatedRewindsToTheSameLocationWork() { + ByteBuffer buf1 = DataGenerator.base64Characters().genByteBuffer(_256KiB); + ByteBuffer buf2 = DataGenerator.base64Characters().genByteBuffer(_256KiB); + RewindableContent content = RewindableContent.of(buf1, buf2); + JsonResumableSessionPutTask task = + new JsonResumableSessionPutTask( + null, null, content, HttpContentRange.of(ByteRangeSpec.relativeLength(0L, _512KiBL))); + + task.rewindTo(0); + assertThat(buf1.position()).isEqualTo(0); + assertThat(buf2.position()).isEqualTo(0); + + int last = buf1.capacity(); + buf1.position(last); + buf2.position(last); + + task.rewindTo(_256KiBL); + assertThat(buf1.remaining()).isEqualTo(0); + assertThat(buf2.position()).isEqualTo(0); + + task.rewindTo(_256KiBL); + assertThat(buf1.remaining()).isEqualTo(0); + assertThat(buf2.position()).isEqualTo(0); + + task.rewindTo(_256KiBL + 13); + assertThat(buf1.remaining()).isEqualTo(0); + assertThat(buf2.position()).isEqualTo(13); + + task.rewindTo(_256KiBL + 13); + assertThat(buf1.remaining()).isEqualTo(0); + assertThat(buf2.position()).isEqualTo(13); + } + + static @NonNull JsonResumableWrite jsonResumableWrite(URI uploadUrl) { + return JsonResumableWrite.of(new StorageObject(), ImmutableMap.of(), uploadUrl.toString(), 0); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITJsonResumableSessionQueryTaskTest.java 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITJsonResumableSessionQueryTaskTest.java new file mode 100644 index 000000000000..c50c01783087 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITJsonResumableSessionQueryTaskTest.java @@ -0,0 +1,263 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._256KiBL; +import static com.google.cloud.storage.ITJsonResumableSessionPutTaskTest.jsonResumableWrite; +import static com.google.common.truth.Truth.assertThat; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpResponseStatus.OK; +import static org.junit.Assert.assertThrows; + +import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.api.client.json.JsonObjectParser; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.FakeHttpServer.HttpRequestHandler; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.ParallelFriendly; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import 
com.google.common.collect.ImmutableMap; +import io.grpc.netty.shaded.io.netty.buffer.ByteBuf; +import io.grpc.netty.shaded.io.netty.buffer.Unpooled; +import io.grpc.netty.shaded.io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.grpc.netty.shaded.io.netty.handler.codec.http.FullHttpResponse; +import io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaderNames; +import io.grpc.netty.shaded.io.netty.handler.codec.http.HttpResponseStatus; +import java.math.BigInteger; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.UUID; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +@ParallelFriendly +public final class ITJsonResumableSessionQueryTaskTest { + private static final GsonFactory gson = GsonFactory.getDefaultInstance(); + private static final NetHttpTransport transport = new NetHttpTransport.Builder().build(); + private static final HttpResponseStatus RESUME_INCOMPLETE = + HttpResponseStatus.valueOf(308, "Resume Incomplete"); + private static final HttpResponseStatus APPEND_GREATER_THAN_CURRENT_SIZE = + HttpResponseStatus.valueOf(503, ""); + + private HttpClientContext httpClientContext; + + @Before + public void setUp() throws Exception { + httpClientContext = + HttpClientContext.of(transport.createRequestFactory(), new JsonObjectParser(gson)); + } + + @Test + public void successfulSession() throws Exception { + HttpRequestHandler handler = + req -> { + StorageObject so = new StorageObject(); + so.setName("object-name").setSize(BigInteger.ZERO); + ByteBuf buf = Unpooled.wrappedBuffer(gson.toByteArray(so)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/json; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) 
{ + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + JsonResumableSessionQueryTask task = + new JsonResumableSessionQueryTask(httpClientContext, jsonResumableWrite(uri)); + + ResumableOperationResult<@Nullable StorageObject> result = task.call(); + StorageObject object = result.getObject(); + assertThat(object).isNotNull(); + assertThat(result.getPersistedSize()).isEqualTo(0L); + } + } + + @Test + public void successfulSession_noObject() throws Exception { + HttpRequestHandler handler = + req -> { + DefaultFullHttpResponse response = new DefaultFullHttpResponse(req.protocolVersion(), OK); + response.headers().set("X-Goog-Stored-Content-Length", 0); + return response; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + JsonResumableSessionQueryTask task = + new JsonResumableSessionQueryTask(httpClientContext, jsonResumableWrite(uri)); + + ResumableOperationResult<@Nullable StorageObject> result = task.call(); + StorageObject object = result.getObject(); + assertThat(object).isNull(); + assertThat(result.getPersistedSize()).isEqualTo(0L); + } + } + + @Test + public void incompleteSession() throws Exception { + HttpRequestHandler handler = + req -> { + DefaultFullHttpResponse response = + new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + response + .headers() + .set( + HttpHeaderNames.RANGE, + ByteRangeSpec.relativeLength(0L, _256KiBL).getHttpRangeHeader()); + return response; + }; + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + JsonResumableSessionQueryTask task = + new JsonResumableSessionQueryTask(httpClientContext, jsonResumableWrite(uri)); + + 
ResumableOperationResult<@Nullable StorageObject> result = task.call(); + assertThat(result.getPersistedSize()).isEqualTo(_256KiBL); + } + } + + @Test + public void incompleteSession_missingRangeHeader() throws Exception { + HttpRequestHandler handler = + req -> new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + JsonResumableSessionQueryTask task = + new JsonResumableSessionQueryTask(httpClientContext, jsonResumableWrite(uri)); + + ResumableOperationResult<@Nullable StorageObject> result = task.call(); + assertThat(result.getPersistedSize()).isEqualTo(0); + assertThat(result.getObject()).isNull(); + } + } + + @Test + public void successfulSession_noJson_noStoredContentLength() throws Exception { + HttpRequestHandler handler = req -> new DefaultFullHttpResponse(req.protocolVersion(), OK); + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + JsonResumableSessionQueryTask task = + new JsonResumableSessionQueryTask(httpClientContext, jsonResumableWrite(uri)); + + StorageException se = assertThrows(StorageException.class, task::call); + assertThat(se.getCode()).isEqualTo(0); + } + } + + @Test + public void successfulSession_noSize() throws Exception { + HttpRequestHandler handler = + req -> { + StorageObject so = new StorageObject(); + so.setName("object-name"); + ByteBuf buf = Unpooled.wrappedBuffer(gson.toByteArray(so)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/json; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + 
fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + JsonResumableSessionQueryTask task = + new JsonResumableSessionQueryTask(httpClientContext, jsonResumableWrite(uri)); + + StorageException se = assertThrows(StorageException.class, task::call); + assertThat(se.getCode()).isEqualTo(0); + } + } + + @Test + public void query_badOffset() throws Exception { + HttpRequestHandler handler = + req -> { + // error message from GCS circa 2023-02 + ByteBuf buf = + Unpooled.wrappedBuffer( + "Invalid request. According to the Content-Range header, the upload offset is 524288 byte(s), which exceeds already uploaded size of 262144 byte(s)." + .getBytes(StandardCharsets.UTF_8)); + FullHttpResponse resp = + new DefaultFullHttpResponse( + req.protocolVersion(), APPEND_GREATER_THAN_CURRENT_SIZE, buf); + resp.headers().set(CONTENT_TYPE, "text/plain; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + JsonResumableSessionQueryTask task = + new JsonResumableSessionQueryTask(httpClientContext, jsonResumableWrite(uri)); + + StorageException se = assertThrows(StorageException.class, task::call); + assertThat(se.getCode()).isEqualTo(0); + assertThat(se.getReason()).isEqualTo("dataLoss"); + } + } + + @Test + public void _503_emptyBody() throws Exception { + HttpRequestHandler handler = + req -> { + FullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), APPEND_GREATER_THAN_CURRENT_SIZE); + resp.headers().set(CONTENT_TYPE, "text/plain; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + URI uri = + fakeHttpServer.createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())); + + JsonResumableSessionQueryTask task = + new 
JsonResumableSessionQueryTask(httpClientContext, jsonResumableWrite(uri)); + + StorageException se = assertThrows(StorageException.class, task::call); + assertThat(se.getCode()).isEqualTo(503); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITJsonResumableSessionTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITJsonResumableSessionTest.java new file mode 100644 index 000000000000..398bb9e51f6c --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITJsonResumableSessionTest.java @@ -0,0 +1,450 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._1MiB; +import static com.google.cloud.storage.ByteSizeConstants._1MiBL; +import static com.google.cloud.storage.ByteSizeConstants._256KiB; +import static com.google.cloud.storage.ByteSizeConstants._256KiBL; +import static com.google.cloud.storage.ByteSizeConstants._512KiB; +import static com.google.cloud.storage.ByteSizeConstants._512KiBL; +import static com.google.cloud.storage.ByteSizeConstants._768KiBL; +import static com.google.common.truth.Truth.assertThat; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaderNames.CONTENT_RANGE; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaderNames.RANGE; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpResponseStatus.OK; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpResponseStatus.SERVICE_UNAVAILABLE; + +import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.api.client.json.JsonObjectParser; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.core.NanoClock; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.FakeHttpServer.HttpRequestHandler; +import com.google.cloud.storage.Retrying.DefaultRetrier; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.Retrying.RetryingDependencies; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import io.grpc.netty.shaded.io.netty.buffer.ByteBuf; +import io.grpc.netty.shaded.io.netty.buffer.Unpooled; +import io.grpc.netty.shaded.io.netty.handler.codec.http.DefaultFullHttpResponse; +import 
io.grpc.netty.shaded.io.netty.handler.codec.http.FullHttpResponse; +import io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaderNames; +import io.grpc.netty.shaded.io.netty.handler.codec.http.HttpRequest; +import io.grpc.netty.shaded.io.netty.handler.codec.http.HttpResponseStatus; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.UnaryOperator; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +public final class ITJsonResumableSessionTest { + private static final GsonFactory gson = GsonFactory.getDefaultInstance(); + private static final NetHttpTransport transport = new NetHttpTransport.Builder().build(); + private static final HttpResponseStatus RESUME_INCOMPLETE = + HttpResponseStatus.valueOf(308, "Resume Incomplete"); + private static final RetrierWithAlg RETRIER = + new DefaultRetrier( + UnaryOperator.identity(), + RetryingDependencies.simple( + NanoClock.getDefaultClock(), + RetrySettings.newBuilder().setMaxAttempts(3).build())) + .withAlg(StorageRetryStrategy.getUniformStorageRetryStrategy().getIdempotentHandler()); + private HttpClientContext httpClientContext; + + @Rule public final TemporaryFolder temp = new TemporaryFolder(); + + @Before + public void setUp() throws Exception { + httpClientContext = + HttpClientContext.of(transport.createRequestFactory(), new JsonObjectParser(gson)); + } + + @Test + public void rewindWillQueryStatusOnlyWhenDirty() throws Exception { + HttpContentRange range1 = HttpContentRange.of(ByteRangeSpec.explicit(0L, _512KiBL)); + HttpContentRange range2 = 
HttpContentRange.query(); + HttpContentRange range3 = HttpContentRange.of(ByteRangeSpec.explicit(_256KiBL, _512KiBL)); + + final List requests = Collections.synchronizedList(new ArrayList<>()); + HttpRequestHandler handler = + req -> { + requests.add(req); + String contentRange = req.headers().get(CONTENT_RANGE); + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + if (range1.getHeaderValue().equals(contentRange)) { + return new DefaultFullHttpResponse(req.protocolVersion(), SERVICE_UNAVAILABLE); + } else if (range2.getHeaderValue().equals(contentRange)) { + resp.headers().set(RANGE, ByteRangeSpec.explicit(0L, _256KiBL).getHttpRangeHeader()); + } else { + resp.headers().set(RANGE, ByteRangeSpec.explicit(0L, _512KiBL).getHttpRangeHeader()); + } + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler); + TmpFile tmpFile = + DataGenerator.base64Characters().tempFile(temp.newFolder().toPath(), _512KiBL)) { + String uploadUrl = + fakeHttpServer + .createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())) + .toString(); + + JsonResumableWrite resumableWrite = + JsonResumableWrite.of(null, ImmutableMap.of(), uploadUrl, 0); + JsonResumableSession session = + new JsonResumableSession(httpClientContext, RETRIER, resumableWrite); + + ResumableOperationResult<@Nullable StorageObject> operationResult = + session.put(RewindableContent.of(tmpFile.getPath()), range1); + StorageObject call = operationResult.getObject(); + assertThat(call).isNull(); + assertThat(operationResult.getPersistedSize()).isEqualTo(_512KiBL); + } + + List actual = + requests.stream().map(r -> r.headers().get(CONTENT_RANGE)).collect(Collectors.toList()); + + List expected = + ImmutableList.of(range1.getHeaderValue(), range2.getHeaderValue(), range3.getHeaderValue()); + + assertThat(actual).isEqualTo(expected); + } + + @Test + public void 
retryAttemptWillReturnQueryResultIfPersistedSizeMatchesSpecifiedEndOffset() + throws Exception { + HttpContentRange range1 = HttpContentRange.of(ByteRangeSpec.explicit(0L, _512KiBL)); + HttpContentRange range2 = HttpContentRange.query(); + HttpContentRange range3 = HttpContentRange.of(ByteRangeSpec.explicit(_512KiBL, _768KiBL)); + + final List requests = Collections.synchronizedList(new ArrayList<>()); + HttpRequestHandler handler = + req -> { + requests.add(req); + String contentRange = req.headers().get(CONTENT_RANGE); + DefaultFullHttpResponse resp; + if (range1.getHeaderValue().equals(contentRange)) { + resp = new DefaultFullHttpResponse(req.protocolVersion(), SERVICE_UNAVAILABLE); + } else if (range2.getHeaderValue().equals(contentRange)) { + resp = new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + resp.headers().set(RANGE, ByteRangeSpec.explicit(0L, _512KiBL).getHttpRangeHeader()); + } else { + resp = new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + resp.headers() + .set(RANGE, ByteRangeSpec.explicit(_512KiBL, _768KiBL).getHttpRangeHeader()); + } + return resp; + }; + + ByteBuffer buf1 = DataGenerator.base64Characters().genByteBuffer(_512KiB); + ByteBuffer buf2 = DataGenerator.base64Characters().genByteBuffer(_256KiB); + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + String uploadUrl = + fakeHttpServer + .createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())) + .toString(); + + JsonResumableWrite resumableWrite = + JsonResumableWrite.of(null, ImmutableMap.of(), uploadUrl, 0); + JsonResumableSession session = + new JsonResumableSession(httpClientContext, RETRIER, resumableWrite); + + ResumableOperationResult<@Nullable StorageObject> operationResult1 = + session.put(RewindableContent.of(buf1), range1); + StorageObject call1 = operationResult1.getObject(); + assertThat(call1).isNull(); + 
assertThat(operationResult1.getPersistedSize()).isEqualTo(_512KiBL); + + ResumableOperationResult<@Nullable StorageObject> operationResult2 = + session.put(RewindableContent.of(buf2), range3); + StorageObject call2 = operationResult2.getObject(); + assertThat(call2).isNull(); + assertThat(operationResult2.getPersistedSize()).isEqualTo(_768KiBL); + } + + List actual = + requests.stream().map(r -> r.headers().get(CONTENT_RANGE)).collect(Collectors.toList()); + + List expected = + ImmutableList.of(range1.getHeaderValue(), range2.getHeaderValue(), range3.getHeaderValue()); + + assertThat(actual).isEqualTo(expected); + } + + @Test + public void rewindOfContentIsRelativeToItsBeginOffsetOfTheOverallObject() throws Exception { + HttpContentRange range1 = HttpContentRange.of(ByteRangeSpec.explicit(0L, _512KiBL)); + HttpContentRange range2 = HttpContentRange.of(ByteRangeSpec.explicit(_512KiBL, _768KiBL)); + HttpContentRange range3 = HttpContentRange.query(); + + final AtomicBoolean fail = new AtomicBoolean(true); + final List requests = Collections.synchronizedList(new ArrayList<>()); + HttpRequestHandler handler = + req -> { + requests.add(req); + String contentRange = req.headers().get(CONTENT_RANGE); + DefaultFullHttpResponse resp; + if (range1.getHeaderValue().equals(contentRange) + || range3.getHeaderValue().equals(contentRange)) { + resp = new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + resp.headers().set(RANGE, ByteRangeSpec.explicit(0L, _512KiBL).getHttpRangeHeader()); + } else if (range2.getHeaderValue().equals(contentRange)) { + if (fail.getAndSet(false)) { + resp = new DefaultFullHttpResponse(req.protocolVersion(), SERVICE_UNAVAILABLE); + } else { + resp = new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + resp.headers() + .set(RANGE, ByteRangeSpec.explicit(_512KiBL, _768KiBL).getHttpRangeHeader()); + } + } else { + resp = new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + resp.headers() + 
.set(RANGE, ByteRangeSpec.explicit(_512KiBL, _768KiBL).getHttpRangeHeader()); + } + return resp; + }; + + ByteBuffer buf1 = DataGenerator.base64Characters().genByteBuffer(_512KiB); + ByteBuffer buf2 = DataGenerator.base64Characters().genByteBuffer(_256KiB); + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + String uploadUrl = + fakeHttpServer + .createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())) + .toString(); + + JsonResumableWrite resumableWrite = + JsonResumableWrite.of(null, ImmutableMap.of(), uploadUrl, 0); + JsonResumableSession session = + new JsonResumableSession(httpClientContext, RETRIER, resumableWrite); + + ResumableOperationResult<@Nullable StorageObject> operationResult1 = + session.put(RewindableContent.of(buf1), range1); + StorageObject call1 = operationResult1.getObject(); + assertThat(call1).isNull(); + assertThat(operationResult1.getPersistedSize()).isEqualTo(_512KiBL); + + ResumableOperationResult<@Nullable StorageObject> operationResult2 = + session.put(RewindableContent.of(buf2), range2); + StorageObject call2 = operationResult2.getObject(); + assertThat(call2).isNull(); + assertThat(operationResult2.getPersistedSize()).isEqualTo(_768KiBL); + } + + List actual = + requests.stream().map(r -> r.headers().get(CONTENT_RANGE)).collect(Collectors.toList()); + + List expected = + ImmutableList.of( + range1.getHeaderValue(), + range2.getHeaderValue(), + range3.getHeaderValue(), + range2.getHeaderValue()); + + assertThat(actual).isEqualTo(expected); + } + + @Test + public void finalChunkContainsCorrectCrc32cHeader() throws Exception { + ChecksummedTestContent ctc = ChecksummedTestContent.gen(_512KiB); + String expectedHashHeader = "crc32c=" + ctc.getCrc32cBase64(); + List chunks = ctc.chunkup(_256KiB); + ChecksummedTestContent chunk1 = chunks.get(0); + ChecksummedTestContent chunk2 = chunks.get(1); + + AtomicReference capturedInitialHash = new AtomicReference<>(); + AtomicReference 
capturedFinalHash = new AtomicReference<>(); + + HttpRequestHandler handler = + req -> { + String contentRange = req.headers().get(CONTENT_RANGE); + String currentHash = req.headers().get("x-goog-hash"); + + if (contentRange.contains("/*")) { // First chunk (non-final) + capturedInitialHash.set(currentHash); + FullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + resp.headers() + .set( + HttpHeaderNames.RANGE, + ByteRangeSpec.explicit(0L, _256KiBL).getHttpRangeHeader()); + return resp; + } else { // Second chunk (final) + capturedFinalHash.set(currentHash); + + StorageObject so = + new StorageObject().setName("object").setSize(BigInteger.valueOf(_512KiBL)); + ByteBuf buf = Unpooled.wrappedBuffer(gson.toByteArray(so)); + FullHttpResponse resp = new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/json; charset=utf-8"); + return resp; + } + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + String uploadUrl = + fakeHttpServer + .createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())) + .toString(); + + JsonResumableWrite resumableWrite = + JsonResumableWrite.of( + null, ImmutableMap.of(), uploadUrl, 0, Hasher.enabled(), Crc32cValue.zero()); + JsonResumableSession session = + new JsonResumableSession(httpClientContext, RETRIER, resumableWrite); + + ResumableOperationResult<@Nullable StorageObject> put1 = + session.put( + RewindableContent.of(chunk1.asByteBuffer()), + HttpContentRange.of(ByteRangeSpec.explicit(0L, _256KiBL))); + assertThat(put1.getObject()).isNull(); + assertThat(put1.getPersistedSize()).isEqualTo(_256KiBL); + + ResumableOperationResult<@Nullable StorageObject> put2 = + session.put( + RewindableContent.of(chunk2.asByteBuffer()), + HttpContentRange.of(ByteRangeSpec.explicit(0L, _256KiBL), ctc.length())); + assertThat(put2.getObject()).isNotNull(); + 
assertThat(put2.getPersistedSize()).isEqualTo(_512KiBL); + + assertThat(capturedInitialHash.get()).isNull(); + assertThat(capturedFinalHash.get()).isEqualTo(expectedHashHeader); + } + } + + @Test + public void retriesOfPartiallyConsumedBytesChecksumCorrectly() throws Exception { + ChecksummedTestContent ctc = ChecksummedTestContent.gen(_1MiB); + String expectedFullHashHeader = "crc32c=" + ctc.getCrc32cBase64(); + ChecksummedTestContent chunk1 = ctc.slice(0, _256KiB); + ChecksummedTestContent chunk2 = ctc.slice(_256KiB, _256KiB); + ChecksummedTestContent chunk3 = ctc.slice(_512KiB, _512KiB); + HttpContentRange expectedContentRange1 = + HttpContentRange.of(ByteRangeSpec.explicit(0L, (long) chunk1.length())); + HttpContentRange expectedContentRange2 = + HttpContentRange.of(ByteRangeSpec.explicit(_256KiBL, _256KiBL + chunk2.length())); + HttpContentRange expectedContentrange3 = + HttpContentRange.of(ByteRangeSpec.explicit(_512KiBL, (long) ctc.length()), ctc.length()); + HttpContentRange retriedFullContentRange = + HttpContentRange.of(ByteRangeSpec.explicit(_768KiBL, (long) ctc.length()), ctc.length()); + + AtomicLong requestCount = new AtomicLong(0); + List hashes = Collections.synchronizedList(new ArrayList<>()); + + HttpRequestHandler handler = + req -> { + requestCount.incrementAndGet(); + HttpContentRange contentRange = + HttpContentRange.parse(req.headers().get("Content-Range")); + String hashHeader = req.headers().get("x-goog-hash"); + if (hashHeader != null) { + hashes.add(hashHeader); + } + + if (expectedContentRange1.equals(contentRange)) { + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + resp.headers().set(RANGE, ByteRangeSpec.explicit(0L, _256KiBL).getHttpRangeHeader()); + return resp; + } else if (expectedContentRange2.equals(contentRange)) { + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + resp.headers().set(RANGE, 
ByteRangeSpec.explicit(0L, _512KiBL).getHttpRangeHeader()); + return resp; + } else if (expectedContentrange3.equals(contentRange)) { + // simulate a broken connection -- except instead of breaking the connection (which is + // very difficult to do with netty) return a 503. + return new DefaultFullHttpResponse( + req.protocolVersion(), HttpResponseStatus.SERVICE_UNAVAILABLE); + } else if (HttpContentRange.query().equals(contentRange)) { + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), RESUME_INCOMPLETE); + resp.headers().set(RANGE, ByteRangeSpec.explicit(0L, _768KiBL).getHttpRangeHeader()); + return resp; + } else if (retriedFullContentRange.equals(contentRange)) { + StorageObject so = + new StorageObject().setName("object").setSize(BigInteger.valueOf(_1MiB)); + ByteBuf buf = Unpooled.wrappedBuffer(gson.toByteArray(so)); + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/json; charset=utf-8"); + return resp; + } + return new DefaultFullHttpResponse( + req.protocolVersion(), HttpResponseStatus.INTERNAL_SERVER_ERROR); + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + String uploadUrl = + fakeHttpServer + .createUri( + "/upload/{uploadId}", ImmutableMap.of("uploadId", UUID.randomUUID().toString())) + .toString(); + + JsonResumableWrite resumableWrite = + JsonResumableWrite.of( + null, ImmutableMap.of(), uploadUrl, 0, Hasher.enabled(), Crc32cValue.zero()); + JsonResumableSession session = + new JsonResumableSession(httpClientContext, RETRIER, resumableWrite); + + ResumableOperationResult<@Nullable StorageObject> result1 = + session.put( + RewindableContent.of(chunk1.asByteBuffer()), + HttpContentRange.of(ByteRangeSpec.explicit(0L, _256KiBL))); + assertThat(result1.getObject()).isNull(); + assertThat(result1.getPersistedSize()).isEqualTo(_256KiBL); + ResumableOperationResult<@Nullable StorageObject> result2 = + 
session.put( + RewindableContent.of(chunk2.asByteBuffer()), + HttpContentRange.of(ByteRangeSpec.explicit(_256KiBL, _512KiBL))); + assertThat(result2.getObject()).isNull(); + assertThat(result2.getPersistedSize()).isEqualTo(_512KiBL); + ResumableOperationResult<@Nullable StorageObject> result3 = + session.put( + RewindableContent.of(chunk3.asByteBuffer()), + HttpContentRange.of(ByteRangeSpec.explicit(_512KiBL, _1MiBL), ctc.length())); + assertThat(result3.getObject()).isNotNull(); + assertThat(result3.getPersistedSize()).isEqualTo(_1MiBL); + + assertThat(requestCount.get()).isEqualTo(5); + assertThat(hashes) + .isEqualTo(ImmutableList.of(expectedFullHashHeader, expectedFullHashHeader)); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITMultipartUploadHttpRequestManagerTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITMultipartUploadHttpRequestManagerTest.java new file mode 100644 index 000000000000..2d1c6b6cfd85 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITMultipartUploadHttpRequestManagerTest.java @@ -0,0 +1,1563 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE; +import static io.grpc.netty.shaded.io.netty.handler.codec.http.HttpResponseStatus.OK; +import static org.junit.Assert.assertThrows; + +import com.fasterxml.jackson.annotation.JsonAutoDetect; +import com.fasterxml.jackson.annotation.PropertyAccessor; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.module.SimpleModule; +import com.fasterxml.jackson.dataformat.xml.XmlMapper; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; +import com.google.api.client.http.HttpResponseException; +import com.google.api.gax.rpc.FixedHeaderProvider; +import com.google.cloud.storage.FakeHttpServer.HttpRequestHandler; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.ParallelFriendly; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.multipartupload.model.AbortMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.AbortMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.CompletedMultipartUpload; +import com.google.cloud.storage.multipartupload.model.CompletedPart; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadResponse; +import 
com.google.cloud.storage.multipartupload.model.ListMultipartUploadsRequest; +import com.google.cloud.storage.multipartupload.model.ListMultipartUploadsResponse; +import com.google.cloud.storage.multipartupload.model.ListPartsRequest; +import com.google.cloud.storage.multipartupload.model.ListPartsResponse; +import com.google.cloud.storage.multipartupload.model.MultipartUpload; +import com.google.cloud.storage.multipartupload.model.ObjectLockMode; +import com.google.cloud.storage.multipartupload.model.Part; +import com.google.cloud.storage.multipartupload.model.UploadPartRequest; +import com.google.cloud.storage.multipartupload.model.UploadPartResponse; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.hash.Hashing; +import io.grpc.netty.shaded.io.netty.buffer.ByteBuf; +import io.grpc.netty.shaded.io.netty.buffer.Unpooled; +import io.grpc.netty.shaded.io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.grpc.netty.shaded.io.netty.handler.codec.http.FullHttpRequest; +import io.grpc.netty.shaded.io.netty.handler.codec.http.FullHttpResponse; +import io.grpc.netty.shaded.io.netty.handler.codec.http.HttpResponseStatus; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.util.Collections; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +@ParallelFriendly +public final class ITMultipartUploadHttpRequestManagerTest { + private static final XmlMapper xmlMapper; + + static { + xmlMapper = new XmlMapper(); + xmlMapper.registerModule(new JavaTimeModule()); + xmlMapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY); + SimpleModule module = new SimpleModule(); + module.addSerializer( + StorageClass.class, + new JsonSerializer() { + @Override + 
public void serialize( + StorageClass value, JsonGenerator gen, SerializerProvider serializers) + throws java.io.IOException { + gen.writeString(value.toString()); + } + }); + xmlMapper.registerModule(module); + } + + @Rule public final TemporaryFolder temp = new TemporaryFolder(); + + @Test + public void sendCreateMultipartUploadRequest_success() throws Exception { + HttpRequestHandler handler = + req -> { + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .build(); + + CreateMultipartUploadResponse response = + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + + assertThat(response).isNotNull(); + assertThat(response.bucket()).isEqualTo("test-bucket"); + assertThat(response.key()).isEqualTo("test-key"); + assertThat(response.uploadId()).isEqualTo("test-upload-id"); + } + } + + @Test + public void createFrom_withExistingUserAgent() throws Exception { + HttpRequestHandler handler = + req -> { + boolean hasCustom = + req.headers().getAll("User-Agent").stream() + .anyMatch(agent -> agent.contains("my-custom-agent")); + assertThat(hasCustom).isTrue(); + // check that it does not also contain the gcloud-java generated header + boolean hasDefault = + 
req.headers().getAll("User-Agent").stream() + .anyMatch(agent -> agent.contains("gcloud-java/")); + assertThat(hasDefault).isFalse(); + + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + HttpStorageOptions options = + fakeHttpServer.getHttpStorageOptions().toBuilder() + .setHeaderProvider( + FixedHeaderProvider.create(ImmutableMap.of("User-Agent", "my-custom-agent"))) + .build(); + + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(options); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void createFrom_withoutExistingUserAgent() throws Exception { + HttpRequestHandler handler = + req -> { + boolean hasDefault = + req.headers().getAll("User-Agent").stream() + .anyMatch(agent -> agent.startsWith("gcloud-java/")); + assertThat(hasDefault).isTrue(); + + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer 
fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendCreateMultipartUploadRequest_error() throws Exception { + HttpRequestHandler handler = + req -> { + FullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), HttpResponseStatus.BAD_REQUEST); + resp.headers().set(CONTENT_TYPE, "text/plain; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .build(); + + assertThrows( + HttpResponseException.class, + () -> multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request)); + } + } + + @Test + public void sendCreateMultipartUploadRequest_withCannedAcl() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("x-goog-acl")).isEqualTo("authenticated-read"); + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return 
resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .cannedAcl(Storage.PredefinedAcl.AUTHENTICATED_READ) + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendCreateMultipartUploadRequest_withMetadata() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("x-goog-meta-key1")).isEqualTo("value1"); + assertThat(req.headers().get("x-goog-meta-key2")).isEqualTo("value2"); + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .metadata(ImmutableMap.of("key1", "value1", "key2", "value2")) + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendCreateMultipartUploadRequest_withStorageClass() throws Exception { + HttpRequestHandler handler = + req -> { + 
assertThat(req.headers().get("x-goog-storage-class")).isEqualTo("ARCHIVE"); + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .storageClass(StorageClass.ARCHIVE) + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendCreateMultipartUploadRequest_withKmsKeyName() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("x-goog-encryption-kms-key-name")) + .isEqualTo("projects/p/locations/l/keyRings/r/cryptoKeys/k"); + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + 
CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .kmsKeyName("projects/p/locations/l/keyRings/r/cryptoKeys/k") + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendCreateMultipartUploadRequest_withObjectLockMode() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("x-goog-object-lock-mode")).isEqualTo("GOVERNANCE"); + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .objectLockMode(ObjectLockMode.GOVERNANCE) + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendCreateMultipartUploadRequest_withObjectLockRetainUntilDate() throws Exception { + OffsetDateTime retainUtil = OffsetDateTime.of(2024, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + HttpRequestHandler handler = + req -> { + OffsetDateTime actual = + Utils.offsetDateTimeRfc3339Codec.decode( + req.headers().get("x-goog-object-lock-retain-until-date")); + assertThat(actual).isEqualTo(retainUtil); + CreateMultipartUploadResponse response = + 
CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .objectLockRetainUntilDate(retainUtil) + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendCreateMultipartUploadRequest_withCustomTime() throws Exception { + OffsetDateTime customTime = OffsetDateTime.of(2024, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + HttpRequestHandler handler = + req -> { + OffsetDateTime actual = + Utils.offsetDateTimeRfc3339Codec.decode(req.headers().get("x-goog-custom-time")); + assertThat(actual).isEqualTo(customTime); + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + 
CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .customTime(customTime) + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendCreateMultipartUploadRequest_withContentDisposition() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("Content-Disposition")) + .isEqualTo("attachment; filename=\"test.txt\""); + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .contentDisposition("attachment; filename=\"test.txt\"") + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendCreateMultipartUploadRequest_withContentEncoding() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("Content-Encoding")).isEqualTo("gzip"); + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = 
Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .contentEncoding("gzip") + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendCreateMultipartUploadRequest_withContentLanguage() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("Content-Language")).isEqualTo("en"); + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .contentLanguage("en") + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void 
sendCreateMultipartUploadRequest_withCacheControl() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("Cache-Control")) + .isEqualTo("no-cache, no-store, max-age=0, must-revalidate"); + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .cacheControl("no-cache, no-store, max-age=0, must-revalidate") + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendCreateMultipartUploadRequest_withUserProject() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("x-goog-user-project")).isEqualTo("test-project"); + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager 
multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .userProject("test-project") + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendCreateMultipartUploadRequest_withContentType() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().getAll("Content-Type")).containsExactly("audio/mp4"); + + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("audio/mp4") + .build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendCreateMultipartUploadRequest_sendsContentLength() throws Exception { + HttpRequestHandler handler = + req -> { + // See https://docs.cloud.google.com/storage/docs/xml-api/reference-headers#contentlength + assertThat(req.headers().get("Content-Length")).isEqualTo("0"); + + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + 
.uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder().bucket("test-bucket").key("test-key").build(); + + multipartUploadHttpRequestManager.sendCreateMultipartUploadRequest(request); + } + } + + @Test + public void sendListPartsRequest_success() throws Exception { + HttpRequestHandler handler = + req -> { + OffsetDateTime lastModified = OffsetDateTime.of(2024, 5, 8, 17, 50, 0, 0, ZoneOffset.UTC); + ListPartsResponse listPartsResponse = + ListPartsResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .partNumberMarker(0) + .nextPartNumberMarker(1) + .maxParts(1) + .truncated(false) + .parts( + Collections.singletonList( + Part.builder() + .partNumber(1) + .eTag("\"etag\"") + .size(123) + .lastModified(lastModified) + .build())) + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(listPartsResponse)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + ListPartsRequest request = + ListPartsRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + 
.maxParts(1) + .partNumberMarker(0) + .build(); + + ListPartsResponse response = multipartUploadHttpRequestManager.sendListPartsRequest(request); + + assertThat(response).isNotNull(); + assertThat(response.bucket()).isEqualTo("test-bucket"); + assertThat(response.key()).isEqualTo("test-key"); + assertThat(response.uploadId()).isEqualTo("test-upload-id"); + assertThat(response.partNumberMarker()).isEqualTo(0); + assertThat(response.nextPartNumberMarker()).isEqualTo(1); + assertThat(response.maxParts()).isEqualTo(1); + assertThat(response.truncated()).isFalse(); + assertThat(response.parts()).hasSize(1); + Part part = response.parts().get(0); + assertThat(part.partNumber()).isEqualTo(1); + assertThat(part.eTag()).isEqualTo("\"etag\""); + assertThat(part.size()).isEqualTo(123); + assertThat(part.lastModified()) + .isEqualTo(OffsetDateTime.of(2024, 5, 8, 17, 50, 0, 0, ZoneOffset.UTC)); + } + } + + @Test + public void sendListPartsRequest_bucketNotFound() throws Exception { + HttpRequestHandler handler = + req -> + new DefaultFullHttpResponse( + req.protocolVersion(), + HttpResponseStatus.NOT_FOUND, + Unpooled.wrappedBuffer("Bucket not found".getBytes(StandardCharsets.UTF_8))); + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + ListPartsRequest request = + ListPartsRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + + assertThrows( + HttpResponseException.class, + () -> multipartUploadHttpRequestManager.sendListPartsRequest(request)); + } + } + + @Test + public void sendListPartsRequest_keyNotFound() throws Exception { + HttpRequestHandler handler = + req -> + new DefaultFullHttpResponse( + req.protocolVersion(), + HttpResponseStatus.NOT_FOUND, + Unpooled.wrappedBuffer("Key not found".getBytes(StandardCharsets.UTF_8))); + + try 
(FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + ListPartsRequest request = + ListPartsRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + + assertThrows( + HttpResponseException.class, + () -> multipartUploadHttpRequestManager.sendListPartsRequest(request)); + } + } + + @Test + public void sendListPartsRequest_badRequest() throws Exception { + HttpRequestHandler handler = + req -> + new DefaultFullHttpResponse( + req.protocolVersion(), + HttpResponseStatus.BAD_REQUEST, + Unpooled.wrappedBuffer("Invalid uploadId".getBytes(StandardCharsets.UTF_8))); + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + ListPartsRequest request = + ListPartsRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("invalid-upload-id") + .build(); + + assertThrows( + HttpResponseException.class, + () -> multipartUploadHttpRequestManager.sendListPartsRequest(request)); + } + } + + @Test + public void sendListPartsRequest_errorResponse() throws Exception { + HttpRequestHandler handler = + req -> { + FullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), HttpResponseStatus.BAD_REQUEST); + resp.headers().set(CONTENT_TYPE, "text/plain; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + ListPartsRequest request = + ListPartsRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + + 
assertThrows( + HttpResponseException.class, + () -> multipartUploadHttpRequestManager.sendListPartsRequest(request)); + } + } + + @Test + public void sendListPartsRequest_withUserProject() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("x-goog-user-project")).isEqualTo("test-project"); + ListPartsResponse listPartsResponse = + ListPartsResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .maxParts(1) + .partNumberMarker(0) + .parts(ImmutableList.of()) + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(listPartsResponse)); + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + ListPartsRequest request = + ListPartsRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .userProject("test-project") + .build(); + + ListPartsResponse response = multipartUploadHttpRequestManager.sendListPartsRequest(request); + + assertThat(response).isNotNull(); + assertThat(response.bucket()).isEqualTo("test-bucket"); + } + } + + @Test + public void sendAbortMultipartUploadRequest_success() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.uri()).contains("?uploadId=test-upload-id"); + AbortMultipartUploadResponse response = new AbortMultipartUploadResponse(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer 
fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + AbortMultipartUploadRequest request = + AbortMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + + AbortMultipartUploadResponse response = + multipartUploadHttpRequestManager.sendAbortMultipartUploadRequest(request); + + assertThat(response).isNotNull(); + } + } + + @Test + public void sendAbortMultipartUploadRequest_error() throws Exception { + HttpRequestHandler handler = + req -> { + FullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), HttpResponseStatus.BAD_REQUEST); + resp.headers().set(CONTENT_TYPE, "text/plain; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + AbortMultipartUploadRequest request = + AbortMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + + assertThrows( + HttpResponseException.class, + () -> multipartUploadHttpRequestManager.sendAbortMultipartUploadRequest(request)); + } + } + + @Test + public void sendAbortMultipartUploadRequest_withUserProject() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("x-goog-user-project")).isEqualTo("test-project"); + AbortMultipartUploadResponse response = new AbortMultipartUploadResponse(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer 
fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + AbortMultipartUploadRequest request = + AbortMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .userProject("test-project") + .build(); + + AbortMultipartUploadResponse response = + multipartUploadHttpRequestManager.sendAbortMultipartUploadRequest(request); + + assertThat(response).isNotNull(); + } + } + + @Test + public void sendCompleteMultipartUploadRequest_success() throws Exception { + HttpRequestHandler handler = + req -> { + CompleteMultipartUploadResponse response = + CompleteMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .etag("\"test-etag\"") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CompleteMultipartUploadRequest request = + CompleteMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .multipartUpload( + CompletedMultipartUpload.builder() + .parts( + ImmutableList.of( + CompletedPart.builder().partNumber(1).eTag("\"etag1\"").build(), + CompletedPart.builder().partNumber(2).eTag("\"etag2\"").build())) + .build()) + .build(); + + CompleteMultipartUploadResponse response = + multipartUploadHttpRequestManager.sendCompleteMultipartUploadRequest(request); + + assertThat(response).isNotNull(); + assertThat(response.bucket()).isEqualTo("test-bucket"); 
+ assertThat(response.key()).isEqualTo("test-key"); + assertThat(response.etag()).isEqualTo("\"test-etag\""); + } + } + + @Test + public void sendCompleteMultipartUploadRequest_error() throws Exception { + HttpRequestHandler handler = + req -> { + FullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), HttpResponseStatus.BAD_REQUEST); + resp.headers().set(CONTENT_TYPE, "text/plain; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CompleteMultipartUploadRequest request = + CompleteMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .multipartUpload( + CompletedMultipartUpload.builder() + .parts( + ImmutableList.of( + CompletedPart.builder().partNumber(1).eTag("\"etag1\"").build(), + CompletedPart.builder().partNumber(2).eTag("\"etag2\"").build())) + .build()) + .build(); + + assertThrows( + HttpResponseException.class, + () -> multipartUploadHttpRequestManager.sendCompleteMultipartUploadRequest(request)); + } + } + + @Test + public void sendCompleteMultipartUploadRequest_body() throws Exception { + HttpRequestHandler handler = + req -> { + FullHttpRequest fullHttpRequest = (FullHttpRequest) req; + ByteBuf content = fullHttpRequest.content(); + String body = content.toString(StandardCharsets.UTF_8); + assertThat(body) + .isEqualTo( + "1\"etag1\"2\"etag2\""); + CompleteMultipartUploadResponse response = + CompleteMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .etag("\"test-etag\"") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + 
return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CompleteMultipartUploadRequest request = + CompleteMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .multipartUpload( + CompletedMultipartUpload.builder() + .parts( + ImmutableList.of( + CompletedPart.builder().partNumber(1).eTag("\"etag1\"").build(), + CompletedPart.builder().partNumber(2).eTag("\"etag2\"").build())) + .build()) + .build(); + + multipartUploadHttpRequestManager.sendCompleteMultipartUploadRequest(request); + } + } + + @Test + public void sendCompleteMultipartUploadRequest_withUserProject() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("x-goog-user-project")).isEqualTo("test-project"); + CompleteMultipartUploadResponse response = + CompleteMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .etag("\"test-etag\"") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CompleteMultipartUploadRequest request = + CompleteMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .userProject("test-project") + .multipartUpload( + CompletedMultipartUpload.builder() + .parts( + ImmutableList.of( + CompletedPart.builder().partNumber(1).eTag("\"etag1\"").build(), + 
CompletedPart.builder().partNumber(2).eTag("\"etag2\"").build())) + .build()) + .build(); + + CompleteMultipartUploadResponse response = + multipartUploadHttpRequestManager.sendCompleteMultipartUploadRequest(request); + + assertThat(response).isNotNull(); + assertThat(response.bucket()).isEqualTo("test-bucket"); + assertThat(response.key()).isEqualTo("test-key"); + assertThat(response.etag()).isEqualTo("\"test-etag\""); + } + } + + @Test + public void sendUploadPartRequest_success() throws Exception { + String etag = "\"af1ed31420542285653c803a34aa839a\""; + String content = "hello world"; + byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8); + + HttpRequestHandler handler = + req -> { + assertThat(req.uri()).contains("?partNumber=1&uploadId=test-upload-id"); + FullHttpRequest fullReq = (FullHttpRequest) req; + ByteBuf requestContent = fullReq.content(); + byte[] receivedBytes = new byte[requestContent.readableBytes()]; + requestContent.readBytes(receivedBytes); + assertThat(receivedBytes).isEqualTo(contentBytes); + + DefaultFullHttpResponse resp = new DefaultFullHttpResponse(req.protocolVersion(), OK); + resp.headers().set("ETag", etag); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + UploadPartRequest request = + UploadPartRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .partNumber(1) + .build(); + + UploadPartResponse response = + multipartUploadHttpRequestManager.sendUploadPartRequest( + request, RewindableContent.of(ByteBuffer.wrap(contentBytes))); + + assertThat(response).isNotNull(); + assertThat(response.eTag()).isEqualTo(etag); + } + } + + @Test + public void sendUploadPartRequest_withChecksums() throws Exception { + String etag = "\"af1ed31420542285653c803a34aa839a\""; + String content = "hello 
world"; + byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8); + String md5 = Hashing.md5().hashBytes(contentBytes).toString(); + String crc32c = "yZRlqg=="; + + HttpRequestHandler handler = + req -> { + assertThat(req.uri()).contains("?partNumber=1&uploadId=test-upload-id"); + assertThat(req.headers().get("x-goog-hash")).contains("crc32c=" + crc32c); + FullHttpRequest fullReq = (FullHttpRequest) req; + ByteBuf requestContent = fullReq.content(); + byte[] receivedBytes = new byte[requestContent.readableBytes()]; + requestContent.readBytes(receivedBytes); + assertThat(receivedBytes).isEqualTo(contentBytes); + + DefaultFullHttpResponse resp = new DefaultFullHttpResponse(req.protocolVersion(), OK); + resp.headers().set("ETag", etag); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + UploadPartRequest request = + UploadPartRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .partNumber(1) + .build(); + + UploadPartResponse response = + multipartUploadHttpRequestManager.sendUploadPartRequest( + request, RewindableContent.of(ByteBuffer.wrap(contentBytes))); + + assertThat(response).isNotNull(); + assertThat(response.eTag()).isEqualTo(etag); + } + } + + @Test + public void sendUploadPartRequest_withCustomChecksum() throws Exception { + String etag = "\"af1ed31420542285653c803a34aa839a\""; + ChecksummedTestContent content = + ChecksummedTestContent.of("hello world".getBytes(StandardCharsets.UTF_8)); + + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("x-goog-hash")) + .isEqualTo("crc32c=" + content.getCrc32cBase64()); + FullHttpRequest fullReq = (FullHttpRequest) req; + ByteBuf requestContent = fullReq.content(); + byte[] receivedBytes = new byte[requestContent.readableBytes()]; + 
requestContent.readBytes(receivedBytes); + assertThat(receivedBytes).isEqualTo(content.getBytes()); + DefaultFullHttpResponse resp = new DefaultFullHttpResponse(req.protocolVersion(), OK); + resp.headers().set("ETag", etag); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + UploadPartRequest request = + UploadPartRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .partNumber(1) + .crc32c(content.getCrc32cBase64()) + .build(); + UploadPartResponse response = + multipartUploadHttpRequestManager.sendUploadPartRequest( + request, RewindableContent.of(content.asByteBuffer())); + assertThat(response).isNotNull(); + assertThat(response.eTag()).isEqualTo(etag); + } + } + + @Test + public void sendUploadPartRequest_withUserProject() throws Exception { + String etag = "\"af1ed31420542285653c803a34aa839a\""; + byte[] contentBytes = "hello world".getBytes(StandardCharsets.UTF_8); + + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("x-goog-user-project")).isEqualTo("test-project"); + DefaultFullHttpResponse resp = new DefaultFullHttpResponse(req.protocolVersion(), OK); + resp.headers().set("ETag", etag); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + UploadPartRequest request = + UploadPartRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .partNumber(1) + .userProject("test-project") + .build(); + + UploadPartResponse response = + multipartUploadHttpRequestManager.sendUploadPartRequest( + request, RewindableContent.of(ByteBuffer.wrap(contentBytes))); + + 
assertThat(response).isNotNull(); + assertThat(response.eTag()).isEqualTo(etag); + } + } + + @Test + public void sendUploadPartRequest_error() throws Exception { + HttpRequestHandler handler = + req -> { + FullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), HttpResponseStatus.BAD_REQUEST); + resp.headers().set(CONTENT_TYPE, "text/plain; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + UploadPartRequest request = + UploadPartRequest.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .partNumber(1) + .build(); + + assertThrows( + HttpResponseException.class, + () -> + multipartUploadHttpRequestManager.sendUploadPartRequest( + request, RewindableContent.empty())); + } + } + + @Test + public void sendListMultipartUploadsRequest_success() throws Exception { + HttpRequestHandler handler = + req -> { + ListMultipartUploadsResponse listMultipartUploadsResponse = + ListMultipartUploadsResponse.builder() + .bucket("test-bucket") + .keyMarker("key-marker") + .uploadIdMarker("upload-id-marker") + .nextKeyMarker("next-key-marker") + .nextUploadIdMarker("next-upload-id-marker") + .maxUploads(1) + .truncated(false) + .uploads( + ImmutableList.of( + MultipartUpload.newBuilder() + .key("test-key") + .uploadId("test-upload-id") + .storageClass(StorageClass.STANDARD) + .initiated( + OffsetDateTime.of(2025, 11, 11, 0, 0, 0, 0, ZoneOffset.UTC)) + .build())) + .build(); + // Jackson fails to serialize ImmutableList without GuavaModule. + // We use reflection to replace it with ArrayList for the test. 
+ forceSetUploads(listMultipartUploadsResponse, listMultipartUploadsResponse.uploads()); + + ByteBuf buf = + Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(listMultipartUploadsResponse)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + + resp.headers().set("Content-Type", "application/xml; charset=utf-8"); + resp.headers().set("Content-Length", resp.content().readableBytes()); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + + ListMultipartUploadsRequest request = + ListMultipartUploadsRequest.builder() + .bucket("test-bucket") + .maxUploads(1) + .keyMarker("key-marker") + .uploadIdMarker("upload-id-marker") + .build(); + + ListMultipartUploadsResponse response = + multipartUploadHttpRequestManager.sendListMultipartUploadsRequest(request); + + assertThat(response).isNotNull(); + assertThat(response.bucket()).isEqualTo("test-bucket"); + assertThat(response.uploads()).hasSize(1); + + MultipartUpload upload = response.uploads().get(0); + assertThat(upload.key()).isEqualTo("test-key"); + assertThat(upload.storageClass()).isEqualTo(StorageClass.STANDARD); + assertThat(upload.initiated()) + .isEqualTo(OffsetDateTime.of(2025, 11, 11, 0, 0, 0, 0, ZoneOffset.UTC)); + } + } + + @Test + public void sendListMultipartUploadsRequest_error() throws Exception { + HttpRequestHandler handler = + req -> { + FullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), HttpResponseStatus.BAD_REQUEST); + resp.headers().set(CONTENT_TYPE, "text/plain; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + 
ListMultipartUploadsRequest request = + ListMultipartUploadsRequest.builder().bucket("test-bucket").build(); + + assertThrows( + HttpResponseException.class, + () -> multipartUploadHttpRequestManager.sendListMultipartUploadsRequest(request)); + } + } + + @Test + public void sendListMultipartUploadsRequest_withUserProject() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.headers().get("x-goog-user-project")).isEqualTo("test-project"); + ListMultipartUploadsResponse listMultipartUploadsResponse = + ListMultipartUploadsResponse.builder() + .bucket("test-bucket") + .keyMarker("key-marker") + .uploadIdMarker("upload-id-marker") + .nextKeyMarker("next-key-marker") + .nextUploadIdMarker("next-upload-id-marker") + .maxUploads(1) + .truncated(false) + .uploads(ImmutableList.of()) + .build(); + // Jackson fails to serialize ImmutableList without GuavaModule. + // We use reflection to replace it with ArrayList for the test. + forceSetUploads(listMultipartUploadsResponse, listMultipartUploadsResponse.uploads()); + + ByteBuf buf = + Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(listMultipartUploadsResponse)); + + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + + resp.headers().set("Content-Type", "application/xml; charset=utf-8"); + resp.headers().set("Content-Length", resp.content().readableBytes()); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler)) { + MultipartUploadHttpRequestManager multipartUploadHttpRequestManager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + + ListMultipartUploadsRequest request = + ListMultipartUploadsRequest.builder() + .bucket("test-bucket") + .maxUploads(1) + .userProject("test-project") + .build(); + + ListMultipartUploadsResponse response = + multipartUploadHttpRequestManager.sendListMultipartUploadsRequest(request); + + assertThat(response).isNotNull(); + 
assertThat(response.bucket()).isEqualTo("test-bucket"); + } + } + + @Test + public void hostWithoutTrailingSlash_urlConstructedCorrectly() throws Exception { + HttpRequestHandler handler = + req -> { + assertThat(req.uri()).startsWith("/test-bucket/test-key"); + CreateMultipartUploadResponse response = + CreateMultipartUploadResponse.builder() + .bucket("test-bucket") + .key("test-key") + .uploadId("test-upload-id") + .build(); + ByteBuf buf = Unpooled.wrappedBuffer(xmlMapper.writeValueAsBytes(response)); + DefaultFullHttpResponse resp = + new DefaultFullHttpResponse(req.protocolVersion(), OK, buf); + resp.headers().set(CONTENT_TYPE, "application/xml; charset=utf-8"); + return resp; + }; + + try (FakeHttpServer fakeHttpServer = FakeHttpServer.of(handler, false)) { + MultipartUploadHttpRequestManager manager = + MultipartUploadHttpRequestManager.createFrom(fakeHttpServer.getHttpStorageOptions()); + CreateMultipartUploadRequest request = + CreateMultipartUploadRequest.builder() + .bucket("test-bucket") + .key("test-key") + .contentType("application/octet-stream") + .build(); + + CreateMultipartUploadResponse response = manager.sendCreateMultipartUploadRequest(request); + + assertThat(response.bucket()).isEqualTo("test-bucket"); + } + } + + private void forceSetUploads( + ListMultipartUploadsResponse response, java.util.List uploads) { + try { + java.lang.reflect.Field uploadsField = + ListMultipartUploadsResponse.class.getDeclaredField("uploads"); + uploadsField.setAccessible(true); + uploadsField.set(response, new java.util.ArrayList<>(uploads)); + } catch (Exception e) { + throw new RuntimeException(e); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionFakeTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionFakeTest.java new file mode 100644 index 000000000000..1cb32704982b --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionFakeTest.java @@ -0,0 +1,1860 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._2MiB; +import static com.google.cloud.storage.PackagePrivateMethodWorkarounds.maybeGetStorageDataClient; +import static com.google.cloud.storage.TestUtils.GRPC_STATUS_DETAILS_KEY; +import static com.google.cloud.storage.TestUtils.apiException; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.getChecksummedData; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.fail; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.AbortedException; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.DataLossException; +import com.google.api.gax.rpc.OutOfRangeException; +import com.google.api.gax.rpc.UnavailableException; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import 
com.google.cloud.storage.Hasher.UncheckedChecksumMismatchException; +import com.google.cloud.storage.OtelStorageDecorator.OtelDecoratingBlobReadSession; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.StorageDataClient.FastOpenObjectReadSession; +import com.google.cloud.storage.ZeroCopySupport.DisposableByteString; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.cloud.storage.it.GrpcPlainRequestLoggingInterceptor; +import com.google.cloud.storage.it.GrpcRequestAuditing; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.hash.Hashing; +import com.google.common.io.BaseEncoding; +import com.google.common.io.ByteStreams; +import com.google.common.truth.Correspondence; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.TextFormat; +import com.google.rpc.DebugInfo; +import com.google.storage.v2.BidiReadHandle; +import com.google.storage.v2.BidiReadObjectError; +import com.google.storage.v2.BidiReadObjectRedirectedError; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.BidiReadObjectResponse; +import com.google.storage.v2.BidiReadObjectSpec; +import com.google.storage.v2.BucketName; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.CommonObjectRequestParams; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectRangeData; +import com.google.storage.v2.ReadRange; +import com.google.storage.v2.ReadRangeError; +import com.google.storage.v2.StorageGrpc.StorageImplBase; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.StreamObserver; +import java.io.ByteArrayOutputStream; 
+import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ScatteringByteChannel; +import java.nio.channels.WritableByteChannel; +import java.security.Key; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import javax.crypto.spec.SecretKeySpec; +import org.junit.Test; +import org.junit.function.ThrowingRunnable; + +public final class ITObjectReadSessionFakeTest { + + private static final Object METADATA = + Object.newBuilder() + .setBucket(BucketName.format("_", "b")) + .setName("o") + .setGeneration(1) + .setSize(_2MiB) + .build(); + private static final BidiReadObjectRequest REQ_OPEN = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .build()) + .build(); + private static final BidiReadObjectResponse RES_OPEN = + BidiReadObjectResponse.newBuilder().setMetadata(METADATA).build(); + private static final byte[] ALL_OBJECT_BYTES = DataGenerator.base64Characters().genBytes(64); + private static final Metadata.Key X_GOOG_REQUEST_PARAMS = + Metadata.Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER); + private static final Metadata.Key X_GOOG_GCS_IDEMPOTENCY_TOKEN = + Metadata.Key.of("x-goog-gcs-idempotency-token", Metadata.ASCII_STRING_MARSHALLER); + private static final Metadata.Key X_GOOG_USER_PROJECT = + 
Metadata.Key.of("x-goog-user-project", Metadata.ASCII_STRING_MARSHALLER); + private static final Correspondence IS_UUID = + Correspondence.transforming(UUID::fromString, "is a UUID"); + + /** + * + * + *
    + *
  1. Open blob descriptor + *
  2. attempt to read bytes 10-20 + *
  3. server responds with a redirect + *
  4. expect a new stream open with the specified redirect token, read handle and pending read + * of bytes 10-20 + *
+ */ + @Test + public void bidiReadObjectRedirectedError() throws Exception { + + String routingToken = UUID.randomUUID().toString(); + BidiReadHandle readHandle = + BidiReadHandle.newBuilder() + .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) + .build(); + BidiReadObjectRequest req2 = read(1, 10, 10); + BidiReadObjectRequest req3 = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .setGeneration(1) + .setReadHandle(readHandle) + .setRoutingToken(routingToken) + .build()) + .addReadRanges(getReadRange(1, 10, 10)) + .build(); + + ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 10); + BidiReadObjectResponse res2 = + BidiReadObjectResponse.newBuilder() + .setMetadata(METADATA) + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content.asChecksummedData()) + .setReadRange(getReadRange(1, 10, 10)) + .setRangeEnd(true) + .build()) + .build(); + + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + REQ_OPEN, + respond -> respond.onNext(RES_OPEN), + req2, + respond -> { + BidiReadObjectRedirectedError redirect = + BidiReadObjectRedirectedError.newBuilder() + .setReadHandle(readHandle) + .setRoutingToken(routingToken) + .build(); + + com.google.rpc.Status grpcStatusDetails = + com.google.rpc.Status.newBuilder() + .setCode(com.google.rpc.Code.UNAVAILABLE_VALUE) + .setMessage("redirect") + .addDetails(Any.pack(redirect)) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + StatusRuntimeException statusRuntimeException = + Status.UNAVAILABLE.withDescription("redirect").asRuntimeException(trailers); + respond.onError(statusRuntimeException); + }, + req3, + respond -> respond.onNext(res2))); + + try (FakeServer fakeServer = FakeServer.of(fake); + Storage storage = 
fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureBlobDescriptor = storage.blobReadSession(id); + + try (BlobReadSession bd = futureBlobDescriptor.get(5, TimeUnit.SECONDS)) { + byte[] actual = + bd.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(10L, 10L))) + .get(1, TimeUnit.SECONDS); + + assertThat(xxd(actual)).isEqualTo(xxd(content.getBytes())); + } + } + } + + /** + * + * + *
    + *
  1. Attempt to open blob descriptor + *
  2. server responds with a redirect + *
  3. expect a new stream open with the specified redirect token + *
+ */ + @Test + public void bidiReadObjectRedirectedError_onOpen() throws Exception { + String routingToken = UUID.randomUUID().toString(); + BidiReadHandle readHandle = + BidiReadHandle.newBuilder() + .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) + .build(); + BidiReadObjectRequest req2 = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .setReadHandle(readHandle) + .setRoutingToken(routingToken) + .build()) + .build(); + + BidiReadObjectResponse res1 = + BidiReadObjectResponse.newBuilder() + .setMetadata(Object.newBuilder().setBucket("b").setName("o").setGeneration(1).build()) + .build(); + + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + REQ_OPEN, + respond -> { + BidiReadObjectRedirectedError redirect = + BidiReadObjectRedirectedError.newBuilder() + .setReadHandle(readHandle) + .setRoutingToken(routingToken) + .build(); + + com.google.rpc.Status grpcStatusDetails = + com.google.rpc.Status.newBuilder() + .setCode(com.google.rpc.Code.UNAVAILABLE_VALUE) + .setMessage("redirect") + .addDetails(Any.pack(redirect)) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + StatusRuntimeException statusRuntimeException = + Status.UNAVAILABLE.withDescription("redirect").asRuntimeException(trailers); + respond.onError(statusRuntimeException); + }, + req2, + respond -> respond.onNext(res1))); + + try (FakeServer fakeServer = FakeServer.of(fake); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setGrpcInterceptorProvider( + GrpcPlainRequestLoggingInterceptor.getInterceptorProvider()) + .build() + .getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureBlobDescriptor = storage.blobReadSession(id); + + try (BlobReadSession bd = futureBlobDescriptor.get(5, TimeUnit.SECONDS)) { + assertThat(bd).isNotNull(); + } + } + } + + @Test + public void 
bidiReadObjectRedirectedError_maxRedirectAttempts() throws Exception { + AtomicInteger reqCounter = new AtomicInteger(0); + StorageImplBase fake = + new StorageImplBase() { + @Override + public StreamObserver bidiReadObject( + StreamObserver responseObserver) { + return new AbstractObserver(responseObserver) { + @Override + public void onNext(BidiReadObjectRequest value) { + int requestCount = reqCounter.incrementAndGet(); + BidiReadObjectRedirectedError redirect = + BidiReadObjectRedirectedError.newBuilder() + .setReadHandle( + BidiReadHandle.newBuilder() + .setHandle( + ByteString.copyFromUtf8( + String.format("handle-%03d", requestCount))) + .build()) + .setRoutingToken(String.format("token-%03d", requestCount)) + .build(); + + com.google.rpc.Status grpcStatusDetails = + com.google.rpc.Status.newBuilder() + .setCode(com.google.rpc.Code.UNAVAILABLE_VALUE) + .setMessage(String.format("redirect %03d", requestCount)) + .addDetails(Any.pack(redirect)) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + StatusRuntimeException statusRuntimeException = + Status.UNAVAILABLE + .withDescription(String.format("redirect %03d", requestCount)) + .asRuntimeException(trailers); + respond.onError(statusRuntimeException); + } + }; + } + }; + + try (FakeServer fakeServer = FakeServer.of(fake); + Storage storage = fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureBlobDescriptor = + storage.blobReadSession(id, BlobSourceOption.userProject("user-project")); + + StorageException se = + assertThrows( + StorageException.class, + () -> { + try { + futureBlobDescriptor.get(5, TimeUnit.SECONDS); + } catch (ExecutionException e) { + throw e.getCause(); + } + }); + + assertThat(se.getCode()).isEqualTo(503); + assertThat(se).hasCauseThat().isInstanceOf(UnavailableException.class); + assertThat(reqCounter.get()).isEqualTo(4); + } + } + + @Test + public void 
bidiReadObjectError() throws Exception { + + ChecksummedTestContent content2 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 5); + BidiReadObjectRequest req2 = read(1, 10, 10); + BidiReadObjectResponse res2 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content2.asChecksummedData()) + .setReadRange(getReadRange(1, 10, 5)) + .build()) + .build(); + BidiReadObjectError err2 = + BidiReadObjectError.newBuilder() + .addReadRangeErrors( + ReadRangeError.newBuilder() + .setReadId(1) + .setStatus( + com.google.rpc.Status.newBuilder() + .setCode(com.google.rpc.Code.ABORTED_VALUE) + .build()) + .build()) + .build(); + + ChecksummedTestContent content3 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 15, 5); + BidiReadObjectRequest req3 = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .setGeneration(1) + .build()) + .addReadRanges(getReadRange(2, 15, 5)) + .build(); + BidiReadObjectResponse res3 = + BidiReadObjectResponse.newBuilder() + .setMetadata(METADATA) + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content3.asChecksummedData()) + .setReadRange(getReadRange(2, 15, 5)) + .setRangeEnd(true) + .build()) + .build(); + + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + REQ_OPEN, + respond -> respond.onNext(RES_OPEN), + req2, + respond -> { + com.google.rpc.Status grpcStatusDetails = + com.google.rpc.Status.newBuilder() + .setCode(com.google.rpc.Code.UNAVAILABLE_VALUE) + .setMessage("fail read_id: 1") + .addDetails(Any.pack(err2)) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + StatusRuntimeException statusRuntimeException = + Status.UNAVAILABLE.withDescription("redirect").asRuntimeException(trailers); + respond.onNext(res2); + respond.onError(statusRuntimeException); + }, + req3, + respond 
-> respond.onNext(res3))); + + try (FakeServer fakeServer = FakeServer.of(fake); + Storage storage = fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureBlobDescriptor = storage.blobReadSession(id); + + try (BlobReadSession bd = futureBlobDescriptor.get(5, TimeUnit.SECONDS)) { + StorageException se = + assertThrows( + StorageException.class, + () -> { + try { + ApiFuture future = + bd.readAs( + ReadProjectionConfigs.asFutureBytes() + .withRangeSpec(RangeSpec.of(10L, 10L))); + future.get(5, TimeUnit.SECONDS); + } catch (ExecutionException e) { + throw e.getCause(); + } + }); + assertThat(se).hasCauseThat().isInstanceOf(AbortedException.class); + byte[] actual = + bd.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(15L, 5L))) + .get(2, TimeUnit.SECONDS); + assertThat(actual).hasLength(5); + assertThat(xxd(actual)).isEqualTo(xxd(content3.getBytes())); + } + } + } + + @Test + public void expectRetryForRangeWithFailedChecksumValidation() throws Exception { + + ChecksummedTestContent expected = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 20); + + ChecksummedTestContent content2_1 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 10); + ChecksummedTestContent content2_2 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 20, 10); + BidiReadObjectRequest req2 = read(1, 10, 20); + BidiReadObjectResponse res2_1 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content2_1.asChecksummedData()) + .setReadRange(getReadRange(1, 10, 10)) + .build()) + .build(); + BidiReadObjectResponse res2_2 = + BidiReadObjectResponse.newBuilder() + .setMetadata(METADATA) + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content2_2.asChecksummedData().toBuilder().setCrc32C(1)) + .setReadRange(getReadRange(1, 20, 10)) + .setRangeEnd(true) + .build()) + .build(); + + BidiReadObjectRequest req3 = read(2, 20, 10); 
+ BidiReadObjectResponse res3 = + BidiReadObjectResponse.newBuilder() + .setMetadata(METADATA) + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content2_2.asChecksummedData()) + .setReadRange(getReadRange(2, 20, 10)) + .setRangeEnd(true) + .build()) + .build(); + + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + REQ_OPEN, + respond -> respond.onNext(RES_OPEN), + req2, + respond -> { + respond.onNext(res2_1); + respond.onNext(res2_2); + }, + req3, + respond -> respond.onNext(res3))); + + runTestAgainstFakeServer(fake, RangeSpec.of(10L, 20L), expected); + } + + @Test + public void objectRangeData_offset_notAligned_lt() throws Exception { + + ChecksummedTestContent expected = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 20); + + ChecksummedTestContent content2 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 9, 20); + BidiReadObjectRequest req2 = read(1, 10, 20); + BidiReadObjectResponse res2 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content2.asChecksummedData()) + .setReadRange(getReadRange(1, 9, content2)) + .setRangeEnd(true) + .build()) + .build(); + + ChecksummedTestContent content3 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 29, 1); + BidiReadObjectRequest req3 = read(2, 29, 1); + BidiReadObjectResponse res3 = + BidiReadObjectResponse.newBuilder() + .setMetadata(METADATA) + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content3.asChecksummedData()) + .setReadRange(getReadRange(2, 29, content3)) + .setRangeEnd(true) + .build()) + .build(); + + ImmutableMap db = + ImmutableMap.builder() + .put(REQ_OPEN, RES_OPEN) + .put(req2, res2) + .put(req3, res3) + .buildOrThrow(); + + runTestAgainstFakeServer(FakeStorage.from(db), RangeSpec.of(10L, 20L), expected); + } + + @Test + public void objectRangeData_offset_notAligned_gt() throws Exception { + + ChecksummedTestContent expected = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 
20); + + ChecksummedTestContent content2 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 11, 20); + BidiReadObjectRequest req2 = read(1, 10, 20); + BidiReadObjectResponse res2 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content2.asChecksummedData()) + .setReadRange(getReadRange(1, 11, content2)) + .setRangeEnd(true) + .build()) + .build(); + + ChecksummedTestContent content3 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 20); + BidiReadObjectRequest req3 = read(2, 10, 20); + BidiReadObjectResponse res3 = + BidiReadObjectResponse.newBuilder() + .setMetadata(METADATA) + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content3.asChecksummedData()) + .setReadRange(getReadRange(2, 10, content3)) + .setRangeEnd(true) + .build()) + .build(); + + ImmutableMap db = + ImmutableMap.builder() + .put(REQ_OPEN, RES_OPEN) + .put(req2, res2) + .put(req3, res3) + .buildOrThrow(); + + runTestAgainstFakeServer(FakeStorage.from(db), RangeSpec.of(10L, 20L), expected); + } + + @Test + public void readRange_retrySettingsApplicable_attempt() throws Exception { + + AtomicInteger reqCounter = new AtomicInteger(0); + StorageImplBase fake = + new StorageImplBase() { + @Override + public StreamObserver bidiReadObject( + StreamObserver responseObserver) { + return new AbstractObserver(responseObserver) { + @Override + public void onNext(BidiReadObjectRequest request) { + int reqCount = reqCounter.getAndIncrement(); + if (request.equals(REQ_OPEN)) { + respond.onNext(RES_OPEN); + } else { + + BidiReadObjectResponse.Builder b = BidiReadObjectResponse.newBuilder(); + request.getReadRangesList().stream() + .map(r -> r.toBuilder().setReadLength(1).build()) + .map( + r -> + ObjectRangeData.newBuilder() + .setReadRange(r) + .setChecksummedData( + ChecksummedData.newBuilder() + .setContent(ByteString.copyFrom(new byte[] {'A'})) + // explicitly send a bad checksum to induce failure + .setCrc32C(reqCount) 
+ .build()) + .build()) + .forEach(b::addObjectDataRanges); + + respond.onNext(b.build()); + } + } + }; + } + }; + + try (FakeServer fakeServer = FakeServer.of(fake); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setRetrySettings(RetrySettings.newBuilder().setMaxAttempts(3).build()) + .build() + .getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureBlobDescriptor = storage.blobReadSession(id); + try (BlobReadSession bd = futureBlobDescriptor.get(5, TimeUnit.SECONDS)) { + ApiFuture future = + bd.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(10L, 10L))); + + StorageException se = + assertThrows( + StorageException.class, () -> TestUtils.await(future, 5, TimeUnit.SECONDS)); + assertThat(se).hasCauseThat().isInstanceOf(DataLossException.class); + DataLossException dataLossException = (DataLossException) se.getCause(); + assertThat(dataLossException).isInstanceOf(UncheckedChecksumMismatchException.class); + String suppressedMessages = TestUtils.messagesToText(se); + assertAll( + () -> + assertThat(suppressedMessages) + .contains("Operation failed to complete within attempt budget"), + () -> + assertThat(suppressedMessages) + .contains( + "Mismatch checksum value. Expected crc32c{0x00000001} actual" + + " crc32c{0xe16dcdee}"), + () -> + assertThat(suppressedMessages) + .contains( + "Mismatch checksum value. 
Expected crc32c{0x00000002} actual" + + " crc32c{0xe16dcdee}"), + () -> assertThat(suppressedMessages).contains("Asynchronous task failed")); + } + } + } + + @Test + public void retrySettingsApplicable_objectRangeData_offset_notAligned_gt() throws Exception { + + ChecksummedTestContent content2 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 11, 20); + BidiReadObjectRequest req2 = read(1, 10, 20); + BidiReadObjectResponse res2 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content2.asChecksummedData()) + .setReadRange(getReadRange(1, 11, content2)) + .setRangeEnd(true) + .build()) + .build(); + + ChecksummedTestContent content3 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 12, 20); + BidiReadObjectRequest req3 = read(2, 10, 20); + BidiReadObjectResponse res3 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content3.asChecksummedData()) + .setReadRange(getReadRange(2, 12, content3)) + .setRangeEnd(true) + .build()) + .build(); + + ImmutableMap db = + ImmutableMap.builder() + .put(REQ_OPEN, RES_OPEN) + .put(req2, res2) + .put(req3, res3) + .buildOrThrow(); + + try (FakeServer fakeServer = FakeServer.of(FakeStorage.from(db)); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setRetrySettings(RetrySettings.newBuilder().setMaxAttempts(2).build()) + .build() + .getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureObjectDescriptor = storage.blobReadSession(id); + + try (BlobReadSession bd = futureObjectDescriptor.get(5, TimeUnit.SECONDS)) { + ApiFuture future = + bd.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(10L, 20L))); + + StorageException se = + assertThrows( + StorageException.class, () -> TestUtils.await(future, 5, TimeUnit.SECONDS)); + assertThat(se).hasCauseThat().isInstanceOf(OutOfRangeException.class); + String suppressedMessages = TestUtils.messagesToText(se); 
+ assertAll( + () -> + assertThat(suppressedMessages) + .contains("Operation failed to complete within attempt budget"), + () -> + assertThat(suppressedMessages) + .contains("position = 10, readRange.read_offset = 11"), + () -> assertThat(suppressedMessages).contains("Asynchronous task failed")); + } + } + } + + @Test + public void validateReadRemovedFromStateWhenFailed() throws Exception { + + BidiReadObjectRequest req2 = read(1, 10, 20); + BidiReadObjectResponse res2 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setReadRange(req2.getReadRangesList().get(0)) + .setChecksummedData( + ChecksummedData.newBuilder() + .setContent(ByteString.copyFrom(new byte[] {'A'})) + // explicitly send a bad checksum to induce failure + .setCrc32C(1) + .build()) + .build()) + .build(); + + FakeStorage fake = FakeStorage.from(ImmutableMap.of(REQ_OPEN, RES_OPEN, req2, res2)); + + try (FakeServer fakeServer = FakeServer.of(fake); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setRetrySettings(RetrySettings.newBuilder().setMaxAttempts(1).build()) + .build() + .getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureObjectDescriptor = storage.blobReadSession(id); + + try (BlobReadSession bd = futureObjectDescriptor.get(5, TimeUnit.SECONDS)) { + ObjectReadSessionImpl orsi = getObjectReadSessionImpl(bd); + + ApiFuture future = + bd.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(10L, 20L))); + ExecutionException ee = + assertThrows(ExecutionException.class, () -> future.get(5, TimeUnit.SECONDS)); + + assertThat(ee) + .hasCauseThat() + .hasCauseThat() + .isInstanceOf(UncheckedChecksumMismatchException.class); + + ObjectReadSessionStreamRead outstandingRead = orsi.state.getOutstandingRead(1L); + assertThat(outstandingRead).isNull(); + } + } + } + + @Test + public void requestOptionsShouldBePresentInRequest() throws Exception { + + String keyB64 = 
"JVzfVl8NLD9FjedFuStegjRfES5ll5zc59CIXw572OA="; + Key key = new SecretKeySpec(BaseEncoding.base64().decode(keyB64), "AES256"); + byte[] keySha256 = Hashing.sha256().hashBytes(key.getEncoded()).asBytes(); + BidiReadObjectRequest reqOpen = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .setIfGenerationMatch(1) + .setIfGenerationNotMatch(2) + .setIfMetagenerationMatch(3) + .setIfMetagenerationNotMatch(4) + .setCommonObjectRequestParams( + CommonObjectRequestParams.newBuilder() + .setEncryptionAlgorithm("AES256") + .setEncryptionKeyBytes(ByteString.copyFrom(key.getEncoded())) + .setEncryptionKeySha256Bytes(ByteString.copyFrom(keySha256)))) + .build(); + BidiReadObjectResponse resOpen = + BidiReadObjectResponse.newBuilder().setMetadata(METADATA).build(); + + FakeStorage fake = FakeStorage.from(ImmutableMap.of(reqOpen, resOpen)); + + GrpcRequestAuditing requestAuditing = new GrpcRequestAuditing(); + try (FakeServer fakeServer = FakeServer.of(fake); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setRetrySettings(RetrySettings.newBuilder().setMaxAttempts(1).build()) + .setGrpcInterceptorProvider( + () -> + ImmutableList.of( + requestAuditing, GrpcPlainRequestLoggingInterceptor.getInstance())) + .build() + .getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureObjectDescriptor = + storage.blobReadSession( + id, + BlobSourceOption.generationMatch(1), + BlobSourceOption.generationNotMatch(2), + BlobSourceOption.metagenerationMatch(3), + BlobSourceOption.metagenerationNotMatch(4), + BlobSourceOption.decryptionKey(key), + BlobSourceOption.userProject("my-awesome-project")); + + try (BlobReadSession bd = futureObjectDescriptor.get(5, TimeUnit.SECONDS)) { + // by the time we reach here the test has already passed/failed + assertAll( + () -> assertThat(bd).isNotNull(), + () -> + requestAuditing + 
.assertRequestHeader(X_GOOG_REQUEST_PARAMS) + .contains("bucket=" + METADATA.getBucket()), + () -> requestAuditing.assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN).hasSize(1), + () -> { + // make sure we get a UUID in our header + requestAuditing + .assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN) + .comparingElementsUsing(IS_UUID) + .doesNotContain(UUID.randomUUID()); + }, + () -> + requestAuditing + .assertRequestHeader(X_GOOG_USER_PROJECT) + .contains("my-awesome-project")); + } + } + } + + @Test + public void failedStreamRestartShouldFailAllPendingReads() throws Exception { + final Set reads = Collections.synchronizedSet(new HashSet<>()); + StorageImplBase fakeStorage = + new StorageImplBase() { + @Override + public StreamObserver bidiReadObject( + StreamObserver responseObserver) { + return new AbstractObserver(responseObserver) { + @Override + public void onNext(BidiReadObjectRequest request) { + if (request.equals(REQ_OPEN)) { + respond.onNext(RES_OPEN); + return; + } + + reads.add(request); + + if (reads.size() == 3) { + respond.onError(Status.UNAVAILABLE.asRuntimeException()); + } + } + }; + } + }; + + try (FakeServer fakeServer = FakeServer.of(fakeStorage); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setRetrySettings(RetrySettings.newBuilder().setMaxAttempts(1).build()) + .build() + .getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureObjectDescriptor = storage.blobReadSession(id); + + try (BlobReadSession bd = futureObjectDescriptor.get(5, TimeUnit.SECONDS)) { + ApiFuture f1 = + bd.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(1, 1))); + ApiFuture f2 = + bd.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(2, 2))); + ApiFuture f3 = + bd.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(3, 3))); + + List successful = + ApiFutures.successfulAsList(ImmutableList.of(f1, f2, f3)).get(5, TimeUnit.SECONDS); + 
assertThat(successful).isEqualTo(Lists.newArrayList(null, null, null)); + + assertAll( + () -> { + Set readRanges = + reads.stream() + .map(BidiReadObjectRequest::getReadRangesList) + .flatMap(Collection::stream) + .map(ITObjectReadSessionFakeTest::fmt) + .collect(Collectors.toSet()); + Set expected = + Stream.of(getReadRange(1, 1, 1), getReadRange(2, 2, 2), getReadRange(3, 3, 3)) + .map(ITObjectReadSessionFakeTest::fmt) + .collect(Collectors.toSet()); + assertThat(readRanges).isEqualTo(expected); + }, + assert503(f1), + assert503(f2), + assert503(f3)); + } + } + } + + // todo: in the future this should also interrupt and fail any child streams. + // for example, when an individual range is streamed and we don't want backpressure + // from the consumer to slow down the network stream of all reads. + @Test + public void closingBlobDescriptorShouldFailAllPendingReads() throws Exception { + BidiReadObjectRequest req2 = read(1, 1, 1); + BidiReadObjectResponse res2 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setReadRange(req2.getReadRangesList().get(0)) + .setChecksummedData( + getChecksummedData(ByteString.copyFromUtf8("A"), Hasher.enabled())) + .setRangeEnd(true)) + .build(); + final Set reads = Collections.synchronizedSet(new HashSet<>()); + StorageImplBase fakeStorage = + new StorageImplBase() { + @Override + public StreamObserver bidiReadObject( + StreamObserver responseObserver) { + return new AbstractObserver(responseObserver) { + @Override + public void onNext(BidiReadObjectRequest request) { + if (request.equals(REQ_OPEN)) { + respond.onNext(RES_OPEN); + return; + } else if (request.equals(req2)) { + respond.onNext(res2); + } + reads.add(request); + } + }; + } + }; + + try (FakeServer fakeServer = FakeServer.of(fakeStorage); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setRetrySettings(RetrySettings.newBuilder().setMaxAttempts(1).build()) + .build() + .getService()) { + + BlobId id 
= BlobId.of("b", "o"); + ApiFuture futureObjectDescriptor = storage.blobReadSession(id); + + try (BlobReadSession bd = futureObjectDescriptor.get(5, TimeUnit.SECONDS)) { + // issue three different range reads + ApiFuture f1 = + bd.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(1, 1))); + ApiFuture f2 = + bd.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(2, 2))); + ApiFuture f3 = + bd.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(3, 3))); + + // make sure the first read succeeded + byte[] actual = TestUtils.await(f1, 5, TimeUnit.SECONDS); + + // close the "parent" + bd.close(); + + assertAll( + () -> { + // make sure all three ranges were sent to the server + Set readRanges = + reads.stream() + .map(BidiReadObjectRequest::getReadRangesList) + .flatMap(Collection::stream) + .map(ITObjectReadSessionFakeTest::fmt) + .collect(Collectors.toSet()); + Set expected = + Stream.of(getReadRange(1, 1, 1), getReadRange(2, 2, 2), getReadRange(3, 3, 3)) + .map(ITObjectReadSessionFakeTest::fmt) + .collect(Collectors.toSet()); + assertThat(readRanges).isEqualTo(expected); + }, + () -> { + assertThat(ByteString.copyFrom(actual)).isEqualTo(ByteString.copyFromUtf8("A")); + }, + // make sure the other two pending reads fail + assertStatusCodeIs(f2, 0), + assertStatusCodeIs(f3, 0), + () -> { + // the futures are already verified to be resolved based on the two previous + // assertions get them again for our additional assertions + ExecutionException ee2 = assertThrows(ExecutionException.class, f2::get); + ExecutionException ee3 = assertThrows(ExecutionException.class, f3::get); + StorageException se2 = (StorageException) ee2.getCause(); + StorageException se3 = (StorageException) ee3.getCause(); + + assertAll( + () -> assertThat(se2).isNotSameInstanceAs(se3), + () -> + assertThat(se2) + .hasCauseThat() + .isInstanceOf(AsyncSessionClosedException.class), + () -> + assertThat(se3) + .hasCauseThat() + 
.isInstanceOf(AsyncSessionClosedException.class)); + }); + } + } + } + + @Test + public void streaming() throws Exception { + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(_2MiB)); + BidiReadObjectRequest req2 = + BidiReadObjectRequest.newBuilder().addReadRanges(getReadRange(1, 0, 0)).build(); + + BidiReadObjectResponse res2 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setReadRange(getReadRange(1, 0, _2MiB)) + .setRangeEnd(true) + .setChecksummedData(testContent.asChecksummedData())) + .build(); + + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + REQ_OPEN, + respond -> respond.onNext(RES_OPEN), + req2, + respond -> respond.onNext(res2))); + + try (FakeServer fakeServer = FakeServer.of(fake); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setGrpcInterceptorProvider( + GrpcPlainRequestLoggingInterceptor.getInterceptorProvider()) + .build() + .getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureBlobDescriptor = storage.blobReadSession(id); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (BlobReadSession bd = futureBlobDescriptor.get(5, TimeUnit.SECONDS); + ScatteringByteChannel c = bd.readAs(ReadProjectionConfigs.asChannel())) { + ByteStreams.copy(c, Channels.newChannel(baos)); + } + + byte[] actual = baos.toByteArray(); + assertThat(xxd(actual)).isEqualTo(xxd(testContent.getBytes())); + } + } + + @Test + public void retryableErrorWhileOpeningIsRetried() throws Exception { + AtomicInteger reqCounter = new AtomicInteger(0); + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + REQ_OPEN, + respond -> { + int i = reqCounter.incrementAndGet(); + if (i <= 1) { + ApiException apiException = + apiException(Code.UNAVAILABLE, String.format("{unavailable %d}", i)); + respond.onError(apiException); + } else { + respond.onNext(RES_OPEN); + } + })); + + try (FakeServer fakeServer = 
FakeServer.of(fake); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setRetrySettings(RetrySettings.newBuilder().setMaxAttempts(3).build()) + .build() + .getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureBlobDescriptor = storage.blobReadSession(id); + try (BlobReadSession bd = futureBlobDescriptor.get(20, TimeUnit.SECONDS)) { + assertThat(bd).isNotNull(); + } + } + } + + @Test + public void onCompleteWithoutAValue() throws Exception { + // I'm not sure if this is something that can actually happen in practice, but is being here + // to ensure it's at least accounted for, rather than a null pointer exception or something else + // equally cryptic. + FakeStorage fake = FakeStorage.of(ImmutableMap.of(REQ_OPEN, StreamObserver::onCompleted)); + + try (FakeServer fakeServer = FakeServer.of(fake); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setRetrySettings(RetrySettings.newBuilder().setMaxAttempts(3).build()) + .build() + .getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureBlobDescriptor = storage.blobReadSession(id); + ExecutionException ee = + assertThrows( + ExecutionException.class, () -> futureBlobDescriptor.get(20, TimeUnit.SECONDS)); + assertAll( + () -> assertThat(ee).hasCauseThat().isInstanceOf(StorageException.class), + () -> + assertThat(ee).hasCauseThat().hasCauseThat().isInstanceOf(UnavailableException.class), + () -> assertThat(((StorageException) ee.getCause()).getCode()).isEqualTo(0), + () -> { + String messages = TestUtils.messagesToText(ee.getCause()); + assertThat(messages).contains("Unretryable error"); + }); + } + } + + /** + * Create a read that will attempt to read a range as a channel and read another range as an + * accumulated byte array. + * + *

Because a channel could block, this should result in two streams being opened against the + * server. + * + *

validate that two streams are opened and that getting the accumulated byte array can succeed + * while the channel hasn't been fully consumed. + */ + @Test + public void blobDescriptorTransparentlyForksStreamIfNeeded() throws Exception { + ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 20); + ChecksummedTestContent content1 = ChecksummedTestContent.of(content.getBytes(), 0, 10); + ChecksummedTestContent content2 = ChecksummedTestContent.of(content.getBytes(), 10, 10); + BidiReadObjectRequest req2 = read(1, 10, 20); + BidiReadObjectResponse res2_1 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content1.asChecksummedData()) + .setReadRange(getReadRange(1, 10, content1)) + .build()) + .build(); + BidiReadObjectResponse res2_2 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content2.asChecksummedData()) + .setReadRange(getReadRange(1, 20, content2)) + .setRangeEnd(true) + .build()) + .build(); + + BidiReadObjectRequest req3 = + read(2, 10, 20).toBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .setGeneration(METADATA.getGeneration()) + .build()) + .build(); + BidiReadObjectResponse res3 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content.asChecksummedData()) + .setReadRange(getReadRange(2, 10, content)) + .setRangeEnd(true) + .build()) + .build(); + + AtomicInteger bidiReadObjectCount = new AtomicInteger(); + CountDownLatch cdl = new CountDownLatch(1); + + StorageImplBase fakeStorage = + new StorageImplBase() { + @Override + public StreamObserver bidiReadObject( + StreamObserver respond) { + bidiReadObjectCount.getAndIncrement(); + return new StreamObserver() { + @Override + public void onNext(BidiReadObjectRequest request) { + if 
(request.equals(REQ_OPEN)) { + respond.onNext(RES_OPEN); + } else if (request.equals(req2)) { + // respond with the first half of the bytes, then wait for the second request to + // be received before sending the second half. + respond.onNext(res2_1); + try { + cdl.await(); + } catch (InterruptedException e) { + respond.onError(TestUtils.apiException(Code.UNIMPLEMENTED, e.getMessage())); + } + respond.onNext(res2_2); + respond.onCompleted(); + } else if (request.equals(req3)) { + respond.onNext(res3); + respond.onCompleted(); + // signal the second request was received + cdl.countDown(); + } else { + respond.onError(TestUtils.apiException(Code.UNIMPLEMENTED, "Unexpected request")); + } + } + + @Override + public void onError(Throwable t) { + System.out.println("ITObjectReadSessionFakeTest.onError"); + respond.onError(t); + } + + @Override + public void onCompleted() { + System.out.println("ITObjectReadSessionFakeTest.onCompleted"); + respond.onCompleted(); + } + }; + } + }; + try (FakeServer fakeServer = FakeServer.of(fakeStorage); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setRetrySettings(RetrySettings.newBuilder().setMaxAttempts(1).build()) + .build() + .getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureObjectDescriptor = storage.blobReadSession(id); + + ByteBuffer buf = ByteBuffer.allocate(50); + byte[] bytes = new byte[0]; + Exception caught = null; + try (BlobReadSession bd = futureObjectDescriptor.get(5, TimeUnit.SECONDS)) { + try (ScatteringByteChannel c = + bd.readAs(ReadProjectionConfigs.asChannel().withRangeSpec(RangeSpec.of(10L, 20L)))) { + buf.limit(5); + Buffers.fillFrom(buf, c); + buf.limit(buf.capacity()); + ApiFuture future = + bd.readAs( + ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(10L, 20L))); + bytes = future.get(3, TimeUnit.SECONDS); + Buffers.fillFrom(buf, c); + } + } catch (Exception e) { + // stash off any runtime failure so we can still do our assertions to help 
determine + // the true failure + caught = e; + } finally { + final byte[] finalBytes = bytes; + final Exception finalCaught = caught; + assertAll( + () -> assertThat(bidiReadObjectCount.get()).isEqualTo(2), + () -> + assertWithMessage("Channel bytes missmatch") + .that(xxd(buf)) + .isEqualTo(xxd(content.getBytes())), + () -> + assertWithMessage("Future bytes missmatch") + .that(xxd(finalBytes)) + .isEqualTo(xxd(content.getBytes())), + () -> { + if (finalCaught != null) { + throw new Exception("exception during test", finalCaught); + } + }); + } + } + } + + @Test + public void gettingSessionFromFastOpenKeepsTheSessionOpenUntilClosed() throws Exception { + ChecksummedTestContent expected = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 30); + + ChecksummedTestContent content1 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 10); + ChecksummedTestContent content2 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 20, 10); + ChecksummedTestContent content3 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 30, 10); + BidiReadObjectRequest req1 = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .build()) + .addReadRanges(getReadRange(1, 10, 10)) + .build(); + BidiReadObjectResponse res1 = + BidiReadObjectResponse.newBuilder() + .setMetadata(METADATA) + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setReadRange(getReadRange(1, 10, content1)) + .setChecksummedData(content1.asChecksummedData()) + .setRangeEnd(true) + .build()) + .build(); + + BidiReadObjectRequest req2 = read(2, 20, 10); + BidiReadObjectResponse res2 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setReadRange(getReadRange(2, 20, content2)) + .setChecksummedData(content2.asChecksummedData()) + .setRangeEnd(true) + .build()) + .build(); + BidiReadObjectRequest req3 = read(3, 30, 10); + BidiReadObjectResponse res3 = + 
BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setReadRange(getReadRange(3, 30, content3)) + .setChecksummedData(content3.asChecksummedData()) + .setRangeEnd(true) + .build()) + .build(); + + ImmutableMap db = + ImmutableMap.builder() + .put(req1, res1) + .put(req2, res2) + .put(req3, res3) + .buildOrThrow(); + + FakeStorage fakeStorage = FakeStorage.from(db); + + try (FakeServer fakeServer = FakeServer.of(fakeStorage); + Storage storage = + fakeServer.getGrpcStorageOptions().toBuilder() + .setRetrySettings(RetrySettings.newBuilder().setMaxAttempts(1).build()) + .build() + .getService()) { + StorageDataClient dataClient = maybeGetStorageDataClient(storage); + assertThat(dataClient).isNotNull(); + + BidiReadObjectRequest req = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .build()) + .build(); + + ApiFuture>> future = + dataClient.fastOpenReadSession( + req, + GrpcCallContext.createDefault(), + ReadProjectionConfigs.asFutureByteString().withRangeSpec(RangeSpec.of(10, 10))); + + ByteString bytes = ByteString.empty(); + Exception caught = null; + + try (FastOpenObjectReadSession> fastOpenChannel = + future.get(5, TimeUnit.SECONDS); + ObjectReadSession session = fastOpenChannel.getSession()) { + ApiFuture futureBytes1 = fastOpenChannel.getProjection(); + try (DisposableByteString disposableByteString = futureBytes1.get()) { + bytes = bytes.concat(disposableByteString.byteString()); + } + + ApiFuture futureBytes2 = + session.readAs( + ReadProjectionConfigs.asFutureByteString().withRangeSpec(RangeSpec.of(20L, 10L))); + try (DisposableByteString disposableByteString = futureBytes2.get()) { + bytes = bytes.concat(disposableByteString.byteString()); + } + + ApiFuture futureBytes3 = + session.readAs( + ReadProjectionConfigs.asFutureByteString().withRangeSpec(RangeSpec.of(30L, 10L))); + try (DisposableByteString 
disposableByteString = futureBytes3.get()) { + bytes = bytes.concat(disposableByteString.byteString()); + } + + } catch (Exception e) { + // stash off any runtime failure so we can still do our assertions to help determine + // the true failure + caught = e; + } finally { + final ByteString finalBytes = bytes; + final Exception finalCaught = caught; + assertAll( + () -> assertThat(xxd(finalBytes)).isEqualTo(xxd(expected.getBytes())), + () -> { + if (finalCaught != null) { + throw new Exception("exception during test", finalCaught); + } + }); + } + } + } + + @Test + public void expectRetryForRangeWithFailedChecksumValidation_channel() throws Exception { + ChecksummedTestContent expected = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 20); + + ChecksummedTestContent content2_1 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 10); + ChecksummedTestContent content2_2 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 20, 10); + BidiReadObjectRequest req2 = read(1, 10, 20); + BidiReadObjectResponse res2_1 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content2_1.asChecksummedData()) + .setReadRange(getReadRange(1, 10, 10)) + .build()) + .build(); + BidiReadObjectResponse res2_2 = + BidiReadObjectResponse.newBuilder() + .setMetadata(METADATA) + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content2_2.asChecksummedData().toBuilder().setCrc32C(1)) + .setReadRange(getReadRange(1, 20, 10)) + .setRangeEnd(true) + .build()) + .build(); + + BidiReadObjectRequest req3 = + BidiReadObjectRequest.newBuilder().addReadRanges(getReadRange(2, 20, 10)).build(); + BidiReadObjectResponse res3 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content2_2.asChecksummedData()) + .setReadRange(getReadRange(2, 20, 10)) + .setRangeEnd(true) + .build()) + .build(); + + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + REQ_OPEN, 
+ respond -> respond.onNext(RES_OPEN), + req2, + respond -> { + respond.onNext(res2_1); + respond.onNext(res2_2); + }, + req3, + respond -> respond.onNext(res3))); + + try (FakeServer fakeServer = FakeServer.of(fake); + Storage storage = fakeServer.getGrpcStorageOptions().getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureObjectDescriptor = storage.blobReadSession(id); + + try (BlobReadSession bd = futureObjectDescriptor.get(5, TimeUnit.SECONDS)) { + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (ScatteringByteChannel r = + bd.readAs(ReadProjectionConfigs.asChannel().withRangeSpec(RangeSpec.of(10L, 20L))); + WritableByteChannel w = Channels.newChannel(baos)) { + ByteStreams.copy(r, w); + } + + byte[] actual = baos.toByteArray(); + Crc32cLengthKnown actualCrc32c = Hasher.enabled().hash(ByteBuffer.wrap(actual)); + + byte[] expectedBytes = expected.getBytes(); + Crc32cLengthKnown expectedCrc32c = + Crc32cValue.of(expected.getCrc32c(), expectedBytes.length); + + assertAll( + () -> assertThat(actual).hasLength(expectedBytes.length), + () -> assertThat(xxd(actual)).isEqualTo(xxd(expectedBytes)), + () -> assertThat(actualCrc32c).isEqualTo(expectedCrc32c)); + } + } + } + + /** + * Define a test where multiple reads for the same session will be performed, and some of those + * reads cause OUT_OF_RANGE errors. + * + *

An OUT_OF_RANGE error is delivered as a stream level status, which means any reads which + * share a stream must be restarted while the read that caused the OUT_OF_RANGE should be failed. + * + *

Verify this behavior for both channel based and future byte[] based. + */ + @Test + public void serverOutOfRangeIsNotRetried() throws Exception { + ChecksummedTestContent expected = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 20); + + BidiReadObjectResponse dataResp = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(expected.asChecksummedData()) + .setReadRange(getReadRange(0, 10, 20)) + .setRangeEnd(true) + .build()) + .build(); + + AtomicInteger bidiReadObjectCount = new AtomicInteger(); + ExecutorService exec = + Executors.newCachedThreadPool( + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("exec-%d").build()); + + // The test will submit 4 different reads to the server, we want to wait until all 4 are + // received by the server before sending any responses. + CountDownLatch serverWaitCdl = new CountDownLatch(4); + // Then, we want the test to wait for all read responses to be returned from the server before + // beginning assertions. 
+ CountDownLatch testWaitCdl = new CountDownLatch(4); + + StorageImplBase fakeStorage = + new StorageImplBase() { + @Override + public StreamObserver bidiReadObject( + StreamObserver respond) { + bidiReadObjectCount.getAndIncrement(); + return new StreamObserver() { + @Override + public void onNext(BidiReadObjectRequest request) { + if (request.equals(REQ_OPEN)) { + respond.onNext(RES_OPEN); + } else if (request.getReadRangesList().get(0).getReadOffset() == 10) { + exec.submit( + () -> { + try { + // when receiving a request on the stream for the valid range + // send it to a background thread that will wait for all reads to be setup + serverWaitCdl.await(); + BidiReadObjectResponse.Builder b = dataResp.toBuilder(); + ReadRange readRange = request.getReadRangesList().get(0); + ObjectRangeData.Builder bb = dataResp.getObjectDataRanges(0).toBuilder(); + bb.getReadRangeBuilder().setReadId(readRange.getReadId()); + b.setObjectDataRanges(0, bb.build()); + respond.onNext(b.build()); + testWaitCdl.countDown(); + } catch (InterruptedException e) { + respond.onError( + TestUtils.apiException(Code.UNIMPLEMENTED, e.getMessage())); + } + }); + } else if (bidiReadObjectCount.getAndIncrement() >= 1) { + Optional readRange = request.getReadRangesList().stream().findFirst(); + String message = + String.format( + Locale.US, + "OUT_OF_RANGE read_offset = %d", + readRange.map(ReadRange::getReadOffset).orElse(0L)); + long readId = readRange.map(ReadRange::getReadId).orElse(0L); + + BidiReadObjectError err2 = + BidiReadObjectError.newBuilder() + .addReadRangeErrors( + ReadRangeError.newBuilder() + .setReadId(readId) + .setStatus( + com.google.rpc.Status.newBuilder() + .setCode(com.google.rpc.Code.OUT_OF_RANGE_VALUE) + .build()) + .build()) + .build(); + + com.google.rpc.Status grpcStatusDetails = + com.google.rpc.Status.newBuilder() + .setCode(com.google.rpc.Code.UNAVAILABLE_VALUE) + .setMessage("fail read_id: " + readId) + .addDetails(Any.pack(err2)) + .addDetails( + Any.pack( + 
DebugInfo.newBuilder() + .setDetail(message) + .addStackEntries( + TextFormat.printer().shortDebugString(request)) + .build())) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + StatusRuntimeException statusRuntimeException = + Status.OUT_OF_RANGE.withDescription(message).asRuntimeException(trailers); + respond.onError(statusRuntimeException); + testWaitCdl.countDown(); + } else { + respond.onError( + apiException( + Code.UNIMPLEMENTED, + "Unexpected request { " + + TextFormat.printer().shortDebugString(request) + + " }")); + } + } + + @Override + public void onError(Throwable t) { + respond.onError(t); + } + + @Override + public void onCompleted() { + respond.onCompleted(); + } + }; + } + }; + try (FakeServer fakeServer = FakeServer.of(fakeStorage); + Storage storage = fakeServer.getGrpcStorageOptions().getService()) { + + BlobId id = BlobId.of("b", "o"); + + // define the number of seconds our futures are willing to wait before timeout. + // In general everything should resolve in a small number of millis, this is more of a + // safeguard to prevent the whole suite hanging if there is an issue. 
+ int timeoutSeconds = 5; + try (BlobReadSession session = + storage.blobReadSession(id).get(timeoutSeconds, TimeUnit.SECONDS)) { + + ApiFuture expectSuccessFuture = + session.readAs( + ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(10, 20))); + serverWaitCdl.countDown(); + + ApiFuture expectFailureFuture = + session.readAs( + ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.beginAt(37))); + serverWaitCdl.countDown(); + + ReadAsChannel readAsChannel = ReadProjectionConfigs.asChannel(); + Future expectSuccessChannel = + exec.submit( + () -> { + try (ScatteringByteChannel succeed = + session.readAs(readAsChannel.withRangeSpec(RangeSpec.of(10, 20)))) { + serverWaitCdl.countDown(); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ByteStreams.copy(succeed, Channels.newChannel(baos)); + return baos.toByteArray(); + } + }); + + Future expectFailureChannel = + exec.submit( + () -> { + try (ScatteringByteChannel fail = + session.readAs(readAsChannel.withRangeSpec(RangeSpec.beginAt(39)))) { + serverWaitCdl.countDown(); + int read; + do { + read = fail.read(ByteBuffer.allocate(1)); + } while (read == 0); + return read; + } + }); + + boolean await = testWaitCdl.await(timeoutSeconds, TimeUnit.SECONDS); + assertThat(await).isTrue(); + ExecutionException exceptionFromFuture = + assertThrows( + ExecutionException.class, + () -> expectFailureFuture.get(timeoutSeconds, TimeUnit.SECONDS)); + byte[] bytesFromFuture = expectSuccessFuture.get(timeoutSeconds, TimeUnit.SECONDS); + ExecutionException finalExceptionFromChannel = + assertThrows( + ExecutionException.class, + () -> expectFailureChannel.get(timeoutSeconds, TimeUnit.SECONDS)); + byte[] bytesFromChannel = expectSuccessChannel.get(timeoutSeconds, TimeUnit.SECONDS); + + assertAll( + () -> + assertThat(exceptionFromFuture) // ExecutionException + .hasCauseThat() // StorageException + .hasCauseThat() + .isInstanceOf(OutOfRangeException.class), + () -> + 
assertThat(finalExceptionFromChannel) // ExecutionException + .hasCauseThat() // IOException + .hasCauseThat() // StorageException + .hasCauseThat() + .isInstanceOf(OutOfRangeException.class), + () -> assertThat(xxd(bytesFromFuture)).isEqualTo(xxd(expected.getBytes())), + () -> assertThat(xxd(bytesFromChannel)).isEqualTo(xxd(expected.getBytes()))); + } + } + } + + private static void runTestAgainstFakeServer( + FakeStorage fakeStorage, RangeSpec range, ChecksummedTestContent expected) throws Exception { + + try (FakeServer fakeServer = FakeServer.of(fakeStorage); + Storage storage = fakeServer.getGrpcStorageOptions().getService()) { + + BlobId id = BlobId.of("b", "o"); + ApiFuture futureObjectDescriptor = storage.blobReadSession(id); + + try (BlobReadSession bd = futureObjectDescriptor.get(5, TimeUnit.SECONDS)) { + ApiFuture future = + bd.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(range)); + + byte[] actual = future.get(5, TimeUnit.SECONDS); + Crc32cLengthKnown actualCrc32c = Hasher.enabled().hash(ByteBuffer.wrap(actual)); + + byte[] expectedBytes = expected.getBytes(); + Crc32cLengthKnown expectedCrc32c = + Crc32cValue.of(expected.getCrc32c(), expectedBytes.length); + + assertAll( + () -> assertThat(actual).hasLength(expectedBytes.length), + () -> assertThat(xxd(actual)).isEqualTo(xxd(expectedBytes)), + () -> assertThat(actualCrc32c).isEqualTo(expectedCrc32c)); + } + } + } + + static BidiReadObjectRequest read(int readId, int readOffset, int readLength) { + return BidiReadObjectRequest.newBuilder() + .addReadRanges(getReadRange(readId, readOffset, readLength)) + .build(); + } + + static ReadRange getReadRange(int readId, int readOffset, ChecksummedTestContent content) { + return getReadRange(readId, readOffset, content.asChecksummedData().getContent().size()); + } + + static ReadRange getReadRange(int readId, int readOffset, int readLength) { + return ReadRange.newBuilder() + .setReadId(readId) + .setReadOffset(readOffset) + 
.setReadLength(readLength) + .build(); + } + + static ThrowingRunnable assert503(ApiFuture f) { + return assertStatusCodeIs(f, 503); + } + + static ThrowingRunnable assertStatusCodeIs(ApiFuture f, int expected) { + return () -> { + StorageException se = + assertThrows(StorageException.class, () -> TestUtils.await(f, 5, TimeUnit.SECONDS)); + assertThat(se.getCode()).isEqualTo(expected); + }; + } + + static String fmt(ReadRange r) { + return String.format( + "ReadRange{id: %d, offset: %d, length: %d}", + r.getReadId(), r.getReadOffset(), r.getReadLength()); + } + + static ObjectReadSessionImpl getObjectReadSessionImpl(BlobReadSession bd) { + ObjectReadSessionImpl orsi = null; + if (bd instanceof OtelDecoratingBlobReadSession) { + OtelDecoratingBlobReadSession odbrs = (OtelDecoratingBlobReadSession) bd; + bd = odbrs.delegate; + } + if (bd instanceof BlobReadSessionAdapter) { + BlobReadSessionAdapter brsa = (BlobReadSessionAdapter) bd; + ObjectReadSession session = brsa.session; + if (session instanceof ObjectReadSessionImpl) { + orsi = (ObjectReadSessionImpl) session; + } + } + if (orsi == null) { + fail("unable to locate state for validation"); + } + return orsi; + } + + static final class FakeStorage extends StorageImplBase { + + private final Map>> db; + + private FakeStorage( + Map>> db) { + this.db = db; + } + + @Override + public StreamObserver bidiReadObject( + StreamObserver respond) { + return new AbstractObserver(respond) { + @Override + public void onNext(BidiReadObjectRequest req) { + if (db.containsKey(req)) { + db.get(req).accept(respond); + } else { + respond.onError(TestUtils.apiException(Code.UNIMPLEMENTED, "Unexpected request")); + } + } + }; + } + + static FakeStorage of( + Map>> db) { + return new FakeStorage(db); + } + + static FakeStorage from(Map db) { + return new FakeStorage(Maps.transformValues(db, resp -> (respond) -> respond.onNext(resp))); + } + } + + abstract static class AbstractObserver implements StreamObserver { + + protected final 
StreamObserver respond; + + private AbstractObserver(StreamObserver respond) { + this.respond = respond; + } + + @Override + public void onError(Throwable t) { + respond.onError(t); + } + + @Override + public void onCompleted() { + respond.onCompleted(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionTest.java new file mode 100644 index 000000000000..cd3eb3170d3d --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionTest.java @@ -0,0 +1,376 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._1MiB; +import static com.google.cloud.storage.ByteSizeConstants._2MiB; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.rpc.OutOfRangeException; +import com.google.cloud.storage.BlobAppendableUpload.AppendableUploadWriteableByteChannel; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.FlushPolicy.MinFlushSizeFlushPolicy; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.ZeroCopySupport.DisposableByteString; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.BucketFixture; +import com.google.cloud.storage.it.runner.annotations.BucketType; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.base.Stopwatch; +import com.google.common.hash.Hasher; +import com.google.common.hash.Hashing; +import com.google.common.io.ByteStreams; +import com.google.common.io.CountingOutputStream; +import com.google.protobuf.ByteString; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ScatteringByteChannel; +import java.nio.channels.SeekableByteChannel; +import java.nio.channels.WritableByteChannel; +import 
java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; +import java.util.stream.LongStream; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.TEST_BENCH}, + transports = Transport.GRPC) +public final class ITObjectReadSessionTest { + + private static final int _512KiB = 512 * 1024; + + @Inject public Storage storage; + + @Inject + @BucketFixture(BucketType.RAPID) + public BucketInfo bucket; + + @Inject public Generator generator; + + @Inject public Backend backend; + + @Test + public void bytes() + throws ExecutionException, InterruptedException, TimeoutException, IOException { + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(512 * 1024)); + BlobInfo obj512KiB = create(testContent); + byte[] expected = testContent.getBytes(_512KiB - 13); + BlobId blobId = obj512KiB.getBlobId(); + + try (BlobReadSession blobReadSession = + storage.blobReadSession(blobId).get(30, TimeUnit.SECONDS)) { + + BlobInfo info1 = blobReadSession.getBlobInfo(); + assertThat(info1).isNotNull(); + + ApiFuture futureRead1Bytes = + blobReadSession.readAs( + ReadProjectionConfigs.asFutureBytes() + .withRangeSpec(RangeSpec.of(_512KiB - 13L, 13L))); + + byte[] read1Bytes = futureRead1Bytes.get(30, TimeUnit.SECONDS); + assertThat(read1Bytes.length).isEqualTo(13); + + assertThat(xxd(read1Bytes)).isEqualTo(xxd(expected)); + } + } + + @Test + public void attemptingToGetFutureOutSizeSessionFails() throws Throwable { + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(512 * 1024)); + BlobInfo obj512KiB = create(testContent); + BlobId blobId = obj512KiB.getBlobId(); + + ApiFuture future; + try (BlobReadSession session = storage.blobReadSession(blobId).get(30, TimeUnit.SECONDS)) { + 
future = session.readAs(ReadProjectionConfigs.asFutureBytes()); + } + + ExecutionException ee = + assertThrows(ExecutionException.class, () -> future.get(1, TimeUnit.SECONDS)); + + assertThat(ee).hasCauseThat().isInstanceOf(StorageException.class); + assertThat(ee).hasCauseThat().hasCauseThat().isInstanceOf(AsyncSessionClosedException.class); + } + + @Test + public void lotsOfBytes() throws Exception { + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(64 * _1MiB)); + + BlobInfo gen1 = create(testContent); + BlobId blobId = gen1.getBlobId(); + for (int j = 0; j < 2; j++) { + + Stopwatch sw = Stopwatch.createStarted(); + try (BlobReadSession blobReadSession = + storage.blobReadSession(blobId).get(30, TimeUnit.SECONDS)) { + + int numRangesToRead = 32; + List> futures = + LongStream.range(0, numRangesToRead) + .mapToObj(i -> RangeSpec.of(i * _2MiB, _2MiB)) + .map( + r -> + blobReadSession.readAs( + ReadProjectionConfigs.asFutureBytes().withRangeSpec(r))) + .collect(Collectors.toList()); + + ApiFuture> listApiFuture = ApiFutures.allAsList(futures); + + List ranges = listApiFuture.get(30, TimeUnit.SECONDS); + Stopwatch stop = sw.stop(); + System.out.println(stop.elapsed(TimeUnit.MILLISECONDS)); + Hasher hasher = Hashing.crc32c().newHasher(); + long length = 0; + for (byte[] range : ranges) { + hasher.putBytes(range); + length += range.length; + } + final long finalLength = length; + + assertAll( + () -> { + Crc32cLengthKnown expectedCrc32c = + Crc32cValue.of(testContent.getCrc32c(), testContent.getBytes().length); + Crc32cLengthKnown actualCrc32c = Crc32cValue.of(hasher.hash().asInt(), finalLength); + + assertThat(actualCrc32c).isEqualTo(expectedCrc32c); + }, + () -> assertThat(finalLength).isEqualTo(numRangesToRead * _2MiB)); + } + } + } + + @Test + public void lotsChannel() throws Exception { + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(64 * 
_1MiB)); + + BlobInfo gen1 = create(testContent); + BlobId blobId = gen1.getBlobId(); + byte[] buffer = new byte[_2MiB]; + for (int j = 0; j < 2; j++) { + + Stopwatch sw = Stopwatch.createStarted(); + try (BlobReadSession blobReadSession = + storage.blobReadSession(blobId).get(30, TimeUnit.SECONDS)) { + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (ScatteringByteChannel r = blobReadSession.readAs(ReadProjectionConfigs.asChannel())) { + ByteBuffer buf = ByteBuffer.wrap(buffer); + Buffers.copyUsingBuffer(buf, r, Channels.newChannel(baos)); + } + Stopwatch stop = sw.stop(); + System.out.println(stop.elapsed(TimeUnit.MILLISECONDS)); + Hasher hasher = Hashing.crc32c().newHasher(); + byte[] actual = baos.toByteArray(); + hasher.putBytes(actual); + + Crc32cLengthKnown expectedCrc32c = + Crc32cValue.of(testContent.getCrc32c(), testContent.getBytes().length); + Crc32cLengthKnown actualCrc32c = Crc32cValue.of(hasher.hash().asInt(), actual.length); + + assertThat(actualCrc32c).isEqualTo(expectedCrc32c); + } + } + } + + @Test + public void readRangeAsByteString() throws Exception { + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(64 * _1MiB)); + + BlobInfo gen1 = create(testContent); + BlobId blobId = gen1.getBlobId(); + for (int j = 0; j < 2; j++) { + + Stopwatch sw = Stopwatch.createStarted(); + try (BlobReadSession blobReadSession = + storage.blobReadSession(blobId).get(2, TimeUnit.SECONDS)) { + + int numRangesToRead = 32; + List> futures = + LongStream.range(0, numRangesToRead) + .mapToObj(i -> RangeSpec.of(i * _2MiB, _2MiB)) + .map( + r -> + blobReadSession.readAs( + ReadProjectionConfigs.asFutureByteString().withRangeSpec(r))) + .collect(Collectors.toList()); + + ApiFuture> listApiFuture = ApiFutures.allAsList(futures); + + List ranges = listApiFuture.get(30, TimeUnit.SECONDS); + Stopwatch stop = sw.stop(); + System.out.println(stop.elapsed(TimeUnit.MILLISECONDS)); + Hasher hasher = 
Hashing.crc32c().newHasher(); + long length = 0; + for (DisposableByteString range : ranges) { + try (DisposableByteString disposable = range) { + ByteString byteString = disposable.byteString(); + for (ByteBuffer byteBuffer : byteString.asReadOnlyByteBufferList()) { + hasher.putBytes(byteBuffer); + } + length += byteString.size(); + } + } + final long finalLength = length; + + assertAll( + () -> { + Crc32cLengthKnown expectedCrc32c = + Crc32cValue.of(testContent.getCrc32c(), testContent.getBytes().length); + Crc32cLengthKnown actualCrc32c = Crc32cValue.of(hasher.hash().asInt(), finalLength); + + assertThat(actualCrc32c).isEqualTo(expectedCrc32c); + }, + () -> assertThat(finalLength).isEqualTo(numRangesToRead * _2MiB)); + } + } + } + + @Test + public void readFromBucketThatDoesNotExistShouldRaiseStorageExceptionWith404() { + BlobId blobId = BlobId.of("gcs-grpc-team-bucket-that-does-not-exist", "someobject"); + + ApiFuture futureObjectReadSession = storage.blobReadSession(blobId); + + ExecutionException ee = + assertThrows( + ExecutionException.class, () -> futureObjectReadSession.get(5, TimeUnit.SECONDS)); + + assertThat(ee).hasCauseThat().isInstanceOf(StorageException.class); + StorageException cause = (StorageException) ee.getCause(); + assertThat(cause.getCode()).isEqualTo(404); + } + + @Test + public void seekable() throws Exception { + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(16 * _1MiB)); + + ReadAsSeekableChannel config = + ReadProjectionConfigs.asSeekableChannel() + .withRangeSpecFunction( + RangeSpecFunction.linearExponential() + .withInitialMaxLength(_1MiB) + .withMaxLengthScalar(4.0)); + + BlobInfo gen1 = create(testContent); + BlobId blobId = gen1.getBlobId(); + ByteBuffer buf = ByteBuffer.allocate(2 * 1024 * 1024); + for (int j = 0; j < 1; j++) { + + try (BlobReadSession session = storage.blobReadSession(blobId).get(30, TimeUnit.SECONDS)) { + CountingOutputStream countingOutputStream = + 
new CountingOutputStream(ByteStreams.nullOutputStream()); + long copy1; + long copy2; + long copy3; + try (SeekableByteChannel seekable = session.readAs(config); + WritableByteChannel w = Channels.newChannel(countingOutputStream)) { + copy1 = Buffers.copyUsingBuffer(buf, seekable, w); + + seekable.position(8 * _1MiB); + copy2 = Buffers.copyUsingBuffer(buf, seekable, w); + + seekable.position(0); + copy3 = Buffers.copyUsingBuffer(buf, seekable, w); + } + + long totalRead = countingOutputStream.getCount(); + long finalCopy1 = copy1; + long finalCopy2 = copy2; + long finalCopy3 = copy3; + assertAll( + () -> assertThat(totalRead).isEqualTo((16 + 8 + 16) * _1MiB), + () -> assertThat(finalCopy1).isEqualTo(16 * _1MiB), + () -> assertThat(finalCopy2).isEqualTo(8 * _1MiB), + () -> assertThat(finalCopy3).isEqualTo(16 * _1MiB)); + } + } + } + + @Test + public void outOfRange() + throws ExecutionException, InterruptedException, TimeoutException, IOException { + int objectSize = 4 * 1024 * 1024; + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(objectSize)); + BlobInfo gen1 = create(testContent); + BlobId blobId = gen1.getBlobId(); + + try (BlobReadSession blobReadSession = + storage.blobReadSession(blobId).get(30, TimeUnit.SECONDS)) { + + BlobInfo info1 = blobReadSession.getBlobInfo(); + assertThat(info1).isNotNull(); + + ReadAsFutureBytes cfg = ReadProjectionConfigs.asFutureBytes(); + + ApiFuture f2 = + blobReadSession.readAs(cfg.withRangeSpec(RangeSpec.beginAt(objectSize + 1))); + ExecutionException ee = + assertThrows(ExecutionException.class, () -> f2.get(30, TimeUnit.SECONDS)); + assertThat(ee).hasCauseThat().hasCauseThat().isInstanceOf(OutOfRangeException.class); + + ApiFuture f1 = blobReadSession.readAs(cfg.withRangeSpec(RangeSpec.all())); + byte[] bytes1 = f1.get(30, TimeUnit.SECONDS); + assertThat(bytes1.length).isEqualTo(objectSize); + } + } + + private BlobInfo create(ChecksummedTestContent content) + throws 
IOException, ExecutionException, InterruptedException, TimeoutException { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + + BlobAppendableUploadConfig config = BlobAppendableUploadConfig.of(); + if (backend == Backend.TEST_BENCH) { + // workaround for https://github.com/googleapis/storage-testbench/issues/733 + MinFlushSizeFlushPolicy flushPolicy = + FlushPolicy.minFlushSize(256 * 1024).withMaxPendingBytes(4 * 1024 * 1024); + config = config.withFlushPolicy(flushPolicy); + } + BlobAppendableUpload upload = + storage.blobAppendableUpload(info, config, BlobWriteOption.doesNotExist()); + try (AppendableUploadWriteableByteChannel channel = upload.open()) { + Buffers.emptyTo(ByteBuffer.wrap(content.getBytes()), channel); + channel.finalizeAndClose(); + } + return upload.getResult().get(5, TimeUnit.SECONDS); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITOpenTelemetryMPUTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITOpenTelemetryMPUTest.java new file mode 100644 index 000000000000..e1a83ba6ebda --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITOpenTelemetryMPUTest.java @@ -0,0 +1,148 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CompletedMultipartUpload; +import com.google.cloud.storage.multipartupload.model.CompletedPart; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.ListMultipartUploadsRequest; +import com.google.cloud.storage.multipartupload.model.UploadPartRequest; +import com.google.cloud.storage.multipartupload.model.UploadPartResponse; +import com.google.cloud.storage.otel.TestExporter; +import com.google.common.collect.ImmutableList; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.List; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = Backend.PROD, + transports = {Transport.HTTP}) +public final class ITOpenTelemetryMPUTest { + + @Inject public Storage storage; + + @Inject public BucketInfo bucket; + + @Inject public Generator generator; + @Inject public Transport transport; + + @Test + public void checkMPUInstrumentation() 
throws Exception { + TestExporter exporter = new TestExporter(); + + OpenTelemetrySdk openTelemetrySdk = + OpenTelemetrySdk.builder() + .setTracerProvider( + SdkTracerProvider.builder() + .addSpanProcessor(SimpleSpanProcessor.create(exporter)) + .build()) + .build(); + + HttpStorageOptions httpStorageOptions = (HttpStorageOptions) storage.getOptions(); + StorageOptions storageOptions = + httpStorageOptions.toBuilder().setOpenTelemetry(openTelemetrySdk).build(); + + String objectName = generator.randomObjectName(); + + try (Storage storage = storageOptions.getService()) { + MultipartUploadClient mpuClient = + MultipartUploadClient.create( + MultipartUploadSettings.of((HttpStorageOptions) storage.getOptions())); + + CreateMultipartUploadResponse create = + mpuClient.createMultipartUpload( + CreateMultipartUploadRequest.builder() + .bucket(bucket.getName()) + .key(objectName) + .build()); + + byte[] data = "Hello, World!".getBytes(StandardCharsets.UTF_8); + RequestBody body = RequestBody.of(ByteBuffer.wrap(data)); + UploadPartResponse upload = + mpuClient.uploadPart( + UploadPartRequest.builder() + .bucket(bucket.getName()) + .key(objectName) + .uploadId(create.uploadId()) + .partNumber(1) + .build(), + body); + + mpuClient.completeMultipartUpload( + CompleteMultipartUploadRequest.builder() + .bucket(bucket.getName()) + .key(objectName) + .uploadId(create.uploadId()) + .multipartUpload( + CompletedMultipartUpload.builder() + .parts( + ImmutableList.of( + CompletedPart.builder().partNumber(1).eTag(upload.eTag()).build())) + .build()) + .build()); + + mpuClient.listMultipartUploads( + ListMultipartUploadsRequest.builder().bucket(bucket.getName()).build()); + } + + List spans = exporter.getExportedSpans(); + assertThat(spans).hasSize(4); + + SpanData createSpan = spans.get(0); + assertThat(createSpan.getName()) + .isEqualTo("com.google.cloud.storage.MultipartUploadClient/createMultipartUpload"); + 
assertThat(createSpan.getAttributes().get(AttributeKey.stringKey("gsutil.uri"))) + .isEqualTo(String.format("gs://%s/%s", bucket.getName(), objectName)); + + SpanData uploadSpan = spans.get(1); + assertThat(uploadSpan.getName()) + .isEqualTo("com.google.cloud.storage.MultipartUploadClient/uploadPart"); + assertThat(uploadSpan.getAttributes().get(AttributeKey.stringKey("gsutil.uri"))) + .isEqualTo(String.format("gs://%s/%s", bucket.getName(), objectName)); + assertThat(uploadSpan.getAttributes().get(AttributeKey.longKey("partNumber"))).isEqualTo(1); + + SpanData completeSpan = spans.get(2); + assertThat(completeSpan.getName()) + .isEqualTo("com.google.cloud.storage.MultipartUploadClient/completeMultipartUpload"); + assertThat(completeSpan.getAttributes().get(AttributeKey.stringKey("gsutil.uri"))) + .isEqualTo(String.format("gs://%s/%s", bucket.getName(), objectName)); + + SpanData listSpan = spans.get(3); + assertThat(listSpan.getName()) + .isEqualTo("com.google.cloud.storage.MultipartUploadClient/listMultipartUploads"); + assertThat(listSpan.getAttributes().get(AttributeKey.stringKey("gsutil.uri"))) + .isEqualTo(String.format("gs://%s/", bucket.getName())); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITOpenTelemetryTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITOpenTelemetryTest.java new file mode 100644 index 000000000000..3b8957bbac64 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITOpenTelemetryTest.java @@ -0,0 +1,91 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.otel.TestExporter; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = Backend.PROD, + transports = {Transport.HTTP, Transport.GRPC}) +public final class ITOpenTelemetryTest { + + @Inject public Storage storage; + + @Inject public BucketInfo bucket; + + @Inject public Generator generator; + @Inject public Transport transport; + + @Test + public void checkInstrumentation() throws Exception { + TestExporter exporter = new TestExporter(); + + OpenTelemetrySdk openTelemetrySdk = + OpenTelemetrySdk.builder() + .setTracerProvider( + SdkTracerProvider.builder() + .addSpanProcessor(SimpleSpanProcessor.create(exporter)) + 
.build()) + .build(); + StorageOptions storageOptions = + storage.getOptions().toBuilder().setOpenTelemetry(openTelemetrySdk).build(); + try (Storage storage = storageOptions.getService()) { + storage.create(BlobInfo.newBuilder(bucket, generator.randomObjectName()).build()); + } + + SpanData spanData = exporter.getExportedSpans().get(0); + assertAll( + () -> assertThat(getAttributeValue(spanData, "gcp.client.service")).isEqualTo("Storage"), + () -> + assertThat(getAttributeValue(spanData, "gcp.client.repo")) + .isEqualTo("googleapis/java-storage"), + () -> + assertThat(getAttributeValue(spanData, "gcp.client.artifact")) + .isEqualTo("com.google.cloud:google-cloud-storage"), + () -> + assertThat(getAttributeValue(spanData, "rpc.system")) + .isEqualTo(transport.name().toLowerCase())); + } + + @Test + public void noOpDoesNothing() { + assertThat(storage.getOptions().getOpenTelemetry()).isSameInstanceAs(OpenTelemetry.noop()); + storage.create(BlobInfo.newBuilder(bucket, generator.randomObjectName()).build()); + } + + private static String getAttributeValue(SpanData spanData, String key) { + return spanData.getAttributes().get(AttributeKey.stringKey(key)); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITOpenTelemetryTestbenchTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITOpenTelemetryTestbenchTest.java new file mode 100644 index 000000000000..f813fcf41cda --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITOpenTelemetryTestbenchTest.java @@ -0,0 +1,217 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; +import static java.nio.charset.StandardCharsets.UTF_8; + +import com.google.cloud.ReadChannel; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.CopyRequest; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.otel.TestExporter; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import io.opentelemetry.sdk.trace.export.SpanExporter; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; +import java.util.UUID; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = Backend.TEST_BENCH, + 
transports = {Transport.HTTP, Transport.GRPC}) +public class ITOpenTelemetryTestbenchTest { + @Inject public Transport transport; + @Inject public Generator generator; + @Inject public BucketInfo testBucket; + @Inject public Storage storage; + private SpanExporter exporter; + private static final byte[] helloWorldTextBytes = "hello world".getBytes(); + private BlobId blobId; + private static final Path tmpDir = Paths.get(System.getProperty("java.io.tmpdir")); + + @Before + public void setUp() { + exporter = new TestExporter(); + OpenTelemetrySdk openTelemetrySdk = + OpenTelemetrySdk.builder() + .setTracerProvider( + SdkTracerProvider.builder() + .addSpanProcessor(SimpleSpanProcessor.create(exporter)) + .build()) + .build(); + StorageOptions options = + storage.getOptions().toBuilder().setOpenTelemetry(openTelemetrySdk).build(); + storage = options.getService(); + String objectString = generator.randomObjectName(); + blobId = BlobId.of(testBucket.getName(), objectString); + } + + @After + public void tearDown() throws Exception { + if (storage != null) { + storage.close(); + } + } + + @Test + public void runCreateBucket() { + String bucket = "random-bucket" + UUID.randomUUID(); + storage.create(BucketInfo.of(bucket)); + TestExporter testExported = (TestExporter) exporter; + List spanData = testExported.getExportedSpans(); + checkCommonAttributes(spanData); + } + + @Test + public void runCreateBlob() { + byte[] content = "Hello, World!".getBytes(UTF_8); + BlobId toCreate = BlobId.of(testBucket.getName(), generator.randomObjectName()); + storage.create(BlobInfo.newBuilder(toCreate).build(), content); + TestExporter testExported = (TestExporter) exporter; + List spanData = testExported.getExportedSpans(); + checkCommonAttributes(spanData); + Assert.assertTrue(spanData.stream().anyMatch(x -> x.getName().contains("create"))); + } + + @Test + public void runReadAllBytes() { + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build(); + 
storage.create(blobInfo, helloWorldTextBytes); + byte[] read = storage.readAllBytes(blobId); + TestExporter testExported = (TestExporter) exporter; + List spanData = testExported.getExportedSpans(); + checkCommonAttributes(spanData); + Assert.assertTrue(spanData.stream().anyMatch(x -> x.getName().contains("readAllBytes"))); + } + + @Test + public void runCreateFrom() throws IOException { + Path helloWorldTxtGz = File.createTempFile(blobId.getName(), ".txt.gz").toPath(); + storage.createFrom(BlobInfo.newBuilder(blobId).build(), helloWorldTxtGz); + TestExporter testExported = (TestExporter) exporter; + List spanData = testExported.getExportedSpans(); + checkCommonAttributes(spanData); + Assert.assertTrue(spanData.stream().anyMatch(x -> x.getName().contains("createFrom"))); + } + + @Test + public void runDownloadToPath() throws IOException { + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build(); + storage.create(blobInfo, helloWorldTextBytes); + try (TmpFile file = TmpFile.of(tmpDir, "download-to", ".txt")) { + storage.downloadTo(blobId, file.getPath()); + TestExporter testExported = (TestExporter) exporter; + List spanData = testExported.getExportedSpans(); + checkCommonAttributes(spanData); + Assert.assertTrue(spanData.stream().anyMatch(x -> x.getName().contains("downloadTo"))); + } + } + + @Test + public void runDownloadToOutputStream() { + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build(); + storage.create(blobInfo, helloWorldTextBytes); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + storage.downloadTo(blobId, baos); + TestExporter testExported = (TestExporter) exporter; + List spanData = testExported.getExportedSpans(); + checkCommonAttributes(spanData); + Assert.assertTrue(spanData.stream().anyMatch(x -> x.getName().contains("downloadTo"))); + } + + @Test + public void runCopy() { + BlobInfo info = + BlobInfo.newBuilder(testBucket, generator.randomObjectName() + 
"copy/src").build(); + Blob cpySrc = storage.create(info, helloWorldTextBytes, BlobTargetOption.doesNotExist()); + BlobInfo dst = + BlobInfo.newBuilder(testBucket, generator.randomObjectName() + "copy/dst").build(); + CopyRequest copyRequest = + CopyRequest.newBuilder() + .setSource(cpySrc.getBlobId()) + .setSourceOptions(BlobSourceOption.generationMatch(cpySrc.getGeneration())) + .setTarget(dst, BlobTargetOption.doesNotExist()) + .build(); + CopyWriter copyWriter = storage.copy(copyRequest); + BlobInfo result = copyWriter.getResult(); + assertThat(result).isNotNull(); + TestExporter testExported = (TestExporter) exporter; + List spanData = testExported.getExportedSpans(); + checkCommonAttributes(spanData); + Assert.assertTrue(spanData.stream().anyMatch(x -> x.getName().contains("copy"))); + } + + @Test + public void runWriter() throws IOException { + BlobInfo info = BlobInfo.newBuilder(testBucket, generator.randomObjectName()).build(); + try (WriteChannel writer = storage.writer(info)) { + // Do nothing + } + TestExporter testExported = (TestExporter) exporter; + List spanData = testExported.getExportedSpans(); + checkCommonAttributes(spanData); + Assert.assertTrue(spanData.stream().anyMatch(x -> x.getName().contains("writer"))); + } + + @Test + public void runReader() throws IOException { + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build(); + storage.create(blobInfo, helloWorldTextBytes); + try (ReadChannel reader = storage.reader(blobId)) { + // Do nothing + } + TestExporter testExported = (TestExporter) exporter; + List spanData = testExported.getExportedSpans(); + checkCommonAttributes(spanData); + Assert.assertTrue(spanData.stream().anyMatch(x -> x.getName().contains("reader"))); + } + + private void checkCommonAttributes(List spanData) { + for (SpanData span : spanData) { + Assert.assertEquals("Storage", getAttributeValue(span, "gcp.client.service")); + Assert.assertEquals("googleapis/java-storage", getAttributeValue(span, 
"gcp.client.repo")); + Assert.assertEquals( + "com.google.cloud:google-cloud-storage", getAttributeValue(span, "gcp.client.artifact")); + Assert.assertEquals(transport.name().toLowerCase(), getAttributeValue(span, "rpc.system")); + } + } + + private String getAttributeValue(SpanData spanData, String key) { + return spanData.getAttributes().get(AttributeKey.stringKey(key)).toString(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITReadChannelGzipHandlingTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITReadChannelGzipHandlingTest.java new file mode 100644 index 000000000000..8a081316d6b2 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITReadChannelGzipHandlingTest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.ReadChannel; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.ObjectsFixture; +import com.google.cloud.storage.it.runner.registry.ObjectsFixture.ObjectAndContent; +import java.io.IOException; +import java.nio.ByteBuffer; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.PROD}, + transports = {Transport.HTTP, Transport.GRPC}) +public final class ITReadChannelGzipHandlingTest { + + @Inject public Storage storage; + @Inject public ObjectsFixture objFixture; + + @Test + public void nonGzipObjectReadOneByteAtATimeNoLibraryBuffering() throws IOException { + ObjectAndContent obj512KiB = objFixture.getObj512KiB(); + BlobInfo info = obj512KiB.getInfo(); + BlobId blobId = info.getBlobId(); + byte[] bytes = new byte[1]; + BlobSourceOption attemptGzipDecompression = BlobSourceOption.shouldReturnRawInputStream(false); + try (ReadChannel reader = storage.reader(blobId, attemptGzipDecompression)) { + reader.setChunkSize(0); + + // read zero bytes, to trigger things to startup but don't actually pull out any bytes yes + reader.read(ByteBuffer.allocate(0)); + + byte[] content = obj512KiB.getContent().getBytes(); + for (int i = 0; i < info.getSize(); i++) { + int read = reader.read(ByteBuffer.wrap(bytes)); + assertThat(read).isEqualTo(1); + byte b = bytes[0]; + assertThat(b).isEqualTo(content[i]); + } + } + } +} diff --git 
a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITStorageDataClientFakeTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITStorageDataClientFakeTest.java new file mode 100644 index 000000000000..276889ab44c5 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITStorageDataClientFakeTest.java @@ -0,0 +1,153 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._2MiB; +import static com.google.cloud.storage.ITObjectReadSessionFakeTest.getReadRange; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.cloud.storage.ITObjectReadSessionFakeTest.FakeStorage; +import com.google.cloud.storage.StorageDataClient.FastOpenObjectReadSession; +import com.google.cloud.storage.ZeroCopySupport.DisposableByteString; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.collect.ImmutableMap; +import com.google.common.io.ByteStreams; +import com.google.protobuf.ByteString; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.BidiReadObjectResponse; +import com.google.storage.v2.BidiReadObjectSpec; +import com.google.storage.v2.BucketName; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectRangeData; +import java.io.ByteArrayOutputStream; +import java.nio.channels.Channels; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public final class ITStorageDataClientFakeTest { + private static final byte[] ALL_OBJECT_BYTES = DataGenerator.base64Characters().genBytes(64); + private static final Object METADATA = + Object.newBuilder() + .setBucket(BucketName.format("_", "b")) + .setName("o") + .setGeneration(1) + .setSize(_2MiB) + .build(); + + @Test + public void fastOpen_futureBytes() throws Exception { + doTest( + ReadProjectionConfigs.asFutureBytes().withRangeSpec(RangeSpec.of(10, 20)), + f -> f.get(10, TimeUnit.MILLISECONDS)); + } + + @Test + public void fastOpen_channel() throws Exception { + doTest( + ReadProjectionConfigs.asChannel().withRangeSpec(RangeSpec.of(10, 20)), + c -> { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ByteStreams.copy(c, 
Channels.newChannel(baos)); + return baos.toByteArray(); + }); + } + + @Test + public void fastOpen_futureByteString() throws Exception { + doTest( + ReadProjectionConfigs.asFutureByteString().withRangeSpec(RangeSpec.of(10, 20)), + f -> { + try (DisposableByteString disposableByteString = f.get(10, TimeUnit.MILLISECONDS)) { + ByteString byteString = disposableByteString.byteString(); + return byteString.toByteArray(); + } + }); + } + + private

void doTest(ReadProjectionConfig

config, ThrowableFunction func) + throws Exception { + ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 20); + ChecksummedTestContent content1 = ChecksummedTestContent.of(content.getBytes(), 0, 10); + ChecksummedTestContent content2 = ChecksummedTestContent.of(content.getBytes(), 10, 10); + + BidiReadObjectRequest reqOpen = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .build()) + .addReadRanges(getReadRange(1, 10, 20)) + .build(); + BidiReadObjectResponse res2_1 = + BidiReadObjectResponse.newBuilder() + .setMetadata(METADATA) + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content1.asChecksummedData()) + .setReadRange(getReadRange(1, 10, content1)) + .build()) + .build(); + + BidiReadObjectResponse res2_2 = + BidiReadObjectResponse.newBuilder() + .addObjectDataRanges( + ObjectRangeData.newBuilder() + .setChecksummedData(content2.asChecksummedData()) + .setReadRange(getReadRange(1, 20, content2)) + .setRangeEnd(true) + .build()) + .build(); + FakeStorage fake = + FakeStorage.of( + ImmutableMap.of( + reqOpen, + respond -> { + respond.onNext(res2_1); + respond.onNext(res2_2); + })); + + try (FakeServer fakeServer = FakeServer.of(fake); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + StorageDataClient dataClient = storage.storageDataClient; + BidiReadObjectRequest req = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .build()) + .build(); + + ApiFuture> f = + dataClient.fastOpenReadSession(req, GrpcCallContext.createDefault(), config); + try (FastOpenObjectReadSession

fastOpen = f.get(3, TimeUnit.SECONDS)) { + byte[] apply = func.apply(fastOpen.getProjection()); + assertThat(xxd(apply)).isEqualTo(xxd(content.getBytes())); + } + } + } + + @FunctionalInterface + interface ThrowableFunction { + To apply(From from) throws Exception; + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITStorageLifecycleTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITStorageLifecycleTest.java new file mode 100644 index 000000000000..4b4e497cc7ca --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITStorageLifecycleTest.java @@ -0,0 +1,108 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.NoCredentials; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.registry.TestBench; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; +import org.junit.Test; +import org.junit.runner.RunWith; + +/** + * The interaction of {@link com.google.cloud.ServiceOptions} instance caching has differing + * behavior depending on whether {@link StorageOptions#grpc()} or {@link StorageOptions#http()} are + * used. + * + *

Define some tests to ensue we are correctly integrating with the caching lifecycle + */ +// Not in com.google.cloud.storage.it because we're testing package local things +@SuppressWarnings("ResultOfMethodCallIgnored") +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +public final class ITStorageLifecycleTest { + @Inject public TestBench testBench; + + @Test + public void grpc() throws Exception { + GrpcStorageOptions options = + StorageOptions.grpc() + .setHost(testBench.getGRPCBaseUri()) + .setCredentials(NoCredentials.getInstance()) + .setProjectId("test-project-id") + .setEnableGrpcClientMetrics(false) + .setAttemptDirectPath(false) + .build(); + + Storage service1 = options.getService(); + Storage service2 = options.getService(); + + // ensure both instances are the same + assertThat(service2).isSameInstanceAs(service1); + + // make sure an RPC can be done + StreamSupport.stream(service1.list().iterateAll().spliterator(), false) + .collect(Collectors.toList()); + + // close the instance + service1.close(); + + // expect a new instance to be returned + try (Storage service3 = options.getService()) { + assertThat(service3).isNotSameInstanceAs(service1); + // make sure an RPC can be done + StreamSupport.stream(service3.list().iterateAll().spliterator(), false) + .collect(Collectors.toList()); + } + } + + @Test + public void http() throws Exception { + HttpStorageOptions options = + StorageOptions.http() + .setHost(testBench.getBaseUri()) + .setCredentials(NoCredentials.getInstance()) + .setProjectId("test-project-id") + .build(); + + Storage service1 = options.getService(); + Storage service2 = options.getService(); + + // ensure both instances are the same + assertThat(service2).isSameInstanceAs(service1); + // make sure an RPC can be done + StreamSupport.stream(service1.list().iterateAll().spliterator(), false) + .collect(Collectors.toList()); + + service1.close(); // this should be a no-op for http + + // expect the original instance to still be 
returned + try (Storage service3 = options.getService()) { + + assertThat(service3).isSameInstanceAs(service1); + // make sure an RPC can be done + StreamSupport.stream(service3.list().iterateAll().spliterator(), false) + .collect(Collectors.toList()); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITSyncAndUploadUnbufferedWritableByteChannelPropertyTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITSyncAndUploadUnbufferedWritableByteChannelPropertyTest.java new file mode 100644 index 000000000000..2d55a26cbbe3 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITSyncAndUploadUnbufferedWritableByteChannelPropertyTest.java @@ -0,0 +1,1079 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.apiException; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.defaultRetryingDeps; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.retrying.TimedAttemptSettings; +import com.google.api.gax.rpc.ApiExceptions; +import com.google.api.gax.rpc.UnavailableException; +import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Retrying.DefaultRetrier; +import com.google.cloud.storage.SyncAndUploadUnbufferedWritableByteChannel.Alg; +import com.google.cloud.storage.SyncAndUploadUnbufferedWritableByteChannel.RequestStream; +import com.google.cloud.storage.SyncAndUploadUnbufferedWritableByteChannel.ResponseStream; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.primitives.Ints; +import com.google.protobuf.ByteString; +import com.google.protobuf.Message; +import com.google.protobuf.TextFormat; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.QueryWriteStatusRequest; +import 
com.google.storage.v2.QueryWriteStatusResponse; +import com.google.storage.v2.StartResumableWriteRequest; +import com.google.storage.v2.StartResumableWriteResponse; +import com.google.storage.v2.StorageClient; +import com.google.storage.v2.StorageGrpc.StorageImplBase; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import com.google.storage.v2.WriteObjectSpec; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.stub.StreamObserver; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Deque; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.UnaryOperator; +import java.util.stream.Collectors; +import net.jqwik.api.Arbitraries; +import net.jqwik.api.Arbitrary; +import net.jqwik.api.Combinators; +import net.jqwik.api.Example; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; +import net.jqwik.api.Provide; +import net.jqwik.api.RandomDistribution; +import net.jqwik.api.Tuple; +import net.jqwik.api.arbitraries.IntegerArbitrary; +import net.jqwik.api.lifecycle.AfterContainer; +import net.jqwik.api.lifecycle.AfterProperty; +import net.jqwik.api.lifecycle.BeforeContainer; +import net.jqwik.api.lifecycle.BeforeProperty; +import net.jqwik.api.lifecycle.BeforeTry; +import org.checkerframework.checker.nullness.qual.NonNull; + +public class 
ITSyncAndUploadUnbufferedWritableByteChannelPropertyTest { + + private static Path tmpFolder; + private static RecoveryFileManager recoveryFileManager; + private FailureInducingStorageImpl failureInducingStorage; + private FakeServer server; + private GrpcStorageImpl storage; + + @BeforeContainer + static void beforeContainer() throws IOException { + tmpFolder = + Files.createTempDirectory( + ITSyncAndUploadUnbufferedWritableByteChannelPropertyTest.class.getSimpleName()); + recoveryFileManager = RecoveryFileManager.of(ImmutableList.of(tmpFolder)); + } + + @AfterContainer + static void afterContainer() throws IOException { + if (tmpFolder != null) { + TestUtils.rmDashRf(tmpFolder); + } + } + + @BeforeProperty + void beforeProperty() throws IOException { + failureInducingStorage = new FailureInducingStorageImpl(); + server = FakeServer.of(failureInducingStorage); + storage = (GrpcStorageImpl) server.getGrpcStorageOptions().getService(); + } + + @AfterProperty + void afterProperty() throws Exception { + // use try-with-resources to do the close dance + try (AutoCloseable ignore1 = server; + AutoCloseable ignore2 = storage) { + storage = null; + server = null; + } + } + + @BeforeTry + void beforeTry() { + failureInducingStorage.reset(); + } + + @Example + void emptyObject() throws Exception { + Scenario scenario = Scenario.of("empty", 0, 0, 256, 256, FailuresQueue.empty()); + testUploads(scenario); + } + + @Example + void requestStream_halfClosedToUnavailable_positive() { + UnavailableException unavailableException = + assertThrows( + UnavailableException.class, + () -> + RequestStream.halfClosedToUnavailable( + () -> { + throw new IllegalStateException("asdf half-closed fdsa"); + })); + assertThat(unavailableException) + .hasCauseThat() + .hasMessageThat() + .isEqualTo("asdf half-closed fdsa"); + } + + @Example + void requestStream_halfClosedToUnavailable_negative() { + IllegalStateException illegalStateException = + assertThrows( + IllegalStateException.class, + () 
-> + RequestStream.halfClosedToUnavailable( + () -> { + throw new IllegalStateException("blah"); + })); + assertThat(illegalStateException).hasMessageThat().isEqualTo("blah"); + } + + @Example + void alg_shouldSetResultFutureIfNotRetryable() { + SettableApiFuture resultFuture = SettableApiFuture.create(); + Alg alg = + new Alg((ResultRetryAlgorithmAdapter) (prevThrowable, prevResponse) -> false, resultFuture); + + ForcedFailure ff = new ForcedFailure("should not be retried"); + boolean shouldRetry = alg.shouldRetry(ff, null); + assertThat(shouldRetry).isFalse(); + assertThat(resultFuture.isDone()).isTrue(); + ExecutionException runtimeException = assertThrows(ExecutionException.class, resultFuture::get); + assertThat(runtimeException).hasCauseThat().hasMessageThat().isEqualTo("should not be retried"); + } + + @Example + void alg_shouldNotSetResultFutureIfRetryable() { + SettableApiFuture resultFuture = SettableApiFuture.create(); + Alg alg = + new Alg((ResultRetryAlgorithmAdapter) (prevThrowable, prevResponse) -> true, resultFuture); + + ForcedFailure ff = new ForcedFailure("can be retried"); + boolean shouldRetry = alg.shouldRetry(ff, null); + assertThat(shouldRetry).isTrue(); + assertThat(resultFuture.isDone()).isFalse(); + } + + @Example + void responseStream_onComplete_lastMessageWithResourceMustResolveResultFuture() + throws ExecutionException, InterruptedException { + SettableApiFuture resultFuture = SettableApiFuture.create(); + + ResponseStream responseStream = new ResponseStream(resultFuture); + + Object fake = Object.newBuilder().setName("fake").build(); + WriteObjectResponse response = WriteObjectResponse.newBuilder().setResource(fake).build(); + responseStream.onNext(response); + + assertThat(resultFuture.isDone()).isFalse(); + responseStream.onCompleted(); + assertThat(resultFuture.isDone()).isTrue(); + assertThat(resultFuture.get()).isEqualTo(response); + } + + @Example + void 
responseStream_onComplete_lastMessageWithoutResourceDoesNotResolveResultFuture() { + SettableApiFuture resultFuture = SettableApiFuture.create(); + + ResponseStream responseStream = new ResponseStream(resultFuture); + + WriteObjectResponse response = WriteObjectResponse.newBuilder().setPersistedSize(3).build(); + responseStream.onNext(response); + + assertThat(resultFuture.isDone()).isFalse(); + responseStream.onCompleted(); + assertThat(resultFuture.isDone()).isFalse(); + } + + @Example + void responseStream_await_yields_onComplete() throws ExecutionException, InterruptedException { + SettableApiFuture resultFuture = SettableApiFuture.create(); + + ResponseStream responseStream = new ResponseStream(resultFuture); + + ExecutorService executorService = Executors.newSingleThreadExecutor(); + try { + Future submit = + executorService.submit( + () -> { + responseStream.await(); + return "Success"; + }); + + Object fake = Object.newBuilder().setName("fake").build(); + WriteObjectResponse response = WriteObjectResponse.newBuilder().setResource(fake).build(); + responseStream.onNext(response); + responseStream.onCompleted(); + + assertThat(submit.get()).isEqualTo("Success"); + } finally { + executorService.shutdownNow(); + } + } + + @Example + void responseStream_await_yields_onError() throws ExecutionException, InterruptedException { + SettableApiFuture resultFuture = SettableApiFuture.create(); + + ResponseStream responseStream = new ResponseStream(resultFuture); + + ExecutorService executorService = Executors.newSingleThreadExecutor(); + try { + Future submit = + executorService.submit( + () -> { + try { + responseStream.await(); + } catch (ForcedFailure ff) { + return "Success"; + } + return "Fail"; + }); + + RuntimeException re = new ForcedFailure("error"); + responseStream.onError(re); + + assertThat(submit.get()).isEqualTo("Success"); + } finally { + executorService.shutdownNow(); + } + } + + @Example + void debug() throws Exception { + testUploads( + Scenario.of( 
+ "object--853610591", + 11428, + 1353, + 196608, + 32768, + new FailuresQueue( + ImmutableList.of(FailureOffset.of(0), FailureOffset.of(0), FailureOffset.of(0))))); + } + + // 25 tries leads to ~0m:30s of runtime + // 250 tries leads to ~6m:00s of runtime + @Property(tries = 25) + void testUploads(@ForAll("scenario") Scenario s) throws Exception { + + StorageClient storageClient = storage.storageClient; + BlobInfo info = BlobInfo.newBuilder("buck", s.objectName).build(); + try (RecoveryFile rf = s.recoveryFileManager.newRecoveryFile(info)) { + SettableApiFuture resultFuture = SettableApiFuture.create(); + + ApiFuture f = + storage.startResumableWrite( + GrpcCallContext.createDefault(), + storage.getWriteObjectRequest(info, Opts.empty()), + Opts.empty()); + ResumableWrite resumableWrite = ApiExceptions.callAndTranslateApiException(f); + + UploadCtx uploadCtx = + failureInducingStorage.data.get(UploadId.of(resumableWrite.getRes().getUploadId())); + + uploadCtx.failuresQueue = s.failuresQueue; + + SyncAndUploadUnbufferedWritableByteChannel syncAndUpload = + new SyncAndUploadUnbufferedWritableByteChannel( + storageClient.writeObjectCallable(), + storageClient.queryWriteStatusCallable(), + resultFuture, + s.chunkSegmenter, + // TestUtils.defaultRetrier(), + new DefaultRetrier(UnaryOperator.identity(), defaultRetryingDeps()), + StorageRetryStrategy.getDefaultStorageRetryStrategy().getIdempotentHandler(), + WriteCtx.of(resumableWrite, s.chunkSegmenter.getHasher()), + rf, + s.copyBuffer); + try (BufferedWritableByteChannel w = s.buffered(syncAndUpload)) { + for (ByteString dataFrame : s.dataFrames) { + w.write(dataFrame.asReadOnlyByteBuffer()); + } + } + + WriteObjectResponse response = resultFuture.get(1, TimeUnit.SECONDS); + assertThat(response.hasResource()).isTrue(); + Object resource = response.getResource(); + + ByteString actual = + uploadCtx.parts.stream() + .filter(WriteObjectRequest::hasChecksummedData) + .map(wor -> wor.getChecksummedData().getContent()) + 
.reduce(ByteString.empty(), ByteString::concat); + ByteString expected = s.dataFrames.stream().reduce(ByteString.empty(), ByteString::concat); + assertAll( + () -> assertThat(uploadCtx.getLength()).isEqualTo(s.objectSize), + () -> assertThat(resource.getSize()).isEqualTo(s.objectSize), + () -> assertThat(xxd(actual)).isEqualTo(xxd(expected))); + } + } + + @Example + void multipleRetriesAgainstFakeServer() throws Exception { + ChecksummedTestContent content = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(17)); + + String uploadId = UUID.randomUUID().toString(); + StartResumableWriteRequest reqStart = + StartResumableWriteRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource( + Object.newBuilder().setBucket("projects/_/buckets/b").setName("o").build()) + .build()) + .build(); + StartResumableWriteResponse resStart = + StartResumableWriteResponse.newBuilder().setUploadId(uploadId).build(); + QueryWriteStatusRequest reqQuery = + QueryWriteStatusRequest.newBuilder().setUploadId(uploadId).build(); + QueryWriteStatusResponse resQuery = + QueryWriteStatusResponse.newBuilder().setPersistedSize(8).build(); + WriteObjectRequest reqWrite0 = + WriteObjectRequest.newBuilder() + .setUploadId(uploadId) + .setWriteOffset(0) + .setChecksummedData(content.slice(0, 2).asChecksummedData()) + .build(); + WriteObjectRequest reqWrite2 = + WriteObjectRequest.newBuilder() + .setWriteOffset(2) + .setChecksummedData(content.slice(2, 2).asChecksummedData()) + .build(); + WriteObjectRequest reqWrite4 = + WriteObjectRequest.newBuilder() + .setWriteOffset(4) + .setChecksummedData(content.slice(4, 2).asChecksummedData()) + .build(); + WriteObjectRequest reqWrite6 = + WriteObjectRequest.newBuilder() + .setWriteOffset(6) + .setChecksummedData(content.slice(6, 2).asChecksummedData()) + .build(); + WriteObjectRequest reqWrite8 = + WriteObjectRequest.newBuilder() + .setWriteOffset(8) + .setChecksummedData(content.slice(8, 
2).asChecksummedData()) + .build(); + WriteObjectRequest reqWrite8WithUploadId = reqWrite8.toBuilder().setUploadId(uploadId).build(); + WriteObjectRequest reqWrite10 = + WriteObjectRequest.newBuilder() + .setWriteOffset(10) + .setChecksummedData(content.slice(10, 2).asChecksummedData()) + .build(); + WriteObjectRequest reqWrite12 = + WriteObjectRequest.newBuilder() + .setWriteOffset(12) + .setChecksummedData(content.slice(12, 2).asChecksummedData()) + .build(); + WriteObjectRequest reqWrite14 = + WriteObjectRequest.newBuilder() + .setWriteOffset(14) + .setChecksummedData(content.slice(14, 2).asChecksummedData()) + .build(); + WriteObjectRequest reqWrite16 = + WriteObjectRequest.newBuilder() + .setWriteOffset(16) + .setChecksummedData(content.slice(16, 1).asChecksummedData()) + .build(); + WriteObjectRequest reqFinish = + WriteObjectRequest.newBuilder() + .setFinishWrite(true) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(content.getCrc32c()).build()) + .mergeFrom(reqWrite16) + .build(); + WriteObjectResponse resFinish = + WriteObjectResponse.newBuilder() + .setResource( + reqStart.getWriteObjectSpec().getResource().toBuilder() + .setGeneration(1) + .setSize(17) + .setChecksums( + ObjectChecksums.newBuilder() + .setCrc32C(content.getCrc32c()) + .setMd5Hash(content.getMd5Bytes()) + .build()) + .build()) + .build(); + ImmutableSet allReqWrite = + ImmutableSet.of( + reqWrite0, + reqWrite2, + reqWrite4, + reqWrite6, + reqWrite8, + reqWrite10, + reqWrite12, + reqWrite14, + reqWrite16); + + AtomicInteger retryCount = new AtomicInteger(0); + StorageImplBase service = + new StorageImplBase() { + @Override + public void startResumableWrite( + StartResumableWriteRequest req, StreamObserver respond) { + if (req.equals(reqStart)) { + respond.onNext(resStart); + respond.onCompleted(); + } else { + unexpected(respond, req); + } + } + + @Override + public void queryWriteStatus( + QueryWriteStatusRequest req, StreamObserver respond) { + if (req.equals(reqQuery)) { + 
respond.onNext(resQuery); + respond.onCompleted(); + } else { + unexpected(respond, req); + } + } + + @Override + public StreamObserver writeObject( + StreamObserver respond) { + return new StreamObserver() { + @Override + public void onNext(WriteObjectRequest value) { + if (value.equals(reqFinish)) { + respond.onNext(resFinish); + respond.onCompleted(); + } else if (value.equals(reqWrite10)) { + int i = retryCount.get(); + if (i < 2) { + respond.onError(apiException(Code.UNAVAILABLE, "{Unavailable}")); + } + } else if (value.equals(reqWrite8WithUploadId)) { + retryCount.incrementAndGet(); + } else if (allReqWrite.contains(value)) { + // do nothing + } else { + unexpected(respond, value); + } + } + + @Override + public void onError(Throwable t) {} + + @Override + public void onCompleted() {} + }; + } + + private void unexpected(StreamObserver respond, Message msg) { + respond.onError( + apiException( + Code.UNIMPLEMENTED, + "Unexpected request { " + TextFormat.printer().shortDebugString(msg) + " }")); + } + }; + try (FakeServer fakeServer = FakeServer.of(service); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + + BlobInfo info = BlobInfo.newBuilder("b", "o").build(); + SettableApiFuture resultFuture = SettableApiFuture.create(); + BufferHandle recoverBufferHandle = BufferHandle.allocate(2); + ChunkSegmenter chunkSegmenter = + new ChunkSegmenter(Hasher.enabled(), ByteStringStrategy.copy(), 2, 2); + SyncAndUploadUnbufferedWritableByteChannel syncAndUpload = + new SyncAndUploadUnbufferedWritableByteChannel( + storage.storageClient.writeObjectCallable(), + storage.storageClient.queryWriteStatusCallable(), + resultFuture, + chunkSegmenter, + new DefaultRetrier(UnaryOperator.identity(), storage.getOptions()), + StorageRetryStrategy.getDefaultStorageRetryStrategy().getIdempotentHandler(), + WriteCtx.of( + new ResumableWrite( + reqStart, + resStart, + id -> 
reqWrite0.toBuilder().clearWriteObjectSpec().setUploadId(id).build()), + chunkSegmenter.getHasher()), + recoveryFileManager.newRecoveryFile(info), + recoverBufferHandle); + try (BufferedWritableByteChannel w = + StorageByteChannels.writable() + .createSynchronized( + new DefaultBufferedWritableByteChannel(recoverBufferHandle, syncAndUpload))) { + w.write(ByteBuffer.wrap(content.getBytes())); + } + + Decoder decoder = + Conversions.grpc().blobInfo().compose(WriteObjectResponse::getResource); + BlobInfo actual = decoder.decode(resultFuture.get(3, TimeUnit.SECONDS)); + assertThat(actual.getSize()).isEqualTo(content.getBytes().length); + assertThat(actual.getCrc32c()).isEqualTo(content.getCrc32cBase64()); + } + } + + static List dataFrames(long length, int segmentLength) { + // todo: rethink this + Random rand = new Random(length); + ArrayList segments = new ArrayList<>(); + + int i = 0; + for (; i < length; i += segmentLength) { + long remaining = length - i; + int size = Math.toIntExact(Math.min(remaining, segmentLength)); + byte[] bytes = DataGenerator.rand(rand).genBytes(size); + if (size > 4) { + byte[] byteArray = Ints.toByteArray(i); + ByteString offset = ByteString.copyFrom(byteArray); + ByteString concat = offset.concat(ByteString.copyFrom(bytes, 4, bytes.length - 4)); + segments.add(concat); + } else { + segments.add(ByteString.copyFrom(bytes)); + } + } + + return ImmutableList.copyOf(segments); + } + + @Provide("scenario") + static Arbitrary scenarioArbitrary() { + // 1. choose an alignment quantum + return alignmentQuantumArbitrary() + .flatMap( + quantum -> + Combinators.combine( + Arbitraries.just(quantum), + // 2. choose a segment size between 1 and 8 times the quantum + ints().between(1, 8).map(mult -> quantum * mult)) + .as(Tuple::of)) + .flatMap( + t -> { + int segmentSize = t.get2(); + return Combinators.combine( + Arbitraries.just(t.get1()), + Arbitraries.just(segmentSize), + // 3. 
choose an object size between 0 and 32 time segment size + // this helps keep the maximum number of rights relatively low and + // proportional with the size of the object + ints().between(0, 32 * segmentSize)) + .as(Tuple::of); + }) + .flatMap( + t -> { + int quantum = t.get1(); + int objectSize = t.get3(); + // if the object isn't 0 bytes, set our min write size to be 1 + int minWriteSize = Math.min(1, objectSize); + + // determine how many quantum will make up the full object + // we want to align failures to quantum boundaries like GCS does + int quantumCount = objectSize / quantum; + return Combinators.combine( + Arbitraries.just(quantum), + Arbitraries.just(t.get2()), + Arbitraries.just(objectSize), + ints().between(minWriteSize, objectSize), + // 4. generate between 0 and 3 failure offsets + ints() + .between(0, quantumCount) + .map(i -> FailureOffset.of((long) i * quantum)) + .list() + .ofMinSize(0) + .ofMaxSize(3) + .map(FailuresQueue::new)) + .as(Tuple::of); + }) + .map( + t -> { + // 5. Construct our scenario from the generated values + int quantum = t.get1(); + int segmentSize = t.get2(); + int objectSize = t.get3(); + int writeSize = t.get4(); + return Scenario.of( + String.format(Locale.US, "object-%d", t.hashCode()), + objectSize, + writeSize, + segmentSize, + quantum, + t.get5()); + }) + // The way we're defining things there aren't critical edge cases. 
Let jqwik know, so it + // can be smarter about generation, evaluation and shrinking + .withoutEdgeCases(); + } + + static Arbitrary alignmentQuantumArbitrary() { + // 16..256KiB + return ints().between(4, 18).map(i -> Math.toIntExact((long) Math.pow(2, i))); + } + + @NonNull + private static IntegerArbitrary ints() { + return Arbitraries.integers().withDistribution(RandomDistribution.uniform()); + } + + private static String fmt(int i) { + return String.format(Locale.US, "% 10d (0x%08x)", i, i); + } + + private static final class Scenario { + private final String toString; + private final String objectName; + private final long objectSize; + private final ChunkSegmenter chunkSegmenter; + private final BufferHandle bufferHandle; + private final BufferHandle copyBuffer; + private final FailuresQueue failuresQueue; + private final RecoveryFileManager recoveryFileManager; + private final List dataFrames; + + private Scenario( + String toString, + String objectName, + long objectSize, + ChunkSegmenter chunkSegmenter, + BufferHandle bufferHandle, + BufferHandle copyBuffer, + FailuresQueue failuresQueue, + RecoveryFileManager recoveryFileManager, + List dataFrames) { + this.toString = toString; + this.objectName = objectName; + this.objectSize = objectSize; + this.chunkSegmenter = chunkSegmenter; + this.bufferHandle = bufferHandle; + this.copyBuffer = copyBuffer; + this.failuresQueue = failuresQueue; + this.recoveryFileManager = recoveryFileManager; + this.dataFrames = dataFrames; + } + + BufferedWritableByteChannel buffered(UnbufferedWritableByteChannel c) { + return StorageByteChannels.writable() + .createSynchronized(new DefaultBufferedWritableByteChannel(bufferHandle, c)); + } + + public static Scenario of( + String objectName, + long objectSize, + int writeSize, + int segmentSize, + int quantum, + FailuresQueue failuresQueue) { + + List nonQuantumAligned = + failuresQueue.statuses.stream() + .filter(f -> f.getOffset() % quantum != 0) + .collect(Collectors.toList()); 
+ assertWithMessage("Failure offsets not quantum aligned (quantum=%s)", fmt(quantum)) + .that(nonQuantumAligned) + .isEmpty(); + List dataFrames = dataFrames(objectSize, writeSize); + return new Scenario( + MoreObjects.toStringHelper(Scenario.class) + .add("\n objectName", objectName) + .add("\n objectSize", objectSize) + .add("\n writeSize", writeSize) + .add("\n segmentSize", segmentSize) + .add("\n quantum", quantum) + .add("\n dataFrames.size()", dataFrames.size()) + .add("\n failuresQueue", failuresQueue) + .addValue("\n") + .toString(), + objectName, + objectSize, + new ChunkSegmenter(Hasher.enabled(), ByteStringStrategy.copy(), segmentSize, quantum), + BufferHandle.allocate(segmentSize), + BufferHandle.allocate(segmentSize), + failuresQueue, + ITSyncAndUploadUnbufferedWritableByteChannelPropertyTest.recoveryFileManager, + dataFrames); + } + + @Override + public String toString() { + return toString; + } + } + + private static final class UploadId { + private final String id; + + private UploadId(String id) { + this.id = id; + } + + private static UploadId of(String id) { + return new UploadId(id); + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (!(o instanceof UploadId)) { + return false; + } + UploadId uploadId = (UploadId) o; + return Objects.equals(id, uploadId.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("id", id).toString(); + } + } + + private static final class UploadCtx { + private final UploadId id; + private final StartResumableWriteRequest req; + + private final List parts; + + private FailuresQueue failuresQueue; + private long length; + + private UploadCtx(UploadId id, StartResumableWriteRequest req) { + this.id = id; + this.req = req; + this.parts = Collections.synchronizedList(new ArrayList<>()); + } + + public static UploadCtx of(UploadId id, 
StartResumableWriteRequest req) { + return new UploadCtx(id, req); + } + + UploadId getId() { + return id; + } + + StartResumableWriteRequest getReq() { + return req; + } + + void addPart(WriteObjectRequest req) { + length += req.getChecksummedData().getContent().size(); + parts.add(req); + } + + long getLength() { + return length; + } + + boolean finishWrite() { + if (!parts.isEmpty()) { + return parts.get(parts.size() - 1).getFinishWrite(); + } else { + return false; + } + } + + public Code consume(WriteObjectRequest req) { + if (failuresQueue != null) { + FailureOffset peek = failuresQueue.pending.peekFirst(); + if (peek != null) { + if (req.hasChecksummedData()) { + long writeOffset = req.getWriteOffset(); + ByteString content = req.getChecksummedData().getContent(); + int size = content.size(); + boolean applies = writeOffset <= peek.offset && peek.offset < writeOffset + size; + if (applies) { + int subLength = Math.toIntExact(Math.subtractExact(peek.offset, writeOffset)); + ByteString substring = content.substring(0, subLength); + WriteObjectRequest.Builder b = req.toBuilder(); + b.getChecksummedDataBuilder().setContent(substring); + b.clearFinishWrite(); + failuresQueue.pending.pop(); + length += substring.size(); + parts.add(b.build()); + return peek.getStatus(); + } + } + } + } + addPart(req); + return Code.OK; + } + } + + private static final class FailuresQueue { + + private final List statuses; + + private final Deque pending; + + private FailuresQueue(List statuses) { + this.statuses = ImmutableList.sortedCopyOf(FailureOffset.COMP, statuses); + ArrayDeque tmp = new ArrayDeque<>(); + this.statuses.forEach(tmp::addLast); + this.pending = tmp; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("statuses", statuses).toString(); + } + + static FailuresQueue empty() { + return new FailuresQueue(ImmutableList.of()); + } + } + + private static final class FailureOffset implements Comparable { + private static final 
Comparator COMP = + Comparator.comparing(FailureOffset::getOffset); + private final long offset; + private final Status.Code status; + + private FailureOffset(long offset, Code status) { + this.offset = offset; + this.status = status; + } + + public long getOffset() { + return offset; + } + + public Code getStatus() { + return status; + } + + @Override + public int compareTo(FailureOffset o) { + return COMP.compare(this, o); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("offset", offset) + .add("status", status) + .toString(); + } + + private static FailureOffset of(long offset) { + return new FailureOffset(offset, Code.INTERNAL); + } + } + + private static final class FailureInducingStorageImpl extends StorageImplBase { + + private final Map data; + + public FailureInducingStorageImpl() { + this.data = Collections.synchronizedMap(new HashMap<>()); + } + + void reset() { + data.clear(); + } + + @Override + public void startResumableWrite( + StartResumableWriteRequest request, + StreamObserver responseObserver) { + UploadId id = UploadId.of(UUID.randomUUID().toString()); + data.put(id, UploadCtx.of(id, request)); + StartResumableWriteResponse startResumableWriteResponse = + StartResumableWriteResponse.newBuilder().setUploadId(id.id).build(); + responseObserver.onNext(startResumableWriteResponse); + responseObserver.onCompleted(); + } + + @Override + public StreamObserver writeObject( + StreamObserver responseObserver) { + return new FailureInducingWriteObjectRequestObserver(responseObserver, data); + } + + @Override + public void queryWriteStatus( + QueryWriteStatusRequest queryWriteStatusRequest, + StreamObserver responseObserver) { + UploadId uploadId = UploadId.of(queryWriteStatusRequest.getUploadId()); + UploadCtx ctx; + if (data.containsKey(uploadId)) { + ctx = data.get(uploadId); + } else { + responseObserver.onError(Code.NOT_FOUND.toStatus().asRuntimeException()); + return; + } + 
QueryWriteStatusResponse.Builder b = QueryWriteStatusResponse.newBuilder(); + if (ctx.finishWrite()) { + b.setResource( + ctx.getReq().getWriteObjectSpec().getResource().toBuilder() + .setSize(ctx.getLength()) + .setGeneration(1) + .setMetageneration(1) + .build()); + } else { + b.setPersistedSize(ctx.getLength()); + } + QueryWriteStatusResponse queryWriteStatusResponse = b.build(); + responseObserver.onNext(queryWriteStatusResponse); + responseObserver.onCompleted(); + } + } + + private static final class FailureInducingWriteObjectRequestObserver + implements StreamObserver { + private final StreamObserver responseObserver; + private final Map data; + + private UploadCtx ctx; + private boolean errored; + + public FailureInducingWriteObjectRequestObserver( + StreamObserver responseObserver, Map data) { + this.data = data; + this.responseObserver = responseObserver; + this.ctx = null; + this.errored = false; + } + + @Override + public void onNext(WriteObjectRequest writeObjectRequest) { + if (writeObjectRequest.hasChecksummedData()) { + ChecksummedData checksummedData = writeObjectRequest.getChecksummedData(); + if (!checksummedData.hasCrc32C()) { + errored = true; + sendFailure("no crc32c value specified"); + return; + } + if (!checksummedData.getContent().isEmpty() && checksummedData.getCrc32C() == 0) { + errored = true; + sendFailure("crc32c value of 0 with non-empty content"); + return; + } + } + if (writeObjectRequest.hasObjectChecksums() + && !writeObjectRequest.getObjectChecksums().hasCrc32C()) { + errored = true; + sendFailure("missing object_checksums.crc32c"); + return; + } + if (ctx == null) { + UploadId uploadId = UploadId.of(writeObjectRequest.getUploadId()); + if (data.containsKey(uploadId)) { + ctx = data.get(uploadId); + } else { + errored = true; + responseObserver.onError(Code.NOT_FOUND.toStatus().asRuntimeException()); + return; + } + } + Status.Code ret = ctx.consume(writeObjectRequest); + if (ret != Code.OK) { + errored = true; + 
responseObserver.onError(ret.toStatus().asRuntimeException()); + } + } + + @Override + public void onError(Throwable throwable) { + if (errored) { + return; + } + responseObserver.onError(throwable); + } + + @Override + public void onCompleted() { + if (errored) { + return; + } + WriteObjectResponse resp = + WriteObjectResponse.newBuilder() + .setResource( + ctx.getReq().getWriteObjectSpec().getResource().toBuilder() + .setSize(ctx.getLength()) + .setGeneration(1) + .setMetageneration(1) + .build()) + .build(); + responseObserver.onNext(resp); + responseObserver.onCompleted(); + } + + private void sendFailure(String description) { + responseObserver.onError( + Code.INVALID_ARGUMENT.toStatus().withDescription(description).asRuntimeException()); + } + } + + @FunctionalInterface + private interface ResultRetryAlgorithmAdapter extends ResultRetryAlgorithm { + + @Override + default TimedAttemptSettings createNextAttempt( + Throwable prevThrowable, java.lang.Object prevResponse, TimedAttemptSettings prevSettings) { + return null; + } + } + + private static final class ForcedFailure extends RuntimeException { + public ForcedFailure(String message) { + super(message); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITSyncingFileChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITSyncingFileChannelTest.java new file mode 100644 index 000000000000..cf356e0ab8be --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITSyncingFileChannelTest.java @@ -0,0 +1,187 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.RewindableContentPropertyTest.byteBuffers; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static java.nio.file.Files.readAllBytes; + +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import java.io.IOException; +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.List; +import java.util.Locale; +import java.util.stream.Collector; +import java.util.stream.Collectors; +import net.jqwik.api.Arbitraries; +import net.jqwik.api.Arbitrary; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; +import net.jqwik.api.Provide; + +public final class ITSyncingFileChannelTest { + + /** + * Run a series of generated scenarios where each write is performed against a {@link + * SyncingFileChannel} after {@link SyncingFileChannel#write(ByteBuffer)} returns verify the full + * contents of the file match the expected cumulative value. 
+ */ + @Property + void shouldHandleAnySizeWriteGt0(@ForAll("WriteScenario") WriteScenario writeScenario) + throws IOException { + // use try-with-resource to approximate @TearDown and cleanup the file + try (WriteScenario ws = writeScenario) { + Path path = ws.getPath(); + try (FileChannel fc = FileChannel.open(path, ws.getOpenOptions()); + SyncingFileChannel syncing = new SyncingFileChannel(fc)) { + assertThat(syncing.isOpen()).isTrue(); + ByteBuffer[] writes = ws.writes(); + for (int i = 0; i < writes.length; i++) { + ByteBuffer buf = writes[i]; + syncing.write(buf); + assertThat(xxd(readAllBytes(path))).isEqualTo(ws.expected(i)); + } + } + assertThat(xxd(readAllBytes(path))).isEqualTo(ws.all()); + } + } + + @Provide("WriteScenario") + static Arbitrary writeScenario() { + return Arbitraries.lazyOf( + () -> + Arbitraries.oneOf( + byteBuffers(1, 10), + byteBuffers(10, 100), + byteBuffers(100, 1_000), + byteBuffers(1_000, 10_000), + byteBuffers(10_000, 100_000), + byteBuffers(100_000, 1_000_000))) + .map( + buffers -> + Arrays.stream(buffers).filter(Buffer::hasRemaining).toArray(ByteBuffer[]::new)) + .filter( + buffers -> { + long totalAvailable = Arrays.stream(buffers).mapToLong(ByteBuffer::remaining).sum(); + return totalAvailable > 0; + }) + .map(WriteScenario::of); + } + + static final class WriteScenario implements AutoCloseable { + private static final Path TMP_DIR = Paths.get(System.getProperty("java.io.tmpdir")); + private static final Collector DEBUG_JOINER = + Collectors.joining(",\n\t", "[\n\t", "\n]"); + + private final Path path; + private final ByteBuffer[] writes; + private final ByteString[] expectedCumulativeContents; + private final EnumSet openOptions; + + private WriteScenario(Path path, ByteBuffer[] writes, ByteString[] expectedCumulativeContents) { + this.path = path; + this.writes = writes; + this.expectedCumulativeContents = expectedCumulativeContents; + this.openOptions = EnumSet.of(StandardOpenOption.CREATE, StandardOpenOption.WRITE); + } 
+ + public Path getPath() { + return path; + } + + public EnumSet getOpenOptions() { + return openOptions; + } + + ByteBuffer[] writes() { + return Arrays.stream(writes).map(ByteBuffer::duplicate).toArray(ByteBuffer[]::new); + } + + String expected(int idx) { + Preconditions.checkArgument( + 0 <= idx && idx < expectedCumulativeContents.length, + "index out of bounds: (0 <= %s && %s < %s)", + idx, + idx, + expectedCumulativeContents.length); + return xxd(false, expectedCumulativeContents[idx].asReadOnlyByteBuffer()); + } + + String all() { + return xxd( + false, + expectedCumulativeContents[expectedCumulativeContents.length - 1].asReadOnlyByteBuffer()); + } + + @Override + public void close() throws IOException { + Files.deleteIfExists(path); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("\npath", path) + .add( + "\nwrites", + Arrays.stream(writes) + .map( + b -> + String.format( + Locale.US, "%s \n %s", b.toString(), xxd(false, b.duplicate()))) + .collect(DEBUG_JOINER)) + .add( + "\nexpectedCumulativeContents", + Arrays.stream(expectedCumulativeContents) + .map(ByteString::toString) + .collect(DEBUG_JOINER)) + .toString(); + } + + public static WriteScenario of(ByteBuffer[] byteBuffers) { + try { + Path path = Files.createTempFile(TMP_DIR, WriteScenario.class.getName() + "-", ".bin"); + + List byteStrings = new ArrayList<>(); + for (int i = 0; i < byteBuffers.length; i++) { + ByteString bs = ByteString.empty(); + for (int j = 0; j <= i; j++) { + ByteBuffer byteBuffer = byteBuffers[j].duplicate(); + bs = bs.concat(ByteStringStrategy.noCopy().apply(byteBuffer)); + } + byteStrings.add(bs); + } + + return new WriteScenario(path, byteBuffers, byteStrings.toArray(new ByteString[0])); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITUnbufferedResumableUploadTest.java 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITUnbufferedResumableUploadTest.java new file mode 100644 index 000000000000..59f694fe303b --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITUnbufferedResumableUploadTest.java @@ -0,0 +1,256 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.ITUnbufferedResumableUploadTest.ObjectSizes; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.CrossRun.Exclude; +import com.google.cloud.storage.it.runner.annotations.Inject; 
+import com.google.cloud.storage.it.runner.annotations.Parameterized; +import com.google.cloud.storage.it.runner.annotations.Parameterized.Parameter; +import com.google.cloud.storage.it.runner.annotations.Parameterized.ParametersProvider; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.collect.ImmutableList; +import com.google.storage.v2.Object; +import com.google.storage.v2.StorageClient; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import com.google.storage.v2.WriteObjectSpec; +import java.io.IOException; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Supplier; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.PROD}, + transports = {Transport.HTTP, Transport.GRPC}) +@Parameterized(ObjectSizes.class) +public final class ITUnbufferedResumableUploadTest { + + @Inject public Storage storage; + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + @Parameter public int objectSize; + + public static final class ObjectSizes implements ParametersProvider { + + @Override + public ImmutableList parameters() { + return ImmutableList.of(256 * 1024, 2 * 1024 * 1024, 8 * 1024 * 1024); + } + } + + @Test + @Exclude(transports = Transport.GRPC) + public void json() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + UnbufferedWritableByteChannelSession session = jsonSession(); + + int additional = 13; + long size = objectSize + additional; + ByteBuffer b = DataGenerator.base64Characters().genByteBuffer(size); + + UnbufferedWritableByteChannel open = session.open(); + int written1 = open.write(b); + 
assertThat(written1).isEqualTo(objectSize); + assertThat(b.remaining()).isEqualTo(additional); + + // no bytes should be consumed if less than 256KiB + int written2 = open.write(b); + assertThat(written2).isEqualTo(0); + assertThat(b.remaining()).isEqualTo(additional); + + int writtenAndClose = open.writeAndClose(b); + assertThat(writtenAndClose).isEqualTo(additional); + open.close(); + + StorageObject storageObject = session.getResult().get(2, TimeUnit.SECONDS); + assertThat(storageObject.getSize()).isEqualTo(BigInteger.valueOf(size)); + } + + @Test + @Exclude(transports = Transport.HTTP) + public void grpc() throws Exception { + UnbufferedWritableByteChannelSession session = grpcSession(); + + int additional = 13; + long size = objectSize + additional; + ByteBuffer b = DataGenerator.base64Characters().genByteBuffer(size); + + UnbufferedWritableByteChannel open = session.open(); + int written1 = open.write(b); + assertThat(written1).isEqualTo(objectSize); + assertThat(b.remaining()).isEqualTo(additional); + + // no bytes should be consumed if less than 256KiB + int written2 = open.write(b); + assertThat(written2).isEqualTo(0); + assertThat(b.remaining()).isEqualTo(additional); + + int writtenAndClose = open.writeAndClose(b); + assertThat(writtenAndClose).isEqualTo(additional); + open.close(); + WriteObjectResponse resp = session.getResult().get(2, TimeUnit.SECONDS); + assertThat(resp.getResource().getSize()).isEqualTo(size); + } + + @Test + @Exclude(transports = Transport.GRPC) + public void json_minFlush() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + UnbufferedWritableByteChannelSession session = jsonSession(); + + int additional = 13; + long size = objectSize + additional; + ByteBuffer b = DataGenerator.base64Characters().genByteBuffer(size); + + UnbufferedWritableByteChannel open = session.open(); + BufferHandle bufferHandle = BufferHandle.allocate(256 * 1024); + MinFlushBufferedWritableByteChannel channel = + new 
MinFlushBufferedWritableByteChannel(bufferHandle, open); + int written1 = channel.write(b); + assertThat(written1).isEqualTo(size); + assertThat(bufferHandle.position()).isEqualTo(additional); + + channel.close(); + assertThat(bufferHandle.remaining()).isEqualTo(bufferHandle.capacity()); + + StorageObject storageObject = session.getResult().get(2, TimeUnit.SECONDS); + assertThat(storageObject.getSize()).isEqualTo(BigInteger.valueOf(size)); + } + + @Test + @Exclude(transports = Transport.HTTP) + public void grpc_minFlush() throws Exception { + UnbufferedWritableByteChannelSession session = grpcSession(); + + int additional = 13; + long size = objectSize + additional; + ByteBuffer b = DataGenerator.base64Characters().genByteBuffer(size); + + UnbufferedWritableByteChannel open = session.open(); + BufferHandle bufferHandle = BufferHandle.allocate(256 * 1024); + MinFlushBufferedWritableByteChannel channel = + new MinFlushBufferedWritableByteChannel(bufferHandle, open); + int written1 = channel.write(b); + assertThat(written1).isEqualTo(size); + assertThat(bufferHandle.position()).isEqualTo(additional); + + channel.close(); + assertThat(bufferHandle.remaining()).isEqualTo(bufferHandle.capacity()); + + WriteObjectResponse resp = session.getResult().get(2, TimeUnit.SECONDS); + assertThat(resp.getResource().getSize()).isEqualTo(size); + } + + private UnbufferedWritableByteChannelSession jsonSession() { + BlobInfo blobInfo = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + Opts opts = Opts.empty(); + final Map optionsMap = opts.getRpcOptions(); + BlobInfo.Builder builder = blobInfo.toBuilder().setMd5(null).setCrc32c(null); + BlobInfo updated = opts.blobInfoMapper().apply(builder).build(); + + StorageObject encode = Conversions.json().blobInfo().encode(updated); + HttpStorageOptions options = (HttpStorageOptions) storage.getOptions(); + Retrier retrier = TestUtils.retrierFromStorageOptions(options); + Supplier uploadIdSupplier = + 
ResumableMedia.startUploadForBlobInfo( + options, + updated, + optionsMap, + retrier.withAlg( + StorageRetryStrategy.getUniformStorageRetryStrategy().getIdempotentHandler())); + JsonResumableWrite jsonResumableWrite = + JsonResumableWrite.of(encode, optionsMap, uploadIdSupplier.get(), 0); + + return ResumableMedia.http() + .write() + .byteChannel(HttpClientContext.from(options.getStorageRpcV1())) + .resumable() + .unbuffered() + .setStartAsync(ApiFutures.immediateFuture(jsonResumableWrite)) + .build(); + } + + private UnbufferedWritableByteChannelSession grpcSession() { + BlobInfo blobInfo = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + Opts opts = Opts.empty(); + BlobInfo.Builder builder = blobInfo.toBuilder().setMd5(null).setCrc32c(null); + BlobInfo updated = opts.blobInfoMapper().apply(builder).build(); + + Object object = Conversions.grpc().blobInfo().encode(updated); + Object.Builder objectBuilder = + object.toBuilder() + // required if the data is changing + .clearChecksums() + // trimmed to shave payload size + .clearGeneration() + .clearMetageneration() + .clearSize() + .clearCreateTime() + .clearUpdateTime(); + WriteObjectSpec.Builder specBuilder = WriteObjectSpec.newBuilder().setResource(objectBuilder); + + WriteObjectRequest.Builder requestBuilder = + WriteObjectRequest.newBuilder().setWriteObjectSpec(specBuilder); + + WriteObjectRequest request = opts.writeObjectRequest().apply(requestBuilder).build(); + + GrpcCallContext merge = Retrying.newCallContext(); + StorageClient storageClient = PackagePrivateMethodWorkarounds.maybeGetStorageClient(storage); + assertThat(storageClient).isNotNull(); + ApiFuture start = + ResumableMedia.gapic() + .write() + .resumableWrite( + storageClient.startResumableWriteCallable().withDefaultCallContext(merge), + request, + opts, + RetrierWithAlg.attemptOnce()); + + return ResumableMedia.gapic() + .write() + .byteChannel(storageClient.writeObjectCallable()) + .resumable() + .unbuffered() + 
.setStartAsync(start) + .build(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITUpdateMaskTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITUpdateMaskTest.java new file mode 100644 index 000000000000..4fc803b81981 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ITUpdateMaskTest.java @@ -0,0 +1,466 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.Utils.bucketNameCodec; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.BlobInfo.CustomerEncryption; +import com.google.cloud.storage.BucketInfo.CustomPlacementConfig; +import com.google.cloud.storage.BucketInfo.IamConfiguration; +import com.google.cloud.storage.BucketInfo.LifecycleRule; +import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleAction; +import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleCondition; +import com.google.cloud.storage.BucketInfo.Logging; +import com.google.cloud.storage.Storage.BlobField; +import com.google.cloud.storage.Storage.BucketField; +import com.google.cloud.storage.UnifiedOpts.NamedField; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.protobuf.FieldMask; +import com.google.storage.v2.Bucket; +import com.google.storage.v2.Object; +import com.google.storage.v2.StorageGrpc.StorageImplBase; +import com.google.storage.v2.UpdateBucketRequest; +import com.google.storage.v2.UpdateObjectRequest; +import io.grpc.stub.StreamObserver; +import java.time.Duration; +import java.time.OffsetDateTime; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.UnaryOperator; +import org.junit.Test; +import org.junit.experimental.runners.Enclosed; +import org.junit.runner.RunWith; + +@RunWith(Enclosed.class) +public final class ITUpdateMaskTest { + + public static final class BlobInfoUpdateMask { + + @Test + public void updateObjectRequest() throws Exception { + Object expectedObject = + Object.newBuilder() + .setBucket(bucketNameCodec.encode("bucket")) + .setName("obj-name") + .putMetadata("x", "X") + .build(); + UpdateObjectRequest expected = + UpdateObjectRequest.newBuilder() 
+ .setObject(expectedObject) + .setUpdateMask(FieldMask.newBuilder().addPaths("metadata.x").build()) + .build(); + + AtomicReference actualRequest = new AtomicReference<>(); + StorageImplBase service = + new StorageImplBase() { + @Override + public void updateObject(UpdateObjectRequest request, StreamObserver obs) { + try { + actualRequest.compareAndSet(null, request); + obs.onNext(expectedObject); + obs.onCompleted(); + } catch (Exception e) { + obs.onError(e); + } + } + }; + + try (FakeServer fake = FakeServer.of(service); + Storage s = fake.getGrpcStorageOptions().getService()) { + BlobInfo base = base(); + s.update(base.toBuilder().setMetadata(ImmutableMap.of("x", "X")).build()); + } + + UpdateObjectRequest actual = actualRequest.get(); + assertThat(actual).isNotNull(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void blobInfo_field_metadata() { + testBlobField( + b -> b.setMetadata(ImmutableMap.of("x", "X")), + NamedField.nested(BlobField.METADATA, NamedField.literal("x"))); + } + + @Test + public void blobInfo_field_acl() { + testBlobField(b -> b.setAcl(ImmutableList.of()), BlobField.ACL); + } + + @Test + public void blobInfo_field_cacheControl() { + testBlobField(b -> b.setCacheControl("cc"), BlobField.CACHE_CONTROL); + } + + @Test + public void blobInfo_field_contentDisposition() { + testBlobField(b -> b.setContentDisposition("cd"), BlobField.CONTENT_DISPOSITION); + } + + @Test + public void blobInfo_field_contentEncoding() { + testBlobField(b -> b.setContentEncoding("ce"), BlobField.CONTENT_ENCODING); + } + + @Test + public void blobInfo_field_contentLanguage() { + testBlobField(b -> b.setContentLanguage("cl"), BlobField.CONTENT_LANGUAGE); + } + + @Test + public void blobInfo_field_contentType() { + testBlobField(b -> b.setContentType("ct"), BlobField.CONTENT_TYPE); + } + + @Test + public void blobInfo_field_crc32c() { + testBlobField(b -> b.setCrc32c("c"), BlobField.CRC32C); + } + + @Test + public void 
blobInfo_field_crc32cFromHexString() { + testBlobField(b -> b.setCrc32cFromHexString("145d34"), BlobField.CRC32C); + } + + @Test + public void blobInfo_field_etag() { + testBlobField(b -> b.setEtag("e"), BlobField.ETAG); + } + + @Test + public void blobInfo_field_md5() { + testBlobField(b -> b.setMd5("m"), BlobField.MD5HASH); + } + + @Test + public void blobInfo_field_md5FromHexString() { + testBlobField(b -> b.setMd5FromHexString("145d34"), BlobField.MD5HASH); + } + + @Test + public void blobInfo_field_owner() { + testBlobField(b -> b.setOwner(new User("x@y.z")), BlobField.OWNER); + } + + @Test + public void blobInfo_field_storageClass() { + testBlobField(b -> b.setStorageClass(StorageClass.COLDLINE), BlobField.STORAGE_CLASS); + } + + @Test + public void blobInfo_field_timeDeleted() { + testBlobField(b -> b.setDeleteTimeOffsetDateTime(OffsetDateTime.MAX), BlobField.TIME_DELETED); + } + + @Test + public void blobInfo_field_timeCreated() { + testBlobField(b -> b.setCreateTimeOffsetDateTime(OffsetDateTime.MAX), BlobField.TIME_CREATED); + } + + @Test + public void blobInfo_field_kmsKeyName() { + testBlobField(b -> b.setKmsKeyName("key"), BlobField.KMS_KEY_NAME); + } + + @Test + public void blobInfo_field_eventBasedHold() { + testBlobField(b -> b.setEventBasedHold(true), BlobField.EVENT_BASED_HOLD); + } + + @Test + public void blobInfo_field_temporaryHold() { + testBlobField(b -> b.setTemporaryHold(true), BlobField.TEMPORARY_HOLD); + } + + @Test + public void blobInfo_field_retentionExpirationTime() { + testBlobField( + b -> b.setRetentionExpirationTimeOffsetDateTime(OffsetDateTime.MAX), + BlobField.RETENTION_EXPIRATION_TIME); + } + + @Test + public void blobInfo_field_updated() { + testBlobField(b -> b.setUpdateTimeOffsetDateTime(OffsetDateTime.MAX), BlobField.UPDATED); + } + + @Test + public void blobInfo_field_customTime() { + testBlobField(b -> b.setCustomTimeOffsetDateTime(OffsetDateTime.MAX), BlobField.CUSTOM_TIME); + } + + @Test + public void 
blobInfo_field_timeStorageClassUpdated() { + testBlobField( + b -> b.setTimeStorageClassUpdatedOffsetDateTime(OffsetDateTime.MAX), + BlobField.TIME_STORAGE_CLASS_UPDATED); + } + + @Test + public void blobInfo_field_customerEncryption() { + testBlobField( + b -> b.setCustomerEncryption(new CustomerEncryption("alg", "sha")), + BlobField.CUSTOMER_ENCRYPTION); + } + + @Test + public void blobInfo_field_blobId_changeBucketNameGeneration() { + testBlobField( + b -> b.setBlobId(BlobId.of("bucket2", "obj2", 3L)), + BlobField.BUCKET, + BlobField.NAME, + BlobField.GENERATION); + } + + @Test + public void blobInfo_field_blobId_changeName() { + testBlobField(b -> b.setBlobId(BlobId.of("bucket", "obj2")), BlobField.NAME); + } + + @Test + public void blobInfo_field_blobId_changeGeneration() { + testBlobField(b -> b.setBlobId(BlobId.of("bucket", "obj-name", 3L)), BlobField.GENERATION); + } + + private static void testBlobField( + UnaryOperator f, NamedField... expectedModified) { + BlobInfo actual1 = f.apply(base().toBuilder()).build(); + assertThat(actual1.getModifiedFields()).isEqualTo(ImmutableSet.copyOf(expectedModified)); + // verify that nothing is carried through from a previous state, and that setting the same + // value does not mark it as modified. 
/**
 * Tests that {@code BucketInfo.Builder} setters record exactly the expected modified field
 * path(s), and that those modified fields are translated into the {@code update_mask} of the
 * {@code UpdateBucketRequest} the gRPC transport sends.
 *
 * <p>NOTE(review): inline generic type arguments appear to have been stripped from this snippet in
 * transit (e.g. {@code AtomicReference}, {@code StreamObserver}, {@code UnaryOperator}); raw types
 * are retained here as-is — restore the original type parameters when merging.
 */
public static final class BucketInfoUpdateMask {

  @Test
  public void updateBucketRequest() throws Exception {
    // The bucket the fake server will return, and the request we expect the client to send:
    // only the "labels.x" path should appear in the update mask.
    Bucket expectedBucket =
        Bucket.newBuilder().setName(bucketNameCodec.encode("bucket")).putLabels("x", "X").build();
    UpdateBucketRequest expected =
        UpdateBucketRequest.newBuilder()
            .setBucket(expectedBucket)
            .setUpdateMask(FieldMask.newBuilder().addPaths("labels.x").build())
            .build();

    // Captures the request the in-process fake server receives.
    AtomicReference actualRequest = new AtomicReference<>();
    StorageImplBase service =
        new StorageImplBase() {
          @Override
          public void updateBucket(UpdateBucketRequest request, StreamObserver obs) {
            try {
              // compareAndSet(null, ...) keeps only the first observed request.
              actualRequest.compareAndSet(null, request);
              obs.onNext(expectedBucket);
              obs.onCompleted();
            } catch (Exception e) {
              obs.onError(e);
            }
          }
        };

    try (FakeServer fake = FakeServer.of(service);
        Storage s = fake.getGrpcStorageOptions().getService()) {
      BucketInfo base = base();
      // Setting labels marks "labels.x" as modified; update() should propagate it to the mask.
      s.update(base.toBuilder().setLabels(ImmutableMap.of("x", "X")).build());
    }

    UpdateBucketRequest actual = actualRequest.get();
    assertThat(actual).isNotNull();
    assertThat(actual).isEqualTo(expected);
  }

  // One test per BucketInfo.Builder setter: each asserts that invoking the setter marks exactly
  // the corresponding BucketField (or nested field path) as modified, and nothing else.

  @Test
  public void bucketInfo_field_setOwner() {
    testBucketField(b -> b.setOwner(new User("x@y.z")), BucketField.OWNER);
  }

  @Test
  public void bucketInfo_field_setVersioningEnabled() {
    testBucketField(b -> b.setVersioningEnabled(true), BucketField.VERSIONING);
  }

  @Test
  public void bucketInfo_field_setRequesterPays() {
    testBucketField(b -> b.setRequesterPays(true), BucketField.BILLING);
  }

  @Test
  public void bucketInfo_field_setIndexPage() {
    testBucketField(b -> b.setIndexPage("i"), BucketField.WEBSITE);
  }

  @Test
  public void bucketInfo_field_setNotFoundPage() {
    testBucketField(b -> b.setNotFoundPage("n"), BucketField.WEBSITE);
  }

  @Test
  public void bucketInfo_field_setLifecycleRules() {
    testBucketField(
        b ->
            b.setLifecycleRules(
                ImmutableList.of(
                    new LifecycleRule(
                        LifecycleAction.newDeleteAction(),
                        LifecycleCondition.newBuilder().setAge(1).build()))),
        BucketField.LIFECYCLE);
  }

  @Test
  public void bucketInfo_field_setRpo() {
    testBucketField(b -> b.setRpo(Rpo.ASYNC_TURBO), BucketField.RPO);
  }

  @Test
  public void bucketInfo_field_setStorageClass() {
    testBucketField(b -> b.setStorageClass(StorageClass.COLDLINE), BucketField.STORAGE_CLASS);
  }

  @Test
  public void bucketInfo_field_setLocation() {
    testBucketField(b -> b.setLocation("l"), BucketField.LOCATION);
  }

  @Test
  public void bucketInfo_field_setEtag() {
    testBucketField(b -> b.setEtag("e"), BucketField.ETAG);
  }

  @Test
  public void bucketInfo_field_setCreateTimeOffsetDateTime() {
    testBucketField(
        b -> b.setCreateTimeOffsetDateTime(OffsetDateTime.MAX), BucketField.TIME_CREATED);
  }

  @Test
  public void bucketInfo_field_setUpdateTimeOffsetDateTime() {
    testBucketField(b -> b.setUpdateTimeOffsetDateTime(OffsetDateTime.MAX), BucketField.UPDATED);
  }

  @Test
  public void bucketInfo_field_setCors() {
    testBucketField(
        b -> b.setCors(ImmutableList.of(Cors.newBuilder().setMaxAgeSeconds(2).build())),
        BucketField.CORS);
  }

  @Test
  public void bucketInfo_field_setAcl() {
    testBucketField(
        b -> b.setAcl(ImmutableList.of(Acl.of(new User("x@y.x"), Role.READER))), BucketField.ACL);
  }

  @Test
  public void bucketInfo_field_setDefaultAcl() {
    testBucketField(
        b -> b.setDefaultAcl(ImmutableList.of(Acl.of(new User("x@y.x"), Role.READER))),
        BucketField.DEFAULT_OBJECT_ACL);
  }

  @Test
  public void bucketInfo_field_setLabels() {
    // Labels are tracked per-key: the modified path is "labels.x", not the whole labels map.
    testBucketField(
        b -> b.setLabels(ImmutableMap.of("x", "X")),
        NamedField.nested(BucketField.LABELS, NamedField.literal("x")));
  }

  @Test
  public void bucketInfo_field_setDefaultKmsKeyName() {
    testBucketField(b -> b.setDefaultKmsKeyName("k"), BucketField.ENCRYPTION);
  }

  @Test
  public void bucketInfo_field_setDefaultEventBasedHold() {
    testBucketField(b -> b.setDefaultEventBasedHold(true), BucketField.DEFAULT_EVENT_BASED_HOLD);
  }

  @Test
  public void bucketInfo_field_setRetentionEffectiveTimeOffsetDateTime() {
    testBucketField(
        b -> b.setRetentionEffectiveTimeOffsetDateTime(OffsetDateTime.MAX),
        BucketField.RETENTION_POLICY);
  }

  @Test
  public void bucketInfo_field_setRetentionPolicyIsLocked() {
    testBucketField(b -> b.setRetentionPolicyIsLocked(true), BucketField.RETENTION_POLICY);
  }

  @Test
  public void bucketInfo_field_setRetentionPeriodDuration() {
    testBucketField(
        b -> b.setRetentionPeriodDuration(Duration.ofDays(3)), BucketField.RETENTION_POLICY);
  }

  @Test
  public void bucketInfo_field_setIamConfiguration() {
    testBucketField(
        b ->
            b.setIamConfiguration(
                IamConfiguration.newBuilder().setIsUniformBucketLevelAccessEnabled(true).build()),
        BucketField.IAMCONFIGURATION);
  }

  @Test
  public void bucketInfo_field_setLogging() {
    testBucketField(
        b -> b.setLogging(Logging.newBuilder().setLogBucket("bucket2").build()),
        BucketField.LOGGING);
  }

  @Test
  public void bucketInfo_field_setCustomPlacementConfig() {
    testBucketField(
        b ->
            b.setCustomPlacementConfig(
                CustomPlacementConfig.newBuilder()
                    .setDataLocations(ImmutableList.of("a", "b"))
                    .build()),
        BucketField.CUSTOM_PLACEMENT_CONFIG);
  }

  @Test
  public void bucketInfo_field_setLocationType() {
    testBucketField(b -> b.setLocationType("l"), BucketField.LOCATION_TYPE);
  }

  /**
   * Applies {@code f} to a fresh builder and asserts the reported modified fields equal
   * {@code expectedModified}; then applies {@code f} again to the result's builder and asserts
   * the modified set is empty.
   */
  private static void testBucketField(
      UnaryOperator f, NamedField... expectedModified) {
    BucketInfo actual1 = f.apply(base().toBuilder()).build();
    assertThat(actual1.getModifiedFields()).isEqualTo(ImmutableSet.copyOf(expectedModified));
    // verify that nothing is carried through from a previous state, and that setting the same
    // value does not mark it as modified.
    BucketInfo actual2 = f.apply(actual1.toBuilder()).build();
    assertThat(actual2.getModifiedFields()).isEqualTo(ImmutableSet.of());
  }

  // Minimal valid BucketInfo; each test derives its builder from this baseline.
  private static BucketInfo base() {
    return BucketInfo.newBuilder("bucket").build();
  }
}
+ */ + +package com.google.cloud.storage; + +import com.google.cloud.Policy; + +final class IamPolicyPropertyTest + extends BaseConvertablePropertyTest< + Policy, com.google.iam.v1.Policy, com.google.api.services.storage.model.Policy> {} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/JqwikTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/JqwikTest.java new file mode 100644 index 000000000000..d139a0200654 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/JqwikTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto; +import static net.jqwik.api.providers.TypeUsage.of; + +import com.google.protobuf.Message; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; +import net.jqwik.api.Arbitraries; +import net.jqwik.api.Arbitrary; +import net.jqwik.api.EdgeCases; +import net.jqwik.api.Example; +import net.jqwik.api.providers.TypeUsage; +import net.jqwik.engine.support.JqwikStringSupport; + +public class JqwikTest { + @Example + public void reportSimple() { + report(of(short.class)); + report(of(int.class)); + report(of(double.class)); + report(of(String.class)); + report(of(String.class).asNullable()); + report(of(List.class)); + report(of(List.class).asNullable()); + report(of(Map.class)); + report(of(Map.class).asNullable()); + report(of(Boolean.class)); + report(of(Boolean.class).asNullable()); + } + + public static void report(TypeUsage t) { + report(t, Arbitraries.defaultFor(t)); + } + + public static void report(TypeUsage t, Arbitrary objectArbitrary) { + if (!CIUtils.verbose()) { + return; + } + EdgeCases cases = objectArbitrary.edgeCases(); + // inspired from net.jqwik.engine.properties.arbitraries.EdgeCasesSupport$1#toString() + String formattedCases = + StreamSupport.stream(cases.spliterator(), false) + .map( + s -> { + Object value = s.value(); + if (value instanceof Message) { + Message m = (Message) value; + return fmtProto(m); + } else { + return JqwikStringSupport.displayString(value); + } + }) + .collect(Collectors.joining(",\n ", "[\n ", "]")); + System.out.printf("%-25s = EdgeCases%s%n", t, formattedCases); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/JsonUtilsTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/JsonUtilsTest.java new file mode 100644 index 000000000000..8348fa14cf1f --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/JsonUtilsTest.java @@ -0,0 +1,366 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.JsonUtils.jop; +import static com.google.cloud.storage.TestUtils.hashMapOf; +import static com.google.cloud.storage.UnifiedOpts.NamedField.literal; +import static com.google.cloud.storage.UnifiedOpts.NamedField.nested; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.client.util.Data; +import com.google.api.services.storage.model.ObjectCustomContextPayload; +import com.google.api.services.storage.model.StorageObject; +import com.google.api.services.storage.model.StorageObject.Contexts; +import com.google.cloud.storage.Storage.BlobField; +import com.google.cloud.storage.UnifiedOpts.NamedField; +import com.google.cloud.storage.UnifiedOpts.NestedNamedField; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Maps; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonArray; +import com.google.gson.JsonNull; +import com.google.gson.JsonObject; +import java.io.IOException; +import java.io.StringReader; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import 
java.util.TreeMap; +import java.util.TreeSet; +import java.util.stream.Stream; +import net.jqwik.api.Arbitraries; +import net.jqwik.api.Arbitrary; +import net.jqwik.api.Combinators; +import net.jqwik.api.Example; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; +import net.jqwik.api.Provide; +import net.jqwik.api.Tuple; +import net.jqwik.api.arbitraries.SetArbitrary; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +public final class JsonUtilsTest { + + @Example + public void getOutputJson_WithSelectedFields_metadata() throws IOException { + StorageObject src = jop.parseAndClose(new StringReader(jsonString), StorageObject.class); + StorageObject expected = + new StorageObject() + .setBucket("some-bucket") + .setName("some-name") + .setGeneration(1755811928351810L) + .setMetadata(hashMapOf("k1", Data.nullOf(String.class))); + + NestedNamedField nested = (NestedNamedField) nested(literal("metadata"), literal("k1")); + ImmutableSet modifiedFields = + Stream.of( + BlobField.REQUIRED_FIELDS.stream(), + Stream.of(BlobField.GENERATION), + Stream.of(nested)) + .flatMap(s -> s) + .collect(ImmutableSet.toImmutableSet()); + + StorageObject dst = JsonUtils.getOutputJsonWithSelectedFields(src, modifiedFields); + + assertThat(dst).isEqualTo(expected); + } + + @Example + public void getOutputJson_WithSelectedFields_contexts() throws IOException { + StorageObject src = jop.parseAndClose(new StringReader(jsonString), StorageObject.class); + StorageObject expected = + new StorageObject() + .setBucket("some-bucket") + .setName("some-name") + .setGeneration(1755811928351810L) + .setContexts(c(hashMapOf("k2", null))); + + NestedNamedField nested = + (NestedNamedField) nested(nested(literal("contexts"), literal("custom")), literal("k2")); + ImmutableSet modifiedFields = + Stream.of( + BlobField.REQUIRED_FIELDS.stream(), + Stream.of(BlobField.GENERATION), + Stream.of(nested)) + .flatMap(s -> s) + 
.collect(ImmutableSet.toImmutableSet()); + NamedField custom = nested.getParent(); + + StorageObject dst = JsonUtils.getOutputJsonWithSelectedFields(src, modifiedFields); + + assertThat(dst).isEqualTo(expected); + } + + @Property(tries = 10_000) + void getOutputJson_WithSelectedFields_works(@ForAll("jts") JsonTrimmingScenario s) { + JsonObject actual = JsonUtils.getOutputJson(s.original, s.fieldsToRetain); + + assertThat(actual).isEqualTo(s.expected); + } + + @Provide("jts") + static Arbitrary jsonTrimmingScenarioArbitrary() { + return fieldPaths() + .flatMap( + fieldPaths -> + Combinators.combine( + // carry through our field paths as-is + Arbitraries.just(fieldPaths), + // create a new map that contains any number of the defined field paths + // where we set the value to "3" + // the value here isn't actually important, just that it's set to a non-null + // value. + Arbitraries.maps(Arbitraries.of(fieldPaths), Arbitraries.just("3"))) + .as(Tuple::of) + .flatMap( + t -> { + Set paths = t.get1(); + assertThat(paths).isNotNull(); + Map m = t.get2(); + assertThat(m).isNotNull(); + + return Combinators.combine( + // carry through our m as is + Arbitraries.just(m), + // select a subset of the field paths we want to make sure are + // present in the output object + Arbitraries.of(paths).set().ofMinSize(1).ofMaxSize(paths.size())) + .as(JsonTrimmingScenario::of); + })); + } + + private static SetArbitrary fieldPaths() { + return fieldPath().set().ofMinSize(1).ofMaxSize(30); + } + + /** + * Generate a json field path with a depth between 1 and 4 (inclusive). + * + *

A json field path is of the form `a.b.c.d` + */ + private static @NonNull Arbitrary fieldPath() { + return Arbitraries.integers() + .between(1, 4) + .flatMap( + depth -> + Arbitraries.strings() + .withCharRange('a', 'f') + .ofLength(depth) + .map( + s -> { + StringBuilder sb = new StringBuilder(); + char[] charArray = s.toCharArray(); + for (int i = 0; i < charArray.length; i++) { + char c = charArray[i]; + sb.append(c); + if (i == 0) { + // add the overall length as part of the first key + // this makes is it so different depth keys don't collide + // and cause trouble for things like `a.a.a: 3` and `a.a.a.a: 4` + sb.append(charArray.length); + } + if (i + 1 < charArray.length) { + sb.append("."); + } + } + return sb.toString(); + })); + } + + @Example + public void treeify_flatten_roundtrip_withArray() { + JsonObject o = new JsonObject(); + JsonArray a = new JsonArray(); + JsonArray b = new JsonArray(); + b.add(JsonNull.INSTANCE); + b.add(JsonNull.INSTANCE); + b.add(JsonNull.INSTANCE); + b.add("b3"); + JsonObject a0 = new JsonObject(); + a0.addProperty("id", "a0"); + JsonObject a1 = new JsonObject(); + a1.addProperty("id", "a1"); + a.add(a0); + a.add(a1); + o.add("a", a); + o.add("b", b); + + Map expected = new TreeMap<>(); + expected.put("a[0].id", "a0"); + expected.put("a[1].id", "a1"); + expected.put("b[3]", "b3"); + expected.put("b[2]", null); + expected.put("b[1]", null); + expected.put("b[0]", null); + + Map flatten = new TreeMap<>(JsonUtils.flatten(o)); + assertThat(flatten).isEqualTo(expected); + + JsonObject treeify = JsonUtils.treeify(expected); + assertThat(treeify).isEqualTo(o); + } + + @Example + public void treeify_arrayWithHoles() { + JsonObject o = new JsonObject(); + JsonArray b = new JsonArray(); + b.add(JsonNull.INSTANCE); + b.add(JsonNull.INSTANCE); + b.add(JsonNull.INSTANCE); + b.add("b3"); + o.add("b", b); + + Map expected = new TreeMap<>(); + expected.put("b[3]", "b3"); + + JsonObject treeify = JsonUtils.treeify(expected); + 
assertThat(treeify).isEqualTo(o); + } + + @Example + public void treeify_flatten_roundtrip() { + ImmutableMap m = + ImmutableMap.of( + "a.b.c.d", "D", + "a.b.c.e", "E", + "f.g", "G", + "h", "H", + "z.x.y", "Y"); + + JsonObject expected = new JsonObject(); + JsonObject a = new JsonObject(); + JsonObject b = new JsonObject(); + JsonObject c = new JsonObject(); + JsonObject f = new JsonObject(); + JsonObject x = new JsonObject(); + JsonObject z = new JsonObject(); + + x.addProperty("y", "Y"); + z.add("x", x); + expected.add("z", z); + + f.addProperty("g", "G"); + + c.addProperty("d", "D"); + c.addProperty("e", "E"); + + b.add("c", c); + a.add("b", b); + + expected.add("a", a); + expected.add("f", f); + expected.addProperty("h", "H"); + + JsonObject treeified = JsonUtils.treeify(m); + assertThat(treeified).isEqualTo(expected); + + Map flattened = JsonUtils.flatten(treeified); + assertThat(flattened).isEqualTo(m); + } + + private static Contexts c(Map m) { + Contexts contexts = new Contexts(); + if (!m.isEmpty()) { + contexts.setCustom(Maps.transformValues(m, JsonUtilsTest::p)); + } + return contexts; + } + + private static @NonNull ObjectCustomContextPayload p(@Nullable String v) { + if (v == null) { + return Data.nullOf(ObjectCustomContextPayload.class); + } + return new ObjectCustomContextPayload().setValue(v); + } + + private static final class JsonTrimmingScenario { + private static final Gson gson = + new GsonBuilder() + // ensure null values are not stripped, they are important to us + .serializeNulls() + .create(); + + private final JsonObject original; + private final TreeSet fieldsToRetain; + private final JsonObject expected; + + private JsonTrimmingScenario( + JsonObject original, TreeSet fieldsToRetain, JsonObject expected) { + this.original = original; + this.fieldsToRetain = fieldsToRetain; + this.expected = expected; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("\noriginal", gson.toJson(original)) + 
.add("\nfieldsToRetain", fieldsToRetain) + .add("\nexpected", gson.toJson(expected)) + .toString(); + } + + public static JsonTrimmingScenario of(Map m, Set fieldsToRetain) { + TreeSet ftr = new TreeSet<>(fieldsToRetain); + JsonObject original = JsonUtils.treeify(m); + HashMap mm = new HashMap<>(Maps.filterKeys(m, fieldsToRetain::contains)); + for (String f : fieldsToRetain) { + if (m.containsKey(f)) { + continue; + } + + mm.put(f, null); + } + JsonObject expected = JsonUtils.treeify(mm); + return new JsonTrimmingScenario(original, ftr, expected); + } + } + + // language=JSON + private static final String jsonString = + "{\n" + + " \"bucket\": \"some-bucket\",\n" + + " \"contentType\": \"application/octet-stream\",\n" + + " \"crc32c\": \"AAAAAA\\u003d\\u003d\",\n" + + " \"etag\": \"CMLIoJLtnI8DEAE\\u003d\",\n" + + " \"generation\": \"1755811928351810\",\n" + + " \"id\": \"some-bucket/some-name/1755811928351810\",\n" + + " \"md5Hash\": \"1B2M2Y8AsgTpgAmY7PhCfg\\u003d\\u003d\",\n" + + " \"mediaLink\":" + + " \"https://storage.googleapis.com/download/storage/v1/b/some-bucket/o/some-name?generation\\u003d1755811928351810\\u0026alt\\u003dmedia\",\n" + + " \"metadata\": {\n" + + " \"k1\": \"\"\n" + + " },\n" + + " \"metageneration\": \"1\",\n" + + " \"name\": \"some-name\",\n" + + " \"selfLink\": \"https://www.googleapis.com/storage/v1/b/some-bucket/o/some-name\",\n" + + " \"storageClass\": \"STANDARD\",\n" + + " \"contexts\": {\n" + + " \"custom\": {\n" + + " \"k2\": null,\n" + + " \"k3\": {\n" + + " \"value\": \"glavin\"\n" + + " }\n" + + " }\n" + + " }\n" + + "}"; +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/LazyReadChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/LazyReadChannelTest.java new file mode 100644 index 000000000000..36c55b5390cd --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/LazyReadChannelTest.java @@ -0,0 +1,137 @@ +/* + * 
package com.google.cloud.storage;

import static com.google.common.truth.Truth.assertThat;

import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.cloud.storage.BufferedReadableByteChannelSession.BufferedReadableByteChannel;
import com.google.common.base.MoreObjects;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.util.Locale;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Test;

/**
 * Tests for {@code LazyReadChannel}: the underlying session/channel must be created at most once
 * (memoized), created only on first use, and the lazy wrapper's open state must track the
 * underlying channel's.
 *
 * <p>NOTE(review): inline generic type arguments appear to have been stripped from this snippet in
 * transit (e.g. {@code LazyReadChannel}, {@code ApiFuture}); raw types are retained as-is.
 */
public final class LazyReadChannelTest {

  // Used to give each fabricated session a distinct label ("test-01", "test-02", ...).
  private final AtomicInteger counter = new AtomicInteger(1);

  @Test
  public void repeatedCallsOfGetSessionMustReturnTheSameInstance() {
    LazyReadChannel lrc =
        new LazyReadChannel<>(this::newTestSession);

    ReadableByteChannelSession session1 = lrc.getSession();
    ReadableByteChannelSession session2 = lrc.getSession();
    assertThat(session1).isSameInstanceAs(session2);
  }

  @Test
  public void repeatedCallsOfGetChannelMustReturnTheSameInstance() {
    LazyReadChannel lrc =
        new LazyReadChannel<>(this::newTestSession);

    BufferedReadableByteChannel channel1 = lrc.getChannel();
    BufferedReadableByteChannel channel2 = lrc.getChannel();
    assertThat(channel1).isSameInstanceAs(channel2);
  }

  @Test
  public void isNotOpenUntilGetChannelIsCalled() {
    LazyReadChannel lrc =
        new LazyReadChannel<>(this::newTestSession);

    // laziness: nothing is open before getChannel() forces creation.
    assertThat(lrc.isOpen()).isFalse();
    BufferedReadableByteChannel channel = lrc.getChannel();
    assertThat(channel.isOpen()).isTrue();

    assertThat(lrc.isOpen()).isTrue();
  }

  @Test
  public void closingUnderlyingChannelClosesTheLazyReadChannel() throws IOException {
    LazyReadChannel lrc =
        new LazyReadChannel<>(this::newTestSession);

    BufferedReadableByteChannel channel = lrc.getChannel();
    assertThat(channel.isOpen()).isTrue();
    channel.close();
    // open state is delegated: closing the underlying channel closes the lazy wrapper too.
    assertThat(lrc.isOpen()).isFalse();
  }

  /** Supplier handed to {@code LazyReadChannel}; each call fabricates a fresh labeled session. */
  private TestSession newTestSession() {
    return new TestSession(String.format(Locale.US, "test-%02d", counter.getAndIncrement()));
  }

  /** Stub session whose channel and result futures are already complete. */
  private static final class TestSession implements BufferedReadableByteChannelSession {

    private final String s;
    private final ApiFuture channel;
    private final ApiFuture result;

    private TestSession(String s) {
      this.s = s;
      this.channel = ApiFutures.immediateFuture(new TestChannel());
      this.result = ApiFutures.immediateFuture(s);
    }

    @Override
    public ApiFuture openAsync() {
      return channel;
    }

    @Override
    public ApiFuture getResult() {
      return result;
    }

    @Override
    public String toString() {
      return MoreObjects.toStringHelper(this)
          .add("s", s)
          .add("channel", channel)
          .add("result", result)
          .toString();
    }
  }

  /** Stub channel: reads 0 bytes while open, throws once closed, tracks open state in a flag. */
  private static final class TestChannel implements BufferedReadableByteChannel {

    boolean open = true;

    @Override
    public int read(ByteBuffer dst) throws IOException {
      if (!open) {
        throw new ClosedChannelException();
      }
      return 0;
    }

    @Override
    public boolean isOpen() {
      return open;
    }

    @Override
    public void close() {
      open = false;
    }
  }
}
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/MathsTest.java @@ -0,0 +1,46 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.Maths.sub; +import static com.google.common.truth.Truth.assertThat; + +import org.junit.Test; + +@SuppressWarnings("ConstantConditions") +public final class MathsTest { + + @Test + public void sub_bothNull() { + assertThat(sub((Long) null, null)).isNull(); + } + + @Test + public void sub_lNull_rNonNull() { + assertThat(sub(null, 3L)).isNull(); + } + + @Test + public void sub_lNonNull_rNull() { + assertThat(sub(3L, null)).isEqualTo(3L); + } + + @Test + public void sub_bothNonNull() { + assertThat(sub(5L, 3L)).isEqualTo(2L); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/MetadataFieldTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/MetadataFieldTest.java new file mode 100644 index 000000000000..9f7a3f4c1f8f --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/MetadataFieldTest.java @@ -0,0 +1,142 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
package com.google.cloud.storage;

import static com.google.common.truth.Truth.assertThat;

import com.google.cloud.storage.MetadataField.PartRange;
import com.google.common.collect.ImmutableMap;
import java.util.function.Consumer;
import org.junit.Test;

/**
 * Tests for {@code MetadataField}: encoding typed values ({@code Long}, {@code String},
 * {@link PartRange}) into a string-to-string metadata map and decoding them back, including
 * reading directly from a {@link BlobInfo}'s metadata.
 *
 * <p>NOTE(review): inline generic type arguments appear to have been stripped from this snippet in
 * transit (e.g. {@code MetadataField}, {@code ImmutableMap}); raw types are retained as-is, except
 * for {@code mapFrom}'s parameter where the visible text was syntactically broken and the type had
 * to be reconstructed — confirm against the original source.
 */
public final class MetadataFieldTest {

  @Test
  public void appendTo_long_encode_works() {
    // A long field is written as its decimal string representation.
    MetadataField l = MetadataField.forLong("long");
    ImmutableMap map = mapFrom(b -> l.appendTo(37L, b));
    assertThat(map).containsEntry("long", "37");
  }

  @Test
  public void readFrom_long_decode_null() {
    // Absent key decodes to null rather than throwing.
    ImmutableMap map = ImmutableMap.of();
    MetadataField l = MetadataField.forLong("long");
    Long read = l.readFrom(map);
    assertThat(read).isNull();
  }

  @Test
  public void readFrom_long_decode_nonNull() {
    ImmutableMap map = ImmutableMap.of("long", "37");
    MetadataField l = MetadataField.forLong("long");
    Long read = l.readFrom(map);
    assertThat(read).isNotNull();
    assertThat(read).isEqualTo(37L);
  }

  @Test
  public void readFrom_long_decode_blobInfo_null() {
    // A BlobInfo with no metadata decodes to null as well.
    BlobInfo info = BlobInfo.newBuilder("b", "o").build();
    MetadataField l = MetadataField.forLong("long");
    Long read = l.readFrom(info);
    assertThat(read).isNull();
  }

  @Test
  public void readFrom_long_decode_blobInfo_nonNull() {
    ImmutableMap map = ImmutableMap.of("long", "37");
    BlobInfo info = BlobInfo.newBuilder("b", "o").setMetadata(map).build();
    MetadataField l = MetadataField.forLong("long");
    Long read = l.readFrom(info);
    assertThat(read).isNotNull();
    assertThat(read).isEqualTo(37L);
  }

  @Test
  public void appendTo_string_encode_works() {
    // String fields are stored verbatim.
    MetadataField l = MetadataField.forString("string");
    ImmutableMap map = mapFrom(b -> l.appendTo("blah", b));
    assertThat(map).containsEntry("string", "blah");
  }

  @Test
  public void readFrom_string_decode_null() {
    ImmutableMap map = ImmutableMap.of();
    MetadataField l = MetadataField.forString("string");
    String read = l.readFrom(map);
    assertThat(read).isNull();
  }

  @Test
  public void readFrom_string_decode_nonNull() {
    ImmutableMap map = ImmutableMap.of("string", "blah");
    MetadataField l = MetadataField.forString("string");
    String read = l.readFrom(map);
    assertThat(read).isNotNull();
    assertThat(read).isEqualTo("blah");
  }

  @Test
  public void appendTo_partRange_encode_works() {
    // A single-value range encodes as "begin-end" with each side zero-padded to 4 digits.
    MetadataField l = MetadataField.forPartRange("partRange");
    ImmutableMap map = mapFrom(b -> l.appendTo(PartRange.of(37L), b));
    assertThat(map).containsEntry("partRange", "0037-0037");
  }

  @Test
  public void readFrom_partRange_decode_null() {
    ImmutableMap map = ImmutableMap.of();
    MetadataField l = MetadataField.forPartRange("partRange");
    PartRange read = l.readFrom(map);
    assertThat(read).isNull();
  }

  @Test
  public void readFrom_partRange_decode_nonNull() {
    ImmutableMap map = ImmutableMap.of("partRange", "0037-0037");
    MetadataField l = MetadataField.forPartRange("partRange");
    PartRange read = l.readFrom(map);
    assertThat(read).isNotNull();
    assertThat(read).isEqualTo(PartRange.of(37L));
  }

  @Test
  public void partRange_handlesNumbersWithMoreThanFourDigits_encode() {
    // Padding is a minimum width, not a cap: values wider than 4 digits encode in full.

    PartRange r = PartRange.of(0, 123456);

    String encode = r.encode();
    assertThat(encode).isEqualTo("0000-123456");
  }

  @Test
  public void partRange_handlesNumbersWithMoreThanFourDigits_decode() {

    PartRange expected = PartRange.of(0, 123456);

    PartRange decode = PartRange.decode("0000-123456");
    assertThat(decode).isEqualTo(expected);
  }

  /** Runs {@code f} against a fresh map builder and returns the resulting immutable map. */
  private static ImmutableMap mapFrom(
      Consumer<ImmutableMap.Builder<String, String>> f) {
    ImmutableMap.Builder builder = new ImmutableMap.Builder<>();
    f.accept(builder);
    return builder.build();
  }
}
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ChunkSegmenterTest.TestData.fmt; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.fail; + +import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import com.google.cloud.storage.DefaultBufferedWritableByteChannelTest.AuditingBufferHandle; +import com.google.cloud.storage.DefaultBufferedWritableByteChannelTest.CountingWritableByteChannelAdapter; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.WritableByteChannel; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Deque; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import net.jqwik.api.Arbitraries; +import net.jqwik.api.Arbitrary; +import net.jqwik.api.Combinators; +import net.jqwik.api.Example; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; +import net.jqwik.api.Provide; +import net.jqwik.api.providers.TypeUsage; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.Marker; +import org.slf4j.MarkerFactory; + +public final class MinFlushBufferedWritableByteChannelTest { + private static final Logger LOGGER = + 
LoggerFactory.getLogger(MinFlushBufferedWritableByteChannelTest.class); + private static final Marker TRACE_ENTER = MarkerFactory.getMarker("enter"); + private static final Marker TRACE_EXIT = MarkerFactory.getMarker("exit"); + + @Example + void edgeCases() { + JqwikTest.report(TypeUsage.of(WriteOps.class), arbitraryWriteOps()); + } + + @Example + void nonBlockingWrite0DoesNotBlock() throws IOException { + BufferHandle handle = BufferHandle.allocate(5); + MinFlushBufferedWritableByteChannel c = + new MinFlushBufferedWritableByteChannel(handle, new OnlyConsumeNBytes(0, 1), false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_8_3 = ByteBuffer.wrap(all.slice(0, 3).getBytes()); + int written1 = c.write(s_0_4); + assertThat(written1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(0); + assertThat(s_4_4.remaining()).isEqualTo(4); + + int written3 = c.write(s_8_3); + assertThat(written3).isEqualTo(0); + assertThat(s_8_3.remaining()).isEqualTo(3); + + assertThat(handle.remaining()).isEqualTo(1); + } + + @Example + void nonBlockingWritePartialDoesNotBlock() throws IOException { + BufferHandle handle = BufferHandle.allocate(5); + MinFlushBufferedWritableByteChannel c = + new MinFlushBufferedWritableByteChannel(handle, new OnlyConsumeNBytes(6, 5), false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + int written1 = c.write(s_0_4); + assertThat(written1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(1); + + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(1); + assertThat(s_4_4.remaining()).isEqualTo(3); + 
assertThat(handle.remaining()).isEqualTo(5); + } + + @Example + void illegalStateExceptionIfWrittenLt0() throws IOException { + BufferHandle handle = BufferHandle.allocate(4); + MinFlushBufferedWritableByteChannel c = + new MinFlushBufferedWritableByteChannel( + handle, + new UnbufferedWritableByteChannel() { + @Override + public long write(ByteBuffer[] srcs, int offset, int length) { + return -1; + } + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() {} + }); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + assertThrows(IllegalStateException.class, () -> c.write(s_0_4)); + } + + @Property + void bufferingEagerlyFlushesWhenFull(@ForAll("WriteOps") WriteOps writeOps) throws IOException { + ByteBuffer buffer = ByteBuffer.allocate(writeOps.bufferSize); + AuditingBufferHandle handle = new AuditingBufferHandle(BufferHandle.handleOf(buffer)); + try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingWritableByteChannelAdapter adapter = + new CountingWritableByteChannelAdapter(Channels.newChannel(baos)); + BufferedWritableByteChannel c = new MinFlushBufferedWritableByteChannel(handle, adapter)) { + + List actualWriteSizes = new ArrayList<>(); + + for (ByteBuffer buf : writeOps.writes) { + int write = c.write(buf); + actualWriteSizes.add(write); + } + + c.close(); + assertThrows(ClosedChannelException.class, () -> c.write(null)); + + assertWithMessage("Unexpected write size") + .that(actualWriteSizes) + .isEqualTo(writeOps.writeSizes); + assertWithMessage("Unexpected total flushed length") + .that(adapter.writeEndPoints) + .isEqualTo(writeOps.expectedFlushes); + assertThat(baos.toByteArray()).isEqualTo(writeOps.bytes); + } + } + + /** + * Scenario A: + * + *

   * <pre>
   * Data size, and write size are smaller than buffer size
   * </pre>
   */
  @Example
  void scenario_a() throws IOException {
    bufferingEagerlyFlushesWhenFull(WriteOps.of(1, 2, 1));
  }

  /** Scenario B: Data size and buffer size are equal, while write size may be larger than both */
  @Example
  void scenario_b() throws IOException {
    bufferingEagerlyFlushesWhenFull(WriteOps.of(1, 1, 2));
  }

  /**
   * Scenario C:
   *
   * <ul>
   *   <li>data size is evenly divisible by buffer size and write size
   *   <li>buffer size is larger than write size
   *   <li>buffer size is not evenly divisible by write size
   * </ul>
   */
  @Example
  void scenario_c() throws IOException {
    bufferingEagerlyFlushesWhenFull(WriteOps.of(105, 15, 7));
  }

  /**
   * Scenario D:
   *
   * <ul>
   *   <li>write and buffer size are smaller than data
   *   <li>data size is not evenly divisible by either write size nor buffer size
   *   <li>buffer size is smaller than write size
   *   <li>write size is not evenly divisible by buffer size
   * </ul>
   */
  @Example
  void scenario_d() throws IOException {
    bufferingEagerlyFlushesWhenFull(WriteOps.of(61, 3, 16));
  }

  /**
   * Scenario E:
   *
   *

Some flushes are only partially consumed. Ensure we proceed with consuming the buffer + * provided to {@code write} + * + *

+   *           0                        27
+   * data:    |--------------------------|
+   *               5       14 17        27
+   * writes:  |----|--------|--|---------|
+   *                       14
+   * flush 1: |-------------|
+   *            2          14
+   * flush 2:   |-----------|
+   *                        15     21
+   * flush 3:                |------|
+   *                                21    27
+   * flush 4:                       |------|
+   * 
+ */ + @Example + void partialFlushOfEnqueuedBytesFlushesMultipleTimes() throws IOException { + ByteBuffer data1 = DataGenerator.base64Characters().genByteBuffer(5); + ByteBuffer data2 = DataGenerator.base64Characters().genByteBuffer(9); + ByteBuffer data3 = DataGenerator.base64Characters().genByteBuffer(3); + ByteBuffer data4 = DataGenerator.base64Characters().genByteBuffer(10); + + ImmutableList buffers = ImmutableList.of(data1, data2, data3, data4); + + int allDataSize = buffers.stream().mapToInt(ByteBuffer::remaining).sum(); + byte[] allData = + buffers.stream().reduce(ByteBuffer.allocate(allDataSize), ByteBuffer::put).array(); + buffers.forEach(b -> b.position(0)); + + AuditingBufferHandle handle = new AuditingBufferHandle(BufferHandle.allocate(10)); + try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingWritableByteChannelAdapter adapter = + new CountingWritableByteChannelAdapter(Channels.newChannel(baos)); + BufferedWritableByteChannel c = new MinFlushBufferedWritableByteChannel(handle, adapter)) { + + c.write(data1); // write 5 bytes, which should enqueue in full + // before the next write, limit the number of bytes the underlying channel will consume to 2. + adapter.nextWriteMaxConsumptionLimit = 2L; + // write 9 bytes, which should trigger a flush - limited to 2 bytes, leaving 3 bytes in the + // buffer and not consuming any of the 9 bytes. Since 3 + 9 is still larger than our buffer + // attempt another flush of 12 bytes which will all be consumed. + c.write(data2); + + // write 3 bytes, which should enqueue in full + c.write(data3); + // before the next write, limit the number of bytes the underlying channel will consume to 7. + adapter.nextWriteMaxConsumptionLimit = 7L; + // write 10 bytes, which should trigger a flush, consuming all the buffer, but only consuming + // 4 bytes written data. The remaining 6 bytes should be enqueued in full. 
+ c.write(data4); + + // close the channel, causing a flush of the 6 outstanding bytes in buffer. + c.close(); + assertThrows(ClosedChannelException.class, () -> c.write(null)); + + assertWithMessage("Unexpected total flushed length") + .that(adapter.writeEndPoints) + .isEqualTo(ImmutableList.of(2L, 14L, 21L, 27L)); + assertThat(baos.toByteArray()).isEqualTo(allData); + } + } + + /** + * Ensure manually calling flush works. + * + *
+   *           0         12
+   * data:    |-----------|
+   *             3  6  9
+   * writes:  |--|--|--|--|
+   *             3
+   * flush 1: |--|
+   *             3  6
+   * flush 2:    |--|
+   *               5   10
+   * flush 3:      |----|
+   *                   10 12
+   * flush 4:           |-|
+   * 
+ */ + @Example + void manualFlushingIsAccurate() throws IOException { + ByteBuffer data1 = DataGenerator.base64Characters().genByteBuffer(3); + ByteBuffer data2 = DataGenerator.base64Characters().genByteBuffer(3); + ByteBuffer data3 = DataGenerator.base64Characters().genByteBuffer(3); + ByteBuffer data4 = DataGenerator.base64Characters().genByteBuffer(3); + + ImmutableList buffers = ImmutableList.of(data1, data2, data3, data4); + + int allDataSize = buffers.stream().mapToInt(ByteBuffer::remaining).sum(); + byte[] allData = + buffers.stream().reduce(ByteBuffer.allocate(allDataSize), ByteBuffer::put).array(); + buffers.forEach(b -> b.position(0)); + + AuditingBufferHandle handle = new AuditingBufferHandle(BufferHandle.allocate(5)); + try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingWritableByteChannelAdapter adapter = + new CountingWritableByteChannelAdapter(Channels.newChannel(baos)); + BufferedWritableByteChannel c = new MinFlushBufferedWritableByteChannel(handle, adapter)) { + + c.write(data1); // write 3 bytes, which should enqueue in full + c.flush(); // flush all enqueued bytes + + c.write(data2); // write 3 bytes, which should enqueue in full + // before we call flush, limit how many bytes the underlying channel will consume to 2. 
+ // This should leave 1 byte in the buffer + adapter.nextWriteMaxConsumptionLimit = 2L; + c.flush(); // attempt to flush all enqueued bytes, however only 2 of the 3 will be consumed + c.write(data3); // write 3 bytes, which should enqueue in full + // after this write, our buffer should contain 4 bytes of its 5 byte capacity + c.write(data4); // all bytes from buffer and data4 should be flushed + + c.close(); // close the channel, nothing should be enqueued so we do not expect another flush + assertThrows(ClosedChannelException.class, () -> c.write(null)); + + assertWithMessage("Unexpected total flushed length") + .that(adapter.writeEndPoints) + .isEqualTo(ImmutableList.of(3L, 5L, 6L, 12L)); + assertThat(baos.toByteArray()).isEqualTo(allData); + } + } + + @Provide("WriteOps") + static Arbitrary arbitraryWriteOps() { + return Combinators.combine( + Arbitraries.integers().between(1, 256 * 1024), + Arbitraries.integers().between(1, 16 * 1024), + Arbitraries.integers().between(1, 64 * 1024)) + .as(WriteOps::of); + } + + /** + * + * + *
+   *           0                                                                                                     105
+   * data:    |--------------------------------------------------------------------------------------------------------|
+   *                 7     14     21     28     35     42     49     56     63     70     77     84     91     98    105
+   * writes:  |------|------|------|------|------|------|------|------|------|------|------|------|------|------|------|
+   *                              21                   42                   63                   84                  105
+   * flushes: |--------------------|--------------------|--------------------|--------------------|--------------------|
+   * 
+ */ + @Example + void writeOpsOfGeneratesAccurately_1() { + int dataSize = 105; + int bufferSize = 15; + int writeSize = 7; + + byte[] bytes = DataGenerator.base64Characters().genBytes(dataSize); + ImmutableList writes = + ImmutableList.of( + ByteBuffer.wrap(bytes, 0, writeSize), + ByteBuffer.wrap(bytes, 7, writeSize), + ByteBuffer.wrap(bytes, 14, writeSize), + ByteBuffer.wrap(bytes, 21, writeSize), + ByteBuffer.wrap(bytes, 28, writeSize), + ByteBuffer.wrap(bytes, 35, writeSize), + ByteBuffer.wrap(bytes, 42, writeSize), + ByteBuffer.wrap(bytes, 49, writeSize), + ByteBuffer.wrap(bytes, 56, writeSize), + ByteBuffer.wrap(bytes, 63, writeSize), + ByteBuffer.wrap(bytes, 70, writeSize), + ByteBuffer.wrap(bytes, 77, writeSize), + ByteBuffer.wrap(bytes, 84, writeSize), + ByteBuffer.wrap(bytes, 91, writeSize), + ByteBuffer.wrap(bytes, 98, writeSize)); + ImmutableList flushes = ImmutableList.of(21L, 42L, 63L, 84L, 105L); + String z = "[0x00000007 * 0x0000000f]"; + WriteOps expected = new WriteOps(bytes, bufferSize, writeSize, writes, flushes, z); + assertThat(WriteOps.of(dataSize, bufferSize, writeSize)).isEqualTo(expected); + } + + /** + * + * + *
+   *           0                                                          61
+   * data:    |------------------------------------------------------------|
+   *                         16         (16) 32         (16) 48      (13) 61
+   * writes:  |---------------|---------------|---------------|------------|
+   *                         16              32              48           61
+   * flushes: |---------------|---------------|---------------|------------|
+   * 
+ */ + @Example + void writeOpsOfGeneratesAccurately_2() { + int dataSize = 61; + int bufferSize = 3; + int writeSize = 16; + byte[] bytes = DataGenerator.base64Characters().genBytes(dataSize); + ImmutableList writes = + ImmutableList.of( + ByteBuffer.wrap(bytes, 0, writeSize), + ByteBuffer.wrap(bytes, 16, writeSize), + ByteBuffer.wrap(bytes, 32, writeSize), + ByteBuffer.wrap(bytes, 48, 13)); + ImmutableList flushes = ImmutableList.of(16L, 32L, 48L, 61L); + String z = "[0x00000010 * 0x00000003, 0x0000000d]"; + WriteOps expected = new WriteOps(bytes, bufferSize, writeSize, writes, flushes, z); + WriteOps actual = WriteOps.of(dataSize, bufferSize, writeSize); + assertThat(actual).isEqualTo(expected); + } + + @Example + @SuppressWarnings("JUnit5AssertionsConverter") + void callingCloseWithBufferedDataShouldCallWriteAndClose() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + + AtomicBoolean closed = new AtomicBoolean(false); + UnbufferedWritableByteChannel delegate = + new UnbufferedWritableByteChannel() { + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + fail("unexpected write(ByteBuffer[], int, int) call"); + return 0; + } + + @Override + public long writeAndClose(ByteBuffer[] srcs, int offset, int length) throws IOException { + long total = 0; + try (WritableByteChannel out = Channels.newChannel(baos)) { + for (ByteBuffer src : srcs) { + total += out.write(src); + } + } + closed.compareAndSet(false, true); + return total; + } + + @Override + public boolean isOpen() { + return !closed.get(); + } + + @Override + public void close() throws IOException { + fail("unexpected close() call"); + } + }; + BufferedWritableByteChannel test = + new MinFlushBufferedWritableByteChannel(BufferHandle.allocate(20), delegate); + + byte[] bytes = DataGenerator.base64Characters().genBytes(10); + String expected = xxd(bytes); + + int write = test.write(ByteBuffer.wrap(bytes)); + 
assertThat(write).isEqualTo(10); + + assertThat(closed.get()).isFalse(); + + test.close(); + + String actual = xxd(baos.toByteArray()); + assertThat(actual).isEqualTo(expected); + assertThat(closed.get()).isTrue(); + } + + @Property + void bufferAllocationShouldOnlyHappenWhenNeeded(@ForAll("BufferSizes") WriteOps writeOps) + throws IOException { + AuditingBufferHandle handle = + new AuditingBufferHandle(BufferHandle.allocate(writeOps.bufferSize)); + try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingWritableByteChannelAdapter adapter = + new CountingWritableByteChannelAdapter(Channels.newChannel(baos)); + BufferedWritableByteChannel c = new MinFlushBufferedWritableByteChannel(handle, adapter)) { + + for (ByteBuffer buf : writeOps.writes) { + c.write(buf); + } + } + + // if our write size is evenly divisible by our buffer size AND our buffer size is smaller + // than the total data size we expect to never allocate a buffer + if (writeOps.writeSize % writeOps.bufferSize == 0 + && writeOps.bufferSize <= writeOps.bytes.length) { + assertThat(handle.getCallCount).isEqualTo(0); + } else { + assertThat(handle.getCallCount).isGreaterThan(0); + } + } + + @Provide("BufferSizes") + static Arbitrary arbitraryBufferSizes() { + return Arbitraries.of( + // expect no allocation + WriteOps.of(32, 4, 16), + WriteOps.of(32, 16, 16), + WriteOps.of(32, 32, 32), + // expect allocation + WriteOps.of(32, 33, 32), + WriteOps.of(32, 64, 4)); + } + + private static final class WriteOps { + private final byte[] bytes; + private final int bufferSize; + private final int writeSize; + private final ImmutableList writeSizes; + private final ImmutableList writes; + private final ImmutableList expectedFlushes; + private final String dbgExpectedWriteSizes; + + public WriteOps( + byte[] bytes, + int bufferSize, + int writeSize, + ImmutableList writes, + ImmutableList expectedFlushes, + String dbgExpectedWriteSizes) { + this.bytes = bytes; + this.bufferSize = bufferSize; + 
this.writeSize = writeSize; + this.writeSizes = + writes.stream().map(ByteBuffer::remaining).collect(ImmutableList.toImmutableList()); + this.writes = writes; + this.expectedFlushes = expectedFlushes; + this.dbgExpectedWriteSizes = dbgExpectedWriteSizes; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof WriteOps)) { + return false; + } + WriteOps writeOps = (WriteOps) o; + return bufferSize == writeOps.bufferSize + && writeSize == writeOps.writeSize + && Arrays.equals(bytes, writeOps.bytes) + && Objects.equals(writes, writeOps.writes) + && Objects.equals(expectedFlushes, writeOps.expectedFlushes) + && Objects.equals(dbgExpectedWriteSizes, writeOps.dbgExpectedWriteSizes); + } + + @Override + public int hashCode() { + int result = + Objects.hash(bufferSize, writeSize, writes, expectedFlushes, dbgExpectedWriteSizes); + result = 31 * result + Arrays.hashCode(bytes); + return result; + } + + @Override + public String toString() { + return "[WriteOps.of(" + + fmt(bytes.length) + + ", " + + fmt(bufferSize) + + ", " + + fmt(writeSize) + + ")] WriteOps{" + + "bytes.length=" + + fmt(bytes.length) + + ", bufferSize=" + + fmt(bufferSize) + + ", writeSize=" + + fmt(writeSize) + + ", writes.size()=" + + fmt(writes.size()) + + ", expectedFlushes.size()=" + + fmt(expectedFlushes.size()) + + ", expectedWriteSizes=" + + dbgExpectedWriteSizes + + '}'; + } + + @NonNull + static WriteOps of(int numBytes, int bufferSize, int writeSize) { + byte[] bytes = DataGenerator.base64Characters().genBytes(numBytes); + + List writes = new ArrayList<>(); + Deque expectedFlushes = new ArrayDeque<>(); + + int length = bytes.length; + + int fullWriteCount = 0; + int remainingWrite = 0; + int prevWriteEndOffset = 0; + for (int i = 1; i <= length; i++) { + boolean writeBoundary = (i % writeSize == 0) || writeSize == 1; + boolean eof = i == length; + + if (writeBoundary) { + long lastFlush = 
Optional.ofNullable(expectedFlushes.peekLast()).orElse(0L); + long sinceLastFlush = i - lastFlush; + if (sinceLastFlush >= bufferSize) { + expectedFlushes.addLast((long) i); + } + writes.add(ByteBuffer.wrap(bytes, prevWriteEndOffset, writeSize)); + fullWriteCount++; + prevWriteEndOffset += writeSize; + } + + if (eof) { + // We expect a flush during close in the following scenarios: + // the buffer size is larger than our data size (peekLast == null) + // data size is not evenly divisible by bufferSize + if (expectedFlushes.peekLast() == null || expectedFlushes.peekLast() != length) { + expectedFlushes.addLast((long) length); + } + + // If the data size is not evenly divisible by writeSize we will have an extra + // smaller write + if (prevWriteEndOffset != length) { + int writeLen = Math.min(length - prevWriteEndOffset, writeSize); + writes.add(ByteBuffer.wrap(bytes, prevWriteEndOffset, writeLen)); + remainingWrite = writeLen; + prevWriteEndOffset += writeLen; + } + } + } + + String dbgExpectedWriteSizes; + if (fullWriteCount > 0 && remainingWrite > 0) { + dbgExpectedWriteSizes = + String.format( + Locale.US, + "[%s * %s, %s]", + fmt(writeSize), + fmt(fullWriteCount), + fmt(remainingWrite)); + } else if (remainingWrite > 0) { + dbgExpectedWriteSizes = String.format(Locale.US, "[%s]", fmt(remainingWrite)); + } else { + dbgExpectedWriteSizes = + String.format(Locale.US, "[%s * %s]", fmt(writeSize), fmt(fullWriteCount)); + } + return new WriteOps( + bytes, + bufferSize, + writeSize, + ImmutableList.copyOf(writes), + ImmutableList.copyOf(expectedFlushes), + dbgExpectedWriteSizes); + } + } + + static final class OnlyConsumeNBytes implements UnbufferedWritableByteChannel { + private static final Logger LOGGER = LoggerFactory.getLogger(OnlyConsumeNBytes.class); + private final long bytesToConsume; + private final int consumptionIncrement; + private long bytesConsumed; + + OnlyConsumeNBytes(int bytesToConsume, int consumptionIncrement) { + this.bytesToConsume = 
bytesToConsume; + this.consumptionIncrement = consumptionIncrement; + this.bytesConsumed = 0; + } + + long getBytesConsumed() { + return bytesConsumed; + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) { + LOGGER.info(TRACE_ENTER, "write(srcs : {}, offset : {}, length : {})", srcs, offset, length); + try { + if (bytesConsumed >= bytesToConsume) { + return 0; + } + + long consumed = 0; + int toConsume = consumptionIncrement; + for (int i = offset; i < length && toConsume > 0; i++) { + ByteBuffer src = srcs[i]; + int remaining = src.remaining(); + int position = src.position(); + int consumable = Math.min(toConsume, remaining); + toConsume -= consumable; + consumed += consumable; + src.position(position + consumable); + } + bytesConsumed += consumed; + return consumed; + } finally { + LOGGER.info(TRACE_EXIT, "write(srcs : {}, offset : {}, length : {})", srcs, offset, length); + } + } + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() {} + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("bytesToConsume", bytesToConsume) + .add("consumptionIncrement", consumptionIncrement) + .add("bytesConsumed", bytesConsumed) + .toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/NotificationInfoTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/NotificationInfoTest.java new file mode 100644 index 000000000000..d60bd07614a7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/NotificationInfoTest.java @@ -0,0 +1,117 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.cloud.storage.NotificationInfo.EventType; +import com.google.cloud.storage.NotificationInfo.PayloadFormat; +import com.google.common.collect.ImmutableMap; +import java.util.Arrays; +import java.util.Map; +import org.junit.Test; + +public class NotificationInfoTest { + private static final String ETAG = "0xFF00"; + private static final String SELF_LINK = "http://storage/b/n"; + private static final String OBJECT_NAME_PREFIX = "index.html"; + private static final String TOPIC = "projects/myProject/topics/topic1"; + private static final Map CUSTOM_ATTRIBUTES = ImmutableMap.of("label1", "value1"); + private static final PayloadFormat PAYLOAD_FORMAT = PayloadFormat.JSON_API_V1.JSON_API_V1; + private static final EventType[] EVENT_TYPES = { + EventType.OBJECT_FINALIZE, EventType.OBJECT_METADATA_UPDATE + }; + private static final NotificationInfo NOTIFICATION_INFO = + NotificationInfo.newBuilder(TOPIC) + .setEtag(ETAG) + .setCustomAttributes(CUSTOM_ATTRIBUTES) + .setSelfLink(SELF_LINK) + .setEventTypes(EVENT_TYPES) + .setObjectNamePrefix(OBJECT_NAME_PREFIX) + .setPayloadFormat(PAYLOAD_FORMAT) + .build(); + + @Test + public void testToBuilder() { + compareBucketsNotification(NOTIFICATION_INFO, NOTIFICATION_INFO.toBuilder().build()); + NotificationInfo notificationInfo = NOTIFICATION_INFO.toBuilder().setTopic(TOPIC).build(); + assertEquals(TOPIC, notificationInfo.getTopic()); + notificationInfo = 
notificationInfo.toBuilder().setTopic(TOPIC).build(); + compareBucketsNotification(NOTIFICATION_INFO, notificationInfo); + } + + @Test + public void testToBuilderIncomplete() { + NotificationInfo incompleteNotificationInfo = Notification.newBuilder(TOPIC).build(); + compareBucketsNotification( + incompleteNotificationInfo, incompleteNotificationInfo.toBuilder().build()); + } + + @Test + public void testOf() { + NotificationInfo notificationInfo = NotificationInfo.of(TOPIC); + assertEquals(TOPIC, notificationInfo.getTopic()); + assertNull(notificationInfo.getNotificationId()); + assertNull(notificationInfo.getCustomAttributes()); + assertNull(notificationInfo.getEtag()); + assertNull(notificationInfo.getSelfLink()); + assertNull(notificationInfo.getEventTypes()); + assertNull(notificationInfo.getObjectNamePrefix()); + assertNull(notificationInfo.getPayloadFormat()); + } + + @Test + public void testBuilder() { + assertEquals(ETAG, NOTIFICATION_INFO.getEtag()); + assertNull(NOTIFICATION_INFO.getNotificationId()); + assertEquals(SELF_LINK, NOTIFICATION_INFO.getSelfLink()); + assertEquals(OBJECT_NAME_PREFIX, NOTIFICATION_INFO.getObjectNamePrefix()); + assertEquals(PAYLOAD_FORMAT, NOTIFICATION_INFO.getPayloadFormat()); + assertEquals(TOPIC, NOTIFICATION_INFO.getTopic()); + assertEquals(CUSTOM_ATTRIBUTES, NOTIFICATION_INFO.getCustomAttributes()); + assertEquals(Arrays.asList(EVENT_TYPES), NOTIFICATION_INFO.getEventTypes()); + } + + @Test + public void testToPbAndFromPb() { + compareBucketsNotification( + NOTIFICATION_INFO, + Conversions.json() + .notificationInfo() + .decode(Conversions.json().notificationInfo().encode(NOTIFICATION_INFO))); + NotificationInfo notificationInfo = + NotificationInfo.of(TOPIC).toBuilder().setPayloadFormat(PayloadFormat.NONE).build(); + compareBucketsNotification( + notificationInfo, + Conversions.json() + .notificationInfo() + .decode(Conversions.json().notificationInfo().encode(notificationInfo))); + } + + private void 
compareBucketsNotification(NotificationInfo expected, NotificationInfo actual) { + assertEquals(expected, actual); + assertEquals(expected.getNotificationId(), actual.getNotificationId()); + assertEquals(expected.getCustomAttributes(), actual.getCustomAttributes()); + assertEquals(expected.getEtag(), actual.getEtag()); + assertEquals(expected.getSelfLink(), actual.getSelfLink()); + assertEquals(expected.getEventTypes(), actual.getEventTypes()); + assertEquals(expected.getObjectNamePrefix(), actual.getObjectNamePrefix()); + assertEquals(expected.getPayloadFormat(), actual.getPayloadFormat()); + assertEquals(expected.getTopic(), actual.getTopic()); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectOptExtractorTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectOptExtractorTest.java new file mode 100644 index 000000000000..06ada97b5c67 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectOptExtractorTest.java @@ -0,0 +1,204 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.storage.UnifiedOpts.Crc32cMatchExtractor; +import com.google.cloud.storage.UnifiedOpts.GenerationMatch; +import com.google.cloud.storage.UnifiedOpts.GenerationMatchExtractor; +import com.google.cloud.storage.UnifiedOpts.GenerationNotMatch; +import com.google.cloud.storage.UnifiedOpts.GenerationNotMatchExtractor; +import com.google.cloud.storage.UnifiedOpts.Md5MatchExtractor; +import com.google.cloud.storage.UnifiedOpts.MetagenerationMatchExtractor; +import com.google.cloud.storage.UnifiedOpts.MetagenerationNotMatchExtractor; +import com.google.cloud.storage.UnifiedOpts.NoOpObjectTargetOpt; +import org.junit.Test; +import org.junit.function.ThrowingRunnable; + +public final class ObjectOptExtractorTest { + + @Test + public void generationMatch_success() { + GenerationMatchExtractor ex = UnifiedOpts.generationMatchExtractor(); + BlobId id = BlobId.of("b", "o", 7654L); + BlobInfo info = BlobInfo.newBuilder(id).build(); + + GenerationMatch expected = UnifiedOpts.generationMatch(7654); + assertThat(ex.extractFromBlobId(id)).isEqualTo(expected); + assertThat(ex.extractFromBlobInfo(info)).isEqualTo(expected); + } + + @Test + public void generationMatch_errorOnNull() { + GenerationMatchExtractor ex = UnifiedOpts.generationMatchExtractor(); + BlobId id = BlobId.of("b", "o"); + BlobInfo info = BlobInfo.newBuilder(id).build(); + + assertExceptions( + () -> ex.extractFromBlobId(id), () -> ex.extractFromBlobInfo(info), "ifGenerationMatch"); + } + + @Test + public void generationNotMatch_success() { + GenerationNotMatchExtractor ex = UnifiedOpts.generationNotMatchExtractor(); + BlobId id = BlobId.of("b", "o", 7654L); + BlobInfo info = BlobInfo.newBuilder(id).build(); + + GenerationNotMatch expected = UnifiedOpts.generationNotMatch(7654); + assertThat(ex.extractFromBlobId(id)).isEqualTo(expected); + 
assertThat(ex.extractFromBlobInfo(info)).isEqualTo(expected); + } + + @Test + public void generationNotMatch_errorOnNull() { + GenerationNotMatchExtractor ex = UnifiedOpts.generationNotMatchExtractor(); + BlobId id = BlobId.of("b", "o"); + BlobInfo info = BlobInfo.newBuilder(id).build(); + + assertExceptions( + () -> ex.extractFromBlobId(id), () -> ex.extractFromBlobInfo(info), "ifGenerationNotMatch"); + } + + @Test + public void metagenerationMatch_success_blob() { + MetagenerationMatchExtractor ex = UnifiedOpts.metagenerationMatchExtractor(); + BlobId id = BlobId.of("b", "o"); + BlobInfo info = BlobInfo.newBuilder(id).setMetageneration(3L).build(); + + assertThat(ex.extractFromBlobId(id)).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + assertThat(ex.extractFromBlobInfo(info)).isEqualTo(UnifiedOpts.metagenerationMatch(3)); + } + + @Test + public void metagenerationMatch_errorOnNull_blob() { + MetagenerationMatchExtractor ex = UnifiedOpts.metagenerationMatchExtractor(); + BlobId id = BlobId.of("b", "o"); + BlobInfo info = BlobInfo.newBuilder(id).build(); + + assertThat(ex.extractFromBlobId(id)).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + IllegalArgumentException e1 = + assertThrows(IllegalArgumentException.class, () -> ex.extractFromBlobInfo(info)); + assertThat(e1).hasMessageThat().contains("ifMetagenerationMatch"); + } + + @Test + public void metagenerationNotMatch_success_blob() { + MetagenerationNotMatchExtractor ex = UnifiedOpts.metagenerationNotMatchExtractor(); + BlobId id = BlobId.of("b", "o"); + BlobInfo info = BlobInfo.newBuilder(id).setMetageneration(3L).build(); + + assertThat(ex.extractFromBlobId(id)).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + assertThat(ex.extractFromBlobInfo(info)).isEqualTo(UnifiedOpts.metagenerationNotMatch(3)); + } + + @Test + public void metagenerationNotMatch_errorOnNull_blob() { + MetagenerationNotMatchExtractor ex = UnifiedOpts.metagenerationNotMatchExtractor(); + BlobId id = BlobId.of("b", "o"); + BlobInfo info = 
BlobInfo.newBuilder(id).build(); + + assertThat(ex.extractFromBlobId(id)).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + IllegalArgumentException e1 = + assertThrows(IllegalArgumentException.class, () -> ex.extractFromBlobInfo(info)); + assertThat(e1).hasMessageThat().contains("ifMetagenerationNotMatch"); + } + + @Test + public void metagenerationMatch_success_bucket() { + MetagenerationMatchExtractor ex = UnifiedOpts.metagenerationMatchExtractor(); + BucketInfo info = BucketInfo.newBuilder("b").setMetageneration(3L).build(); + + assertThat(ex.extractFromBucketInfo(info)).isEqualTo(UnifiedOpts.metagenerationMatch(3)); + } + + @Test + public void metagenerationMatch_errorOnNull_bucket() { + MetagenerationMatchExtractor ex = UnifiedOpts.metagenerationMatchExtractor(); + BucketInfo info = BucketInfo.newBuilder("b").build(); + + IllegalArgumentException e1 = + assertThrows(IllegalArgumentException.class, () -> ex.extractFromBucketInfo(info)); + assertThat(e1).hasMessageThat().contains("ifMetagenerationMatch"); + } + + @Test + public void metagenerationNotMatch_success_bucket() { + MetagenerationNotMatchExtractor ex = UnifiedOpts.metagenerationNotMatchExtractor(); + BucketInfo info = BucketInfo.newBuilder("b").setMetageneration(3L).build(); + + assertThat(ex.extractFromBucketInfo(info)).isEqualTo(UnifiedOpts.metagenerationNotMatch(3)); + } + + @Test + public void metagenerationNotMatch_errorOnNull_bucket() { + MetagenerationNotMatchExtractor ex = UnifiedOpts.metagenerationNotMatchExtractor(); + BucketInfo info = BucketInfo.newBuilder("b").build(); + + IllegalArgumentException e1 = + assertThrows(IllegalArgumentException.class, () -> ex.extractFromBucketInfo(info)); + assertThat(e1).hasMessageThat().contains("ifMetagenerationNotMatch"); + } + + @Test + public void md5Match_success() { + Md5MatchExtractor ex = UnifiedOpts.md5MatchExtractor(); + BlobId id = BlobId.of("b", "o"); + BlobInfo info = BlobInfo.newBuilder(id).setMd5("md5").build(); + + 
assertThat(ex.extractFromBlobId(id)).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + assertThat(ex.extractFromBlobInfo(info)).isEqualTo(UnifiedOpts.md5Match("md5")); + } + + @Test + public void md5Match_noOpOnNull() { + Md5MatchExtractor ex = UnifiedOpts.md5MatchExtractor(); + BlobId id = BlobId.of("b", "o"); + BlobInfo info = BlobInfo.newBuilder(id).build(); + + assertThat(ex.extractFromBlobId(id)).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + assertThat(ex.extractFromBlobInfo(info)).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + } + + @Test + public void crc32cMatch_success() { + Crc32cMatchExtractor ex = UnifiedOpts.crc32cMatchExtractor(); + BlobId id = BlobId.of("b", "o"); + BlobInfo info = BlobInfo.newBuilder(id).setCrc32c("crc32c").build(); + + assertThat(ex.extractFromBlobId(id)).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + assertThat(ex.extractFromBlobInfo(info)).isEqualTo(UnifiedOpts.crc32cMatch("crc32c")); + } + + @Test + public void crc32cMatch_noOpOnNull() { + Crc32cMatchExtractor ex = UnifiedOpts.crc32cMatchExtractor(); + BlobId id = BlobId.of("b", "o"); + BlobInfo info = BlobInfo.newBuilder(id).build(); + + assertThat(ex.extractFromBlobId(id)).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + assertThat(ex.extractFromBlobInfo(info)).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + } + + private static void assertExceptions(ThrowingRunnable r1, ThrowingRunnable r2, String contains) { + IllegalArgumentException e1 = assertThrows(IllegalArgumentException.class, r1); + assertThat(e1).hasMessageThat().contains(contains); + IllegalArgumentException e2 = assertThrows(IllegalArgumentException.class, r2); + assertThat(e2).hasMessageThat().contains(contains); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionLifeCycleTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionLifeCycleTest.java new file mode 100644 index 000000000000..2889ebfe51f0 --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionLifeCycleTest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.storage.v2.Object; +import java.io.IOException; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Test; + +public final class ObjectReadSessionLifeCycleTest { + + @Test + public void seekableChannel_closeOnlyExecutesOnce() throws Exception { + + AtomicInteger sessionCloseCount = new AtomicInteger(0); + AtomicInteger closeAlongCount = new AtomicInteger(0); + + ObjectReadSession session = + new ObjectReadSession() { + @Override + public Object getResource() { + return Object.getDefaultInstance(); + } + + @Override + public Projection readAs(ReadProjectionConfig config) { + return null; + } + + @Override + public void close() throws IOException { + sessionCloseCount.getAndIncrement(); + } + }; + ObjectReadSessionSeekableByteChannel channel = + new ObjectReadSessionSeekableByteChannel( + session, + ReadAsSeekableChannel.INSTANCE, + session.andThen(closeAlongCount::getAndIncrement)); + + channel.close(); + channel.close(); + channel.close(); + + assertAll( + () -> assertThat(sessionCloseCount.get()).isEqualTo(1), + () -> 
assertThat(closeAlongCount.get()).isEqualTo(1)); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionStateTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionStateTest.java new file mode 100644 index 000000000000..2714d177c82d --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionStateTest.java @@ -0,0 +1,237 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.AccumulatingRead; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.StreamingRead; +import com.google.cloud.storage.ObjectReadSessionState.OpenArguments; +import com.google.cloud.storage.ObjectReadSessionStreamTest.TestObjectReadSessionStreamRead; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.ByteString; +import com.google.storage.v2.BidiReadHandle; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.BidiReadObjectResponse; +import com.google.storage.v2.BidiReadObjectSpec; +import com.google.storage.v2.CommonObjectRequestParams; +import com.google.storage.v2.Object; +import com.google.storage.v2.ReadRange; +import java.util.List; +import java.util.Map; +import org.junit.Test; + +public final class ObjectReadSessionStateTest { + + @Test + public void getOpenArguments_includesAllRelevantModifications() throws Exception { + BidiReadObjectRequest base = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket("projects/_/buckets/my-bucket") + .setObject("my-object") + .setCommonObjectRequestParams( + CommonObjectRequestParams.newBuilder() + .setEncryptionKeyBytes(ByteString.copyFromUtf8("asdf")) + .setEncryptionAlgorithm("SHA-256") + .setEncryptionKeySha256Bytes(ByteString.copyFromUtf8("FDSA")))) + .build(); + + BidiReadObjectResponse resp = + BidiReadObjectResponse.newBuilder() + .setMetadata( + Object.newBuilder() + .setBucket("projects/_/buckets/my-bucket") + .setName("my-object") + .setGeneration(387) + .setSize(98_765_432)) + .setReadHandle( + 
BidiReadHandle.newBuilder().setHandle(ByteString.copyFromUtf8("read_handle_1"))) + .build(); + + ObjectReadSessionState state = + new ObjectReadSessionState(GrpcCallContext.createDefault(), base); + + state.setMetadata(resp.getMetadata()); + state.setBidiReadHandle(resp.getReadHandle()); + + RetryContext neverRetry = RetryContext.neverRetry(); + SettableApiFuture f1 = SettableApiFuture.create(); + SettableApiFuture f2 = SettableApiFuture.create(); + + AccumulatingRead r1 = + ObjectReadSessionStreamRead.createByteArrayAccumulatingRead( + 1, RangeSpec.of(3, 4), Hasher.enabled(), neverRetry); + AccumulatingRead r2 = + ObjectReadSessionStreamRead.createByteArrayAccumulatingRead( + 2, RangeSpec.of(19, 14), Hasher.enabled(), neverRetry); + + state.putOutstandingRead(1, r1); + state.putOutstandingRead(2, r2); + + OpenArguments expected = + OpenArguments.of( + GrpcCallContext.createDefault() + .withExtraHeaders( + ImmutableMap.of( + "x-goog-request-params", + ImmutableList.of("bucket=projects/_/buckets/my-bucket"))), + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket("projects/_/buckets/my-bucket") + .setObject("my-object") + .setGeneration(387) + .setCommonObjectRequestParams( + CommonObjectRequestParams.newBuilder() + .setEncryptionKeyBytes(ByteString.copyFromUtf8("asdf")) + .setEncryptionAlgorithm("SHA-256") + .setEncryptionKeySha256Bytes(ByteString.copyFromUtf8("FDSA"))) + .setReadHandle( + BidiReadHandle.newBuilder() + .setHandle(ByteString.copyFromUtf8("read_handle_1")))) + .addReadRanges( + ReadRange.newBuilder().setReadId(1).setReadOffset(3).setReadLength(4).build()) + .addReadRanges( + ReadRange.newBuilder().setReadId(2).setReadOffset(19).setReadLength(14).build()) + .build()); + + OpenArguments actual = state.getOpenArguments(); + assertAll( + () -> assertThat(actual.getReq()).isEqualTo(expected.getReq()), + () -> + assertThat(actual.getCtx().getExtraHeaders()) + 
.isEqualTo(expected.getCtx().getExtraHeaders())); + } + + @Test + public void redirectTokenPresentInHeadersIfNonNull() { + BidiReadObjectRequest base = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket("projects/_/buckets/my-bucket") + .setObject("my-object")) + .build(); + + ObjectReadSessionState state = + new ObjectReadSessionState(GrpcCallContext.createDefault(), base); + + state.setRoutingToken("token-1"); + + OpenArguments openArguments = state.getOpenArguments(); + GrpcCallContext ctx = openArguments.getCtx(); + Map> extraHeaders = ctx.getExtraHeaders(); + Map> expected = + ImmutableMap.of( + "x-goog-request-params", + ImmutableList.of("bucket=projects/_/buckets/my-bucket&routing_token=token-1")); + + assertThat(extraHeaders).isEqualTo(expected); + } + + @Test + public void redirectTokenNotPresentInHeadersIfNull() { + BidiReadObjectRequest base = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket("projects/_/buckets/my-bucket") + .setObject("my-object")) + .build(); + + ObjectReadSessionState state = + new ObjectReadSessionState(GrpcCallContext.createDefault(), base); + + state.setRoutingToken(null); + + OpenArguments openArguments = state.getOpenArguments(); + GrpcCallContext ctx = openArguments.getCtx(); + Map> extraHeaders = ctx.getExtraHeaders(); + Map> expected = + ImmutableMap.of( + "x-goog-request-params", ImmutableList.of("bucket=projects/_/buckets/my-bucket")); + + assertThat(extraHeaders).isEqualTo(expected); + } + + @Test + public void redirectTokenMustNotBeUrlEncoded() { + BidiReadObjectRequest base = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket("projects/_/buckets/my-bucket") + .setObject("my-object")) + .build(); + + ObjectReadSessionState state = + new ObjectReadSessionState(GrpcCallContext.createDefault(), base); + + state.setRoutingToken("token%20with%2furl%20encoding"); + 
+ OpenArguments openArguments = state.getOpenArguments(); + GrpcCallContext ctx = openArguments.getCtx(); + Map> extraHeaders = ctx.getExtraHeaders(); + Map> expected = + ImmutableMap.of( + "x-goog-request-params", + ImmutableList.of( + "bucket=projects/_/buckets/my-bucket&routing_token=token%20with%2furl%20encoding")); + + assertThat(extraHeaders).isEqualTo(expected); + } + + @Test + public void canHandleNewRead() throws Exception { + BidiReadObjectRequest base = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket("projects/_/buckets/my-bucket") + .setObject("my-object")) + .build(); + + ObjectReadSessionState state1 = + new ObjectReadSessionState(GrpcCallContext.createDefault(), base); + ObjectReadSessionState state2 = + new ObjectReadSessionState(GrpcCallContext.createDefault(), base); + + state1.putOutstandingRead(1, TestObjectReadSessionStreamRead.of()); + state2.putOutstandingRead( + 3, + ObjectReadSessionStreamRead.streamingRead( + 3, RangeSpec.all(), Hasher.enabled(), RetryContext.neverRetry())); + + try (AccumulatingRead bytes = + ObjectReadSessionStreamRead.createByteArrayAccumulatingRead( + 2, RangeSpec.all(), Hasher.enabled(), RetryContext.neverRetry()); + StreamingRead streaming2 = + ObjectReadSessionStreamRead.streamingRead( + 4, RangeSpec.all(), Hasher.enabled(), RetryContext.neverRetry())) { + assertAll( + () -> assertThat(state1.canHandleNewRead(TestObjectReadSessionStreamRead.of())).isTrue(), + () -> assertThat(state1.canHandleNewRead(bytes)).isFalse(), + () -> assertThat(state2.canHandleNewRead(streaming2)).isFalse()); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionStreamReadTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionStreamReadTest.java new file mode 100644 index 000000000000..9db08fd638bc --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionStreamReadTest.java @@ -0,0 +1,566 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFuture; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.AccumulatingRead; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.StreamingRead; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.ZeroCopyByteStringAccumulatingRead; +import com.google.cloud.storage.ObjectReadSessionStreamTest.TestObjectReadSessionStreamRead; +import com.google.cloud.storage.OtelStorageDecorator.OtelDecoratingObjectReadSessionStreamRead; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import com.google.cloud.storage.ZeroCopySupport.DisposableByteString; +import com.google.protobuf.ByteString; +import com.google.protobuf.UnsafeByteOperations; +import com.google.storage.v2.ReadRange; +import io.opentelemetry.api.trace.Span; +import java.io.Closeable; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import 
java.nio.channels.ScatteringByteChannel; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import org.junit.Test; + +public final class ObjectReadSessionStreamReadTest { + + @Test + public void byteArrayAccumulatingRead_happyPath() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + byte[] genBytes = DataGenerator.base64Characters().genBytes(137); + ByteString byteString = UnsafeByteOperations.unsafeWrap(genBytes); + AtomicBoolean closed = new AtomicBoolean(false); + Closeable close = () -> closed.set(true); + ResponseContentLifecycleHandle handle = + ResponseContentLifecycleHandle.create(byteString, close); + ResponseContentLifecycleHandle.ChildRef childRef = + handle.borrow(Function.identity()); + handle.close(); + + AccumulatingRead byteArrayAccumulatingRead = + ObjectReadSessionStreamRead.createByteArrayAccumulatingRead( + 1, RangeSpec.of(0, 137), Hasher.enabled(), RetryContext.neverRetry()); + + byteArrayAccumulatingRead.accept(childRef); + byteArrayAccumulatingRead.eof(); + + String expectedBytes = xxd(genBytes); + + byte[] actualFutureBytes = byteArrayAccumulatingRead.get(1, TimeUnit.SECONDS); + assertThat(xxd(actualFutureBytes)).isEqualTo(expectedBytes); + assertThat(closed.get()).isTrue(); + } + + @Test + public void byteArrayAccumulatingRead_childRef_close_ioException_propagated() throws IOException { + byte[] genBytes = DataGenerator.base64Characters().genBytes(137); + ByteString byteString = UnsafeByteOperations.unsafeWrap(genBytes); + Closeable throwOnClose = + () -> { + throw new IOException(new Kaboom()); + }; + ResponseContentLifecycleHandle handle = + 
ResponseContentLifecycleHandle.create(byteString, throwOnClose); + ResponseContentLifecycleHandle.ChildRef childRef = + handle.borrow(Function.identity()); + handle.close(); + + AccumulatingRead byteArrayAccumulatingRead = + ObjectReadSessionStreamRead.createByteArrayAccumulatingRead( + 1, RangeSpec.of(0, 137), Hasher.enabled(), RetryContext.neverRetry()); + + IOException ioException = + assertThrows( + IOException.class, + () -> { + byteArrayAccumulatingRead.accept(childRef); + byteArrayAccumulatingRead.eof(); + }); + assertThat(ioException).hasCauseThat().isInstanceOf(Kaboom.class); + } + + @Test + public void byteArrayAccumulatingRead_producesAnAccurateReadRange() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + int readId = 1; + try (AccumulatingRead read = + ObjectReadSessionStreamRead.createByteArrayAccumulatingRead( + readId, RangeSpec.of(0, 137), Hasher.enabled(), RetryContext.neverRetry())) { + + ReadRange readRange1 = read.makeReadRange(); + ReadRange expectedReadRange1 = + ReadRange.newBuilder().setReadId(readId).setReadOffset(0).setReadLength(137).build(); + assertThat(readRange1).isEqualTo(expectedReadRange1); + + ByteString bytes1 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(64)); + ByteString bytes2 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(64)); + ByteString bytes3 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(9)); + + try (ResponseContentLifecycleHandle handle = noopContentHandle(bytes1)) { + read.accept(handle.borrow(Function.identity())); + } + + ReadRange readRange2 = read.makeReadRange(); + ReadRange expectedReadRange2 = + ReadRange.newBuilder().setReadId(readId).setReadOffset(64).setReadLength(73).build(); + assertThat(readRange2).isEqualTo(expectedReadRange2); + + try (ResponseContentLifecycleHandle handle = noopContentHandle(bytes2)) { + read.accept(handle.borrow(Function.identity())); + } + + ReadRange readRange3 = read.makeReadRange(); + 
ReadRange expectedReadRange3 = + ReadRange.newBuilder().setReadId(readId).setReadOffset(128).setReadLength(9).build(); + assertThat(readRange3).isEqualTo(expectedReadRange3); + + try (ResponseContentLifecycleHandle handle = noopContentHandle(bytes3)) { + read.accept(handle.borrow(Function.identity())); + read.eof(); + } + + ReadRange readRange4 = read.makeReadRange(); + ReadRange expectedReadRange4 = + ReadRange.newBuilder().setReadId(readId).setReadOffset(137).setReadLength(0).build(); + assertThat(readRange4).isEqualTo(expectedReadRange4); + + byte[] actualBytes = read.get(1, TimeUnit.SECONDS); + assertThat(xxd(actualBytes)).isEqualTo(xxd(DataGenerator.base64Characters().genBytes(137))); + } + } + + @Test + public void streamingRead_producesAnAccurateReadRange() throws Exception { + int readId = 1; + ExecutorService exec = Executors.newSingleThreadExecutor(); + try (StreamingRead read = + ObjectReadSessionStreamRead.streamingRead( + readId, RangeSpec.of(0, 137), Hasher.enabled(), RetryContext.neverRetry())) { + ByteBuffer buffer = ByteBuffer.allocate(512); + + Future f = + exec.submit( + () -> { + try { + return Buffers.fillFrom(buffer, read); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + ByteString bytes1 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(64)); + ByteString bytes2 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(64)); + ByteString bytes3 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(9)); + + try (ResponseContentLifecycleHandle handle = noopContentHandle(bytes1)) { + read.accept(handle.borrow(Function.identity())); + } + try (ResponseContentLifecycleHandle handle = noopContentHandle(bytes2)) { + read.accept(handle.borrow(Function.identity())); + } + try (ResponseContentLifecycleHandle handle = noopContentHandle(bytes3)) { + read.accept(handle.borrow(Function.identity())); + read.eof(); + } + + int copied = f.get(5, TimeUnit.SECONDS); + assertAll( + () -> 
assertThat(copied).isEqualTo(137), + () -> + assertThat(xxd(buffer)) + .isEqualTo(xxd(DataGenerator.base64Characters().genBytes(137)))); + } finally { + exec.shutdownNow(); + } + } + + @Test + public void streamingRead_shouldNotBlockWaitingForMessages() throws Exception { + int readId = 1; + ExecutorService exec = Executors.newSingleThreadExecutor(); + ByteString bytes1 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(64)); + ByteString bytes2 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(64)); + ByteString bytes3 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(9)); + + CountDownLatch accept1 = new CountDownLatch(1); + CountDownLatch accept2 = new CountDownLatch(1); + CountDownLatch accept3 = new CountDownLatch(1); + CountDownLatch eof = new CountDownLatch(1); + + CountDownLatch accept1Ack = new CountDownLatch(1); + CountDownLatch accept2Ack = new CountDownLatch(1); + CountDownLatch accept3Ack = new CountDownLatch(1); + CountDownLatch eofAck = new CountDownLatch(1); + + try (StreamingRead read = + ObjectReadSessionStreamRead.streamingRead( + readId, RangeSpec.of(0, 137), Hasher.enabled(), RetryContext.neverRetry())) { + + Future f = + exec.submit( + () -> { + try { + accept1.await(); + try (ResponseContentLifecycleHandle handle = + noopContentHandle(bytes1)) { + read.accept(handle.borrow(Function.identity())); + } + accept1Ack.countDown(); + accept2.await(); + try (ResponseContentLifecycleHandle handle = + noopContentHandle(bytes2)) { + read.accept(handle.borrow(Function.identity())); + } + accept2Ack.countDown(); + accept3.await(); + try (ResponseContentLifecycleHandle handle = + noopContentHandle(bytes3)) { + read.accept(handle.borrow(Function.identity())); + } + accept3Ack.countDown(); + eof.await(); + read.eof(); + eofAck.countDown(); + return null; + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + ByteBuffer buffer = ByteBuffer.allocate(512); + + int read0 = read.read(buffer); + 
assertThat(read0).isEqualTo(0); + + accept1.countDown(); + accept1Ack.await(); + int read1 = read.read(buffer); + assertThat(read1).isEqualTo(64); + + accept2.countDown(); + accept2Ack.await(); + int read2 = read.read(buffer); + assertThat(read2).isEqualTo(64); + + accept3.countDown(); + accept3Ack.await(); + int read3 = read.read(buffer); + assertThat(read3).isEqualTo(9); + + eof.countDown(); + eofAck.await(); + int read4 = read.read(buffer); + assertThat(read4).isEqualTo(-1); + + f.get(5, TimeUnit.SECONDS); + + assertThat(xxd(buffer)).isEqualTo(xxd(DataGenerator.base64Characters().genBytes(137))); + } finally { + accept1.countDown(); + accept2.countDown(); + accept3.countDown(); + eof.countDown(); + exec.shutdownNow(); + } + } + + @Test + public void streamingRead_fail() throws IOException { + try (StreamingRead read = + ObjectReadSessionStreamRead.streamingRead( + 1, RangeSpec.of(0, 137), Hasher.enabled(), RetryContext.neverRetry())) { + read.fail(new Kaboom()); + + IOException ioe = assertThrows(IOException.class, () -> read.read(ByteBuffer.allocate(1))); + + assertThat(ioe).hasCauseThat().isInstanceOf(StorageException.class); + assertThat(ioe).hasCauseThat().hasCauseThat().isInstanceOf(Kaboom.class); + } + } + + @Test + public void streamingRead_closedChannelException() throws IOException { + try (StreamingRead read = + ObjectReadSessionStreamRead.streamingRead( + 1, RangeSpec.of(0, 137), Hasher.enabled(), RetryContext.neverRetry())) { + read.close(); + assertThat(read.isOpen()).isFalse(); + + assertThrows(ClosedChannelException.class, () -> read.read(ByteBuffer.allocate(1))); + } + } + + @Test + public void streamingRead_leftoversAreOnlyClearedWhenFullyConsumed() throws Exception { + try (StreamingRead read = + ObjectReadSessionStreamRead.streamingRead( + 1, RangeSpec.of(0, 137), Hasher.enabled(), RetryContext.neverRetry())) { + ByteString bytes1 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(62)); + AtomicBoolean bytes1Close = new 
AtomicBoolean(false); + try (ResponseContentLifecycleHandle handle = + ResponseContentLifecycleHandle.create( + bytes1, () -> bytes1Close.compareAndSet(false, true))) { + read.accept(handle.borrow(Function.identity())); + } + + ByteBuffer buf = ByteBuffer.allocate(512); + ByteBuffer slice1 = (ByteBuffer) buf.slice().limit(16); + assertThat(read.read(slice1)).isEqualTo(16); + buf.position(buf.position() + 16); + ByteBuffer slice2 = (ByteBuffer) buf.slice().limit(16); + assertThat(read.read(slice2)).isEqualTo(16); + buf.position(buf.position() + 16); + ByteBuffer slice3 = (ByteBuffer) buf.slice().limit(16); + assertThat(read.read(slice3)).isEqualTo(16); + buf.position(buf.position() + 16); + ByteBuffer slice4 = (ByteBuffer) buf.slice().limit(14); + assertThat(read.read(slice4)).isEqualTo(14); + buf.position(buf.position() + 14); + + assertAll( + () -> assertThat(bytes1Close.get()).isTrue(), + () -> assertThat(xxd(buf)).isEqualTo(xxd(bytes1.toByteArray()))); + } + } + + @Test + public void streamingRead_eofShouldBeReturnedIfNoOtherBytesRead() throws Exception { + try (StreamingRead read = + ObjectReadSessionStreamRead.streamingRead( + 1, RangeSpec.of(0, 137), Hasher.enabled(), RetryContext.neverRetry())) { + read.eof(); + assertThat(read.read(ByteBuffer.allocate(1))).isEqualTo(-1); + assertThat(read.isOpen()).isTrue(); + read.close(); + assertThat(read.isOpen()).isFalse(); + } + } + + @Test + public void streamingRead_leftoversAreClosedIfNonNullAndStreamClosed() throws Exception { + try (StreamingRead read = + ObjectReadSessionStreamRead.streamingRead( + 1, RangeSpec.of(0, 137), Hasher.enabled(), RetryContext.neverRetry())) { + ByteString bytes1 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(62)); + AtomicBoolean bytes1Close = new AtomicBoolean(false); + try (ResponseContentLifecycleHandle handle = + ResponseContentLifecycleHandle.create( + bytes1, () -> bytes1Close.compareAndSet(false, true))) { + read.accept(handle.borrow(Function.identity())); + } 
+ + ByteBuffer buf = ByteBuffer.allocate(1); + read.read(buf); // load into leftovers + read.close(); + + assertAll(() -> assertThat(bytes1Close.get()).isTrue()); + } + } + + @Test + public void streamingRead_withNewReadIdDoesNotOrphanAnyData() throws Exception { + try (StreamingRead read1 = + ObjectReadSessionStreamRead.streamingRead( + 1, RangeSpec.of(0, 137), Hasher.enabled(), RetryContext.neverRetry())) { + ByteString bytes1 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(62)); + AtomicBoolean bytes1Close = new AtomicBoolean(false); + try (ResponseContentLifecycleHandle handle = + ResponseContentLifecycleHandle.create( + bytes1, () -> bytes1Close.compareAndSet(false, true))) { + read1.accept(handle.borrow(Function.identity())); + } + + ByteBuffer buf = ByteBuffer.allocate(512); + // read some bytes, causing leftovers to be populated + ByteBuffer slice1 = (ByteBuffer) buf.slice().limit(16); + assertThat(read1.read(slice1)).isEqualTo(16); + buf.position(buf.position() + 16); + ByteBuffer slice2 = (ByteBuffer) buf.slice().limit(16); + assertThat(read1.read(slice2)).isEqualTo(16); + buf.position(buf.position() + 16); + + // update read id (like would happen during a retry) + StreamingRead read2 = read1.withNewReadId(2); + assertThat(read2).isSameInstanceAs(read1); + + // make sure we can read from both read1 and read 2 + ByteBuffer slice3 = (ByteBuffer) buf.slice().limit(16); + assertThat(read1.read(slice3)).isEqualTo(16); + buf.position(buf.position() + 16); + ByteBuffer slice4 = (ByteBuffer) buf.slice().limit(14); + assertThat(read2.read(slice4)).isEqualTo(14); + buf.position(buf.position() + 14); + + assertAll( + () -> assertThat(bytes1Close.get()).isTrue(), + () -> assertThat(xxd(buf)).isEqualTo(xxd(bytes1.toByteArray()))); + } + } + + @Test + public void canShareStreamWith() throws Exception { + try (AccumulatingRead bytes = + ObjectReadSessionStreamRead.createByteArrayAccumulatingRead( + 1, RangeSpec.all(), Hasher.enabled(), 
RetryContext.neverRetry()); + ZeroCopyByteStringAccumulatingRead byteString = + ObjectReadSessionStreamRead.createZeroCopyByteStringAccumulatingRead( + 2, RangeSpec.all(), Hasher.enabled(), RetryContext.neverRetry()); + StreamingRead streamingRead = + ObjectReadSessionStreamRead.streamingRead( + 3, RangeSpec.all(), Hasher.enabled(), RetryContext.neverRetry())) { + assertAll( + () -> assertThat(bytes.canShareStreamWith(byteString)).isTrue(), + () -> assertThat(byteString.canShareStreamWith(bytes)).isTrue(), + () -> assertThat(byteString.canShareStreamWith(streamingRead)).isFalse(), + () -> assertThat(bytes.canShareStreamWith(streamingRead)).isFalse(), + () -> assertThat(streamingRead.canShareStreamWith(byteString)).isFalse(), + () -> assertThat(streamingRead.canShareStreamWith(bytes)).isFalse(), + () -> assertThat(streamingRead.canShareStreamWith(streamingRead)).isFalse()); + } + } + + @Test + public void canShareStreamWith_otelDecorated() throws Exception { + try (OtelDecoratingObjectReadSessionStreamRead> bytes = + new OtelDecoratingObjectReadSessionStreamRead<>( + ObjectReadSessionStreamRead.createByteArrayAccumulatingRead( + 1, RangeSpec.all(), Hasher.enabled(), RetryContext.neverRetry()), + Span.getInvalid()); + OtelDecoratingObjectReadSessionStreamRead> byteString = + new OtelDecoratingObjectReadSessionStreamRead<>( + ObjectReadSessionStreamRead.createZeroCopyByteStringAccumulatingRead( + 2, RangeSpec.all(), Hasher.enabled(), RetryContext.neverRetry()), + Span.getInvalid()); + OtelDecoratingObjectReadSessionStreamRead streamingRead = + new OtelDecoratingObjectReadSessionStreamRead<>( + ObjectReadSessionStreamRead.streamingRead( + 3, RangeSpec.all(), Hasher.enabled(), RetryContext.neverRetry()), + Span.getInvalid())) { + assertAll( + () -> assertThat(bytes.canShareStreamWith(byteString)).isTrue(), + () -> assertThat(byteString.canShareStreamWith(bytes)).isTrue(), + () -> assertThat(byteString.canShareStreamWith(streamingRead)).isFalse(), + () -> 
assertThat(bytes.canShareStreamWith(streamingRead)).isFalse(), + () -> assertThat(streamingRead.canShareStreamWith(byteString)).isFalse(), + () -> assertThat(streamingRead.canShareStreamWith(bytes)).isFalse(), + () -> assertThat(streamingRead.canShareStreamWith(streamingRead)).isFalse()); + } + } + + @Test + public void onCloseCallbackIsCalled() throws IOException { + final AtomicBoolean closed = new AtomicBoolean(false); + + try (TestObjectReadSessionStreamRead read = TestObjectReadSessionStreamRead.of()) { + read.setOnCloseCallback(() -> closed.set(true)); + } + + assertThat(closed.get()).isTrue(); + } + + @Test + public void onCloseCallbackIsCalled_evenIfThrown() throws Exception { + final AtomicBoolean closed = new AtomicBoolean(false); + + TestObjectReadSessionStreamRead read = + new TestObjectReadSessionStreamRead(1, RangeSpec.all(), RetryContext.neverRetry()) { + @Override + public void internalClose() throws IOException { + throw new IOException("Kaboom"); + } + }; + read.setOnCloseCallback(() -> closed.set(true)); + + IOException ioException = assertThrows(IOException.class, read::close); + + assertAll( + () -> assertThat(ioException).hasMessageThat().isEqualTo("Kaboom"), + () -> assertThat(closed.get()).isTrue()); + } + + @Test + public void accumulating_futureCancel_disposes() throws IOException { + byte[] genBytes = DataGenerator.base64Characters().genBytes(137); + ByteString byteString = UnsafeByteOperations.unsafeWrap(genBytes); + AtomicBoolean closed = new AtomicBoolean(false); + Closeable close = () -> closed.set(true); + ResponseContentLifecycleHandle handle = + ResponseContentLifecycleHandle.create(byteString, close); + ResponseContentLifecycleHandle.ChildRef childRef = + handle.borrow(Function.identity()); + handle.close(); + + AccumulatingRead byteArrayAccumulatingRead = + ObjectReadSessionStreamRead.createByteArrayAccumulatingRead( + 1, RangeSpec.of(0, 137), Hasher.enabled(), RetryContext.neverRetry()); + + 
byteArrayAccumulatingRead.accept(childRef); + + byteArrayAccumulatingRead.cancel(true); + + assertThat(closed.get()).isTrue(); + } + + @Test + public void projections() throws Exception { + assertAll( + () -> { + AccumulatingRead read = + ObjectReadSessionStreamRead.createByteArrayAccumulatingRead( + 1, RangeSpec.all(), Hasher.enabled(), RetryContext.neverRetry()); + ApiFuture projected = read.project(); + assertThat(projected).isSameInstanceAs(read); + }, + () -> { + StreamingRead read = + ObjectReadSessionStreamRead.streamingRead( + 1, RangeSpec.all(), Hasher.enabled(), RetryContext.neverRetry()); + UnbufferedReadableByteChannel projected = read.project(); + assertThat(projected).isSameInstanceAs(read); + }, + () -> { + AccumulatingRead read = + ObjectReadSessionStreamRead.createZeroCopyByteStringAccumulatingRead( + 1, RangeSpec.all(), Hasher.enabled(), RetryContext.neverRetry()); + ApiFuture projected = read.project(); + assertThat(projected).isSameInstanceAs(read); + }); + } + + private static ResponseContentLifecycleHandle noopContentHandle( + ByteString byteString) { + return ResponseContentLifecycleHandle.create(byteString, () -> {}); + } + + private static final class Kaboom extends RuntimeException { + private Kaboom() { + super("Kaboom!!!"); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionStreamTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionStreamTest.java new file mode 100644 index 000000000000..75fec2cb2de6 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionStreamTest.java @@ -0,0 +1,391 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._2MiB; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFuture; +import com.google.api.core.NanoClock; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientStream; +import com.google.api.gax.rpc.ClientStreamReadyObserver; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.cloud.storage.Backoff.Jitterer; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.AccumulatingRead; +import com.google.cloud.storage.BaseObjectReadSessionStreamRead.StreamingRead; +import com.google.cloud.storage.GrpcUtils.ZeroCopyBidiStreamingCallable; +import com.google.cloud.storage.ResponseContentLifecycleHandle.ChildRef; +import com.google.cloud.storage.RetryContext.OnFailure; +import com.google.cloud.storage.RetryContext.OnSuccess; +import com.google.cloud.storage.RetryContext.RetryContextProvider; +import com.google.cloud.storage.RetryContextTest.BlockingOnSuccess; +import com.google.cloud.storage.Retrying.RetryingDependencies; +import com.google.protobuf.ByteString; +import com.google.storage.v2.BidiReadObjectRequest; +import 
com.google.storage.v2.BidiReadObjectResponse; +import com.google.storage.v2.BidiReadObjectSpec; +import com.google.storage.v2.BucketName; +import com.google.storage.v2.Object; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public final class ObjectReadSessionStreamTest { + + private static final Object METADATA = + Object.newBuilder() + .setBucket(BucketName.format("_", "b")) + .setName("o") + .setGeneration(1) + .setSize(_2MiB) + .build(); + private static final BidiReadObjectRequest REQ_OPEN = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .build()) + .build(); + + private static ScheduledExecutorService exec; + + private final RetrySettings retrySettings = RetrySettings.newBuilder().build(); + private final ObjectReadSessionState state = + new ObjectReadSessionState(GrpcCallContext.createDefault(), REQ_OPEN); + private final RetryContextProvider retryContextProvider = + () -> + RetryContext.of( + exec, + RetryingDependencies.simple(NanoClock.getDefaultClock(), retrySettings), + Retrying.alwaysRetry(), + Jitterer.noJitter()); + private final ZeroCopyBidiStreamingCallable + callable = + new ZeroCopyBidiStreamingCallable<>( + new BidiStreamingCallable() { + @Override + public ClientStream internalCall( + ResponseObserver responseObserver, + ClientStreamReadyObserver onReady, + ApiCallContext context) { + return new ClientStream() { + @Override + public void send(BidiReadObjectRequest request) {} + + @Override + public void closeSendWithError(Throwable t) {} + + @Override + public void 
closeSend() { + responseObserver.onComplete(); + } + + @Override + public boolean isSendReady() { + return true; + } + }; + } + }, + ResponseContentLifecycleManager.noopBidiReadObjectResponse()); + + @BeforeClass + public static void beforeClass() { + exec = Executors.newSingleThreadScheduledExecutor(); + } + + @AfterClass + public static void afterClass() throws Exception { + if (exec != null) { + exec.shutdownNow(); + assertThat(exec.awaitTermination(5, TimeUnit.SECONDS)).isTrue(); + } + } + + @Test + public void streamRestartShouldNotSendARequestIfAllReadsAreInBackoff() { + RetryContext read1RetryContext = retryContextProvider.create(); + TestObjectReadSessionStreamRead read1 = + new TestObjectReadSessionStreamRead(1, RangeSpec.of(1, 2), read1RetryContext); + state.putOutstandingRead(1, read1); + + RetryContext streamRetryContext = retryContextProvider.create(); + try (ObjectReadSessionStream stream = + ObjectReadSessionStream.create(exec, callable, state, streamRetryContext)) { + BlockingOnSuccess blockingOnSuccess = new BlockingOnSuccess(); + read1RetryContext.recordError( + new RuntimeException("read1err"), blockingOnSuccess, RetryContextTest.failOnFailure()); + + stream.restart(); + blockingOnSuccess.release(); + } + } + + @Test + public void streamRestartShouldSendARequestIfReadsAreNotInBackoff() { + RetryContext read1RetryContext = retryContextProvider.create(); + TestObjectReadSessionStreamRead read1 = + new TestObjectReadSessionStreamRead(1, RangeSpec.of(1, 2), read1RetryContext); + read1.readyToSend = true; + state.putOutstandingRead(1, read1); + + RetryContext streamRetryContext = retryContextProvider.create(); + try (ObjectReadSessionStream stream = + ObjectReadSessionStream.create(exec, callable, state, streamRetryContext)) { + stream.restart(); + } + } + + @Test + public void attemptingToRestartStreamThatIsAlreadyActiveThrows() { + + RetryContext streamRetryContext = retryContextProvider.create(); + try (ObjectReadSessionStream stream = + 
ObjectReadSessionStream.create(exec, callable, state, streamRetryContext)) { + stream.send(REQ_OPEN); + + IllegalStateException ise = assertThrows(IllegalStateException.class, stream::restart); + assertThat(ise).hasMessageThat().contains("already active"); + } + } + + @Test + public void sendErrorsIfNotOpen() throws Exception { + + RetryContext streamRetryContext = retryContextProvider.create(); + ObjectReadSessionStream stream = + ObjectReadSessionStream.create(exec, callable, state, streamRetryContext); + assertThat(stream.isOpen()).isTrue(); + stream.close(); + + assertAll( + () -> { + IllegalStateException ise = + assertThrows(IllegalStateException.class, () -> stream.send(REQ_OPEN)); + assertThat(ise).hasMessageThat().isEqualTo("Stream closed"); + }, + () -> assertThat(stream.isOpen()).isFalse()); + } + + @Test + public void closingShouldFailPendingReads() throws Exception { + + TestObjectReadSessionStreamRead read1 = TestObjectReadSessionStreamRead.of(); + TestObjectReadSessionStreamRead read2 = TestObjectReadSessionStreamRead.of(); + TestObjectReadSessionStreamRead read3 = TestObjectReadSessionStreamRead.of(); + state.putOutstandingRead(read1.readId, read1); + state.putOutstandingRead(read2.readId, read2); + state.putOutstandingRead(read3.readId, read3); + + RetryContext streamRetryContext = retryContextProvider.create(); + ObjectReadSessionStream stream = + ObjectReadSessionStream.create(exec, callable, state, streamRetryContext); + ApiFuture closeAsync = stream.closeAsync(); + TestUtils.await(closeAsync, 5, TimeUnit.SECONDS); + + assertAll( + () -> { + Throwable t1 = read1.fail.get(2, TimeUnit.SECONDS); + // t1.printStackTrace(System.out); + assertThat(t1).isInstanceOf(StorageException.class); + assertThat(t1).hasCauseThat().isInstanceOf(AsyncSessionClosedException.class); + }, + () -> { + Throwable t2 = read2.fail.get(2, TimeUnit.SECONDS); + // t2.printStackTrace(System.err); + assertThat(t2).isInstanceOf(StorageException.class); + 
assertThat(t2).hasCauseThat().isInstanceOf(AsyncSessionClosedException.class); + }, + () -> { + Throwable t3 = read3.fail.get(2, TimeUnit.SECONDS); + // t3.printStackTrace(System.out); + assertThat(t3).isInstanceOf(StorageException.class); + assertThat(t3).hasCauseThat().isInstanceOf(AsyncSessionClosedException.class); + }); + } + + @Test + public void streamingRead_mustCloseQueuedResponsesWhenFailed() throws Exception { + try (StreamingRead read1 = + ObjectReadSessionStreamRead.streamingRead( + 1, RangeSpec.all(), Hasher.enabled(), RetryContext.neverRetry())) { + state.putOutstandingRead(1, read1); + ObjectReadSessionStream stream = + ObjectReadSessionStream.create(exec, callable, state, RetryContext.neverRetry()); + + ByteString bytes1 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(9)); + ByteString bytes2 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(9)); + + AtomicBoolean bytes1Close = new AtomicBoolean(false); + AtomicBoolean bytes2Close = new AtomicBoolean(false); + + try (ResponseContentLifecycleHandle handle = + ResponseContentLifecycleHandle.create( + bytes1, () -> bytes1Close.compareAndSet(false, true))) { + read1.accept(handle.borrow(Function.identity())); + } + try (ResponseContentLifecycleHandle handle = + ResponseContentLifecycleHandle.create( + bytes2, () -> bytes2Close.compareAndSet(false, true))) { + read1.accept(handle.borrow(Function.identity())); + } + + // read some bytes, causing leftovers to be populated + read1.read(ByteBuffer.allocate(1)); + stream.close(); + + // call read again to observe the async close that happens + IOException ioe = assertThrows(IOException.class, () -> read1.read(ByteBuffer.allocate(32))); + + assertAll( + () -> assertThat(bytes1Close.get()).isTrue(), + () -> assertThat(bytes2Close.get()).isTrue(), + () -> assertThat(read1.acceptingBytes()).isFalse(), + () -> assertThat(ioe).hasCauseThat().isInstanceOf(StorageException.class), + () -> 
assertThat(ioe).hasCauseThat().hasMessageThat().contains("Parent stream shutdown")); + } + } + + @Test + public void accumulatingRead_mustCloseQueuedResponsesWhenFailed() throws Exception { + try (AccumulatingRead read1 = + ObjectReadSessionStreamRead.createByteArrayAccumulatingRead( + 1, RangeSpec.all(), Hasher.enabled(), RetryContext.neverRetry())) { + state.putOutstandingRead(1, read1); + ObjectReadSessionStream stream = + ObjectReadSessionStream.create(exec, callable, state, RetryContext.neverRetry()); + + ByteString bytes1 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(9)); + ByteString bytes2 = ByteString.copyFrom(DataGenerator.base64Characters().genBytes(9)); + + AtomicBoolean bytes1Close = new AtomicBoolean(false); + AtomicBoolean bytes2Close = new AtomicBoolean(false); + + try (ResponseContentLifecycleHandle handle = + ResponseContentLifecycleHandle.create( + bytes1, () -> bytes1Close.compareAndSet(false, true))) { + read1.accept(handle.borrow(Function.identity())); + } + try (ResponseContentLifecycleHandle handle = + ResponseContentLifecycleHandle.create( + bytes2, () -> bytes2Close.compareAndSet(false, true))) { + read1.accept(handle.borrow(Function.identity())); + } + + stream.close(); + + StorageException se = + assertThrows(StorageException.class, () -> TestUtils.await(read1, 2, TimeUnit.SECONDS)); + assertAll( + () -> assertThat(bytes1Close.get()).isTrue(), + () -> assertThat(bytes2Close.get()).isTrue(), + () -> assertThat(read1.acceptingBytes()).isFalse(), + () -> assertThat(se).hasMessageThat().contains("Parent stream shutdown"), + () -> assertThat(se).hasCauseThat().isInstanceOf(AsyncSessionClosedException.class)); + } + } + + static class TestObjectReadSessionStreamRead + extends BaseObjectReadSessionStreamRead { + + private static final AtomicLong readIdSeq = new AtomicLong(1); + protected final long readId; + private boolean readyToSend = false; + private final SettableApiFuture fail = SettableApiFuture.create(); + + 
TestObjectReadSessionStreamRead(long readId, RangeSpec rangeSpec, RetryContext retryContext) { + super( + rangeSpec, + new AtomicLong(rangeSpec.begin()), + retryContext, + IOAutoCloseable.noOp(), + false); + this.readId = readId; + } + + @Override + long readId() { + return readId; + } + + @Override + public java.lang.Object project() { + return this; + } + + @Override + public boolean acceptingBytes() { + return false; + } + + @Override + public void accept(ChildRef childRef) {} + + @Override + public void eof() {} + + @Override + public ApiFuture fail(Throwable t) { + fail.set(t); + return fail; + } + + @Override + public Hasher hasher() { + return Hasher.enabled(); + } + + @Override + public TestObjectReadSessionStreamRead withNewReadId(long newReadId) { + return null; + } + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) {} + + @Override + public boolean readyToSend() { + return readyToSend; + } + + @Override + public void internalClose() throws IOException {} + + static TestObjectReadSessionStreamRead of() { + long id = readIdSeq.getAndIncrement(); + return new TestObjectReadSessionStreamRead( + id, RangeSpec.of(0, 10), RetryContext.neverRetry()); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionTest.java new file mode 100644 index 000000000000..22071173805e --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ObjectReadSessionTest.java @@ -0,0 +1,167 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.ObjectReadSessionImpl.ConcurrentIdentityMap; +import com.google.common.base.MoreObjects; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiFunction; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public final class ObjectReadSessionTest { + private static final AtomicInteger vCounter = new AtomicInteger(1); + + private static ListeningExecutorService exec; + + @BeforeClass + public static void beforeClass() { + exec = + MoreExecutors.listeningDecorator( + Executors.newFixedThreadPool(2, new ThreadFactoryBuilder().setDaemon(true).build())); + } + + @AfterClass + public static void afterClass() { + exec.shutdownNow(); + } + + @Test + public void concurrentIdentityMap_basic() throws Exception { + ConcurrentIdentityMap map = new ConcurrentIdentityMap<>(); + + map.put(new Key("k1"), new Value()); + map.put(new Key("k2"), new Value()); + map.put(new Key("k3"), new Value()); + map.put(new Key("k4"), new 
Value()); + + List strings = map.drainEntries((k, v) -> String.format("%s -> %s", k, v)); + assertThat(strings).hasSize(4); + + String joined = String.join("\n", strings); + assertAll( + () -> assertThat(joined).contains("k1"), + () -> assertThat(joined).contains("k2"), + () -> assertThat(joined).contains("k3"), + () -> assertThat(joined).contains("k4")); + } + + @Test + public void concurrentIdentityMap_multipleThreadsAdding() throws Exception { + ConcurrentIdentityMap map = new ConcurrentIdentityMap<>(); + + CountDownLatch cdl = new CountDownLatch(1); + map.put(new Key("t1k1"), new Value()); + map.put(new Key("t1k2"), new Value()); + + ListenableFuture submitted = + exec.submit( + () -> { + try { + boolean await = cdl.await(3, TimeUnit.SECONDS); + assertThat(await).isTrue(); + map.put(new Key("t2k1"), new Value()); + return true; + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + + BiFunction f = + (k, v) -> { + cdl.countDown(); + return String.format("%s -> %s", k, v); + }; + List strings = map.drainEntries(f); + assertThat(strings).hasSize(2); + String joined = String.join("\n", strings); + assertAll(() -> assertThat(joined).contains("t1k1"), () -> assertThat(joined).contains("t1k2")); + + submitted.get(1, TimeUnit.SECONDS); + List drain2 = map.drainEntries(f); + assertThat(drain2).hasSize(1); + } + + @Test + public void concurrentIdentityMap_removeAfterDrainClean() throws Exception { + ConcurrentIdentityMap map = new ConcurrentIdentityMap<>(); + + CountDownLatch cdl = new CountDownLatch(1); + map.put(new Key("t1k1"), new Value()); + Key t1k2 = new Key("t1k2"); + map.put(t1k2, new Value()); + + ListenableFuture submit = + exec.submit( + () -> { + try { + boolean await = cdl.await(3, TimeUnit.SECONDS); + assertThat(await).isTrue(); + map.remove(t1k2); + return true; + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + + BiFunction f = + (k, v) -> { + cdl.countDown(); + return String.format("%s -> %s", 
k, v); + }; + List strings = map.drainEntries(f); + assertThat(strings).hasSize(2); + String joined = String.join("\n", strings); + assertAll(() -> assertThat(joined).contains("t1k1"), () -> assertThat(joined).contains("t1k2")); + + assertThat(submit.get(1, TimeUnit.SECONDS)).isEqualTo(true); + } + + private static final class Key { + private final String k; + + private Key(String k) { + this.k = k; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("k", k).toString(); + } + } + + private static final class Value { + private final String v = String.format("v/%d", vCounter.getAndIncrement()); + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("v", v).toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/OpenTelemetryBootstrappingUtilsTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/OpenTelemetryBootstrappingUtilsTest.java new file mode 100644 index 000000000000..5e93c95a988b --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/OpenTelemetryBootstrappingUtilsTest.java @@ -0,0 +1,97 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assume.assumeFalse; +import static org.mockito.Mockito.mock; + +import com.google.cloud.storage.OpenTelemetryBootstrappingUtils.ChannelConfigurator; +import io.grpc.ManagedChannelBuilder; +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.Test; + +public final class OpenTelemetryBootstrappingUtilsTest { + + @Test + public void noErrorIfNotRunningOnGcp() { + assumeFalse("Skipping because running on GCP", TestUtils.isOnComputeEngine()); + + ChannelConfigurator cc = ChannelConfigurator.identity(); + + String endpoint = "storage.googleapis.com:443"; + String projectId = null; + String universeDomain = null; + ChannelConfigurator actual = + OpenTelemetryBootstrappingUtils.enableGrpcMetrics( + cc, endpoint, projectId, universeDomain, true); + + assertThat(actual).isSameInstanceAs(cc); + } + + @SuppressWarnings("rawtypes") // ManagedChannelBuilder + @Test + public void channelConfigurator_andThen() { + ManagedChannelBuilder b1 = mock(ManagedChannelBuilder.class, "b1"); + ManagedChannelBuilder b2 = mock(ManagedChannelBuilder.class, "b2"); + ManagedChannelBuilder b3 = mock(ManagedChannelBuilder.class, "b2"); + + ChannelConfigurator cc1 = + b -> { + assertThat(b).isSameInstanceAs(b1); + return b2; + }; + ChannelConfigurator cc2 = + b -> { + assertThat(b).isSameInstanceAs(b2); + return b3; + }; + + ChannelConfigurator cc3 = cc1.andThen(cc2); + + ManagedChannelBuilder apply = cc3.apply(b1); + assertThat(apply).isSameInstanceAs(b3); + } + + @Test + public void channelConfigurator_lift_nullToIdentity() { + ChannelConfigurator actual = ChannelConfigurator.lift(null); + assertThat(actual).isSameInstanceAs(ChannelConfigurator.identity()); + } + + @SuppressWarnings("rawtypes") // ManagedChannelBuilder + @Test + public void channelConfigurator_lift_plumbingWorks() { + ManagedChannelBuilder b1 = mock(ManagedChannelBuilder.class, "b1"); + 
AtomicBoolean called = new AtomicBoolean(false); + ChannelConfigurator lifted = + ChannelConfigurator.lift( + b -> { + called.compareAndSet(false, true); + return b; + }); + ManagedChannelBuilder actual = lifted.apply(b1); + assertThat(actual).isSameInstanceAs(b1); + assertThat(called.get()).isTrue(); + } + + @Test + public void channelConfigurator_andThen_nullsafe() { + ChannelConfigurator actual = ChannelConfigurator.identity().andThen(null); + assertThat(actual).isSameInstanceAs(ChannelConfigurator.identity()); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/PackagePrivateMethodWorkarounds.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/PackagePrivateMethodWorkarounds.java new file mode 100644 index 000000000000..2b3dce86f69f --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/PackagePrivateMethodWorkarounds.java @@ -0,0 +1,142 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.cloud.ReadChannel; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.OtelStorageDecorator.OtelDecoratedReadChannel; +import com.google.cloud.storage.OtelStorageDecorator.OtelDecoratedWriteChannel; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.MessageOrBuilder; +import com.google.storage.v2.StorageClient; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.function.Consumer; +import java.util.function.Function; +import javax.annotation.Nullable; + +/** + * Several classes in the High Level Model for storage include package-local constructors and + * methods. For conformance testing we don't want to exist in the com.google.cloud.storage package + * to ensure we're interacting with the public api, however in a few select cases we need to change + * the instance of {@link Storage} which an object holds on to. The utilities in this class allow us + * to perform these operations. 
+ */ +public final class PackagePrivateMethodWorkarounds { + + private PackagePrivateMethodWorkarounds() {} + + public static Bucket bucketCopyWithStorage(Bucket b, Storage s) { + return b.asBucket(s); + } + + public static Blob blobCopyWithStorage(Blob b, Storage s) { + return b.asBlob(s); + } + + public static Function> maybeGetBlobInfoFunction() { + return (w) -> { + if (w instanceof OtelDecoratedWriteChannel) { + OtelDecoratedWriteChannel odwc = (OtelDecoratedWriteChannel) w; + w = odwc.delegate; + } + if (w instanceof BlobWriteChannelV2) { + BlobWriteChannelV2 blobWriteChannel = (BlobWriteChannelV2) w; + return Optional.ofNullable(blobWriteChannel.getResolvedObject()) + .map(Conversions.json().blobInfo()::decode); + } else if (w instanceof GrpcBlobWriteChannel) { + GrpcBlobWriteChannel grpcBlobWriteChannel = (GrpcBlobWriteChannel) w; + return Optional.of(grpcBlobWriteChannel.getObject()) + .map( + f -> { + try { + return f.get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + }); + } else { + return Optional.empty(); + } + }; + } + + public static ApiFuture getBlobInfoFromReadChannelFunction(ReadChannel c) { + if (c instanceof OtelDecoratedReadChannel) { + OtelDecoratedReadChannel odrc = (OtelDecoratedReadChannel) c; + c = odrc.reader; + } + if (c instanceof StorageReadChannel) { + StorageReadChannel src = (StorageReadChannel) c; + return src.getObject(); + } + return ApiFutures.immediateFailedFuture( + new IllegalStateException("Unsupported ReadChannel Type " + c.getClass().getName())); + } + + @Nullable + public static StorageClient maybeGetStorageClient(Storage s) { + if (s instanceof GrpcStorageImpl) { + return ((GrpcStorageImpl) s).storageClient; + } + // handle instances of AbstractStorageProxy + Storage service = s.getOptions().getService(); + if (service instanceof OtelStorageDecorator) { + OtelStorageDecorator osd = (OtelStorageDecorator) service; + service = osd.delegate; + } + if (service instanceof 
GrpcStorageImpl) { + return ((GrpcStorageImpl) service).storageClient; + } + return null; + } + + @Nullable + public static StorageDataClient maybeGetStorageDataClient(Storage s) { + if (s instanceof GrpcStorageImpl) { + return ((GrpcStorageImpl) s).storageDataClient; + } + // handle instances of AbstractStorageProxy + Storage service = s.getOptions().getService(); + if (service instanceof OtelStorageDecorator) { + OtelStorageDecorator osd = (OtelStorageDecorator) service; + service = osd.delegate; + } + if (service instanceof GrpcStorageImpl) { + return ((GrpcStorageImpl) service).storageDataClient; + } + return null; + } + + public static void ifNonNull(@Nullable T t, Consumer c) { + Utils.ifNonNull(t, c); + } + + public static void ifNonNull(@Nullable T1 t, Function map, Consumer c) { + Utils.ifNonNull(t, map, c); + } + + public static BlobInfo noAcl(BlobInfo bi) { + return bi.toBuilder().setOwner(null).setAcl(ImmutableList.of()).build(); + } + + public static String fmtProto(Object msg, Function fmt) { + return StorageV2ProtoUtils.fmtProtoWithFmt(msg, fmt::apply); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ParallelCompositeUploadBlobWriteSessionConfigTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ParallelCompositeUploadBlobWriteSessionConfigTest.java new file mode 100644 index 000000000000..93071b0d1c5b --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ParallelCompositeUploadBlobWriteSessionConfigTest.java @@ -0,0 +1,115 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.cloud.storage.MetadataField.PartRange; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartMetadataFieldDecorator; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartNamingStrategy; +import com.google.common.truth.StringSubject; +import java.time.Duration; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.util.Locale; +import org.junit.Test; + +public final class ParallelCompositeUploadBlobWriteSessionConfigTest { + + @Test + public void partNameStrategy_noPrefix() throws Exception { + PartNamingStrategy strategy = PartNamingStrategy.noPrefix(); + + String fmt = strategy.fmtName("object23", PartRange.of(1, 32)); + assertAll( + // random digest to spread over keyspace + () -> assertField(fmt, 0).hasLength(22), + // name digest + () -> assertField(fmt, 1).hasLength(22), + () -> assertField(fmt, 2).isEqualTo("0001-0032.part")); + } + + @Test + public void partNameStrategy_prefix() throws Exception { + PartNamingStrategy strategy = PartNamingStrategy.prefix("asdf"); + + String fmt = strategy.fmtName("301object23", PartRange.of(1, 96)); + assertAll( + // random digest with prefix to spread over keyspace + // digest is 22, prefix is 4, slash is 1 + () -> assertField(fmt, 0).hasLength(22 + 5), + // 
name digest + () -> assertField(fmt, 1).hasLength(22), + () -> assertField(fmt, 2).isEqualTo("0001-0096.part"), + () -> assertThat(fmt).startsWith("asdf/")); + } + + @Test + public void partNameStrategy_prefix_stillWorksWithFmtPattern() throws Exception { + PartNamingStrategy strategy = PartNamingStrategy.prefix("[%s]"); + + String fmt = strategy.fmtName("301object23", PartRange.of(1, 96)); + assertAll( + // random digest with prefix to spread over keyspace + // digest is 22, prefix is 4, slash is 1 + () -> assertField(fmt, 0).hasLength(22 + 5), + // name digest + () -> assertField(fmt, 1).hasLength(22), + () -> assertField(fmt, 2).isEqualTo("0001-0096.part"), + () -> assertThat(fmt).startsWith("[%s]/")); + } + + @Test + public void partNameStrategy_objectNamePrefix() throws Exception { + // Creating an object level prefix without specifying an additional prefix will append the + // object name to the beginning of the part name. + PartNamingStrategy strategy = PartNamingStrategy.useObjectNameAsPrefix(); + + String fmt = strategy.fmtName("a/b/obj", PartRange.of(1, 96)); + assertAll( + // random digest with prefix to spread over keyspace + // digest is 22, objectName is 7, slash is 1 + () -> assertField(fmt, 0).hasLength(22 + 8), + // name digest + () -> assertField(fmt, 1).hasLength(22), + () -> assertField(fmt, 2).isEqualTo("0001-0096.part"), + () -> assertThat(fmt).startsWith("a/b/obj")); + } + + @Test + public void partMetadataFieldDecorator_customTime() { + BlobInfo.Builder testBlob = BlobInfo.newBuilder("testBlob", "testBucket"); + Duration duration = Duration.ofSeconds(30); + TestClock clock = TestClock.tickBy(Instant.EPOCH, Duration.ofSeconds(1)); + OffsetDateTime expected = + OffsetDateTime.from(Instant.EPOCH.plus(duration).atZone(ZoneId.of("Z"))); + PartMetadataFieldDecorator.setCustomTimeInFuture(duration).newInstance(clock).apply(testBlob); + + assertThat(expected).isEqualTo(testBlob.build().getCustomTimeOffsetDateTime()); + } + + private static 
StringSubject assertField(String fmt, int idx) { + String[] split = fmt.split(";"); + String s = split[idx]; + return assertWithMessage( + String.format(Locale.US, "Formatted name '%s', field[%d] = %s", fmt, idx, s)) + .that(s); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ParallelCompositeUploadWritableByteChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ParallelCompositeUploadWritableByteChannelTest.java new file mode 100644 index 000000000000..2990aae5dd69 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ParallelCompositeUploadWritableByteChannelTest.java @@ -0,0 +1,999 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.ApiExceptions; +import com.google.cloud.storage.BufferHandlePool.PooledBuffer; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.MetadataField.PartRange; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartCleanupStrategy; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartMetadataFieldDecorator; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartMetadataFieldDecoratorInstance; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartNamingStrategy; +import com.google.cloud.storage.ParallelCompositeUploadWritableByteChannel.BufferHandleReleaser; +import com.google.cloud.storage.Storage.ComposeRequest; +import com.google.cloud.storage.Storage.PredefinedAcl; +import com.google.cloud.storage.UnifiedOpts.EncryptionKey; +import com.google.cloud.storage.UnifiedOpts.ObjectSourceOpt; +import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.common.util.concurrent.Uninterruptibles; +import 
com.google.storage.v2.WriteObjectRequest; +import io.grpc.Status.Code; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.AsynchronousCloseException; +import java.nio.channels.ClosedChannelException; +import java.time.Duration; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.junit.Before; +import org.junit.Test; + +public final class ParallelCompositeUploadWritableByteChannelTest { + + private static final Hasher HASHER = Hasher.enabled(); + + private BlobInfo info; + private Opts opts; + + private BufferHandlePool bufferHandlePool; + private SettableApiFuture finalObject; + private FakeStorageInternal storageInternal; + private SimplisticPartNamingStrategy partNamingStrategy; + private PartMetadataFieldDecoratorInstance partMetadataFieldDecorator; + private int bufferCapacity; + + @Before + public void setUp() throws Exception { + info = BlobInfo.newBuilder("bucket", "object").build(); + opts = Opts.from(UnifiedOpts.generationMatch(0)); + bufferCapacity = 10; + bufferHandlePool = BufferHandlePool.simple(bufferCapacity); + finalObject = SettableApiFuture.create(); + partNamingStrategy = new SimplisticPartNamingStrategy("prefix"); + storageInternal = new FakeStorageInternal(); + partMetadataFieldDecorator = 
PartMetadataFieldDecorator.noOp().newInstance(null); + } + + @Test + public void objectCreated_partCount_eqToLimit() throws Exception { + + int maxElementsPerCompact = 5; + ParallelCompositeUploadWritableByteChannel pcu = defaultPcu(maxElementsPerCompact); + + byte[] bytes = DataGenerator.base64Characters().genBytes(47); + pcu.write(ByteBuffer.wrap(bytes)); + + pcu.close(); + + String name = info.getName(); + // individual parts + BlobId p1 = id(partNamingStrategy.fmtName(name, PartRange.of(1)), 1L); + BlobId p2 = id(partNamingStrategy.fmtName(name, PartRange.of(2)), 2L); + BlobId p3 = id(partNamingStrategy.fmtName(name, PartRange.of(3)), 3L); + BlobId p4 = id(partNamingStrategy.fmtName(name, PartRange.of(4)), 4L); + BlobId p5 = id(partNamingStrategy.fmtName(name, PartRange.of(5)), 5L); + // compose + BlobId c1 = id(partNamingStrategy.fmtName(name, PartRange.of(1, 5)), 6L); + // ultimate object + BlobId expectedId = id(name, 7L); + + BlobInfo result = ApiFutureUtils.await(finalObject); + + assertAll( + () -> assertThat(result.getBlobId()).isEqualTo(expectedId), + () -> + assertThat(storageInternal.addedObjects.keySet()) + .containsExactly(p1, p2, p3, p4, p5, c1, expectedId), + () -> assertThat(storageInternal.deleteRequests).containsExactly(p1, p2, p3, p4, p5, c1)); + } + + @Test + public void objectCreated_partCount_ltToLimit() throws Exception { + + int maxElementsPerCompact = 6; + ParallelCompositeUploadWritableByteChannel pcu = defaultPcu(maxElementsPerCompact); + + byte[] bytes = DataGenerator.base64Characters().genBytes(47); + pcu.write(ByteBuffer.wrap(bytes)); + + pcu.close(); + + String name = info.getName(); + // individual parts + BlobId p1 = id(partNamingStrategy.fmtName(name, PartRange.of(1)), 1L); + BlobId p2 = id(partNamingStrategy.fmtName(name, PartRange.of(2)), 2L); + BlobId p3 = id(partNamingStrategy.fmtName(name, PartRange.of(3)), 3L); + BlobId p4 = id(partNamingStrategy.fmtName(name, PartRange.of(4)), 4L); + BlobId p5 = 
id(partNamingStrategy.fmtName(name, PartRange.of(5)), 5L); + // no compose + // ultimate object + BlobId expectedId = id(name, 6L); + BlobInfo result = ApiFutureUtils.await(finalObject); + + assertAll( + () -> assertThat(result.getBlobId()).isEqualTo(expectedId), + () -> + assertThat(storageInternal.addedObjects.keySet()) + .containsExactly(p1, p2, p3, p4, p5, expectedId), + () -> assertThat(storageInternal.deleteRequests).containsExactly(p1, p2, p3, p4, p5)); + } + + @Test + public void objectCreated_partCount_gtToLimit() throws Exception { + + int maxElementsPerCompact = 4; + ParallelCompositeUploadWritableByteChannel pcu = defaultPcu(maxElementsPerCompact); + + byte[] bytes = DataGenerator.base64Characters().genBytes(47); + pcu.write(ByteBuffer.wrap(bytes)); + + pcu.close(); + + String name = info.getName(); + // parts 1-4 + BlobId p1 = id(partNamingStrategy.fmtName(name, PartRange.of(1)), 1L); + BlobId p2 = id(partNamingStrategy.fmtName(name, PartRange.of(2)), 2L); + BlobId p3 = id(partNamingStrategy.fmtName(name, PartRange.of(3)), 3L); + BlobId p4 = id(partNamingStrategy.fmtName(name, PartRange.of(4)), 4L); + // compose 1-4 + BlobId c1 = id(partNamingStrategy.fmtName(name, PartRange.of(1, 4)), 5L); + // part 5 + BlobId p5 = id(partNamingStrategy.fmtName(name, PartRange.of(5)), 6L); + // ultimate object + BlobId expectedId = id(name, 7L); + + BlobInfo result = ApiFutureUtils.await(finalObject); + + assertAll( + () -> assertThat(result.getBlobId()).isEqualTo(expectedId), + () -> + assertThat(storageInternal.addedObjects.keySet()) + .containsExactly(p1, p2, p3, p4, c1, p5, expectedId), + () -> assertThat(storageInternal.deleteRequests).containsExactly(p1, p2, p3, p4, p5, c1)); + } + + @Test + public void cleanup_success_disabled() throws Exception { + + int maxElementsPerCompact = 10; + ParallelCompositeUploadWritableByteChannel pcu = + new ParallelCompositeUploadWritableByteChannel( + bufferHandlePool, + MoreExecutors.directExecutor(), + partNamingStrategy, + 
PartCleanupStrategy.never(), + maxElementsPerCompact, + partMetadataFieldDecorator, + finalObject, + storageInternal, + info, + opts); + + byte[] bytes = DataGenerator.base64Characters().genBytes(22); + pcu.write(ByteBuffer.wrap(bytes)); + + pcu.close(); + + String name = info.getName(); + // parts 1-4 + BlobId p1 = id(partNamingStrategy.fmtName(name, PartRange.of(1)), 1L); + BlobId p2 = id(partNamingStrategy.fmtName(name, PartRange.of(2)), 2L); + BlobId p3 = id(partNamingStrategy.fmtName(name, PartRange.of(3)), 3L); + // ultimate object + BlobId expectedId = id(name, 4L); + + BlobInfo result = ApiFutureUtils.await(finalObject); + + assertAll( + () -> assertThat(result.getBlobId()).isEqualTo(expectedId), + () -> + assertThat(storageInternal.addedObjects.keySet()) + .containsExactly(p1, p2, p3, expectedId), + () -> assertThat(storageInternal.deleteRequests).isEmpty()); + } + + @Test + public void writeDoesNotFlushIfItIsnNotFull() throws Exception { + + int maxElementsPerCompact = 10; + ParallelCompositeUploadWritableByteChannel pcu = + new ParallelCompositeUploadWritableByteChannel( + bufferHandlePool, + MoreExecutors.directExecutor(), + partNamingStrategy, + PartCleanupStrategy.never(), + maxElementsPerCompact, + partMetadataFieldDecorator, + finalObject, + storageInternal, + info, + opts); + + byte[] bytes1 = DataGenerator.base64Characters().genBytes(bufferCapacity * 2 - 1); + int limit = bufferCapacity - 1; + pcu.write(ByteBuffer.wrap(bytes1, 0, limit)); + pcu.write(ByteBuffer.wrap(bytes1, limit, bytes1.length - limit)); + + pcu.close(); + + String name = info.getName(); + // parts 1-4 + BlobId p1 = id(partNamingStrategy.fmtName(name, PartRange.of(1)), 1L); + BlobId p2 = id(partNamingStrategy.fmtName(name, PartRange.of(2)), 2L); + // ultimate object + BlobId expectedId = id(name, 3L); + + BlobInfo result = ApiFutureUtils.await(finalObject); + + assertAll( + () -> assertThat(result.getBlobId()).isEqualTo(expectedId), + () -> 
assertThat(storageInternal.addedObjects.keySet()).containsExactly(p1, p2, expectedId), + () -> assertThat(storageInternal.deleteRequests).isEmpty(), + () -> { + Crc32cLengthKnown part1Crc32c = storageInternal.addedObjects.get(p1).getCrc32c(); + Crc32cLengthKnown expected = HASHER.hash(ByteBuffer.wrap(bytes1, 0, bufferCapacity)); + assertThat(part1Crc32c).isEqualTo(expected); + }); + } + + @Test + public void partOpts_stripsPreconditionsAndChecksums_addingIfGenEq0() { + EncryptionKey key = UnifiedOpts.encryptionKey("key"); + Opts opts = + Opts.from( + UnifiedOpts.generationMatch(4), + UnifiedOpts.generationNotMatch(5), + UnifiedOpts.metagenerationMatch(6), + UnifiedOpts.metagenerationNotMatch(7), + UnifiedOpts.userProject("user-project"), + key, + UnifiedOpts.predefinedAcl(PredefinedAcl.PRIVATE), + UnifiedOpts.kmsKeyName("kms-key"), + UnifiedOpts.crc32cMatch(1), + UnifiedOpts.md5Match("asdf"), + UnifiedOpts.generationMatch(8).asSource(), + UnifiedOpts.generationNotMatch(10).asSource(), + UnifiedOpts.metagenerationMatch(12).asSource(), + UnifiedOpts.metagenerationNotMatch(14).asSource()); + + Opts partOpts = ParallelCompositeUploadWritableByteChannel.getPartOpts(opts); + + ImmutableMap expected = + ImmutableMap.of( + StorageRpc.Option.IF_GENERATION_MATCH, + 0L, + StorageRpc.Option.USER_PROJECT, + "user-project", + StorageRpc.Option.CUSTOMER_SUPPLIED_KEY, + Base64.getEncoder().encodeToString(key.val.getEncoded()), + StorageRpc.Option.PREDEFINED_ACL, + PredefinedAcl.PRIVATE.getEntry(), + StorageRpc.Option.KMS_KEY_NAME, + "kms-key"); + ImmutableMap rpcOptions = partOpts.getRpcOptions(); + + assertThat(rpcOptions).containsAtLeastEntriesIn(expected); + } + + @Test + public void callingCloseOnANeverWrittenPcuStillCreatesAnEmptyObject() throws Exception { + ParallelCompositeUploadWritableByteChannel pcu = defaultPcu(3); + // never call write + pcu.close(); + // ultimate object + BlobId expectedId = id(info.getName(), 1L); + + BlobInfo result = 
ApiFutureUtils.await(finalObject); + assertAll( + () -> assertThat(result.getBlobId()).isEqualTo(expectedId), + () -> assertThat(storageInternal.addedObjects.keySet()).containsExactly(expectedId), + () -> assertThat(storageInternal.deleteRequests).isEmpty()); + } + + @Test + public void partsRetainMetadata() throws Exception { + ImmutableMap metadata = ImmutableMap.of("a", "1", "b", "2"); + + List> metadatas = Collections.synchronizedList(new ArrayList<>()); + BlobInfo info = this.info.toBuilder().setMetadata(metadata).build(); + ParallelCompositeUploadWritableByteChannel pcu = + new ParallelCompositeUploadWritableByteChannel( + bufferHandlePool, + MoreExecutors.directExecutor(), + partNamingStrategy, + PartCleanupStrategy.never(), + 3, + partMetadataFieldDecorator, + finalObject, + new FakeStorageInternal() { + @Override + public BlobInfo internalDirectUpload( + BlobInfo info, Opts opts, ByteBuffer buf) { + metadatas.add(info.getMetadata()); + return super.internalDirectUpload(info, opts, buf); + } + + @Override + public BlobInfo compose(ComposeRequest composeRequest) { + metadatas.add(composeRequest.getTarget().getMetadata()); + return super.compose(composeRequest); + } + }, + info, + opts); + + pcu.write(DataGenerator.base64Characters().genByteBuffer(bufferCapacity * 3 + 5)); + pcu.close(); + + BlobInfo result = ApiFutureUtils.await(finalObject); + assertAll( + () -> assertThat(result.getMetadata()).isEqualTo(metadata), + () -> { + assertThat(metadatas).isNotEmpty(); + for (Map m : metadatas) { + assertThat(m).containsAtLeastEntriesIn(metadata); + } + }); + } + + @Test + public void channelClosedException_writeAfterClose() throws Exception { + ParallelCompositeUploadWritableByteChannel pcu = defaultPcu(3); + // never call write + pcu.close(); + assertThrows( + ClosedChannelException.class, + () -> pcu.write(DataGenerator.base64Characters().genByteBuffer(3))); + } + + @Test + public void multipleInvocationsOfCloseDoNotError() throws Exception { + 
ParallelCompositeUploadWritableByteChannel pcu = defaultPcu(3); + pcu.close(); + pcu.close(); + } + + @Test + public void openUponConstruction() throws Exception { + ParallelCompositeUploadWritableByteChannel pcu = defaultPcu(3); + assertThat(pcu.isOpen()).isTrue(); + pcu.close(); + } + + @Test + public void callingFlushWhileBufferIsEmptyIsANoOp() throws Exception { + ParallelCompositeUploadWritableByteChannel pcu = defaultPcu(3); + pcu.write(DataGenerator.base64Characters().genByteBuffer(bufferCapacity)); + pcu.flush(); + pcu.close(); + + String name = info.getName(); + // parts 1 + BlobId p1 = id(partNamingStrategy.fmtName(name, PartRange.of(1)), 1L); + // ultimate object + BlobId expectedId = id(name, 2L); + + BlobInfo result = ApiFutureUtils.await(finalObject); + + assertAll( + () -> assertThat(result.getBlobId()).isEqualTo(expectedId), + () -> assertThat(storageInternal.addedObjects.keySet()).containsExactly(p1, expectedId), + () -> assertThat(storageInternal.composeRequests).hasSize(1), + () -> assertThat(storageInternal.deleteRequests).containsExactly(p1)); + } + + @Test + public void creatingAnEmptyObjectWhichFailsIsSetAsResultFailureAndThrowFromClose() + throws Exception { + //noinspection resource + ParallelCompositeUploadWritableByteChannel pcu = + new ParallelCompositeUploadWritableByteChannel( + bufferHandlePool, + MoreExecutors.directExecutor(), + partNamingStrategy, + PartCleanupStrategy.always(), + 3, + partMetadataFieldDecorator, + finalObject, + new FakeStorageInternal() { + @Override + public BlobInfo internalDirectUpload( + BlobInfo info, Opts opts, ByteBuffer buf) { + throw StorageException.coalesce( + ApiExceptionFactory.createException( + null, GrpcStatusCode.of(Code.PERMISSION_DENIED), false)); + } + }, + info, + opts); + StorageException se1 = assertThrows(StorageException.class, pcu::close); + StorageException se2 = + assertThrows(StorageException.class, () -> ApiFutureUtils.await(finalObject)); + + assertAll( + () -> 
assertThat(se1).hasMessageThat().isEqualTo("Error: PERMISSION_DENIED"), + () -> assertThat(se2).hasMessageThat().isEqualTo("Error: PERMISSION_DENIED"), + () -> assertThat(se1.getCode()).isEqualTo(403), + () -> assertThat(se2.getCode()).isEqualTo(403)); + } + + @Test + public void badServerCrc32cResultsInException() throws Exception { + //noinspection resource + ParallelCompositeUploadWritableByteChannel pcu = + new ParallelCompositeUploadWritableByteChannel( + bufferHandlePool, + MoreExecutors.directExecutor(), + partNamingStrategy, + PartCleanupStrategy.always(), + 3, + partMetadataFieldDecorator, + finalObject, + new FakeStorageInternal() { + @Override + public BlobInfo compose(ComposeRequest composeRequest) { + BlobInfo response = super.compose(composeRequest); + // return a bad crc32c + return response.toBuilder().setCrc32c(Utils.crc32cCodec.encode(0)).build(); + } + }, + info, + opts); + pcu.write(DataGenerator.base64Characters().genByteBuffer(3)); + + AsynchronousCloseException se1 = assertThrows(AsynchronousCloseException.class, pcu::close); + StorageException se2 = + assertThrows(StorageException.class, () -> ApiFutureUtils.await(finalObject)); + + assertAll( + () -> assertThat(se1).hasCauseThat().isInstanceOf(StorageException.class), + () -> assertThat(se1).hasCauseThat().hasMessageThat().contains("Checksum mismatch"), + () -> assertThat(se2).hasMessageThat().contains("Checksum mismatch"), + () -> assertThat(se2.getCode()).isEqualTo(400)); + } + + @Test + public void bufferHandleRelease_returnsBufferOnFailureAndSuccess() throws Exception { + AtomicReference failure = new AtomicReference<>(null); + AtomicReference success = new AtomicReference<>(null); + + ApiFutureCallback delegate = + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + failure.set(t); + } + + @Override + public void onSuccess(String result) { + success.set(result); + } + }; + + PooledBuffer p1 = PooledBuffer.of(BufferHandle.allocate(3)); + BufferHandlePool pool 
= + new BufferHandlePool() { + @Override + public PooledBuffer getBuffer() { + return null; + } + + @Override + public void returnBuffer(PooledBuffer handle) { + assertThat(handle).isSameInstanceAs(p1); + } + }; + + BufferHandleReleaser releaser = new BufferHandleReleaser<>(pool, p1, delegate); + + releaser.onSuccess("success"); + releaser.onFailure(new Exception("induced failure")); + + assertAll( + () -> assertThat(success.get()).isEqualTo("success"), + () -> assertThat(failure.get()).hasMessageThat().isEqualTo("induced failure")); + } + + @Test + public void shortCircuitExceptionResultsInFastFailure() throws Exception { + ThreadFactory threadFactory = + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("test-shortCircuit-%d").build(); + ExecutorService threadPool = Executors.newFixedThreadPool(1, threadFactory); + + try { + + AtomicBoolean induceFailure = new AtomicBoolean(true); + CountDownLatch blockForWrite1 = new CountDownLatch(1); + CountDownLatch blockForWrite1Complete = new CountDownLatch(1); + FakeStorageInternal storageInternal = + new FakeStorageInternal() { + @Override + public BlobInfo internalDirectUpload( + BlobInfo info, Opts opts, ByteBuffer buf) { + if (induceFailure.getAndSet(false)) { + Uninterruptibles.awaitUninterruptibly(blockForWrite1); + try { + throw StorageException.coalesce( + ApiExceptionFactory.createException( + "induced failure: " + info.getBlobId().toGsUtilUri(), + null, + GrpcStatusCode.of(Code.DATA_LOSS), + false)); + } finally { + blockForWrite1Complete.countDown(); + } + } else { + return super.internalDirectUpload(info, opts, buf); + } + } + }; + ParallelCompositeUploadWritableByteChannel pcu = + new ParallelCompositeUploadWritableByteChannel( + bufferHandlePool, + threadPool, + partNamingStrategy, + PartCleanupStrategy.never(), + 32, + partMetadataFieldDecorator, + finalObject, + storageInternal, + info, + opts); + + byte[] bytes = DataGenerator.base64Characters().genBytes(bufferCapacity * 2 + 3); + // write the first 
parts worth of bytes + pcu.write(ByteBuffer.wrap(bytes, 0, bufferCapacity)); + // signal that the blocking on the internalDirectUpload can proceed + blockForWrite1.countDown(); + // wait until the internalDirectUpload has failed + blockForWrite1Complete.await(); + // attempt to write some more bytes, where we should get a failure + StorageException storageException = + assertThrows( + StorageException.class, + () -> { + // due to the multiple threads doing uploads, it can sometimes take more than one + // invocation in order for the short circuit to trigger + for (int i = 0; i < 300; i++) { + pcu.write(ByteBuffer.wrap(bytes, bufferCapacity, bufferCapacity)); + } + }); + // signal that the blocking on the internalDirectUpload can proceed + // blockForWrite2.countDown(); + // wait until the internalDirectUpload has failed + // blockForWrite2Complete.await(); + // calling close shouldn't cause another exception + pcu.close(); + + String name = info.getName(); + BlobId p1 = id(partNamingStrategy.fmtName(name, PartRange.of(1)), -1L); + + CancellationException cancellationException = + assertThrows( + CancellationException.class, + () -> ApiExceptions.callAndTranslateApiException(finalObject)); + + assertAll( + () -> { + Optional found = + storageInternal.addedObjects.keySet().stream() + .map(BlobId::toGsUtilUri) + .filter(p1.toGsUtilUri()::equals) + .findFirst(); + assertThat(found.isPresent()).isFalse(); + }, + () -> { + Optional found = + storageInternal.addedObjects.keySet().stream() + .map(BlobId::getName) + .filter(name::equals) + .findFirst(); + assertThat(found.isPresent()).isFalse(); + }, + () -> + assertThat(storageException) + .hasMessageThat() + .contains("induced failure: " + p1.toGsUtilUri()), + () -> + assertThat(cancellationException) + .hasCauseThat() + .hasMessageThat() + .contains("induced failure: " + p1.toGsUtilUri())); + } finally { + threadPool.shutdownNow(); + } + } + + @Test + public void errorContextIsPopulated() throws Exception { + 
//noinspection resource + ParallelCompositeUploadWritableByteChannel pcu = + new ParallelCompositeUploadWritableByteChannel( + bufferHandlePool, + MoreExecutors.directExecutor(), + partNamingStrategy, + PartCleanupStrategy.never(), + 3, + partMetadataFieldDecorator, + finalObject, + new FakeStorageInternal() { + @Override + public BlobInfo compose(ComposeRequest composeRequest) { + BlobInfo response = super.compose(composeRequest); + // return a bad crc32c + return response.toBuilder().setCrc32c(Utils.crc32cCodec.encode(0)).build(); + } + }, + info, + opts); + pcu.write(DataGenerator.base64Characters().genByteBuffer(3)); + + AsynchronousCloseException se1 = assertThrows(AsynchronousCloseException.class, pcu::close); + StorageException se2 = + assertThrows(StorageException.class, () -> ApiFutureUtils.await(finalObject)); + + String name = info.getName(); + // parts 1-4 + BlobId p1 = id(partNamingStrategy.fmtName(name, PartRange.of(1)), 1L); + // ultimate object + BlobId expectedId = id(name, 2L); + + assertAll( + () -> assertThat(se1).hasCauseThat().isInstanceOf(StorageException.class), + () -> assertThat(se1).hasCauseThat().hasMessageThat().contains("Checksum mismatch"), + () -> assertThat(se2).hasMessageThat().contains("Checksum mismatch"), + () -> { + assertThat(se2).hasCauseThat().isInstanceOf(ParallelCompositeUploadException.class); + ParallelCompositeUploadException pcue = (ParallelCompositeUploadException) se2.getCause(); + // since we fail client side with a checksum validation, we expect the object to have been + // created + assertThat(pcue.getCreatedObjects().get()).containsExactly(p1, expectedId); + }, + () -> assertThat(se2.getCode()).isEqualTo(400)); + } + + @Test + public void partFailedPreconditionOnRetryIsHandledGracefully() throws Exception { + String name = info.getName(); + // parts 1-4 + BlobId p1 = id(partNamingStrategy.fmtName(name, PartRange.of(1)), 1L); + BlobId p2 = id(partNamingStrategy.fmtName(name, PartRange.of(2)), 2L); + BlobId p3 = 
id(partNamingStrategy.fmtName(name, PartRange.of(3)), 3L); + // ultimate object + BlobId expectedId = id(name, 4L); + + FakeStorageInternal storageInternal = + new FakeStorageInternal() { + @Override + public BlobInfo internalDirectUpload( + BlobInfo info, Opts opts, ByteBuffer buf) { + BlobInfo blobInfo = super.internalDirectUpload(info, opts, buf); + if (info.getName().equals(p1.getName())) { + throw StorageException.coalesce( + ApiExceptionFactory.createException( + null, GrpcStatusCode.of(Code.FAILED_PRECONDITION), false)); + } else { + return blobInfo; + } + } + + @Override + public BlobInfo internalObjectGet(BlobId blobId, Opts opts) { + Optional found = this.objectGet(blobId); + if (found.isPresent()) { + BlobId foundId = found.get(); + Data d = this.addedObjects.get(foundId); + return d.getInfo(); + } + throw StorageException.coalesce( + ApiExceptionFactory.createException( + null, GrpcStatusCode.of(Code.NOT_FOUND), false)); + } + }; + ParallelCompositeUploadWritableByteChannel pcu = + new ParallelCompositeUploadWritableByteChannel( + bufferHandlePool, + MoreExecutors.directExecutor(), + partNamingStrategy, + PartCleanupStrategy.always(), + 10, + partMetadataFieldDecorator, + finalObject, + storageInternal, + info, + opts); + + byte[] bytes = DataGenerator.base64Characters().genBytes(bufferCapacity * 3 - 1); + pcu.write(ByteBuffer.wrap(bytes)); + + pcu.close(); + + BlobInfo result = ApiFutureUtils.await(finalObject); + + assertAll( + () -> assertThat(result.getBlobId()).isEqualTo(expectedId), + () -> + assertThat(storageInternal.addedObjects.keySet()) + .containsExactly(p1, p2, p3, expectedId), + () -> assertThat(storageInternal.deleteRequests).containsExactly(p1, p2, p3)); + } + + @Test + public void partMetadataFieldDecoratorUsesCustomTime() throws IOException { + TestClock clock = TestClock.tickBy(Instant.EPOCH, Duration.ofSeconds(1)); + OffsetDateTime rangeBegin = + OffsetDateTime.from(Instant.EPOCH.plus(Duration.ofSeconds(29)).atZone(ZoneId.of("Z"))); 
+ OffsetDateTime rangeEnd = + OffsetDateTime.from(Instant.EPOCH.plus(Duration.ofMinutes(2)).atZone(ZoneId.of("Z"))); + + FakeStorageInternal storageInternal = + new FakeStorageInternal() { + @Override + public BlobInfo internalDirectUpload( + BlobInfo info, Opts opts, ByteBuffer buf) { + if (info.getBlobId().getName().endsWith(".part")) { + // Kinda hacky but since we are creating multiple parts we will use a range + // to ensure the customTimes are being calculated appropriately + assertThat(info.getCustomTimeOffsetDateTime().isAfter(rangeBegin)).isTrue(); + assertThat(info.getCustomTimeOffsetDateTime().isBefore(rangeEnd)).isTrue(); + } else { + assertThat(info.getCustomTimeOffsetDateTime()).isNull(); + } + return super.internalDirectUpload(info, opts, buf); + } + }; + ParallelCompositeUploadWritableByteChannel pcu = + new ParallelCompositeUploadWritableByteChannel( + bufferHandlePool, + MoreExecutors.directExecutor(), + partNamingStrategy, + PartCleanupStrategy.always(), + 10, + PartMetadataFieldDecorator.setCustomTimeInFuture(Duration.ofSeconds(30)) + .newInstance(clock), + finalObject, + storageInternal, + info, + opts); + byte[] bytes = DataGenerator.base64Characters().genBytes(bufferCapacity * 3 - 1); + pcu.write(ByteBuffer.wrap(bytes)); + + pcu.close(); + } + + @Test + public void partDoesNotSpecifyAnyChecksum() throws IOException { + FakeStorageInternal storageInternal = + new FakeStorageInternal() { + @Override + public BlobInfo compose(ComposeRequest composeRequest) { + BlobInfo target = composeRequest.getTarget(); + if (target.getBlobId().getName().startsWith(partNamingStrategy.prefix)) { + assertThat(target.getCrc32c()).isNull(); + assertThat(target.getMd5()).isNull(); + } + return super.compose(composeRequest); + } + }; + ChecksummedTestContent content = ChecksummedTestContent.gen(bufferCapacity * 3 - 1); + ParallelCompositeUploadWritableByteChannel pcu = + new ParallelCompositeUploadWritableByteChannel( + bufferHandlePool, + 
MoreExecutors.directExecutor(), + partNamingStrategy, + PartCleanupStrategy.always(), + 2, + b -> b, + finalObject, + storageInternal, + info.toBuilder() + .setCrc32c(content.getCrc32cBase64()) + .setMd5(content.getMd5Base64()) + .build(), + opts); + pcu.write(content.asByteBuffer()); + + pcu.close(); + } + + @NonNull + private ParallelCompositeUploadWritableByteChannel defaultPcu(int maxElementsPerCompact) { + return new ParallelCompositeUploadWritableByteChannel( + bufferHandlePool, + MoreExecutors.directExecutor(), + partNamingStrategy, + PartCleanupStrategy.always(), + maxElementsPerCompact, + partMetadataFieldDecorator, + finalObject, + storageInternal, + info, + opts); + } + + private BlobId id(String name, long generation) { + return BlobId.of(info.getBucket(), name, generation); + } + + private static class FakeStorageInternal implements StorageInternal { + protected final AtomicInteger generations; + protected final Map addedObjects; + protected final List composeRequests; + protected final List deleteRequests; + + FakeStorageInternal() { + generations = new AtomicInteger(1); + addedObjects = Collections.synchronizedMap(new HashMap<>()); + composeRequests = Collections.synchronizedList(new ArrayList<>()); + deleteRequests = Collections.synchronizedList(new ArrayList<>()); + } + + @Override + public BlobInfo internalDirectUpload( + BlobInfo info, Opts opts, ByteBuffer buf) { + BlobId id = info.getBlobId(); + + BlobInfo.Builder b = info.toBuilder(); + WriteObjectRequest apply = + opts.writeObjectRequest().apply(WriteObjectRequest.newBuilder()).build(); + if (apply.hasWriteObjectSpec() && apply.getWriteObjectSpec().hasIfGenerationMatch()) { + long ifGenerationMatch = apply.getWriteObjectSpec().getIfGenerationMatch(); + Optional existing = objectGet(id); + if (existing.isPresent()) { + BlobId existingId = existing.get(); + if (ifGenerationMatch != existingId.getGeneration()) { + throw StorageException.coalesce( + ApiExceptionFactory.createException( + null, 
GrpcStatusCode.of(Code.FAILED_PRECONDITION), false)); + } + } + } + BlobId newId = id.withGeneration(generations.getAndIncrement()); + b.setBlobId(newId); + BlobInfo gen1 = b.build(); + addedObjects.put(newId, new Data(gen1, HASHER.hash(buf))); + return gen1; + } + + @NonNull + protected Optional objectGet(BlobId id) { + return addedObjects.keySet().stream() + .filter( + key -> key.getBucket().equals(id.getBucket()) && key.getName().equals(id.getName())) + .findFirst(); + } + + @Override + public BlobInfo compose(ComposeRequest composeRequest) { + composeRequests.add(composeRequest); + BlobInfo info = composeRequest.getTarget(); + String bucket = info.getBucket(); + BlobInfo.Builder b = info.toBuilder(); + BlobId newId = info.getBlobId().withGeneration(generations.getAndIncrement()); + b.setBlobId(newId); + ImmutableList crc32cs = + composeRequest.getSourceBlobs().stream() + .map(so -> BlobId.of(bucket, so.getName(), so.getGeneration())) + .map(addedObjects::get) + .map(Data::getCrc32c) + .collect(ImmutableList.toImmutableList()); + + Crc32cLengthKnown reduce = + crc32cs.stream().reduce(Crc32cValue.zero(), Crc32cLengthKnown::concat); + Preconditions.checkState(reduce != null, "unable to compute crc32c for compose request"); + b.setCrc32c(Utils.crc32cCodec.encode(reduce.getValue())); + BlobInfo gen1 = b.build(); + addedObjects.put(newId, new Data(gen1, reduce)); + return gen1; + } + + @Override + public Void internalObjectDelete(BlobId id, Opts opts) { + deleteRequests.add(id); + boolean containsKey = addedObjects.containsKey(id); + if (!containsKey) { + throw ApiExceptionFactory.createException(null, GrpcStatusCode.of(Code.NOT_FOUND), false); + } + return null; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("generations", generations) + .add("addedObjects", addedObjects) + .add("composeRequests", composeRequests) + .add("deleteRequests", deleteRequests) + .toString(); + } + + protected static final class Data { + 
private final BlobInfo info; + private final Crc32cLengthKnown crc32c; + + private Data(BlobInfo info, Crc32cLengthKnown crc32c) { + this.info = info; + this.crc32c = crc32c; + } + + public BlobInfo getInfo() { + return info; + } + + public Crc32cLengthKnown getCrc32c() { + return crc32c; + } + } + } + + private static class SimplisticPartNamingStrategy extends PartNamingStrategy { + + private final String prefix; + + private SimplisticPartNamingStrategy(String prefix) { + super(null); + this.prefix = prefix; + } + + @Override + String fmtName(String ultimateObjectName, PartRange partRange) { + return String.format( + Locale.US, "%s/%s/%s.part", prefix, ultimateObjectName, partRange.encode()); + } + + @Override + protected String fmtFields(String randomKey, String nameDigest, String partRange) { + return null; + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/PolicyHelperTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/PolicyHelperTest.java new file mode 100644 index 000000000000..5532364d74df --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/PolicyHelperTest.java @@ -0,0 +1,74 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static org.junit.Assert.assertEquals; + +import com.google.api.services.storage.model.Policy.Bindings; +import com.google.cloud.Identity; +import com.google.cloud.Policy; +import com.google.common.collect.ImmutableList; +import java.util.List; +import org.junit.Test; + +public class PolicyHelperTest { + + private static final String ETAG = "CAE="; + + @Test + public void testEquivalence() { + Policy libPolicy = + Policy.newBuilder() + .addIdentity(StorageRoles.objectViewer(), Identity.allUsers()) + .addIdentity( + StorageRoles.objectAdmin(), + Identity.user("test1@gmail.com"), + Identity.user("test2@gmail.com")) + .setEtag(ETAG) + .setVersion(1) + .build(); + com.google.api.services.storage.model.Policy apiPolicy = + new com.google.api.services.storage.model.Policy() + .setBindings( + ImmutableList.of( + new Bindings() + .setMembers(ImmutableList.of("allUsers")) + .setRole("roles/storage.objectViewer"), + new Bindings() + .setMembers( + ImmutableList.of("user:test1@gmail.com", "user:test2@gmail.com")) + .setRole("roles/storage.objectAdmin"))) + .setEtag(ETAG) + .setVersion(1); + + Policy actualLibPolicy = Conversions.json().policyCodec().decode(apiPolicy); + + assertEquals(libPolicy, actualLibPolicy); + } + + @Test + public void testApiPolicyWithoutBinding() { + List bindings = null; + com.google.api.services.storage.model.Policy apiPolicy = + new com.google.api.services.storage.model.Policy() + .setBindings(bindings) + .setEtag(ETAG) + .setVersion(1); + Policy policy = Conversions.json().policyCodec().decode(apiPolicy); + assertEquals(policy.getBindings().size(), 0); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/PostPolicyV4Test.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/PostPolicyV4Test.java new file mode 100644 index 000000000000..c7587fda7e5c --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/PostPolicyV4Test.java @@ -0,0 +1,565 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.text.SimpleDateFormat; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Callable; +import org.junit.Test; + +public class PostPolicyV4Test { + private static SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); + + private void assertMapsEquals(Map expected, Map returned) { + assertEquals("map sizes", expected.size(), returned.size()); + for (String key : expected.keySet()) { + assertEquals("value of $" + key, expected.get(key), returned.get(key)); + } + } + + private static final String[] VALID_FIELDS = { + "acl", + "bucket", + "cache-control", + "content-disposition", + "content-encoding", + "content-type", + "expires", + "file", + "key", + "policy", + "success_action_redirect", + "success_action_status", + "x-goog-algorithm", + "x-goog-credential", + "x-goog-date", + "x-goog-signature", + }; + + private static final String CUSTOM_PREFIX = "x-goog-meta-"; + + private static Map initAllFields() { + Map fields = new HashMap<>(); + for (String 
key : VALID_FIELDS) { + fields.put(key, "value of " + key); + } + fields.put(CUSTOM_PREFIX + "custom", "value of custom field"); + return Collections.unmodifiableMap(fields); + } + + private static final Map ALL_FIELDS = initAllFields(); + + @Test + public void testPostPolicyV4_of() { + String url = "http://example.com"; + PostPolicyV4 policy = PostPolicyV4.of(url, ALL_FIELDS); + assertEquals(url, policy.getUrl()); + assertMapsEquals(ALL_FIELDS, policy.getFields()); + } + + @Test + public void testPostPolicyV4_ofMalformedURL() { + try { + PostPolicyV4.of("example.com", new HashMap()); + fail(); + } catch (IllegalArgumentException e) { + assertEquals("example.com is not an absolute URL", e.getMessage()); + } + + try { + PostPolicyV4.of("Scio nescio", new HashMap()); + fail(); + } catch (IllegalArgumentException e) { + assertEquals( + "java.net.URISyntaxException: Illegal character in path at index 4: Scio nescio", + e.getMessage()); + } + } + + @Test + public void testPostFieldsV4_of() { + PostPolicyV4.PostFieldsV4 fields = PostPolicyV4.PostFieldsV4.of(ALL_FIELDS); + assertMapsEquals(ALL_FIELDS, fields.getFieldsMap()); + } + + @Test + public void testPostPolicyV4_builder() { + PostPolicyV4.PostFieldsV4.Builder builder = PostPolicyV4.PostFieldsV4.newBuilder(); + builder.setAcl("acl"); + builder.setCacheControl("cache-control"); + builder.setContentDisposition("content-disposition"); + builder.setContentType("content-type"); + builder.setExpires("expires"); + builder.setSuccessActionRedirect("success_action_redirect"); + Map map = builder.build().getFieldsMap(); + assertEquals("map size", 6, map.size()); + for (String key : map.keySet()) { + assertEquals("value of $" + key, key, map.get(key)); + } + + Map expectedUpdated = new HashMap<>(map); + builder.setCustomMetadataField("xxx", "XXX"); + builder.setCustomMetadataField(CUSTOM_PREFIX + "yyy", "YYY"); + builder.setAcl(null); + builder.setContentType("new-content-type"); + builder.setSuccessActionStatus(42); + 
expectedUpdated.put(CUSTOM_PREFIX + "xxx", "XXX"); + expectedUpdated.put(CUSTOM_PREFIX + "yyy", "YYY"); + expectedUpdated.put("acl", null); + expectedUpdated.put("content-type", "new-content-type"); + expectedUpdated.put("success_action_status", "42"); + Map updated = builder.build().getFieldsMap(); + assertMapsEquals(expectedUpdated, updated); + } + + @Test + public void testPostPolicyV4_setContentLength() { + PostPolicyV4.PostFieldsV4.Builder builder = PostPolicyV4.PostFieldsV4.newBuilder(); + builder.setContentLength(12345); + assertTrue(builder.build().getFieldsMap().isEmpty()); + } + + @Test + public void testPostConditionsV4_builder() { + PostPolicyV4.PostConditionsV4.Builder builder = PostPolicyV4.PostConditionsV4.newBuilder(); + assertTrue(builder.build().getConditions().isEmpty()); + + builder.addAclCondition(PostPolicyV4.ConditionV4Type.STARTS_WITH, "public"); + builder.addBucketCondition(PostPolicyV4.ConditionV4Type.MATCHES, "travel-maps"); + builder.addContentLengthRangeCondition(0, 100000); + + PostPolicyV4.PostConditionsV4 postConditionsV4 = builder.build(); + Set conditions = postConditionsV4.getConditions(); + assertEquals(3, conditions.size()); + + try { + conditions.clear(); + fail(); + } catch (UnsupportedOperationException e) { + // expected + } + + PostPolicyV4.PostConditionsV4 postConditionsV4Extended = + postConditionsV4.toBuilder() + .addCustomCondition(PostPolicyV4.ConditionV4Type.STARTS_WITH, "key", "") + .build(); + assertEquals(4, postConditionsV4Extended.getConditions().size()); + } + + interface ConditionTest { + /** + * Calls one of addCondition method on the given builder and returns expected ConditionV4 + * object. 
+ */ + PostPolicyV4.ConditionV4 addCondition(PostPolicyV4.PostConditionsV4.Builder builder); + } + + @Test + public void testPostConditionsV4_addCondition() { + // shortcuts + final PostPolicyV4.ConditionV4Type eq = PostPolicyV4.ConditionV4Type.MATCHES; + final PostPolicyV4.ConditionV4Type startsWith = PostPolicyV4.ConditionV4Type.STARTS_WITH; + final PostPolicyV4.ConditionV4Type range = PostPolicyV4.ConditionV4Type.CONTENT_LENGTH_RANGE; + + ConditionTest[] cases = { + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addContentLengthRangeCondition(123, 456); + return new PostPolicyV4.ConditionV4(range, "123", "456"); + } + + @Override + public String toString() { + return "addContentLengthRangeCondition()"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + long date = 2000000000000L; + builder.addExpiresCondition(date); + return new PostPolicyV4.ConditionV4(eq, "expires", dateFormat.format(date)); + } + + @Override + public String toString() { + return "addExpiresCondition(long)"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addExpiresCondition("2030-Dec-31"); + return new PostPolicyV4.ConditionV4(eq, "expires", "2030-Dec-31"); + } + + @Override + public String toString() { + return "addExpiresCondition(String)"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addExpiresCondition(range, 0); + return new PostPolicyV4.ConditionV4(eq, "expires", dateFormat.format(0)); + } + + @Override + public String toString() { + return "@deprecated addExpiresCondition(type,long)"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + 
PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addExpiresCondition(startsWith, "2030-Dec-31"); + return new PostPolicyV4.ConditionV4(eq, "expires", "2030-Dec-31"); + } + + @Override + public String toString() { + return "@deprecated addExpiresCondition(type,String)"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addSuccessActionStatusCondition(202); + return new PostPolicyV4.ConditionV4(eq, "success_action_status", "202"); + } + + @Override + public String toString() { + return "addSuccessActionStatusCondition(int)"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addSuccessActionStatusCondition(startsWith, 202); + return new PostPolicyV4.ConditionV4(eq, "success_action_status", "202"); + } + + @Override + public String toString() { + return "@deprecated addSuccessActionStatusCondition(type,int)"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addAclCondition(startsWith, "read"); + return new PostPolicyV4.ConditionV4(startsWith, "acl", "read"); + } + + @Override + public String toString() { + return "addAclCondition()"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addBucketCondition(eq, "my-bucket"); + return new PostPolicyV4.ConditionV4(eq, "bucket", "my-bucket"); + } + + @Override + public String toString() { + return "addBucketCondition()"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addCacheControlCondition(eq, "false"); + return new PostPolicyV4.ConditionV4(eq, "cache-control", "false"); + } + + @Override + public 
String toString() { + return "addCacheControlCondition()"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addContentDispositionCondition(startsWith, "gzip"); + return new PostPolicyV4.ConditionV4(startsWith, "content-disposition", "gzip"); + } + + @Override + public String toString() { + return "addContentDispositionCondition()"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addContentEncodingCondition(eq, "koi8"); + return new PostPolicyV4.ConditionV4(eq, "content-encoding", "koi8"); + } + + @Override + public String toString() { + return "addContentEncodingCondition()"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addContentTypeCondition(startsWith, "application/"); + return new PostPolicyV4.ConditionV4(startsWith, "content-type", "application/"); + } + + @Override + public String toString() { + return "addContentTypeCondition()"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addKeyCondition(startsWith, ""); + return new PostPolicyV4.ConditionV4(startsWith, "key", ""); + } + + @Override + public String toString() { + return "addKeyCondition()"; + } + }, + new ConditionTest() { + @Override + public PostPolicyV4.ConditionV4 addCondition( + PostPolicyV4.PostConditionsV4.Builder builder) { + builder.addSuccessActionRedirectUrlCondition(eq, "fail"); + return new PostPolicyV4.ConditionV4(eq, "success_action_redirect", "fail"); + } + + @Override + public String toString() { + return "addSuccessActionRedirectUrlCondition()"; + } + }, + }; + + for (ConditionTest testCase : cases) { + PostPolicyV4.PostConditionsV4.Builder builder = 
PostPolicyV4.PostConditionsV4.newBuilder(); + PostPolicyV4.ConditionV4 expected = testCase.addCondition(builder); + Set conditions = builder.build().getConditions(); + assertEquals("size", 1, conditions.size()); + PostPolicyV4.ConditionV4 actual = conditions.toArray(new PostPolicyV4.ConditionV4[1])[0]; + assertEquals(testCase.toString(), expected, actual); + } + } + + @Test + public void testPostConditionsV4_addConditionFail() { + final PostPolicyV4.PostConditionsV4.Builder builder = + PostPolicyV4.PostConditionsV4.newBuilder(); + final PostPolicyV4.ConditionV4Type range = PostPolicyV4.ConditionV4Type.CONTENT_LENGTH_RANGE; + + Callable[] cases = { + new Callable() { + @Override + public Void call() { + builder.addAclCondition(range, ""); + return null; + } + + @Override + public String toString() { + return "acl"; + } + }, + new Callable() { + @Override + public Void call() { + builder.addBucketCondition(range, ""); + return null; + } + + @Override + public String toString() { + return "bucket"; + } + }, + new Callable() { + @Override + public Void call() { + builder.addCacheControlCondition(range, ""); + return null; + } + + @Override + public String toString() { + return "cache-control"; + } + }, + new Callable() { + @Override + public Void call() { + builder.addContentDispositionCondition(range, ""); + return null; + } + + @Override + public String toString() { + return "content-disposition"; + } + }, + new Callable() { + @Override + public Void call() { + builder.addContentEncodingCondition(range, ""); + return null; + } + + @Override + public String toString() { + return "content-encoding"; + } + }, + new Callable() { + @Override + public Void call() { + builder.addContentTypeCondition(range, ""); + return null; + } + + @Override + public String toString() { + return "content-type"; + } + }, + new Callable() { + @Override + public Void call() { + builder.addKeyCondition(range, ""); + return null; + } + + @Override + public String toString() { + return "key"; + 
} + }, + new Callable() { + @Override + public Void call() { + builder.addSuccessActionRedirectUrlCondition(range, ""); + return null; + } + + @Override + public String toString() { + return "success_action_redirect"; + } + }, + }; + + for (Callable testCase : cases) { + try { + testCase.call(); + fail(); + } catch (Exception e) { + String expected = + "java.lang.IllegalArgumentException: Field " + + testCase + + " can't use content-length-range"; + assertEquals(expected, e.toString()); + } + } + assertTrue(builder.build().getConditions().isEmpty()); + } + + @Test + public void testPostConditionsV4_toString() { + PostPolicyV4.PostConditionsV4.Builder builder = PostPolicyV4.PostConditionsV4.newBuilder(); + builder.addKeyCondition(PostPolicyV4.ConditionV4Type.MATCHES, "test-object"); + builder.addAclCondition(PostPolicyV4.ConditionV4Type.STARTS_WITH, "public"); + builder.addContentLengthRangeCondition(246, 266); + + Set toStringSet = new HashSet<>(); + for (PostPolicyV4.ConditionV4 conditionV4 : builder.build().getConditions()) { + toStringSet.add(conditionV4.toString()); + } + assertEquals(3, toStringSet.size()); + + String[] expectedStrings = { + "[\"eq\", \"$key\", \"test-object\"]", + "[\"starts-with\", \"$acl\", \"public\"]", + "[\"content-length-range\", 246, 266]" + }; + + for (String expected : expectedStrings) { + assertTrue(expected + "/" + toStringSet, toStringSet.contains(expected)); + } + } + + @Test + public void testPostPolicyV4Document_of_toJson() { + PostPolicyV4.PostConditionsV4 emptyConditions = + PostPolicyV4.PostConditionsV4.newBuilder().build(); + PostPolicyV4.PostPolicyV4Document emptyDocument = + PostPolicyV4.PostPolicyV4Document.of("", emptyConditions); + String emptyJson = emptyDocument.toJson(); + assertEquals(emptyJson, "{\"conditions\":[],\"expiration\":\"\"}"); + + PostPolicyV4.PostConditionsV4 postConditionsV4 = + PostPolicyV4.PostConditionsV4.newBuilder() + .addBucketCondition(PostPolicyV4.ConditionV4Type.MATCHES, "my-bucket") + 
.addKeyCondition(PostPolicyV4.ConditionV4Type.STARTS_WITH, "") + .addContentLengthRangeCondition(1, 1000) + .build(); + + String expiration = dateFormat.format(System.currentTimeMillis()); + PostPolicyV4.PostPolicyV4Document document = + PostPolicyV4.PostPolicyV4Document.of(expiration, postConditionsV4); + String json = document.toJson(); + assertEquals( + json, + "{\"conditions\":[{\"bucket\":\"my-bucket\"},[\"starts-with\",\"$key\",\"\"],[\"content-length-range\",1,1000]],\"expiration\":\"" + + expiration + + "\"}"); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RangeSpecFunctionTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RangeSpecFunctionTest.java new file mode 100644 index 000000000000..32c9b851371e --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RangeSpecFunctionTest.java @@ -0,0 +1,121 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import org.junit.Test; + +public final class RangeSpecFunctionTest { + private static final long KiB = 1024; + private static final long MiB = 1024 * KiB; + + @SuppressWarnings("OptionalGetWithoutIsPresent") + @Test + public void linearExponential_withMaxLength() { + RangeSpecFunction e = + RangeSpecFunction.linearExponential() + .withInitialMaxLength(KiB) + .withMaxLengthScalar(4.0) + .andThen(RangeSpecFunction.maxLength(64 * MiB)); + + RangeSpec apply = null; + + apply = e.apply(0, apply); + assertThat(apply).isEqualTo(RangeSpec.of(0, KiB)); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(4 * KiB); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(16 * KiB); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(64 * KiB); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(256 * KiB); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(MiB); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(4 * MiB); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(16 * MiB); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(64 * MiB); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(64 * MiB); + } + + @SuppressWarnings("OptionalGetWithoutIsPresent") + @Test + public void 
linearExponential_resetsIfNotSequential_forward() { + RangeSpecFunction e = + RangeSpecFunction.linearExponential().withInitialMaxLength(KiB).withMaxLengthScalar(4.0); + + RangeSpec apply = null; + + apply = e.apply(0, apply); + assertThat(apply).isEqualTo(RangeSpec.of(0, KiB)); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(4 * KiB); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(16 * KiB); + + apply = e.apply(apply.begin() + apply.maxLength().getAsLong() + 1, apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(KiB); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(4 * KiB); + } + + @SuppressWarnings("OptionalGetWithoutIsPresent") + @Test + public void linearExponential_resetsIfNotSequential_backward() { + RangeSpecFunction e = + RangeSpecFunction.linearExponential().withInitialMaxLength(KiB).withMaxLengthScalar(4.0); + + RangeSpec apply = null; + + apply = e.apply(0, apply); + assertThat(apply).isEqualTo(RangeSpec.of(0, KiB)); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(4 * KiB); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(16 * KiB); + + apply = e.apply(apply.begin() + apply.maxLength().getAsLong() - 1, apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(KiB); + apply = e.apply(apply.begin() + apply.maxLength().getAsLong(), apply); + assertThat(apply.maxLength().getAsLong()).isEqualTo(4 * KiB); + } + + @Test + public void linearExponential_resetsIfNotSequential() { + RangeSpecFunction e = + RangeSpecFunction.linearExponential().withInitialMaxLength(1).withMaxLengthScalar(4.0); + + RangeSpec apply = null; + + apply = e.apply(0, apply); + 
assertThat(apply).isEqualTo(RangeSpec.of(0, 1)); + apply = e.apply(1, apply); + assertThat(apply).isEqualTo(RangeSpec.of(1, 4)); + apply = e.apply(5, apply); + assertThat(apply).isEqualTo(RangeSpec.of(5, 16)); + + apply = e.apply(4, apply); + assertThat(apply).isEqualTo(RangeSpec.of(4, 1)); + apply = e.apply(5, apply); + assertThat(apply).isEqualTo(RangeSpec.of(5, 4)); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ReadProjectionConfigsTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ReadProjectionConfigsTest.java new file mode 100644 index 000000000000..63b4c9c8772f --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ReadProjectionConfigsTest.java @@ -0,0 +1,139 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import org.junit.Test; + +public final class ReadProjectionConfigsTest { + + @Test + public void sameInstanceMustBeReturnedIfNoChange_seekable_hasher_true() { + ReadAsSeekableChannel config1 = ReadProjectionConfigs.asSeekableChannel(); + + assertThat(config1.getCrc32cValidationEnabled()).isEqualTo(true); + + ReadAsSeekableChannel config2 = config1.withCrc32cValidationEnabled(true); + assertThat(config2).isSameInstanceAs(config1); + } + + @Test + public void sameInstanceMustBeReturnedIfNoChange_seekable_hasher_false() { + ReadAsSeekableChannel config1 = + ReadProjectionConfigs.asSeekableChannel().withCrc32cValidationEnabled(false); + + assertThat(config1.getCrc32cValidationEnabled()).isEqualTo(false); + + ReadAsSeekableChannel config2 = config1.withCrc32cValidationEnabled(false); + assertThat(config2).isSameInstanceAs(config1); + } + + @Test + public void differentInstanceWhenChanged_seekable_hasher() { + ReadAsSeekableChannel config1 = ReadProjectionConfigs.asSeekableChannel(); + ReadAsSeekableChannel config2 = config1.withCrc32cValidationEnabled(false); + + assertThat(config2).isNotSameInstanceAs(config1); + } + + @Test + public void sameInstanceMustBeReturnedIfNoChange_bytes_hasher_true() { + ReadAsFutureBytes config1 = ReadProjectionConfigs.asFutureBytes(); + + assertThat(config1.getCrc32cValidationEnabled()).isEqualTo(true); + + ReadAsFutureBytes config2 = config1.withCrc32cValidationEnabled(true); + assertThat(config2).isSameInstanceAs(config1); + } + + @Test + public void sameInstanceMustBeReturnedIfNoChange_bytes_hasher_false() { + ReadAsFutureBytes config1 = + ReadProjectionConfigs.asFutureBytes().withCrc32cValidationEnabled(false); + + assertThat(config1.getCrc32cValidationEnabled()).isEqualTo(false); + + ReadAsFutureBytes config2 = config1.withCrc32cValidationEnabled(false); + assertThat(config2).isSameInstanceAs(config1); + } + + @Test + public void 
differentInstanceWhenChanged_bytes_hasher() { + ReadAsFutureBytes config1 = ReadProjectionConfigs.asFutureBytes(); + ReadAsFutureBytes config2 = config1.withCrc32cValidationEnabled(false); + + assertThat(config2).isNotSameInstanceAs(config1); + } + + @Test + public void sameInstanceMustBeReturnedIfNoChange_byteString_hasher_true() { + ReadAsFutureByteString config1 = ReadProjectionConfigs.asFutureByteString(); + + assertThat(config1.getCrc32cValidationEnabled()).isEqualTo(true); + + ReadAsFutureByteString config2 = config1.withCrc32cValidationEnabled(true); + assertThat(config2).isSameInstanceAs(config1); + } + + @Test + public void sameInstanceMustBeReturnedIfNoChange_byteString_hasher_false() { + ReadAsFutureByteString config1 = + ReadProjectionConfigs.asFutureByteString().withCrc32cValidationEnabled(false); + + assertThat(config1.getCrc32cValidationEnabled()).isEqualTo(false); + + ReadAsFutureByteString config2 = config1.withCrc32cValidationEnabled(false); + assertThat(config2).isSameInstanceAs(config1); + } + + @Test + public void differentInstanceWhenChanged_byteString_hasher() { + ReadAsFutureByteString config1 = ReadProjectionConfigs.asFutureByteString(); + ReadAsFutureByteString config2 = config1.withCrc32cValidationEnabled(false); + + assertThat(config2).isNotSameInstanceAs(config1); + } + + @Test + public void sameInstanceMustBeReturnedIfNoChange_channel_hasher_true() { + ReadAsChannel config1 = ReadProjectionConfigs.asChannel(); + + assertThat(config1.getCrc32cValidationEnabled()).isEqualTo(true); + + ReadAsChannel config2 = config1.withCrc32cValidationEnabled(true); + assertThat(config2).isSameInstanceAs(config1); + } + + @Test + public void sameInstanceMustBeReturnedIfNoChange_channel_hasher_false() { + ReadAsChannel config1 = ReadProjectionConfigs.asChannel().withCrc32cValidationEnabled(false); + + assertThat(config1.getCrc32cValidationEnabled()).isEqualTo(false); + + ReadAsChannel config2 = config1.withCrc32cValidationEnabled(false); + 
assertThat(config2).isSameInstanceAs(config1); + } + + @Test + public void differentInstanceWhenChanged_channel_hasher() { + ReadAsChannel config1 = ReadProjectionConfigs.asChannel(); + ReadAsChannel config2 = config1.withCrc32cValidationEnabled(false); + + assertThat(config2).isNotSameInstanceAs(config1); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RecoveryFileManagerTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RecoveryFileManagerTest.java new file mode 100644 index 000000000000..584ab68b5409 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RecoveryFileManagerTest.java @@ -0,0 +1,186 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertThrows; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.io.ByteStreams; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.SeekableByteChannel; +import java.nio.channels.WritableByteChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.Duration; +import java.time.Instant; +import java.util.Objects; +import java.util.Random; +import java.util.stream.Stream; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestName; + +public final class RecoveryFileManagerTest { + private static final int _128KiB = 128 * 1024; + + @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); + + @Rule public final TestName testName = new TestName(); + + private final TestClock clock = TestClock.tickBy(Instant.EPOCH, Duration.ofSeconds(1)); + + @Test + public void happyPath() throws IOException { + Path tempDir = temporaryFolder.newFolder(testName.getMethodName()).toPath(); + RecoveryFileManager rfm = + RecoveryFileManager.of( + ImmutableList.of(tempDir), + path -> ThroughputSink.logged(path.toAbsolutePath().toString(), clock)); + + BlobInfo info = BlobInfo.newBuilder("bucket", "object").build(); + try (RecoveryFile recoveryFile = rfm.newRecoveryFile(info)) { + + byte[] bytes = DataGenerator.base64Characters().genBytes(_128KiB); + try (WritableByteChannel writer = recoveryFile.writer()) { + writer.write(ByteBuffer.wrap(bytes)); + } + + ByteArrayOutputStream baos = new 
ByteArrayOutputStream(); + try (SeekableByteChannel r = recoveryFile.reader(); + WritableByteChannel w = Channels.newChannel(baos)) { + long copy = ByteStreams.copy(r, w); + assertThat(copy).isEqualTo(_128KiB); + } + + assertThat(baos.toByteArray()).isEqualTo(bytes); + } + + try (Stream stream = Files.list(tempDir)) { + boolean b = stream.anyMatch(Objects::nonNull); + assertThat(b).isFalse(); + } + } + + @Test + public void argValidation_nonEmpty() { + IllegalArgumentException iae = + assertThrows( + IllegalArgumentException.class, () -> RecoveryFileManager.of(ImmutableList.of())); + + assertThat(iae).hasMessageThat().isNotEmpty(); + } + + @Test + public void argValidation_fileInsteadOfDirectory() throws IOException { + Path tempDir = temporaryFolder.newFile(testName.getMethodName()).toPath(); + + IllegalArgumentException iae = + assertThrows( + IllegalArgumentException.class, + () -> RecoveryFileManager.of(ImmutableList.of(tempDir))); + + assertThat(iae).hasMessageThat().isNotEmpty(); + } + + @Test + public void argValidation_directoryDoesNotExistIsCreated() throws IOException { + Path tempDir = temporaryFolder.newFolder(testName.getMethodName()).toPath(); + + Path subPathA = tempDir.resolve("sub/path/a"); + + assertThat(Files.exists(subPathA)).isFalse(); + RecoveryFileManager rfm = RecoveryFileManager.of(ImmutableList.of(subPathA)); + assertThat(Files.exists(subPathA)).isTrue(); + } + + @Test + public void fileAssignmentIsRoundRobin() throws IOException { + Path tempDir1 = temporaryFolder.newFolder(testName.getMethodName() + "1").toPath(); + Path tempDir2 = temporaryFolder.newFolder(testName.getMethodName() + "2").toPath(); + Path tempDir3 = temporaryFolder.newFolder(testName.getMethodName() + "3").toPath(); + RecoveryFileManager rfm = + RecoveryFileManager.of(ImmutableList.of(tempDir1, tempDir2, tempDir3)); + + BlobInfo info1 = BlobInfo.newBuilder("bucket", "object1").build(); + BlobInfo info2 = BlobInfo.newBuilder("bucket", "object2").build(); + BlobInfo info3 
= BlobInfo.newBuilder("bucket", "object3").build(); + try (RecoveryFile recoveryFile1 = rfm.newRecoveryFile(info1); + RecoveryFile recoveryFile2 = rfm.newRecoveryFile(info2); + RecoveryFile recoveryFile3 = rfm.newRecoveryFile(info3)) { + + ImmutableSet paths = + Stream.of(recoveryFile1, recoveryFile2, recoveryFile3) + .map(rf -> rf.unsafe().touch()) + .map(Path::toAbsolutePath) + .collect(ImmutableSet.toImmutableSet()); + + ImmutableSet parentDirs = + Stream.of(recoveryFile1, recoveryFile2, recoveryFile3) + .map(RecoveryFile::getPath) + .map(Path::getParent) + .collect(ImmutableSet.toImmutableSet()); + + assertThat(paths).hasSize(3); + assertThat(parentDirs).isEqualTo(ImmutableSet.of(tempDir1, tempDir2, tempDir3)); + } + } + + @Test + public void multipleRecoveryFilesForEqualBlobInfoAreAbleToExistConcurrently() throws Exception { + Path tempDir = temporaryFolder.newFolder(testName.getMethodName()).toPath(); + RecoveryFileManager rfm = + RecoveryFileManager.of( + ImmutableList.of(tempDir), + path -> ThroughputSink.logged(path.toAbsolutePath().toString(), clock)); + + BlobInfo info = BlobInfo.newBuilder("bucket", "object").build(); + try (RecoveryFile rf1 = rfm.newRecoveryFile(info); + RecoveryFile rf2 = rfm.newRecoveryFile(info); ) { + + Random rand = new Random(467123); + byte[] bytes1 = DataGenerator.rand(rand).genBytes(7); + byte[] bytes2 = DataGenerator.rand(rand).genBytes(41); + try (WritableByteChannel writer = rf1.writer()) { + writer.write(ByteBuffer.wrap(bytes1)); + } + try (WritableByteChannel writer = rf2.writer()) { + writer.write(ByteBuffer.wrap(bytes2)); + } + + byte[] actual1 = ByteStreams.toByteArray(Files.newInputStream(rf1.getPath())); + byte[] actual2 = ByteStreams.toByteArray(Files.newInputStream(rf2.getPath())); + + String expected1 = xxd(bytes1); + String expected2 = xxd(bytes2); + + String xxd1 = xxd(actual1); + String xxd2 = xxd(actual2); + assertAll( + () -> assertWithMessage("rf1 should contain bytes1").that(xxd1).isEqualTo(expected1), + () 
-> assertWithMessage("rf2 should contain bytes2").that(xxd2).isEqualTo(expected2)); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ResultRetryAlgorithmCompatibilityTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ResultRetryAlgorithmCompatibilityTest.java new file mode 100644 index 000000000000..40aebc94a876 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ResultRetryAlgorithmCompatibilityTest.java @@ -0,0 +1,40 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.rpc.StatusCode.Code; +import com.google.common.collect.ImmutableSet; +import java.util.Set; +import org.junit.Test; + +public final class ResultRetryAlgorithmCompatibilityTest { + @Test + public void validateDefaultStorageRetryStrategy_idempotent() { + ResultRetryAlgorithm idempotentHandler = + StorageRetryStrategy.getDefaultStorageRetryStrategy().getIdempotentHandler(); + + Set codes = + GrpcToHttpStatusCodeTranslation.resultRetryAlgorithmToCodes(idempotentHandler); + ImmutableSet expected = + ImmutableSet.of( + Code.INTERNAL, Code.UNAVAILABLE, Code.RESOURCE_EXHAUSTED, Code.DEADLINE_EXCEEDED); + assertThat(codes).isEqualTo(expected); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ResumableMediaTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ResumableMediaTest.java new file mode 100644 index 000000000000..6c658c7456ff --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ResumableMediaTest.java @@ -0,0 +1,64 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.spi.v1.StorageRpc; +import java.net.URL; +import java.util.function.Supplier; +import org.junit.Assert; +import org.junit.Test; + +public final class ResumableMediaTest { + private static final String SIGNED_URL_INVALID = + "http://localhost/test-bucket/test1.txt?GoogAccessId=testClient-test@test.com&Expires=1553839761&Signature=MJUBXAZ7"; + private static final String SIGNED_URL_VALID = + "http://localhost/test-bucket/test1.txt?GoogleAccessId=testClient-test@test.com&Expires=1553839761&Signature=MJUBXAZ7"; + + @Test + public void startUploadForSignedUrl_expectStorageException_whenUrlInvalid() throws Exception { + try { + ResumableMedia.startUploadForSignedUrl( + HttpStorageOptions.newBuilder().build(), + new URL(SIGNED_URL_INVALID), + RetrierWithAlg.attemptOnce()) + .get(); + Assert.fail(); + } catch (StorageException ex) { + assertNotNull(ex.getMessage()); + } + } + + @Test + public void startUploadForSignedUrl_whenUrlValid() throws Exception { + StorageRpc rpc = mock(StorageRpc.class); + HttpStorageOptions options = + HttpStorageOptions.newBuilder().setServiceRpcFactory(opts -> rpc).build(); + + URL url = new URL(SIGNED_URL_VALID); + when(rpc.open(url.toString())).thenReturn("upload-id"); + + Supplier uploadIdSupplier = + ResumableMedia.startUploadForSignedUrl(options, url, RetrierWithAlg.attemptOnce()); + assertThat(uploadIdSupplier.get()).isEqualTo("upload-id"); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RetryContextTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RetryContextTest.java new file mode 100644 index 000000000000..e4d2ad839fc5 --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RetryContextTest.java @@ -0,0 +1,536 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiClock; +import com.google.api.core.NanoClock; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.retrying.BasicResultRetryAlgorithm; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.ResourceExhaustedException; +import com.google.cloud.RetryHelper; +import com.google.cloud.RetryHelper.RetryHelperException; +import com.google.cloud.storage.Backoff.Jitterer; +import com.google.cloud.storage.RetryContext.BackoffComment; +import com.google.cloud.storage.RetryContext.InterruptedBackoffComment; +import com.google.cloud.storage.RetryContext.OnFailure; +import com.google.cloud.storage.RetryContext.OnSuccess; +import com.google.cloud.storage.Retrying.RetryingDependencies; +import 
com.google.common.base.Stopwatch; +import io.grpc.Status.Code; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import org.junit.Before; +import org.junit.Test; + +public final class RetryContextTest { + private static final OnSuccess NOOP = () -> {}; + + private TestApiClock testClock; + private TestScheduledExecutorService scheduledExecutorService; + + @Before + public void setUp() throws Exception { + testClock = TestApiClock.tickBy(0, Duration.ofMillis(1)); + scheduledExecutorService = new TestScheduledExecutorService(testClock); + } + + @Test + public void retryable_when_maxAttemptBudget_consumed() { + Throwable t1 = apiException(Code.UNAVAILABLE, "{unavailable}"); + RetryContext ctx = + RetryContext.of( + scheduledExecutorService, maxAttempts(1), Retrying.alwaysRetry(), Jitterer.noJitter()); + + ctx.recordError( + t1, + failOnSuccess(), + actual -> { + assertThat(actual).isEqualTo(t1); + Throwable[] suppressed = actual.getSuppressed(); + List suppressedMessages = + Arrays.stream(suppressed).map(Throwable::getMessage).collect(Collectors.toList()); + assertThat(suppressedMessages) + .containsExactly( + "Operation failed to complete within attempt budget (attempts: 1, maxAttempts: 1," + + " elapsed: PT0.001S, nextBackoff: PT3S)"); + }); + } + + @Test + public void retryable_maxAttemptBudget_still_available() { + Throwable t1 = apiException(Code.UNAVAILABLE, "{unavailable}"); + RetryContext ctx = + RetryContext.of( + scheduledExecutorService, maxAttempts(2), Retrying.alwaysRetry(), Jitterer.noJitter()); + + ctx.recordError(t1, NOOP, 
failOnFailure()); + } + + @Test + public void + retryable_when_maxAttemptBudget_multipleAttempts_previousErrorsIncludedAsSuppressed() { + Throwable t1 = apiException(Code.UNAVAILABLE, "{unavailable}"); + Throwable t2 = apiException(Code.INTERNAL, "{internal}"); + Throwable t3 = apiException(Code.RESOURCE_EXHAUSTED, "{resource exhausted}"); + RetryContext ctx = + RetryContext.of( + scheduledExecutorService, maxAttempts(3), Retrying.alwaysRetry(), Jitterer.noJitter()); + + ctx.recordError(t1, NOOP, failOnFailure()); + ctx.recordError(t2, NOOP, failOnFailure()); + + ctx.recordError( + t3, + failOnSuccess(), + actual -> { + assertThat(actual).isEqualTo(t3); + Throwable[] suppressed = actual.getSuppressed(); + List suppressedMessages = + Arrays.stream(suppressed).map(Throwable::getMessage).collect(Collectors.toList()); + assertThat(suppressedMessages) + .containsExactly( + "Operation failed to complete within attempt budget (attempts: 3, maxAttempts: 3," + + " elapsed: PT6.001S, nextBackoff: PT3S) previous failures follow in order" + + " of occurrence", + "{unavailable}", + "{internal}"); + }); + } + + @Test + public void nonretryable_regardlessOfAttemptBudget() { + Throwable t1 = apiException(Code.UNAVAILABLE, "{unavailable}"); + RetryContext ctx = + RetryContext.of( + scheduledExecutorService, maxAttempts(3), Retrying.neverRetry(), Jitterer.noJitter()); + + ctx.recordError( + t1, + failOnSuccess(), + actual -> { + assertThat(actual).isEqualTo(t1); + Throwable[] suppressed = actual.getSuppressed(); + List suppressedMessages = + Arrays.stream(suppressed).map(Throwable::getMessage).collect(Collectors.toList()); + assertThat(suppressedMessages) + .containsExactly( + "Unretryable error (attempts: 1, maxAttempts: 3, elapsed: PT0.001S, nextBackoff:" + + " PT3S)"); + }); + } + + @Test + public void nonRetryable_regardlessOfAttemptBudget_previousErrorsIncludedAsSuppressed() { + Throwable t1 = apiException(Code.UNAVAILABLE, "{unavailable}"); + Throwable t2 = 
apiException(Code.INTERNAL, "{internal}"); + Throwable t3 = apiException(Code.RESOURCE_EXHAUSTED, "{resource exhausted}"); + RetryContext ctx = + RetryContext.of( + scheduledExecutorService, + maxAttempts(6), + new BasicResultRetryAlgorithm() { + @Override + public boolean shouldRetry(Throwable previousThrowable, Object previousResponse) { + return !(previousThrowable instanceof ResourceExhaustedException); + } + }, + Jitterer.noJitter()); + + ctx.recordError(t1, NOOP, failOnFailure()); + ctx.recordError(t2, NOOP, failOnFailure()); + + ctx.recordError( + t3, + failOnSuccess(), + actual -> { + assertThat(actual).isEqualTo(t3); + Throwable[] suppressed = actual.getSuppressed(); + List suppressedMessages = + Arrays.stream(suppressed).map(Throwable::getMessage).collect(Collectors.toList()); + assertThat(suppressedMessages) + .containsExactly( + "Unretryable error (attempts: 3, maxAttempts: 6, elapsed: PT6.001S, nextBackoff:" + + " PT3S) previous failures follow in order of occurrence", + "{unavailable}", + "{internal}"); + }); + } + + @Test + public void resetDiscardsPreviousErrors() { + Throwable t1 = apiException(Code.UNAVAILABLE, "{unavailable}"); + Throwable t2 = apiException(Code.INTERNAL, "{internal}"); + Throwable t3 = apiException(Code.RESOURCE_EXHAUSTED, "{resource exhausted}"); + RetryContext ctx = + RetryContext.of( + scheduledExecutorService, + maxAttempts(6), + new BasicResultRetryAlgorithm() { + @Override + public boolean shouldRetry(Throwable previousThrowable, Object previousResponse) { + return !(previousThrowable instanceof ResourceExhaustedException); + } + }, + Jitterer.noJitter()); + + ctx.recordError(t1, NOOP, failOnFailure()); + ctx.recordError(t2, NOOP, failOnFailure()); + ctx.reset(); + + ctx.recordError( + t3, + failOnSuccess(), + actual -> { + assertThat(actual).isEqualTo(t3); + Throwable[] suppressed = actual.getSuppressed(); + List suppressedMessages = + Arrays.stream(suppressed).map(Throwable::getMessage).collect(Collectors.toList()); + 
assertThat(suppressedMessages) + .containsExactly( + "Unretryable error (attempts: 1, maxAttempts: 6, elapsed: PT0.001S, nextBackoff:" + + " PT3S)"); + }); + } + + @Test + public void preservesCauseOfFailureAsReturnedFailure() { + Throwable t1 = apiException(Code.UNAVAILABLE, "{unavailable}"); + RetryContext ctx = + RetryContext.of( + scheduledExecutorService, maxAttempts(1), Retrying.alwaysRetry(), Jitterer.noJitter()); + + ctx.recordError(t1, failOnSuccess(), actual -> assertThat(actual).isEqualTo(t1)); + } + + @Test + public void retryable_when_timeoutBudget_consumed() { + Throwable t1 = apiException(Code.UNAVAILABLE, "{unavailable 1}"); + Throwable t2 = apiException(Code.UNAVAILABLE, "{unavailable 2}"); + Throwable t3 = apiException(Code.UNAVAILABLE, "{unavailable 3}"); + RetryContext ctx = + RetryContext.of( + scheduledExecutorService, + RetryingDependencies.simple( + testClock, + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofSeconds(2)) + .setMaxRetryDelayDuration(Duration.ofSeconds(6)) + .setTotalTimeoutDuration(Duration.ofSeconds(24)) + .setRetryDelayMultiplier(2.0) + .build()), + Retrying.alwaysRetry(), + Jitterer.noJitter()); + + testClock.advance(Duration.ofSeconds(7)); + + ctx.recordError(t1, NOOP, failOnFailure()); + testClock.advance(TestApiClock.addExact(Duration.ofSeconds(7))); + + ctx.recordError(t2, NOOP, failOnFailure()); + testClock.advance(TestApiClock.addExact(Duration.ofSeconds(7))); + ctx.recordError( + t3, + failOnSuccess(), + actual -> { + assertThat(actual).isEqualTo(t3); + Throwable[] suppressed = actual.getSuppressed(); + List suppressedMessages = + Arrays.stream(suppressed).map(Throwable::getMessage).collect(Collectors.toList()); + assertThat(suppressedMessages) + .containsExactly( + "Operation failed to complete within backoff budget (attempts: 3, elapsed: PT27S," + + " nextBackoff: EXHAUSTED, timeout: PT24S) previous failures follow in order" + + " of occurrence", + "{unavailable 1}", + "{unavailable 2}"); + 
}); + } + + @Test + public void recordErrorWhileAlreadyInBackoffTruncatesExistingBackoffAndReevaluates() + throws Exception { + Throwable t1 = apiException(Code.UNAVAILABLE, "{unavailable 1}"); + Throwable t2 = apiException(Code.UNAVAILABLE, "{unavailable 2}"); + Throwable t3 = apiException(Code.UNAVAILABLE, "{unavailable 3}"); + Throwable t4 = apiException(Code.ABORTED, "{aborted}"); + ScheduledExecutorService scheduledExecutorService = + Executors.newSingleThreadScheduledExecutor(); + try { + DefaultRetryContext ctx = + (DefaultRetryContext) + RetryContext.of( + scheduledExecutorService, + RetryingDependencies.simple( + NanoClock.getDefaultClock(), + RetrySettings.newBuilder() + .setMaxAttempts(4) + .setInitialRetryDelayDuration(Duration.ofMillis(250)) + .setMaxRetryDelayDuration(Duration.ofSeconds(1)) + .setRetryDelayMultiplier(2.0) + .build()), + Retrying.alwaysRetry(), + Jitterer.noJitter()); + + BlockingOnSuccess s1 = new BlockingOnSuccess(); + + ctx.recordError(t1, s1, failOnFailure()); + ctx.recordError(t2, NOOP, failOnFailure()); + s1.release(); + ctx.awaitBackoffComplete(); + ctx.recordError(t3, NOOP, failOnFailure()); + ctx.awaitBackoffComplete(); + AtomicReference t = new AtomicReference<>(null); + ctx.recordError(t4, failOnSuccess(), t::set); + + Throwable actual = t.get(); + String messagesToText = TestUtils.messagesToText(actual); + assertAll( + () -> assertThat(messagesToText).contains("{aborted}"), + () -> + assertThat(messagesToText) + .contains( + "Operation failed to complete within attempt budget (attempts: 4," + + " maxAttempts: 4, elapsed: PT"), + () -> + assertThat(messagesToText) + .contains(", nextBackoff: PT1S) previous failures follow in order of occurrence"), + () -> assertThat(messagesToText).containsMatch("\\{unavailable 2}\n\\s*Previous"), + () -> + assertThat(messagesToText) + .contains( + "Previous backoff interrupted by this error (previousBackoff: PT0.25S," + + " elapsed: PT")); + } finally { + 
scheduledExecutorService.shutdownNow(); + scheduledExecutorService.awaitTermination(5, TimeUnit.SECONDS); + } + } + + @Test + public void similarToRetryingHelper() { + RetrySettings retrySettings = + StorageOptions.getDefaultRetrySettings().toBuilder() + .setTotalTimeoutDuration(Duration.ofMillis(3_125)) + .setInitialRetryDelayDuration(Duration.ofNanos(12_500_000)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelayDuration(Duration.ofSeconds(2)) + .setMaxAttempts(6) + .setJittered(false) + .build(); + ResultRetryAlgorithm alg = + new BasicResultRetryAlgorithm() { + @Override + public boolean shouldRetry(Throwable previousThrowable, Object previousResponse) { + return previousThrowable instanceof Invocation; + } + }; + ApiClock clock = NanoClock.getDefaultClock(); + + RetryContext ctx = + RetryContext.of( + RetryContext.directScheduledExecutorService(), + RetryingDependencies.simple(clock, retrySettings), + alg, + Jitterer.noJitter()); + + List retryHelperSplits = new ArrayList<>(); + Stopwatch retryHelperStopwatch = Stopwatch.createStarted(); + try { + RetryHelper.runWithRetries( + () -> { + retryHelperSplits.add(retryHelperStopwatch.elapsed()); + throw new Invocation(); + }, + retrySettings, + alg, + clock); + } catch (RetryHelperException ignore) { + } + + List retryContextSplits = new ArrayList<>(); + Stopwatch retryContextStopwatch = Stopwatch.createStarted(); + ctx.reset(); + AtomicBoolean attemptAgain = new AtomicBoolean(false); + do { + attemptAgain.set(false); + try { + retryContextSplits.add(retryContextStopwatch.elapsed()); + throw new Invocation(); + } catch (Exception e) { + ctx.recordError(e, () -> attemptAgain.set(true), noop -> {}); + } + } while (attemptAgain.get()); + + assertThat(retryContextSplits.size()).isEqualTo(retryHelperSplits.size()); + } + + @Test + public void resetAlsoResetsBackoffState() throws Exception { + Throwable t1 = apiException(Code.INTERNAL, "{err1}"); + Throwable t2 = apiException(Code.INTERNAL, "{err2}"); + RetryContext ctx 
= + RetryContext.of( + scheduledExecutorService, maxAttempts(1), Retrying.alwaysRetry(), Jitterer.noJitter()); + + AtomicReference err1 = new AtomicReference<>(); + AtomicReference err2 = new AtomicReference<>(); + ctx.recordError(t1, failOnSuccess(), err1::set); + ctx.reset(); + ctx.recordError(t2, failOnSuccess(), err2::set); + + assertAll( + () -> { + String messages = TestUtils.messagesToText(err1.get()); + assertThat(messages) + .contains( + "Operation failed to complete within attempt budget (attempts: 1, maxAttempts: 1," + + " elapsed: PT0.001S, nextBackoff: PT3S)"); + }, + () -> { + String messages = TestUtils.messagesToText(err2.get()); + assertThat(messages) + .contains( + "Operation failed to complete within attempt budget (attempts: 1, maxAttempts: 1," + + " elapsed: PT0.001S, nextBackoff: PT3S)"); + }); + } + + @Test + public void rejectedExecutionException_funneledToOnFailureHandlerAsSuppressedException() { + ScheduledExecutorService exec = mock(ScheduledExecutorService.class); + RejectedExecutionException alreadyShutdown = new RejectedExecutionException("already shutdown"); + when(exec.schedule(any(Runnable.class), anyLong(), any())).thenThrow(alreadyShutdown); + Throwable t1 = new RuntimeException("{err1}", new Throwable("{err1Cause}")); + RetryContext ctx = + RetryContext.of(exec, maxAttempts(2), Retrying.alwaysRetry(), Jitterer.noJitter()); + + AtomicReference err1 = new AtomicReference<>(); + ctx.recordError(t1, failOnSuccess(), err1::set); + Throwable t = err1.get(); + assertThat(t).isNotNull(); + assertThat(t.getSuppressed()[0]).isInstanceOf(BackoffComment.class); + assertThat(t.getSuppressed()[1]).isInstanceOf(InterruptedBackoffComment.class); + assertThat(t.getSuppressed()[1].getSuppressed()[0]).isSameInstanceAs(alreadyShutdown); + } + + private static ApiException apiException(Code code, String message) { + return ApiExceptionFactory.createException(message, null, GrpcStatusCode.of(code), false); + } + + private 
MaxAttemptRetryingDependencies maxAttempts(int maxAttempts) { + return new MaxAttemptRetryingDependencies( + RetrySettings.newBuilder() + .setMaxAttempts(maxAttempts) + .setInitialRetryDelayDuration(Duration.ofSeconds(3)) + .setMaxRetryDelayDuration(Duration.ofSeconds(35)) + .setRetryDelayMultiplier(1.0) + .build(), + testClock); + } + + static OnFailure failOnFailure() { + InvocationTracer invocationTracer = new InvocationTracer("Unexpected onFailure invocation"); + return t -> { + if (t != invocationTracer) { + invocationTracer.addSuppressed(t); + } + throw invocationTracer; + }; + } + + static OnSuccess failOnSuccess() { + InvocationTracer invocationTracer = new InvocationTracer("Unexpected onSuccess invocation"); + return () -> { + throw invocationTracer; + }; + } + + private static final class MaxAttemptRetryingDependencies implements RetryingDependencies { + private final RetrySettings settings; + private final ApiClock clock; + + private MaxAttemptRetryingDependencies(RetrySettings settings, ApiClock clock) { + this.settings = settings; + this.clock = clock; + } + + @Override + public RetrySettings getRetrySettings() { + return settings; + } + + @Override + public ApiClock getClock() { + return clock; + } + } + + private static final class InvocationTracer extends RuntimeException { + private InvocationTracer(String message) { + super(message); + } + } + + static final class BlockingOnSuccess implements OnSuccess { + private final CountDownLatch cdl; + + BlockingOnSuccess() { + this.cdl = new CountDownLatch(1); + } + + @Override + public void onSuccess() { + try { + cdl.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + } + + public void release() { + cdl.countDown(); + } + } + + static final class Invocation extends Exception { + private Invocation() { + super(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RetryingTest.java 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RetryingTest.java new file mode 100644 index 000000000000..5c1eda96c3b0 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RetryingTest.java @@ -0,0 +1,140 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.NanoClock; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.cloud.storage.Conversions.Decoder; +import com.google.cloud.storage.Retrying.DefaultRetrier; +import com.google.cloud.storage.Retrying.HttpRetrier; +import com.google.cloud.storage.Retrying.RetrierWithAlg; +import com.google.cloud.storage.Retrying.RetryingDependencies; +import com.google.cloud.storage.spi.v1.HttpRpcContext; +import io.grpc.Status.Code; +import java.time.Duration; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.UnaryOperator; +import java.util.regex.Pattern; +import org.junit.Test; + +public final class RetryingTest { + private static final Pattern UUID_PATTERN = + 
Pattern.compile("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"); + + private static final RetryingDependencies RETRYING_DEPENDENCIES = + RetryingDependencies.simple( + NanoClock.getDefaultClock(), + RetrySettings.newBuilder() + .setMaxAttempts(3) + .setInitialRetryDelayDuration(Duration.ofMillis(30)) + .setMaxRetryDelayDuration(Duration.ofSeconds(35)) + .setRetryDelayMultiplier(1.0) + .build()); + + @Test + public void run() throws Exception { + ApiException t1 = apiException(Code.UNAVAILABLE, "{unavailable}"); + ApiException t2 = apiException(Code.INTERNAL, "{internal}"); + ApiException t3 = apiException(Code.RESOURCE_EXHAUSTED, "{resource exhausted}"); + + AtomicInteger counter = new AtomicInteger(0); + RetrierWithAlg retrier = + new DefaultRetrier(UnaryOperator.identity(), RETRYING_DEPENDENCIES) + .withAlg(Retrying.alwaysRetry()); + StorageException actual = + assertThrows( + StorageException.class, + () -> + retrier.run( + () -> { + int i = counter.incrementAndGet(); + switch (i) { + case 1: + throw t1; + case 2: + throw t2; + case 3: + throw t3; + default: + throw new RuntimeException("unexpected"); + } + }, + Decoder.identity())); + String messages = TestUtils.messagesToText(actual); + assertAll( + () -> + assertThat(messages) + .contains( + "Operation failed to complete within attempt budget (attempts: 3, maxAttempts:" + + " 3"), + () -> assertThat(messages).contains("{unavailable}"), + () -> assertThat(messages).contains("{internal}")); + } + + @Test + public void http() throws Exception { + + RetrierWithAlg retrier = + new HttpRetrier(new DefaultRetrier(UnaryOperator.identity(), RETRYING_DEPENDENCIES)) + .withAlg(Retrying.alwaysRetry()); + + AtomicInteger counter = new AtomicInteger(0); + StorageException actual = + assertThrows( + StorageException.class, + () -> + retrier.run( + () -> { + int i = counter.incrementAndGet(); + UUID invocationId = HttpRpcContext.getInstance().getInvocationId(); + switch (i) { + case 1: + throw 
apiException(Code.UNAVAILABLE, "{unavailable} " + invocationId); + case 2: + throw apiException(Code.INTERNAL, "{internal} " + invocationId); + case 3: + throw apiException( + Code.RESOURCE_EXHAUSTED, "{resource exhausted} " + invocationId); + default: + throw new RuntimeException("unexpected"); + } + }, + Decoder.identity())); + String messages = TestUtils.messagesToText(actual); + assertAll( + () -> + assertThat(messages) + .contains( + "Operation failed to complete within attempt budget (attempts: 3, maxAttempts:" + + " 3"), + () -> assertThat(messages).contains("{unavailable}"), + () -> assertThat(messages).contains("{internal}"), + () -> assertThat(messages).containsMatch(UUID_PATTERN)); + } + + private static ApiException apiException(Code code, String message) { + return ApiExceptionFactory.createException(message, null, GrpcStatusCode.of(code), false); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RewindableByteBufferContentTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RewindableByteBufferContentTest.java new file mode 100644 index 000000000000..fe867e6f2f90 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RewindableByteBufferContentTest.java @@ -0,0 +1,160 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.storage.RewindableContentPropertyTest.ErroringOutputStream; +import com.google.protobuf.ByteString; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.junit.Before; +import org.junit.Test; + +public final class RewindableByteBufferContentTest { + + private long total; + private ByteBuffer[] buffers; + private String fullXxd; + + @Before + public void setUp() throws Exception { + // full buffer + ByteBuffer bufFull = DataGenerator.base64Characters().genByteBuffer(16); + // limited buffer + ByteBuffer bufLimit = DataGenerator.base64Characters().genByteBuffer(16); + bufLimit.limit(15); + // offset buffer + ByteBuffer bufOffset = DataGenerator.base64Characters().genByteBuffer(16); + bufOffset.position(3); + // offset and limited buffer + ByteBuffer bufLimitAndOffset = DataGenerator.base64Characters().genByteBuffer(16); + bufLimitAndOffset.position(9).limit(12); + + total = + bufFull.remaining() + + bufLimit.remaining() + + bufOffset.remaining() + + bufLimitAndOffset.remaining(); + buffers = new ByteBuffer[] {bufFull, bufLimit, bufOffset, bufLimitAndOffset}; + fullXxd = xxd(false, buffers); + } + + @Test + public void getLength() { + RewindableContent content = RewindableContent.of(buffers); + + assertThat(content.getLength()).isEqualTo(total); + } + + @Test + public void writeTo() throws IOException { + + RewindableContent content = RewindableContent.of(buffers); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + content.writeTo(baos); + + String actual = xxd(baos.toByteArray()); + assertThat(actual).isEqualTo(fullXxd); + } + + @Test + public void rewind() throws IOException { + + 
RewindableContent content = RewindableContent.of(buffers); + + assertThrows( + IOException.class, + () -> { + try (ErroringOutputStream erroringOutputStream = new ErroringOutputStream(25)) { + content.writeTo(erroringOutputStream); + } + }); + content.rewindTo(0L); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + content.writeTo(baos); + + String actual = xxd(baos.toByteArray()); + assertThat(actual).isEqualTo(fullXxd); + } + + @Test + public void rewindTo() throws Exception { + RewindableContent content = RewindableContent.of(buffers); + + ByteString reduce = + Arrays.stream(buffers) + .map(ByteBuffer::duplicate) + .map(ByteStringStrategy.noCopy()) + .reduce(ByteString.empty(), ByteString::concat, (l, r) -> r); + + assertThat(content.getLength()).isEqualTo(total); + + int readOffset = 37; + ByteString substring = reduce.substring(readOffset); + ByteBuffer readOnlyByteBuffer = substring.asReadOnlyByteBuffer(); + String expected = xxd(false, readOnlyByteBuffer); + long value = total - readOffset; + content.rewindTo(readOffset); + assertThat(content.getLength()).isEqualTo(value); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + content.writeTo(baos); + + String actual = xxd(baos.toByteArray()); + assertAll( + () -> assertThat(baos.toByteArray()).hasLength(Math.toIntExact(value)), + () -> assertThat(actual).isEqualTo(expected)); + } + + @Test + public void rewind_dirtyAware() throws IOException { + + ByteBuffer buf = DataGenerator.base64Characters().genByteBuffer(10); + buf.position(3).limit(7); + + int position = buf.position(); + int limit = buf.limit(); + + RewindableContent content = RewindableContent.of(buf); + int hackPosition = 2; + // after content has initialized, mutate the position underneath it. We're doing this to detect + // if rewind is actually modifying things. 
It shouldn't until the content is dirtied by calling + // writeTo + buf.position(hackPosition); + + // invoke rewind, and expect it to not do anything + content.rewindTo(0L); + assertThat(buf.position()).isEqualTo(hackPosition); + assertThat(buf.limit()).isEqualTo(limit); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + content.writeTo(baos); + + assertThat(buf.position()).isEqualTo(limit); + assertThat(buf.limit()).isEqualTo(limit); + + content.rewindTo(0L); + assertThat(buf.position()).isEqualTo(position); + assertThat(buf.limit()).isEqualTo(limit); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RewindableContentInputStreamTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RewindableContentInputStreamTest.java new file mode 100644 index 000000000000..b8d7bd5c5d6d --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RewindableContentInputStreamTest.java @@ -0,0 +1,96 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._256KiB; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; + +import com.google.protobuf.ByteString; +import java.io.IOException; +import java.nio.ByteBuffer; +import org.junit.Test; + +public final class RewindableContentInputStreamTest { + + @Test + public void read_empty() throws IOException { + RewindableContent content = RewindableContent.empty(); + try (RewindableContentInputStream in = new RewindableContentInputStream(content)) { + int read = in.read(); + assertThat(read).isEqualTo(-1); + } + } + + @Test + public void readB_emptySrc() throws IOException { + RewindableContent content = RewindableContent.empty(); + try (RewindableContentInputStream in = new RewindableContentInputStream(content)) { + int read = in.read(new byte[1]); + assertThat(read).isEqualTo(-1); + } + } + + @Test + public void readB_emptyDst() throws IOException { + byte[] bytes = DataGenerator.base64Characters().genBytes(1); + RewindableContent content = RewindableContent.of(ByteBuffer.wrap(bytes)); + try (RewindableContentInputStream in = new RewindableContentInputStream(content)) { + byte[] tmp = new byte[0]; + int read = in.read(tmp); + assertThat(read).isEqualTo(0); + } + } + + @Test + public void readB_singleByte() throws IOException { + byte[] bytes = DataGenerator.base64Characters().genBytes(1); + RewindableContent content = RewindableContent.of(ByteBuffer.wrap(bytes)); + try (RewindableContentInputStream in = new RewindableContentInputStream(content)) { + byte[] tmp = new byte[_256KiB]; + int read = in.read(tmp); + assertThat(read).isEqualTo(1); + assertThat(tmp[0]).isEqualTo(bytes[0]); + } + } + + @Test + public void read_singleByte() throws IOException { + byte[] bytes = DataGenerator.base64Characters().genBytes(1); + RewindableContent content = RewindableContent.of(ByteBuffer.wrap(bytes)); + try 
(RewindableContentInputStream in = new RewindableContentInputStream(content)) { + int read = in.read(); + assertThat(read).isEqualTo(bytes[0]); + } + } + + @Test + public void readB_multiContent() throws IOException { + byte[] bytes = DataGenerator.base64Characters().genBytes(30); + RewindableContent content = + RewindableContent.of( + ByteBuffer.wrap(bytes, 0, 10), + ByteBuffer.wrap(bytes, 10, 10), + ByteBuffer.wrap(bytes, 20, 10)); + try (RewindableContentInputStream in = new RewindableContentInputStream(content)) { + byte[] tmp = new byte[_256KiB]; + int read = in.read(tmp); + assertThat(read).isEqualTo(30); + assertThat(xxd(ByteString.copyFrom(tmp, 0, read))).isEqualTo(xxd(bytes)); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RewindableContentPropertyTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RewindableContentPropertyTest.java new file mode 100644 index 000000000000..79453af55933 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/RewindableContentPropertyTest.java @@ -0,0 +1,363 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.common.base.MoreObjects; +import com.google.protobuf.ByteString; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.channels.SeekableByteChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.stream.Collectors; +import net.jqwik.api.Arbitraries; +import net.jqwik.api.Arbitrary; +import net.jqwik.api.Combinators; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; +import net.jqwik.api.Provide; +import net.jqwik.api.RandomDistribution; +import org.checkerframework.checker.nullness.qual.NonNull; + +final class RewindableContentPropertyTest { + + @Property + void path(@ForAll("PathScenario") PathScenario pathScenario) throws Exception { + try (PathScenario s = pathScenario) { + RewindableContent content = RewindableContent.of(s.getPath()); + assertThrows( + IOException.class, + () -> { + try (ErroringOutputStream erroringOutputStream = + new ErroringOutputStream(s.getErrorAtOffset())) { + content.writeTo(erroringOutputStream); + } + }); + content.rewindTo(s.getRewindOffset()); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + content.writeTo(baos); + + String actual = xxd(baos.toByteArray()); + + assertThat(actual).isEqualTo(s.getExpectedXxd()); + } + } + + @Property + void byteBuffers(@ForAll("ByteBuffersScenario") ByteBuffersScenario s) throws IOException { + RewindableContent content = RewindableContent.of(s.getBuffers()); + assertThat(content.getLength()).isEqualTo(s.getFullLength()); + assertThrows( + IOException.class, + () -> { + try (ErroringOutputStream erroringOutputStream = + new ErroringOutputStream(s.getErrorAtOffset())) { + 
content.writeTo(erroringOutputStream); + } + }); + content.rewindTo(s.getRewindOffset()); + assertThat(content.getLength()).isEqualTo(s.getPostRewindLength()); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + content.writeTo(baos); + + String actual = xxd(baos.toByteArray()); + + assertThat(actual).isEqualTo(s.getExpectedXxd()); + } + + @Provide("PathScenario") + static Arbitrary pathScenario() { + return Arbitraries.lazyOf( + () -> + Arbitraries.oneOf( + bytes(1, 10), + bytes(10, 100), + bytes(100, 1_000), + bytes(1_000, 10_000), + bytes(10_000, 100_000), + bytes(100_000, 1_000_000), + bytes(1_000_000, 10_000_000)) + .flatMap( + bytes -> + Combinators.combine( + Arbitraries.integers().between(0, bytes.length - 1), + Arbitraries.integers().between(0, bytes.length - 1), + Arbitraries.just(bytes)) + .as(PathScenario::of))); + } + + @Provide("ByteBuffersScenario") + static Arbitrary byteBuffersScenarioArbitrary() { + return Arbitraries.lazyOf( + () -> + Arbitraries.oneOf( + byteBuffers(1, 10), + byteBuffers(10, 100), + byteBuffers(100, 1_000), + byteBuffers(1_000, 10_000), + byteBuffers(10_000, 100_000), + byteBuffers(100_000, 1_000_000))) + .flatMap( + buffers -> { + long totalAvailable = Arrays.stream(buffers).mapToLong(ByteBuffer::remaining).sum(); + + return Combinators.combine( + Arbitraries.longs().between(0, Math.max(0L, totalAvailable - 1)), + Arbitraries.longs().between(0, Math.max(0L, totalAvailable - 1)), + Arbitraries.just(buffers)) + .as(ByteBuffersScenario::of); + }) + .filter(bbs -> bbs.getFullLength() > 0); + } + + @NonNull + private static Arbitrary bytes(int minFileSize, int maxFileSize) { + return Arbitraries.integers() + .between(minFileSize, maxFileSize) + .withDistribution(RandomDistribution.uniform()) + .map(DataGenerator.base64Characters()::genBytes); + } + + @NonNull + static Arbitrary byteBuffers(int perBufferMinSize, int perBufferMaxSize) { + return byteBuffer(perBufferMinSize, perBufferMaxSize) + .array(ByteBuffer[].class) + 
.ofMinSize(1) + .ofMaxSize(10); + } + + /** + * Generate a ByteBuffer with size between minSize, maxSize with a random position and random + * limit + */ + @NonNull + static Arbitrary byteBuffer(int minSize, int maxSize) { + return Arbitraries.integers() + .between(minSize, maxSize) + .withDistribution(RandomDistribution.uniform()) + .withoutEdgeCases() + .map(DataGenerator.base64Characters()::genByteBuffer) + .flatMap( + buf -> + Arbitraries.integers() + .between(0, Math.max(0, buf.capacity() - 1)) + .withoutEdgeCases() + .flatMap( + limit -> + Arbitraries.integers() + .between(0, limit) + .withoutEdgeCases() + .flatMap( + position -> { + buf.limit(limit); + buf.position(position); + return Arbitraries.of(buf); + }))); + } + + private static final class PathScenario implements AutoCloseable { + + private static final Path TMP_DIR = Paths.get(System.getProperty("java.io.tmpdir")); + + private final int rewindOffset; + private final int errorAtOffset; + private final TmpFile tmpFile; + private final byte[] expectedBytes; + private final String expectedXxd; + + private PathScenario( + int rewindOffset, int errorAtOffset, TmpFile tmpFile, byte[] expectedBytes) { + this.rewindOffset = rewindOffset; + this.errorAtOffset = errorAtOffset; + this.tmpFile = tmpFile; + this.expectedBytes = expectedBytes; + this.expectedXxd = xxd(expectedBytes); + } + + public int getRewindOffset() { + return rewindOffset; + } + + public int getErrorAtOffset() { + return errorAtOffset; + } + + public Path getPath() { + return tmpFile.getPath(); + } + + public String getExpectedXxd() { + return expectedXxd; + } + + public long getFullLength() throws IOException { + return Files.size(tmpFile.getPath()); + } + + @Override + public void close() throws IOException { + tmpFile.close(); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("expectedXxd", "\n" + expectedXxd) + .add("expectedBytes.length", expectedBytes.length) + .add("rewindOffset", 
rewindOffset) + .add("errorAtOffset", errorAtOffset) + .add("tmpFile", tmpFile) + .toString(); + } + + private static PathScenario of(int rewindOffset, int errorAtOffset, byte[] bytes) { + try { + TmpFile tmpFile1 = TmpFile.of(TMP_DIR, "PathScenario", ".bin"); + try (SeekableByteChannel writer = tmpFile1.writer()) { + writer.write(ByteBuffer.wrap(bytes)); + } + byte[] expectedBytes = + Arrays.copyOfRange(bytes, Math.min(rewindOffset, bytes.length), bytes.length); + return new PathScenario(rewindOffset, errorAtOffset, tmpFile1, expectedBytes); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + private static class ByteBuffersScenario { + + private final long rewindOffset; + private final long errorAtOffset; + private final ByteBuffer[] buffers; + private final long fullLength; + private final String expectedXxd; + + private ByteBuffersScenario( + long rewindOffset, + long errorAtOffset, + ByteBuffer[] buffers, + byte[] expectedBytes, + long fullLength) { + this.rewindOffset = rewindOffset; + this.errorAtOffset = errorAtOffset; + this.buffers = buffers; + this.fullLength = fullLength; + this.expectedXxd = xxd(expectedBytes); + } + + public long getRewindOffset() { + return rewindOffset; + } + + public long getErrorAtOffset() { + return errorAtOffset; + } + + public ByteBuffer[] getBuffers() { + // duplicate the buffer so we have stable toString + return Arrays.stream(buffers).map(ByteBuffer::duplicate).toArray(ByteBuffer[]::new); + } + + public String getExpectedXxd() { + return expectedXxd; + } + + public long getFullLength() { + return fullLength; + } + + public long getPostRewindLength() { + return fullLength - rewindOffset; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("\nexpectedXxd", "\n" + expectedXxd) + .add( + "\nbuffers", + Arrays.stream(buffers) + .map(Object::toString) + .collect(Collectors.joining("\n\t", "[\n\t", "\n]"))) + .add("\nrewindOffset", rewindOffset) + 
+        .add("\nerrorAtOffset", errorAtOffset)
+        .toString();
+    }
+
+    public static ByteBuffersScenario of(
+        long rewindOffset, long errorAtOffset, ByteBuffer[] buffers) {
+
+      ByteString reduce =
+          Arrays.stream(buffers)
+              .map(ByteBuffer::duplicate)
+              .map(ByteStringStrategy.noCopy())
+              .reduce(ByteString.empty(), ByteString::concat, (l, r) -> r);
+
+      byte[] byteArray = reduce.substring(Math.toIntExact(rewindOffset)).toByteArray();
+      return new ByteBuffersScenario(
+          rewindOffset, errorAtOffset, buffers, byteArray, reduce.size());
+    }
+  }
+
+  static final class ErroringOutputStream extends OutputStream {
+    private final long errorAt;
+    private long totalWritten;
+
+    ErroringOutputStream(long errorAt) {
+      this.errorAt = errorAt;
+      this.totalWritten = 0;
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+      if (totalWritten++ >= errorAt) {
+        throw new IOException("Reached errorAt limit");
+      }
+    }
+
+    @Override
+    public void write(byte[] b) throws IOException {
+      if (totalWritten + b.length >= errorAt) {
+        throw new IOException("Reached errorAt limit");
+      } else {
+        totalWritten += b.length;
+      }
+    }
+
+    @Override
+    public void write(@SuppressWarnings("NullableProblems") byte[] b, int off, int len)
+        throws IOException {
+      int diff = len; // per OutputStream contract, len IS the byte count; off only offsets into b
+      if (totalWritten + diff >= errorAt) {
+        throw new IOException("Reached errorAt limit");
+      } else {
+        totalWritten += diff;
+      }
+    }
+  }
+}
diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ScatteringByteChannelFacadeTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ScatteringByteChannelFacadeTest.java
new file mode 100644
index 000000000000..a5dd377812ca
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ScatteringByteChannelFacadeTest.java
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.StorageByteChannels.readable; +import static com.google.cloud.storage.TestUtils.snapshotData; +import static com.google.common.truth.Truth.assertThat; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.SequenceInputStream; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.ScatteringByteChannel; +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.Test; + +public final class ScatteringByteChannelFacadeTest { + + @Test + public void lackOfAvailabilityDoesNotBlock() throws IOException { + ByteArrayInputStream bais1 = new ByteArrayInputStream(new byte[] {0, 1, 2, 3, 4}); + ByteArrayInputStream bais2 = new ByteArrayInputStream(new byte[] {5, 6, 7, 8, 9}); + SequenceInputStream all = new SequenceInputStream(bais1, bais2); + ReadableByteChannel rbc = Channels.newChannel(all); + ScatteringByteChannel sbc = newSbc(rbc); + + ByteBuffer[] bufs = {ByteBuffer.allocate(6), ByteBuffer.allocate(6)}; + sbc.read(bufs); + assertThat(snapshotData(bufs[0])).isEqualTo(new byte[] {0, 1, 2, 3, 4}); + assertThat(snapshotData(bufs[1])).isEqualTo(new byte[] {}); + sbc.read(bufs); + assertThat(snapshotData(bufs[0])).isEqualTo(new byte[] {0, 1, 2, 3, 4, 5}); + assertThat(snapshotData(bufs[1])).isEqualTo(new byte[] {6, 7, 8, 9}); + } + + @Test + public void 
lackOfCapacityReturnsFast() throws IOException { + ByteArrayInputStream bais = new ByteArrayInputStream(new byte[] {0, 1, 2, 3, 4}); + ReadableByteChannel rbc = Channels.newChannel(bais); + ScatteringByteChannel sbc = newSbc(rbc); + + ByteBuffer[] bufs = {ByteBuffer.allocate(0)}; + long read = sbc.read(bufs); + assertThat(read).isEqualTo(0); + } + + @Test + public void readNegativeOneReturnIfPreviouslyReadBytes() throws IOException { + ByteArrayInputStream bais = new ByteArrayInputStream(new byte[] {0}); + ReadableByteChannel rbc = Channels.newChannel(bais); + ScatteringByteChannel sbc = newSbc(rbc); + + ByteBuffer[] bufs = {ByteBuffer.allocate(1), ByteBuffer.allocate(1)}; + long read = sbc.read(bufs); + assertThat(read).isEqualTo(1); + assertThat(snapshotData(bufs[0])).isEqualTo(new byte[] {0}); + assertThat(snapshotData(bufs[1])).isEqualTo(new byte[] {}); + } + + @Test + public void readNegativeOneReturnsNegativeOneIfPreviouslyReadZeroBytes() throws IOException { + AtomicBoolean closeCalled = new AtomicBoolean(false); + ScatteringByteChannel sbc = + newSbc( + new ReadableByteChannelStub() { + @Override + public int read(ByteBuffer dst) { + return -1; + } + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() { + closeCalled.compareAndSet(false, true); + } + }); + + ByteBuffer buf = ByteBuffer.allocate(1); + int read = sbc.read(buf); + assertThat(read).isEqualTo(-1); + assertThat(snapshotData(buf)).isEqualTo(new byte[] {}); + assertThat(closeCalled.get()).isTrue(); + } + + @Test(expected = ClosedChannelException.class) + public void closeChannelExceptionIfUnderlyingIsNotOpen() throws IOException { + ScatteringByteChannel sbc = newSbc(new ClosedReadableByteChannel()); + sbc.read(null, 0, 0); + } + + @Test + public void openDelegates() { + AtomicBoolean open = new AtomicBoolean(false); + ScatteringByteChannel sbc = + newSbc( + new ReadableByteChannelStub() { + @Override + public boolean isOpen() { + return open.get(); + } + 
}); + + assertThat(sbc.isOpen()).isFalse(); + open.set(true); + assertThat(sbc.isOpen()).isTrue(); + } + + @Test + public void closeDelegates() throws IOException { + AtomicBoolean closeCalled = new AtomicBoolean(false); + ScatteringByteChannel sbc = + newSbc( + new ReadableByteChannelStub() { + @Override + public void close() { + closeCalled.compareAndSet(false, true); + } + }); + + sbc.close(); + assertThat(closeCalled.get()).isTrue(); + } + + private static ScatteringByteChannel newSbc(ReadableByteChannel c) { + return readable().asScatteringByteChannel(c); + } + + private static final class ClosedReadableByteChannel implements ReadableByteChannel { + + @Override + public int read(ByteBuffer dst) throws IOException { + throw new ClosedChannelException(); + } + + @Override + public boolean isOpen() { + return false; + } + + @Override + public void close() {} + } + + private abstract static class ReadableByteChannelStub implements ReadableByteChannel { + + @Override + public int read(ByteBuffer dst) throws IOException { + return 0; + } + + @Override + public boolean isOpen() { + return false; + } + + @Override + public void close() throws IOException {} + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/SerializationTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/SerializationTest.java new file mode 100644 index 000000000000..8905a48c90c7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/SerializationTest.java @@ -0,0 +1,423 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.base.MoreObjects.firstNonNull; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.BaseSerializationTest; +import com.google.cloud.NoCredentials; +import com.google.cloud.PageImpl; +import com.google.cloud.ReadChannel; +import com.google.cloud.Restorable; +import com.google.cloud.RestorableState; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Acl.Project.ProjectRole; +import com.google.cloud.storage.BlobReadChannelV2.BlobReadChannelContext; +import com.google.cloud.storage.BlobReadChannelV2.BlobReadChannelV2State; +import com.google.cloud.storage.BlobWriteChannelV2.BlobWriteChannelV2State; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.BufferAllocationStrategy; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.ExecutorSupplier; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartCleanupStrategy; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartMetadataFieldDecorator; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartNamingStrategy; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BucketField; +import com.google.cloud.storage.Storage.ComposeRequest; +import 
com.google.cloud.storage.Storage.PredefinedAcl; +import com.google.cloud.storage.UnifiedOpts.Opt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InvalidClassException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.time.Duration; +import java.util.Base64; +import java.util.Collections; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Executor; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public class SerializationTest extends BaseSerializationTest { + + private static final Acl.Domain ACL_DOMAIN = new Acl.Domain("domain"); + private static final Acl.Group ACL_GROUP = new Acl.Group("group"); + private static final Acl.Project ACL_PROJECT_ = new Acl.Project(ProjectRole.VIEWERS, "pid"); + private static final Acl.User ACL_USER = new Acl.User("user"); + private static final Acl.RawEntity ACL_RAW = new Acl.RawEntity("raw"); + private static final Acl ACL = Acl.of(ACL_DOMAIN, Acl.Role.OWNER); + private static final BlobInfo BLOB_INFO = BlobInfo.newBuilder("b", "n").build(); + private static final BucketInfo BUCKET_INFO = BucketInfo.of("b"); + private static final Cors.Origin ORIGIN = Cors.Origin.any(); + private static final Cors CORS = + Cors.newBuilder().setMaxAgeSeconds(1).setOrigins(Collections.singleton(ORIGIN)).build(); + private static final StorageException STORAGE_EXCEPTION = new StorageException(42, "message"); + private static final Storage.BlobListOption BLOB_LIST_OPTIONS = + Storage.BlobListOption.pageSize(100); + private static final Storage.BlobSourceOption 
BLOB_SOURCE_OPTIONS = + Storage.BlobSourceOption.generationMatch(1); + private static final Storage.BlobTargetOption BLOB_TARGET_OPTIONS = + Storage.BlobTargetOption.generationMatch(); + private static final Storage.BucketListOption BUCKET_LIST_OPTIONS = + Storage.BucketListOption.prefix("bla"); + private static final Storage.BucketSourceOption BUCKET_SOURCE_OPTIONS = + Storage.BucketSourceOption.metagenerationMatch(1); + private static final Storage.BucketTargetOption BUCKET_TARGET_OPTIONS = + Storage.BucketTargetOption.metagenerationNotMatch(); + private static final Map EMPTY_RPC_OPTIONS = ImmutableMap.of(); + + private static Storage STORAGE; + private static Blob BLOB; + private static Bucket BUCKET; + private static PageImpl PAGE_RESULT; + + @BeforeClass + public static void beforeClass() { + StorageOptions storageOptions = + StorageOptions.newBuilder() + .setProjectId("p") + .setCredentials(NoCredentials.getInstance()) + .build(); + STORAGE = storageOptions.getService(); + BLOB = BLOB_INFO.asBlob(STORAGE); + BUCKET = BUCKET_INFO.asBucket(STORAGE); + PAGE_RESULT = new PageImpl<>(null, "c", Collections.singletonList(BLOB)); + } + + @AfterClass + public static void afterClass() throws Exception { + if (STORAGE != null) { + STORAGE.close(); + } + } + + @Override + protected Serializable[] serializableObjects() { + StorageOptions optionsDefault1 = + StorageOptions.newBuilder() + .setProjectId("p1") + .setCredentials(NoCredentials.getInstance()) + .build(); + StorageOptions optionsDefault2 = optionsDefault1.toBuilder().setProjectId("p2").build(); + StorageOptions optionsHttp1 = + StorageOptions.http() + .setProjectId("http1") + .setCredentials(NoCredentials.getInstance()) + .build(); + StorageOptions optionsHttp2 = optionsHttp1.toBuilder().setProjectId("http2").build(); + StorageOptions optionsGrpc1 = + StorageOptions.grpc() + .setProjectId("grpc1") + .setCredentials(NoCredentials.getInstance()) + .build(); + StorageOptions optionsGrpc2 = 
optionsGrpc1.toBuilder().setProjectId("grpc2").build(); + + // echo -n "key" | base64 + String keyBase64 = "a2V5"; + + ImmutableList serializableOpts = + ImmutableList.builder() + .add(UnifiedOpts.crc32cMatch("crc32c")) + .add(UnifiedOpts.currentDirectory()) + .add(UnifiedOpts.decryptionKey(keyBase64)) + .add(UnifiedOpts.delimiter("/")) + .add(UnifiedOpts.detectContentType()) + .add(UnifiedOpts.disableGzipContent()) + .add(UnifiedOpts.doesNotExist()) + .add(UnifiedOpts.encryptionKey(keyBase64)) + .add(UnifiedOpts.endOffset("end")) + .add(UnifiedOpts.fields(ImmutableSet.of(BucketField.LOCATION))) + .add(UnifiedOpts.generationMatch(0)) + .add(UnifiedOpts.generationNotMatch(0)) + .add(UnifiedOpts.kmsKeyName("key")) + .add(UnifiedOpts.md5Match("md5")) + .add(UnifiedOpts.metagenerationMatch(1)) + .add(UnifiedOpts.metagenerationNotMatch(1)) + .add(UnifiedOpts.pageSize(3)) + .add(UnifiedOpts.pageToken("token")) + .add(UnifiedOpts.predefinedAcl(PredefinedAcl.PRIVATE)) + .add(UnifiedOpts.predefinedDefaultObjectAcl(PredefinedAcl.PRIVATE)) + .add(UnifiedOpts.prefix("prefix")) + .add(UnifiedOpts.projectId("proj")) + .add(UnifiedOpts.projection("full")) + .add(UnifiedOpts.requestedPolicyVersion(2)) + .add(UnifiedOpts.returnRawInputStream(false)) + .add(UnifiedOpts.serviceAccount(ServiceAccount.of("x@y.z"))) + .add(UnifiedOpts.setContentType("text/plain")) + .add(UnifiedOpts.showDeletedKeys(false)) + .add(UnifiedOpts.startOffset("start")) + .add(UnifiedOpts.userProject("user-proj")) + .add(UnifiedOpts.versionsFilter(false)) + .add(UnifiedOpts.generationMatchExtractor()) + .add(UnifiedOpts.generationNotMatchExtractor()) + .add(UnifiedOpts.metagenerationMatchExtractor()) + .add(UnifiedOpts.metagenerationNotMatchExtractor()) + .add(UnifiedOpts.crc32cMatchExtractor()) + .add(UnifiedOpts.md5MatchExtractor()) + .build(); + + try { + GrpcStorageOptions grpcStorageOptionsBufferToTemp = + StorageOptions.grpc() + .setCredentials(NoCredentials.getInstance()) + .setProjectId("project1") + 
.setBlobWriteSessionConfig(BlobWriteSessionConfigs.bufferToTempDirThenUpload()) + .build(); + + return new Serializable[] { + ACL_DOMAIN, + ACL_GROUP, + ACL_PROJECT_, + ACL_USER, + ACL_RAW, + ACL, + BLOB_INFO, + BLOB, + BUCKET_INFO, + BUCKET, + ORIGIN, + CORS, + PAGE_RESULT, + BLOB_LIST_OPTIONS, + BLOB_SOURCE_OPTIONS, + BLOB_TARGET_OPTIONS, + BUCKET_LIST_OPTIONS, + BUCKET_SOURCE_OPTIONS, + BUCKET_TARGET_OPTIONS, + STORAGE_EXCEPTION, + optionsDefault1, + optionsDefault2, + optionsHttp1, + optionsHttp2, + optionsGrpc1, + optionsGrpc2, + serializableOpts, + grpcStorageOptionsBufferToTemp + }; + } catch (IOException ioe) { + throw new AssertionError(ioe); + } + } + + @Test + public void avoidNpeHttpStorageOptions_retryDeps() throws IOException, ClassNotFoundException { + HttpStorageOptions optionsHttp1 = + StorageOptions.http() + .setProjectId("http1") + .setCredentials(NoCredentials.getInstance()) + .build(); + + assertThat(optionsHttp1.asRetryDependencies()).isNotNull(); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (ObjectOutputStream oos = new ObjectOutputStream(baos)) { + oos.writeObject(optionsHttp1); + } + + byte[] byteArray = baos.toByteArray(); + try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(byteArray))) { + Object o = ois.readObject(); + HttpStorageOptions hso = (HttpStorageOptions) o; + assertThat(hso.asRetryDependencies()).isNotNull(); + } + } + + @Override + @SuppressWarnings("resource") + protected Restorable[] restorableObjects() { + HttpStorageOptions options = HttpStorageOptions.newBuilder().setProjectId("p2").build(); + ReadChannel readerV2 = + new BlobReadChannelV2( + new StorageObject().setBucket("b").setName("n"), + EMPTY_RPC_OPTIONS, + BlobReadChannelContext.from(options)); + WriteChannel writer = + new BlobWriteChannelV2( + BlobReadChannelContext.from(options), + JsonResumableWrite.of( + Conversions.json().blobInfo().encode(BlobInfo.newBuilder("b", "n").build()), + ImmutableMap.of(), + 
"upload-id", + 0, + Hasher.enabled(), + Crc32cValue.zero())); + return new Restorable[] {readerV2, writer}; + } + + @SuppressWarnings({"deprecation", "rawtypes"}) + @Test + public void restoreOfV1BlobReadChannelShouldReturnV2Channel() + throws IOException, ClassNotFoundException { + + Properties properties = new Properties(); + try (InputStream is = + SerializationTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/storage/blobReadChannel.ser.properties")) { + properties.load(is); + } + String b64bytes = properties.getProperty("b64bytes"); + assertThat(b64bytes).isNotEmpty(); + + byte[] decode = Base64.getDecoder().decode(b64bytes); + try (ByteArrayInputStream bais = new ByteArrayInputStream(decode); + ObjectInputStream ois = new ObjectInputStream(bais)) { + Object o = ois.readObject(); + assertThat(o).isInstanceOf(RestorableState.class); + RestorableState restorableState = (RestorableState) o; + assertThat(o).isInstanceOf(BlobReadChannel.StateImpl.class); + BlobReadChannel.StateImpl state = (BlobReadChannel.StateImpl) restorableState; + ReadChannel restore = state.restore(); + assertThat(restore).isInstanceOf(BlobReadChannelV2.class); + RestorableState capture = restore.capture(); + assertThat(capture).isInstanceOf(BlobReadChannelV2State.class); + } + } + + @SuppressWarnings({"deprecation", "rawtypes"}) + @Test + public void restoreOfV1BlobWriteChannelShouldReturnV2Channel() + throws IOException, ClassNotFoundException { + + Properties properties = new Properties(); + try (InputStream is = + SerializationTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/storage/blobWriteChannel.ser.properties")) { + properties.load(is); + } + String b64bytes = properties.getProperty("b64bytes"); + assertThat(b64bytes).isNotEmpty(); + + byte[] decode = Base64.getDecoder().decode(b64bytes); + try (ByteArrayInputStream bais = new ByteArrayInputStream(decode); + ObjectInputStream ois = new ObjectInputStream(bais)) { + Object o = 
ois.readObject(); + assertThat(o).isInstanceOf(RestorableState.class); + RestorableState restorableState = (RestorableState) o; + assertThat(o).isInstanceOf(BlobWriteChannel.StateImpl.class); + BlobWriteChannel.StateImpl state = (BlobWriteChannel.StateImpl) restorableState; + WriteChannel restore = state.restore(); + assertThat(restore).isInstanceOf(BlobWriteChannelV2.class); + RestorableState capture = restore.capture(); + assertThat(capture).isInstanceOf(BlobWriteChannelV2State.class); + } + } + + @Test + public void composeRequest() throws IOException, ClassNotFoundException { + + Properties properties = new Properties(); + try (InputStream is = + SerializationTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/storage/composeRequest.ser.properties")) { + properties.load(is); + } + String b64bytes = properties.getProperty("b64bytes"); + assertThat(b64bytes).isNotEmpty(); + + byte[] decode = Base64.getDecoder().decode(b64bytes); + try (ByteArrayInputStream bais = new ByteArrayInputStream(decode); + ObjectInputStream ois = new ObjectInputStream(bais)) { + Object o = ois.readObject(); + assertThat(o).isInstanceOf(ComposeRequest.class); + ComposeRequest composeRequest = (ComposeRequest) o; + assertThat(composeRequest.getSourceBlobs()).hasSize(4); + assertThat(composeRequest.getTarget().getBucket()).isEqualTo("buck"); + assertThat(composeRequest.getTarget().getName()).isEqualTo("comp"); + assertThat(composeRequest.getTargetOptions()) + .containsExactly(BlobTargetOption.doesNotExist()); + assertThat(composeRequest.getTargetOpts()) + .isEqualTo(Opts.from(UnifiedOpts.generationMatch(0))); + } + } + + /** + * Here we override the super classes implementation to remove the "assertNotSame". + * + *

We should not enforce that two instances are not the same. As long as they're equal and have + * the same hashCode that should be sufficient. + */ + @Test + @Override + public void testSerializableObjects() throws Exception { + for (Serializable obj : firstNonNull(serializableObjects(), new Serializable[0])) { + Object copy = serializeAndDeserialize(obj); + assertEquals(obj, obj); + assertEquals(obj, copy); + assertEquals(obj.hashCode(), copy.hashCode()); + assertEquals(obj.toString(), copy.toString()); + assertEquals(copy, copy); + } + } + + @Test + public void blobWriteSessionConfig_pcu() throws IOException, ClassNotFoundException { + ParallelCompositeUploadBlobWriteSessionConfig pcu1 = + BlobWriteSessionConfigs.parallelCompositeUpload(); + ParallelCompositeUploadBlobWriteSessionConfig pcu1copy = serializeAndDeserialize(pcu1); + assertThat(pcu1copy).isNotNull(); + + ParallelCompositeUploadBlobWriteSessionConfig pcu2 = + BlobWriteSessionConfigs.parallelCompositeUpload() + .withBufferAllocationStrategy(BufferAllocationStrategy.fixedPool(1, 3)) + .withPartCleanupStrategy(PartCleanupStrategy.never()) + .withPartNamingStrategy(PartNamingStrategy.prefix("prefix")) + .withExecutorSupplier(ExecutorSupplier.fixedPool(5)) + .withPartMetadataFieldDecorator( + PartMetadataFieldDecorator.setCustomTimeInFuture(Duration.ofMinutes(10))); + ParallelCompositeUploadBlobWriteSessionConfig pcu2copy = serializeAndDeserialize(pcu2); + assertThat(pcu2copy).isNotNull(); + + PartMetadataFieldDecorator noop = PartMetadataFieldDecorator.noOp(); + PartMetadataFieldDecorator noopCopy = serializeAndDeserialize(noop); + assertThat(noopCopy).isSameInstanceAs(noop); + + InvalidClassException invalidClassException = + assertThrows( + InvalidClassException.class, + () -> { + Executor executor = command -> {}; + ParallelCompositeUploadBlobWriteSessionConfig pcu3 = + BlobWriteSessionConfigs.parallelCompositeUpload() + .withExecutorSupplier(ExecutorSupplier.useExecutor(executor)); + // executor is not 
serializable, this should throw an exception + serializeAndDeserialize(pcu3); + }); + + assertThat(invalidClassException) + .hasMessageThat() + .isEqualTo( + "com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig$ExecutorSupplier$SuppliedExecutorSupplier;" + + " Not serializable"); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ServiceAccountTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ServiceAccountTest.java new file mode 100644 index 000000000000..321e64d60bf2 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ServiceAccountTest.java @@ -0,0 +1,46 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static org.junit.Assert.assertEquals; + +import org.junit.Test; + +public class ServiceAccountTest { + + private static final ServiceAccount SERVICE_ACCOUNT = ServiceAccount.of("email"); + + @Test + public void testOf() { + compareServiceAccount(SERVICE_ACCOUNT, ServiceAccount.of("email")); + } + + @Test + public void testToAndFromPb() { + compareServiceAccount( + SERVICE_ACCOUNT, + Conversions.json() + .serviceAccount() + .decode(Conversions.json().serviceAccount().encode(SERVICE_ACCOUNT))); + } + + public void compareServiceAccount(ServiceAccount expected, ServiceAccount value) { + assertEquals(expected, value); + assertEquals(expected.getEmail(), value.getEmail()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/SignatureInfoTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/SignatureInfoTest.java new file mode 100644 index 000000000000..711a84d041fe --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/SignatureInfoTest.java @@ -0,0 +1,138 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.storage.SignatureInfo.Builder; +import com.google.common.hash.Hashing; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; +import org.junit.Test; + +public class SignatureInfoTest { + + private static final String RESOURCE = "/bucketName/blobName"; + + @Test(expected = IllegalArgumentException.class) + public void requireHttpVerb() { + + new SignatureInfo.Builder(null, 0L, URI.create(RESOURCE)).build(); + } + + @Test(expected = IllegalArgumentException.class) + public void requireResource() { + + new SignatureInfo.Builder(HttpMethod.GET, 0L, null).build(); + } + + @Test + public void constructUnsignedPayload() { + + Builder builder = new SignatureInfo.Builder(HttpMethod.PUT, 0L, URI.create(RESOURCE)); + + String unsignedPayload = builder.build().constructUnsignedPayload(); + + assertEquals("PUT\n\n\n0\n" + RESOURCE, unsignedPayload); + } + + @Test + public void constructUnsignedPayloadWithExtensionHeaders() { + + Builder builder = new SignatureInfo.Builder(HttpMethod.PUT, 0L, URI.create(RESOURCE)); + + Map extensionHeaders = new HashMap<>(); + extensionHeaders.put("x-goog-acl", "public-read"); + extensionHeaders.put("x-goog-meta-owner", "myself"); + + builder.setCanonicalizedExtensionHeaders(extensionHeaders); + + String unsignedPayload = builder.build().constructUnsignedPayload(); + + String rawPayload = "PUT\n\n\n0\nx-goog-acl:public-read\nx-goog-meta-owner:myself\n" + RESOURCE; + + assertEquals(rawPayload, unsignedPayload); + } + + @Test + public void constructV4UnsignedPayload() { + Builder builder = new SignatureInfo.Builder(HttpMethod.PUT, 10L, URI.create(RESOURCE)); + + builder.setSignatureVersion(Storage.SignUrlOption.SignatureVersion.V4); + builder.setAccountEmail("me@google.com"); + builder.setTimestamp(1000000000000L); + + String 
unsignedPayload = builder.build().constructUnsignedPayload(); + + assertTrue( + unsignedPayload.startsWith( + "GOOG4-RSA-SHA256\n" + "20010909T014640Z\n" + "20010909/auto/storage/goog4_request\n")); + } + + @Test + public void constructV4QueryString() { + Builder builder = new SignatureInfo.Builder(HttpMethod.PUT, 10L, URI.create(RESOURCE)); + + builder.setSignatureVersion(Storage.SignUrlOption.SignatureVersion.V4); + builder.setAccountEmail("me@google.com"); + builder.setTimestamp(1000000000000L); + + String queryString = builder.build().constructV4QueryString(); + assertEquals( + "X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=me%40google.com%2F20010909%2F" + + "auto%2Fstorage%2Fgoog4_request&X-Goog-Date=20010909T014640Z&X-Goog-Expires=10&X-Goog-SignedHeaders=host", + queryString); + } + + @Test + public void constructV4UnsignedPayloadWithContentSha256Header() { + Builder builder = new SignatureInfo.Builder(HttpMethod.PUT, 10L, URI.create(RESOURCE)); + builder.setSignatureVersion(Storage.SignUrlOption.SignatureVersion.V4); + builder.setAccountEmail("me@google.com"); + builder.setTimestamp(1000000000000L); + + Map extensionHeaders = new HashMap<>(); + // Add the header with a lowercase key, which triggers the bug. 
+ String contentSha256 = "sha256"; + extensionHeaders.put("X-goog-content-sha256", contentSha256); + builder.setCanonicalizedExtensionHeaders(extensionHeaders); + + // This is the payload hash that SHOULD be generated + String correctCanonicalRequest = + "PUT\n" + + "/bucketName/blobName\n" + + "X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=me%40google.com%2F20010909%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20010909T014640Z&X-Goog-Expires=10&X-Goog-SignedHeaders=host%3Bx-goog-content-sha256\n" + + "host:storage.googleapis.com\n" + + "x-goog-content-sha256:" + + contentSha256 + + "\n" + + "\n" + + "host;x-goog-content-sha256\n" + + contentSha256; + String expectedPayloadHash = + Hashing.sha256().hashString(correctCanonicalRequest, StandardCharsets.UTF_8).toString(); + + String unsignedPayload = builder.build().constructUnsignedPayload(); + String[] parts = unsignedPayload.split("\n"); + String generatedPayloadHash = parts[parts.length - 1]; + + assertEquals(expectedPayloadHash, generatedPayloadHash); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageChannelUtilsTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageChannelUtilsTest.java new file mode 100644 index 000000000000..5c6d87c164c8 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageChannelUtilsTest.java @@ -0,0 +1,270 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.StorageChannelUtils.blockingEmptyTo; +import static com.google.cloud.storage.StorageChannelUtils.blockingFillFrom; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.WritableByteChannel; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Test; + +public final class StorageChannelUtilsTest { + + @Test + public void emptyTo_fullyConsumed() throws Exception { + ByteBuffer buf = DataGenerator.base64Characters().genByteBuffer(16); + AtomicInteger writeInvocationCount = new AtomicInteger(0); + int written = + blockingEmptyTo( + buf, + new SimpleWritableByteChannel() { + @Override + public int write(ByteBuffer src) { + int i = writeInvocationCount.getAndIncrement(); + if (i % 2 == 0) { + return 0; + } else { + src.get(); + return 1; + } + } + }); + assertAll( + () -> assertThat(written).isEqualTo(16), + () -> assertThat(writeInvocationCount.get()).isEqualTo(32), + () -> assertThat(buf.hasRemaining()).isFalse()); + } + + @Test + public void emptyTo_errorPropagated() throws Exception { + ByteBuffer buf = DataGenerator.base64Characters().genByteBuffer(16); + AtomicInteger writeInvocationCount = new AtomicInteger(0); + IOException ioException = + assertThrows( + IOException.class, + () -> + blockingEmptyTo( + buf, + new SimpleWritableByteChannel() { + @Override + public int write(ByteBuffer src) throws IOException { + int i = writeInvocationCount.incrementAndGet(); + if (i == 0) { + return 0; + } else if (i == 3) { + throw new IOException("boom boom"); + } else { + src.get(); 
+ return 1; + } + } + })); + assertAll( + () -> assertThat(ioException).hasMessageThat().isEqualTo("boom boom"), + () -> assertThat(writeInvocationCount.get()).isEqualTo(3), + () -> assertThat(buf.position()).isEqualTo(2)); + } + + @Test + public void fillFrom_fullyConsumed_dstGtEq_data() throws Exception { + ByteBuffer data = DataGenerator.base64Characters().genByteBuffer(16); + AtomicInteger readInvocationCount = new AtomicInteger(0); + ByteBuffer buf = ByteBuffer.allocate(32); + int read = + blockingFillFrom( + buf, + new SimpleReadableByteChannel() { + @Override + public int read(ByteBuffer dst) { + readInvocationCount.getAndIncrement(); + if (!data.hasRemaining()) { + return -1; + } else { + dst.put(data.get()); + return 1; + } + } + }); + assertAll( + () -> assertThat(read).isEqualTo(16), + () -> assertThat(readInvocationCount.get()).isEqualTo(16 + 1), // + 1 to read EOF + () -> assertThat(data.hasRemaining()).isFalse(), + () -> assertThat(buf.position()).isEqualTo(16)); + } + + @Test + public void fillFrom_fullyConsumed_dstLt_data() throws Exception { + ByteBuffer data = DataGenerator.base64Characters().genByteBuffer(16); + AtomicInteger readInvocationCount = new AtomicInteger(0); + ByteBuffer buf = ByteBuffer.allocate(8); + int read = + blockingFillFrom( + buf, + new SimpleReadableByteChannel() { + @Override + public int read(ByteBuffer dst) { + readInvocationCount.getAndIncrement(); + if (!data.hasRemaining()) { + return -1; + } else { + dst.put(data.get()); + return 1; + } + } + }); + assertAll( + () -> assertThat(read).isEqualTo(8), + () -> assertThat(readInvocationCount.get()).isEqualTo(8), + () -> assertThat(data.hasRemaining()).isTrue(), + () -> assertThat(buf.position()).isEqualTo(8)); + } + + @Test + public void fillFrom_eofPropagated() throws Exception { + AtomicInteger readInvocationCount = new AtomicInteger(0); + ByteBuffer buf = ByteBuffer.allocate(8); + int read = + blockingFillFrom( + buf, + new SimpleReadableByteChannel() { + @Override + 
public int read(ByteBuffer dst) { + readInvocationCount.getAndIncrement(); + return -1; + } + }); + assertAll( + () -> assertThat(read).isEqualTo(-1), + () -> assertThat(readInvocationCount.get()).isEqualTo(1), + () -> assertThat(buf.position()).isEqualTo(0)); + } + + @Test + public void fillFrom_errorPropagated() throws Exception { + ByteBuffer data = DataGenerator.base64Characters().genByteBuffer(16); + AtomicInteger readInvocationCount = new AtomicInteger(0); + ByteBuffer buf = ByteBuffer.allocate(32); + IOException ioException = + assertThrows( + IOException.class, + () -> + blockingFillFrom( + buf, + new SimpleReadableByteChannel() { + @Override + public int read(ByteBuffer dst) throws IOException { + int i = readInvocationCount.incrementAndGet(); + if (i == 0) { + return 0; + } else if (i == 3) { + throw new IOException("boom boom"); + } else { + dst.put(data.get()); + return 1; + } + } + })); + assertAll( + () -> assertThat(ioException).hasMessageThat().isEqualTo("boom boom"), + () -> assertThat(readInvocationCount.get()).isEqualTo(3), + () -> assertThat(buf.position()).isEqualTo(2), + () -> assertThat(buf.position()).isEqualTo(2)); + } + + @Test + public void fillFrom_handles_0SizeRead_someBytesRead() throws Exception { + byte[] bytes = new byte[14]; + ByteBuffer buf = ByteBuffer.wrap(bytes); + + byte[] expected = + new byte[] { + (byte) 'A', + (byte) 'B', + (byte) 'C', + (byte) 'A', + (byte) 'B', + (byte) 'A', + (byte) 'A', + (byte) 'A', + (byte) 'B', + (byte) 'A', + (byte) 'B', + (byte) 'C', + (byte) 0, + (byte) 0 + }; + + int[] acceptSequence = new int[] {3, 2, 1, 0, 0, 1, 2, 3}; + AtomicInteger readCount = new AtomicInteger(0); + + int filled = + blockingFillFrom( + buf, + new SimpleReadableByteChannel() { + @Override + public int read(ByteBuffer dst) { + int i = readCount.getAndIncrement(); + if (i == acceptSequence.length) { + return -1; + } + int bytesToRead = acceptSequence[i]; + if (bytesToRead > 0) { + long copy = + Buffers.copy( + 
DataGenerator.base64Characters().genByteBuffer(bytesToRead), dst); + assertThat(copy).isEqualTo(bytesToRead); + } + + return bytesToRead; + } + }); + + assertAll( + () -> assertThat(filled).isEqualTo(12), + () -> assertThat(xxd(bytes)).isEqualTo(xxd(expected))); + } + + private abstract static class SimpleWritableByteChannel implements WritableByteChannel { + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() {} + } + + private abstract static class SimpleReadableByteChannel implements ReadableByteChannel { + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() {} + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageDataClientTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageDataClientTest.java new file mode 100644 index 000000000000..c3ec75fd7d7c --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageDataClientTest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.ReadRange; +import java.time.Duration; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public final class StorageDataClientTest { + + @Mock public ScheduledExecutorService exec; + + @Test + public void readSession_requestWithRangeRead_noAllowed() throws Exception { + try (StorageDataClient dc = + StorageDataClient.create(exec, Duration.ofSeconds(2), null, null, IOAutoCloseable.noOp())) { + assertThrows( + IllegalArgumentException.class, + () -> { + BidiReadObjectRequest req = + BidiReadObjectRequest.newBuilder() + .addReadRanges(ReadRange.newBuilder().setReadId(1)) + .build(); + dc.readSession(req, GrpcCallContext.createDefault()); + }); + } + } + + @Test + public void executorServiceProvidedShouldBeClosed() throws Exception { + assertThat(exec).isNotNull(); + StorageDataClient sdc = + StorageDataClient.create(exec, Duration.ofSeconds(2), null, null, IOAutoCloseable.noOp()); + + sdc.close(); + verify(exec, times(1)).shutdownNow(); + verify(exec, times(1)).awaitTermination(TimeUnit.SECONDS.toNanos(2), TimeUnit.NANOSECONDS); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageExceptionGrpcCompatibilityTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageExceptionGrpcCompatibilityTest.java new file mode 100644 index 000000000000..106e59449344 --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageExceptionGrpcCompatibilityTest.java @@ -0,0 +1,279 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.ErrorDetails; +import com.google.cloud.BaseServiceException; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.Any; +import com.google.protobuf.TextFormat; +import com.google.protobuf.TextFormat.Printer; +import com.google.rpc.BadRequest; +import com.google.rpc.BadRequest.FieldViolation; +import com.google.rpc.DebugInfo; +import com.google.rpc.ErrorInfo; +import com.google.rpc.Help; +import com.google.rpc.Help.Link; +import com.google.rpc.LocalizedMessage; +import com.google.rpc.PreconditionFailure; +import com.google.rpc.QuotaFailure; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import java.util.List; +import org.junit.Test; + +public final class StorageExceptionGrpcCompatibilityTest { + + @Test + public void testCoalesce_CANCELLED() { + doTestCoalesce(0, Code.CANCELLED); + } + + @Test + public void testCoalesce_UNKNOWN() { + 
doTestCoalesce(0, Code.UNKNOWN); + } + + @Test + public void testCoalesce_INVALID_ARGUMENT() { + doTestCoalesce(400, Code.INVALID_ARGUMENT); + } + + @Test + public void testCoalesce_DEADLINE_EXCEEDED() { + doTestCoalesce(504, Code.DEADLINE_EXCEEDED); + } + + @Test + public void testCoalesce_NOT_FOUND() { + doTestCoalesce(404, Code.NOT_FOUND); + } + + @Test + public void testCoalesce_ALREADY_EXISTS() { + doTestCoalesce(409, Code.ALREADY_EXISTS); + } + + @Test + public void testCoalesce_PERMISSION_DENIED() { + doTestCoalesce(403, Code.PERMISSION_DENIED); + } + + @Test + public void testCoalesce_RESOURCE_EXHAUSTED() { + doTestCoalesce(429, Code.RESOURCE_EXHAUSTED); + } + + @Test + public void testCoalesce_FAILED_PRECONDITION() { + doTestCoalesce(412, Code.FAILED_PRECONDITION); + } + + @Test + public void testCoalesce_ABORTED() { + doTestCoalesce(409, Code.ABORTED); + } + + @Test + public void testCoalesce_OUT_OF_RANGE() { + doTestCoalesce(400, Code.OUT_OF_RANGE); + } + + @Test + public void testCoalesce_UNIMPLEMENTED() { + doTestCoalesce(501, Code.UNIMPLEMENTED); + } + + @Test + public void testCoalesce_INTERNAL() { + doTestCoalesce(500, Code.INTERNAL); + } + + @Test + public void testCoalesce_UNAVAILABLE() { + doTestCoalesce(503, Code.UNAVAILABLE); + } + + @Test + public void testCoalesce_DATA_LOSS() { + doTestCoalesce(400, Code.DATA_LOSS); + } + + @Test + public void testCoalesce_UNAUTHENTICATED() { + doTestCoalesce(401, Code.UNAUTHENTICATED); + } + + @Test + public void apiExceptionErrorDetails() throws Exception { + ErrorInfo errorInfo = + ErrorInfo.newBuilder() + .setReason("STACKOUT") + .setDomain("spanner.googlepais.com") + .putMetadata("availableRegions", "us-central1,us-east2") + .build(); + DebugInfo debugInfo = + DebugInfo.newBuilder() + .addStackEntries("HEAD") + .addStackEntries("HEAD~1") + .addStackEntries("HEAD~2") + .addStackEntries("HEAD~3") + .setDetail("some detail") + .build(); + QuotaFailure quotaFailure = + QuotaFailure.newBuilder() + 
.addViolations( + QuotaFailure.Violation.newBuilder() + .setSubject("clientip:127.0.3.3") + .setDescription("Daily limit") + .build()) + .build(); + PreconditionFailure preconditionFailure = + PreconditionFailure.newBuilder() + .addViolations( + PreconditionFailure.Violation.newBuilder() + .setType("TOS") + .setSubject("google.com/cloud") + .setDescription("Terms of service not accepted") + .build()) + .build(); + BadRequest badRequest = + BadRequest.newBuilder() + .addFieldViolations( + FieldViolation.newBuilder() + .setField("email_addresses[3].type[2]") + .setDescription("duplicate value 'WORK'") + .setReason("INVALID_EMAIL_ADDRESS_TYPE") + .setLocalizedMessage( + LocalizedMessage.newBuilder() + .setLocale("en-US") + .setMessage("Invalid email type: duplicate value") + .build()) + .build()) + .build(); + Help help = + Help.newBuilder() + .addLinks( + Link.newBuilder().setDescription("link1").setUrl("https://google.com").build()) + .build(); + List errors = + ImmutableList.of( + Any.pack(errorInfo), + Any.pack(debugInfo), + Any.pack(quotaFailure), + Any.pack(preconditionFailure), + Any.pack(badRequest), + Any.pack(help)); + ErrorDetails errorDetails = ErrorDetails.builder().setRawErrorMessages(errors).build(); + ApiException ae = + ApiExceptionFactory.createException( + Code.OUT_OF_RANGE.toStatus().asRuntimeException(), + GrpcStatusCode.of(Code.OUT_OF_RANGE), + false, + errorDetails); + + BaseServiceException se = StorageException.coalesce(ae); + String message = se.getCause().getSuppressed()[0].getMessage(); + Printer printer = TextFormat.printer(); + assertAll( + () -> assertThat(message).contains("ErrorDetails {"), + () -> assertThat(message).contains(printer.shortDebugString(errorInfo)), + () -> assertThat(message).contains(printer.shortDebugString(debugInfo)), + () -> assertThat(message).contains(printer.shortDebugString(quotaFailure)), + () -> assertThat(message).contains(printer.shortDebugString(preconditionFailure)), + () -> 
assertThat(message).contains(printer.shortDebugString(badRequest)), + () -> assertThat(message).contains(printer.shortDebugString(help)), + () -> assertThat(message).contains("\t}")); + } + + @SuppressWarnings("ThrowableNotThrown") + @Test + public void apiExceptionErrorDetails_onlyAttachedOnce() throws Exception { + Help help = + Help.newBuilder() + .addLinks( + Link.newBuilder().setDescription("link1").setUrl("https://google.com").build()) + .build(); + List errors = ImmutableList.of(Any.pack(help)); + ErrorDetails errorDetails = ErrorDetails.builder().setRawErrorMessages(errors).build(); + + ApiException ex = + ApiExceptionFactory.createException( + Code.OUT_OF_RANGE.toStatus().asRuntimeException(), + GrpcStatusCode.of(Code.OUT_OF_RANGE), + false, + errorDetails); + + // apply a coalesce to the exception -- similar to what a retry algorithm might do to determine + // retryability. This is not ideal, as it is unpure but it is the way things are today with the + // structure of storage exception and ApiException. 
+ BaseServiceException ignore1 = StorageException.coalesce(ex); + BaseServiceException se = StorageException.coalesce(ex); + + String message = TestUtils.messagesToText(se); + Printer printer = TextFormat.printer(); + assertAll( + () -> assertThat(message).contains("ErrorDetails {"), + () -> assertThat(message).contains(printer.shortDebugString(help)), + () -> assertThat(message).contains("\t}"), + () -> { + // make sure the error details are only attached to the exception once + String str = "ErrorDetails {"; + int indexOf1 = message.indexOf(str); + int indexOf2 = message.indexOf(str, indexOf1 + str.length()); + assertThat(indexOf2).isEqualTo(-1); + }); + } + + private void doTestCoalesce(int expectedCode, Code code) { + Status status = code.toStatus(); + GrpcStatusCode statusCode = GrpcStatusCode.of(code); + ErrorInfo errorInfo = + ErrorInfo.newBuilder() + .setReason("reason") + .setDomain("global") + .putMetadata("errors", "x") + .build(); + + DebugInfo debugInfo = + DebugInfo.newBuilder() + .setDetail( + "bw-storage-dev-region-fine@default-223119.iam.gserviceaccount.com does not have" + + " storage.hmacKeys.list access to the Google Cloud project.") + .build(); + + ImmutableList anys = ImmutableList.of(Any.pack(errorInfo), Any.pack(debugInfo)); + ErrorDetails errorDetails = ErrorDetails.builder().setRawErrorMessages(anys).build(); + + StatusRuntimeException cause = + new StatusRuntimeException(status.withDescription(debugInfo.getDetail())); + ApiException x = ApiExceptionFactory.createException(cause, statusCode, false, errorDetails); + + BaseServiceException ex = StorageException.coalesce(x); + assertThat(ex.getCode()).isEqualTo(expectedCode); + assertThat(ex.getReason()).isEqualTo(x.getReason()); + assertThat(ex.getMessage()).contains(x.getErrorDetails().getDebugInfo().getDetail()); + assertThat(ex).hasCauseThat().isEqualTo(x); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageImplMockitoTest.java 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageImplMockitoTest.java new file mode 100644 index 000000000000..ec9dfe9e7460 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageImplMockitoTest.java @@ -0,0 +1,1041 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; + +import com.google.api.core.ApiClock; +import com.google.api.gax.paging.Page; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.ServiceOptions; +import com.google.cloud.Tuple; +import com.google.cloud.storage.spi.StorageRpcFactory; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterables; +import com.google.common.io.BaseEncoding; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.math.BigInteger; +import java.nio.file.Files; +import java.nio.file.Path; 
+import java.security.Key; +import java.security.KeyFactory; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.PublicKey; +import java.security.spec.EncodedKeySpec; +import java.security.spec.InvalidKeySpecException; +import java.security.spec.PKCS8EncodedKeySpec; +import java.security.spec.X509EncodedKeySpec; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import javax.crypto.spec.SecretKeySpec; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; + +public class StorageImplMockitoTest { + + private static final String BUCKET_NAME1 = "b1"; + private static final String BUCKET_NAME2 = "b2"; + private static final String BUCKET_NAME3 = "b3"; + private static final String BLOB_NAME1 = "n1"; + private static final String BLOB_NAME2 = "n2"; + private static final String BLOB_NAME3 = "n3"; + private static final byte[] BLOB_CONTENT = {0xD, 0xE, 0xA, 0xD}; + private static final byte[] BLOB_SUB_CONTENT = {0xE, 0xA}; + private static final String CONTENT_MD5 = "O1R4G1HJSDUISJjoIYmVhQ=="; + private static final String CONTENT_CRC32C = "9N3EPQ=="; + private static final String SUB_CONTENT_MD5 = "5e7c7CdasUiOn3BO560jPg=="; + private static final String SUB_CONTENT_CRC32C = "bljNYA=="; + private static final int DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024; + private static final String BASE64_KEY = "JVzfVl8NLD9FjedFuStegjRfES5ll5zc59CIXw572OA="; + private static final Key KEY = + new SecretKeySpec(BaseEncoding.base64().decode(BASE64_KEY), "AES256"); + private static final String KMS_KEY_NAME = + "projects/gcloud-devel/locations/us/keyRings/gcs_kms_key_ring_us/cryptoKeys/key"; + private static final Long RETENTION_PERIOD = 10L; + private static final int DEFAULT_BUFFER_SIZE = 15 * 1024 * 1024; + private static final int MIN_BUFFER_SIZE = 256 * 1024; + protected static final 
long PAGE_SIZE = 42L; + // BucketInfo objects + private static final BucketInfo BUCKET_INFO1 = + BucketInfo.newBuilder(BUCKET_NAME1).setMetageneration(PAGE_SIZE).build(); + private static final BucketInfo BUCKET_INFO2 = BucketInfo.newBuilder(BUCKET_NAME2).build(); + private static final BucketInfo BUCKET_INFO3 = + BucketInfo.newBuilder(BUCKET_NAME3) + .setRetentionPeriod(RETENTION_PERIOD) + .setRetentionPolicyIsLocked(true) + .setMetageneration(PAGE_SIZE) + .build(); + + // BlobInfo objects + private static final BlobInfo BLOB_INFO1 = + BlobInfo.newBuilder(BUCKET_NAME1, BLOB_NAME1, 24L) + .setMetageneration(PAGE_SIZE) + .setContentType("application/json") + .build(); + private static final BlobInfo BLOB_INFO2 = BlobInfo.newBuilder(BUCKET_NAME1, BLOB_NAME2).build(); + private static final BlobInfo BLOB_INFO3 = BlobInfo.newBuilder(BUCKET_NAME1, BLOB_NAME3).build(); + + private static final BlobInfo BLOB_INFO_WITH_HASHES = + BLOB_INFO1.toBuilder().setCrc32c(CONTENT_CRC32C).build(); + private static final BlobInfo BLOB_INFO_WITHOUT_HASHES = + BLOB_INFO1.toBuilder().setCrc32c(null).build(); + + // Empty StorageRpc options + private static final Map EMPTY_RPC_OPTIONS = ImmutableMap.of(); + private static final Map BLOB_INFO1_RPC_OPTIONS_WITH_GENERATION = + ImmutableMap.of(StorageRpc.Option.IF_GENERATION_MATCH, 24L); + + // Bucket target options + private static final Storage.BucketTargetOption BUCKET_TARGET_METAGENERATION = + Storage.BucketTargetOption.metagenerationMatch(); + private static final Storage.BucketTargetOption BUCKET_TARGET_PREDEFINED_ACL = + Storage.BucketTargetOption.predefinedAcl(Storage.PredefinedAcl.PRIVATE); + private static final Map BUCKET_TARGET_OPTIONS = + ImmutableMap.of( + StorageRpc.Option.IF_METAGENERATION_MATCH, BUCKET_INFO1.getMetageneration(), + StorageRpc.Option.PREDEFINED_ACL, Storage.PredefinedAcl.PRIVATE.getEntry()); + + private static final Storage.BlobTargetOption BLOB_TARGET_METAGENERATION = + 
Storage.BlobTargetOption.metagenerationMatch(); + private static final Storage.BlobTargetOption BLOB_TARGET_DISABLE_GZIP_CONTENT = + Storage.BlobTargetOption.disableGzipContent(); + private static final Storage.BlobTargetOption BLOB_TARGET_NOT_EXIST = + Storage.BlobTargetOption.doesNotExist(); + private static final Storage.BlobTargetOption BLOB_TARGET_PREDEFINED_ACL = + Storage.BlobTargetOption.predefinedAcl(Storage.PredefinedAcl.PRIVATE); + private static final Map BLOB_TARGET_OPTIONS_CREATE = + ImmutableMap.of( + StorageRpc.Option.IF_METAGENERATION_MATCH, BLOB_INFO1.getMetageneration(), + StorageRpc.Option.IF_GENERATION_MATCH, 0L, + StorageRpc.Option.PREDEFINED_ACL, Storage.PredefinedAcl.PRIVATE.getEntry()); + private static final Map BLOB_TARGET_OPTIONS_CREATE_DISABLE_GZIP_CONTENT = + ImmutableMap.of(StorageRpc.Option.IF_DISABLE_GZIP_CONTENT, true); + + // Blob write options (create, writer) + private static final Storage.BlobWriteOption BLOB_WRITE_METAGENERATION = + Storage.BlobWriteOption.metagenerationMatch(); + private static final Storage.BlobWriteOption BLOB_WRITE_NOT_EXIST = + Storage.BlobWriteOption.doesNotExist(); + private static final Storage.BlobWriteOption BLOB_WRITE_PREDEFINED_ACL = + Storage.BlobWriteOption.predefinedAcl(Storage.PredefinedAcl.PRIVATE); + private static final Storage.BlobWriteOption BLOB_WRITE_MD5_HASH = + Storage.BlobWriteOption.md5Match(); + private static final Storage.BlobWriteOption BLOB_WRITE_CRC2C = + Storage.BlobWriteOption.crc32cMatch(); + + // Bucket get/source options + private static final Storage.BucketGetOption BUCKET_GET_METAGENERATION = + Storage.BucketGetOption.metagenerationMatch(BUCKET_INFO1.getMetageneration()); + private static final Storage.BucketGetOption BUCKET_GET_FIELDS = + Storage.BucketGetOption.fields(Storage.BucketField.LOCATION, Storage.BucketField.ACL); + private static final Storage.BucketGetOption BUCKET_GET_EMPTY_FIELDS = + Storage.BucketGetOption.fields(); + private static final Map 
BUCKET_GET_OPTIONS = + ImmutableMap.of(StorageRpc.Option.IF_METAGENERATION_MATCH, BUCKET_INFO1.getMetageneration()); + + // Blob get/source options + private static final Storage.BlobGetOption BLOB_GET_METAGENERATION = + Storage.BlobGetOption.metagenerationMatch(BLOB_INFO1.getMetageneration()); + private static final Storage.BlobGetOption BLOB_GET_GENERATION = + Storage.BlobGetOption.generationMatch(BLOB_INFO1.getGeneration()); + private static final Storage.BlobGetOption BLOB_GET_GENERATION_FROM_BLOB_ID = + Storage.BlobGetOption.generationMatch(); + private static final Storage.BlobGetOption BLOB_GET_FIELDS = + Storage.BlobGetOption.fields(Storage.BlobField.CONTENT_TYPE, Storage.BlobField.CRC32C); + private static final Storage.BlobGetOption BLOB_GET_EMPTY_FIELDS = Storage.BlobGetOption.fields(); + private static final Map BLOB_GET_OPTIONS = + ImmutableMap.of( + StorageRpc.Option.IF_METAGENERATION_MATCH, BLOB_INFO1.getMetageneration(), + StorageRpc.Option.IF_GENERATION_MATCH, BLOB_INFO1.getGeneration()); + private static final Storage.BlobSourceOption BLOB_SOURCE_METAGENERATION = + Storage.BlobSourceOption.metagenerationMatch(BLOB_INFO1.getMetageneration()); + private static final Storage.BlobSourceOption BLOB_SOURCE_GENERATION = + Storage.BlobSourceOption.generationMatch(BLOB_INFO1.getGeneration()); + private static final Storage.BlobSourceOption BLOB_SOURCE_GENERATION_FROM_BLOB_ID = + Storage.BlobSourceOption.generationMatch(); + private static final Map BLOB_SOURCE_OPTIONS = + ImmutableMap.of( + StorageRpc.Option.IF_METAGENERATION_MATCH, BLOB_INFO1.getMetageneration(), + StorageRpc.Option.IF_GENERATION_MATCH, BLOB_INFO1.getGeneration()); + + // Bucket list options + private static final Storage.BucketListOption BUCKET_LIST_PAGE_SIZE = + Storage.BucketListOption.pageSize(PAGE_SIZE); + private static final Storage.BucketListOption BUCKET_LIST_PREFIX = + Storage.BucketListOption.prefix("prefix"); + private static final Storage.BucketListOption BUCKET_LIST_FIELDS = 
+ Storage.BucketListOption.fields(Storage.BucketField.LOCATION, Storage.BucketField.ACL); + private static final Storage.BucketListOption BUCKET_LIST_EMPTY_FIELDS = + Storage.BucketListOption.fields(); + private static final Map BUCKET_LIST_OPTIONS = + ImmutableMap.of(StorageRpc.Option.MAX_RESULTS, PAGE_SIZE, StorageRpc.Option.PREFIX, "prefix"); + private static final Map BUCKET_LIST_PARTIAL_SUCCESS_OPTION = + ImmutableMap.of(StorageRpc.Option.RETURN_PARTIAL_SUCCESS, true); + + // Blob list options + private static final Storage.BlobListOption BLOB_LIST_PAGE_SIZE = + Storage.BlobListOption.pageSize(PAGE_SIZE); + private static final Storage.BlobListOption BLOB_LIST_PREFIX = + Storage.BlobListOption.prefix("prefix"); + private static final Storage.BlobListOption BLOB_LIST_FIELDS = + Storage.BlobListOption.fields(Storage.BlobField.CONTENT_TYPE, Storage.BlobField.MD5HASH); + private static final Storage.BlobListOption BLOB_LIST_VERSIONS = + Storage.BlobListOption.versions(false); + private static final Storage.BlobListOption BLOB_LIST_EMPTY_FIELDS = + Storage.BlobListOption.fields(); + private static final Map BLOB_LIST_OPTIONS = + ImmutableMap.of( + StorageRpc.Option.MAX_RESULTS, + PAGE_SIZE, + StorageRpc.Option.PREFIX, + "prefix", + StorageRpc.Option.VERSIONS, + false); + + // Customer supplied encryption key options + private static final Map ENCRYPTION_KEY_OPTIONS = + ImmutableMap.of(StorageRpc.Option.CUSTOMER_SUPPLIED_KEY, BASE64_KEY); + + // Customer managed encryption key options + private static final Map KMS_KEY_NAME_OPTIONS = + ImmutableMap.of(StorageRpc.Option.KMS_KEY_NAME, KMS_KEY_NAME); + + private static final String PRIVATE_KEY_STRING = + "MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoG" + + "BAL2xolH1zrISQ8+GzOV29BNjjzq4/HIP8Psd1+cZb81vDklSF+95wB250MSE0BDc81pvIMwj5OmIfLg1NY6uB" + + "1xavOPpVdx1z664AGc/BEJ1zInXGXaQ6s+SxGenVq40Yws57gikQGMZjttpf1Qbz4DjkxsbRoeaRHn06n9pH1e" + + 
"jAgMBAAECgYEAkWcm0AJF5LMhbWKbjkxm/LG06UNApkHX6vTOOOODkonM/qDBnhvKCj8Tan+PaU2j7679Cd19q" + + "xCm4SBQJET7eBhqLD9L2j9y0h2YUQnLbISaqUS1/EXcr2C1Lf9VCEn1y/GYuDYqs85rGoQ4ZYfM9ClROSq86fH" + + "+cbIIssqJqukCQQD18LjfJz/ichFeli5/l1jaFid2XoCH3T6TVuuysszVx68fh60gSIxEF/0X2xB+wuPxTP4IQ" + + "+t8tD/ktd232oWXAkEAxXPych2QBHePk9/lek4tOkKBgfnDzex7S/pI0G1vpB3VmzBbCsokn9lpOv7JV8071GD" + + "lW/7R6jlLfpQy3hN31QJAE10osSk99m5Uv8XDU3hvHnywDrnSFOBulNs7I47AYfSe7TSZhPkxUgsxejddTR27J" + + "LyTI8N1PxRSE4feNSOXcQJAMMKJRJT4U6IS2rmXubREhvaVdLtxFxEnAYQ1JwNfZm/XqBMw6GEy2iaeTetNXVl" + + "ZRQEIoscyn1y2v/No/F5iYQJBAKBOGASoQcBjGTOg/H/SfcE8QVNsKEpthRrs6CkpT80aZ/AV+ksfoIf2zw2M3" + + "mAHfrO+TBLdz4sicuFQvlN9SEc="; + + private static final String PUBLIC_KEY_STRING = + "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC9saJR9c6y" + + "EkPPhszldvQTY486uPxyD/D7HdfnGW/Nbw5JUhfvecAdudDEhNAQ3PNabyDMI+TpiHy4NTWOrgdcWrzj6VXcdc" + + "+uuABnPwRCdcyJ1xl2kOrPksRnp1auNGMLOe4IpEBjGY7baX9UG8+A45MbG0aHmkR59Op/aR9XowIDAQAB"; + + private static final ApiClock TIME_SOURCE = + new ApiClock() { + @Override + public long nanoTime() { + return 42_000_000_000L; + } + + @Override + public long millisTime() { + return 42_000L; + } + }; + + // Notification + private static final String ETAG = "0xFF00"; + private static final String GENERATED_ID = "B/N:1"; + private static final String SELF_LINK = "http://storage/b/n"; + private static final Notification.EventType[] EVENT_TYPES = { + Notification.EventType.OBJECT_FINALIZE, Notification.EventType.OBJECT_METADATA_UPDATE + }; + private static final String OBJECT_NAME_PREFIX = "index.html"; + private static final Notification.PayloadFormat PAYLOAD_FORMAT = + Notification.PayloadFormat.JSON_API_V1.JSON_API_V1; + private static final String TOPIC = "projects/myProject/topics/topic1"; + private static final Map CUSTOM_ATTRIBUTES = ImmutableMap.of("label1", "value1"); + private static final NotificationInfo NOTIFICATION_INFO_01 = + NotificationInfo.newBuilder(TOPIC) + .setEtag(ETAG) + 
.setCustomAttributes(CUSTOM_ATTRIBUTES) + .setSelfLink(SELF_LINK) + .setEventTypes(EVENT_TYPES) + .setObjectNamePrefix(OBJECT_NAME_PREFIX) + .setPayloadFormat(PAYLOAD_FORMAT) + .build(); + private static final NotificationInfo NOTIFICATION_INFO_02 = + NotificationInfo.newBuilder(TOPIC) + .setEtag(ETAG) + .setCustomAttributes(CUSTOM_ATTRIBUTES) + .setSelfLink(SELF_LINK) + .setEventTypes(EVENT_TYPES) + .setObjectNamePrefix(OBJECT_NAME_PREFIX) + .setPayloadFormat(PAYLOAD_FORMAT) + .build(); + + private static PrivateKey privateKey; + private static PublicKey publicKey; + + private StorageOptions options; + private StorageRpcFactory rpcFactoryMock; + private StorageRpc storageRpcMock; + private Storage storage; + private com.google.api.services.storage.Storage apiary; + + private Blob expectedBlob1, expectedBlob2, expectedBlob3, expectedUpdated; + private Bucket expectedBucket1, expectedBucket2, expectedBucket3; + + @BeforeClass + public static void beforeClass() throws NoSuchAlgorithmException, InvalidKeySpecException { + KeyFactory keyFactory = KeyFactory.getInstance("RSA"); + EncodedKeySpec privateKeySpec = + new PKCS8EncodedKeySpec(BaseEncoding.base64().decode(PRIVATE_KEY_STRING)); + privateKey = keyFactory.generatePrivate(privateKeySpec); + EncodedKeySpec publicKeySpec = + new X509EncodedKeySpec(BaseEncoding.base64().decode(PUBLIC_KEY_STRING)); + publicKey = keyFactory.generatePublic(publicKeySpec); + } + + private static final RuntimeException STORAGE_FAILURE = + new RuntimeException("Something went wrong"); + + private static final RuntimeException UNEXPECTED_CALL_EXCEPTION = + new RuntimeException("Unexpected call"); + private static final Answer UNEXPECTED_CALL_ANSWER = + invocation -> { + throw new IllegalArgumentException( + "Unexpected call of " + + invocation.getMethod() + + " with " + + Arrays.toString(invocation.getArguments())); + }; + + @Before + public void setUp() { + rpcFactoryMock = mock(StorageRpcFactory.class, UNEXPECTED_CALL_ANSWER); + 
storageRpcMock = mock(StorageRpc.class, UNEXPECTED_CALL_ANSWER); + apiary = mock(com.google.api.services.storage.Storage.class, UNEXPECTED_CALL_ANSWER); + doReturn(storageRpcMock).when(rpcFactoryMock).create(Mockito.any(StorageOptions.class)); + doReturn(apiary).when(storageRpcMock).getStorage(); + options = + StorageOptions.http() + .setProjectId("projectId") + .setClock(TIME_SOURCE) + .setServiceRpcFactory(rpcFactoryMock) + .setRetrySettings(ServiceOptions.getNoRetrySettings()) + .build(); + } + + private void initializeService() { + storage = options.getService(); + initializeServiceDependentObjects(); + } + + private void initializeServiceDependentObjects() { + expectedBlob1 = new Blob(storage, new BlobInfo.BuilderImpl(BLOB_INFO1)); + expectedBlob2 = new Blob(storage, new BlobInfo.BuilderImpl(BLOB_INFO2)); + expectedBlob3 = new Blob(storage, new BlobInfo.BuilderImpl(BLOB_INFO3)); + expectedBucket1 = new Bucket(storage, new BucketInfo.BuilderImpl(BUCKET_INFO1)); + expectedBucket2 = new Bucket(storage, new BucketInfo.BuilderImpl(BUCKET_INFO2)); + expectedBucket3 = new Bucket(storage, new BucketInfo.BuilderImpl(BUCKET_INFO3)); + expectedUpdated = null; + } + + @Test + public void testGetOptions() { + initializeService(); + assertSame(options, storage.getOptions()); + } + + @Test + public void testCreateBucket() { + doReturn(Conversions.json().bucketInfo().encode(BUCKET_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .create(Conversions.json().bucketInfo().encode(BUCKET_INFO1), EMPTY_RPC_OPTIONS); + initializeService(); + Bucket bucket = storage.create(BUCKET_INFO1); + assertEquals(expectedBucket1, bucket); + } + + @Test + public void testCreateBucketWithOptions() { + doReturn(Conversions.json().bucketInfo().encode(BUCKET_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .create(Conversions.json().bucketInfo().encode(BUCKET_INFO1), BUCKET_TARGET_OPTIONS); + initializeService(); + Bucket bucket = + 
storage.create(BUCKET_INFO1, BUCKET_TARGET_METAGENERATION, BUCKET_TARGET_PREDEFINED_ACL); + assertEquals(expectedBucket1, bucket); + } + + @Test + public void testCreateBucketFailure() { + doThrow(STORAGE_FAILURE) + .when(storageRpcMock) + .create(Conversions.json().bucketInfo().encode(BUCKET_INFO1), EMPTY_RPC_OPTIONS); + initializeService(); + try { + storage.create(BUCKET_INFO1); + fail(); + } catch (StorageException e) { + assertEquals(STORAGE_FAILURE, e.getCause()); + } + } + + @Test + public void testGetBucket() { + doReturn(Conversions.json().bucketInfo().encode(BUCKET_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .get( + Conversions.json().bucketInfo().encode(BucketInfo.of(BUCKET_NAME1)), EMPTY_RPC_OPTIONS); + initializeService(); + Bucket bucket = storage.get(BUCKET_NAME1); + assertEquals(expectedBucket1, bucket); + } + + @Test + public void testGetBucketWithOptions() { + doReturn(Conversions.json().bucketInfo().encode(BUCKET_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .get( + Conversions.json().bucketInfo().encode(BucketInfo.of(BUCKET_NAME1)), + BUCKET_GET_OPTIONS); + initializeService(); + Bucket bucket = storage.get(BUCKET_NAME1, BUCKET_GET_METAGENERATION); + assertEquals(expectedBucket1, bucket); + } + + @Test + public void testGetBucketWithSelectedFields() { + ArgumentCaptor> capturedOptions = + ArgumentCaptor.forClass(Map.class); + doReturn(Conversions.json().bucketInfo().encode(BUCKET_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .get( + Mockito.eq(Conversions.json().bucketInfo().encode(BucketInfo.of(BUCKET_NAME1))), + capturedOptions.capture()); + initializeService(); + Bucket bucket = storage.get(BUCKET_NAME1, BUCKET_GET_METAGENERATION, BUCKET_GET_FIELDS); + assertEquals( + BUCKET_INFO1.getMetageneration(), + capturedOptions.getValue().get(StorageRpc.Option.IF_METAGENERATION_MATCH)); + String selector = (String) capturedOptions.getValue().get(StorageRpc.Option.FIELDS); 
+ assertTrue(selector.contains("name")); + assertTrue(selector.contains("location")); + assertTrue(selector.contains("acl")); + assertEquals(17, selector.length()); + assertEquals(BUCKET_INFO1.getName(), bucket.getName()); + } + + @Test + public void testGetBucketWithEmptyFields() { + ArgumentCaptor> capturedOptions = + ArgumentCaptor.forClass(Map.class); + doReturn(Conversions.json().bucketInfo().encode(BUCKET_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .get( + Mockito.eq(Conversions.json().bucketInfo().encode(BucketInfo.of(BUCKET_NAME1))), + capturedOptions.capture()); + initializeService(); + Bucket bucket = storage.get(BUCKET_NAME1, BUCKET_GET_METAGENERATION, BUCKET_GET_EMPTY_FIELDS); + assertEquals( + BUCKET_INFO1.getMetageneration(), + capturedOptions.getValue().get(StorageRpc.Option.IF_METAGENERATION_MATCH)); + String selector = (String) capturedOptions.getValue().get(StorageRpc.Option.FIELDS); + assertTrue(selector.contains("name")); + assertEquals(4, selector.length()); + assertEquals(BUCKET_INFO1.getName(), bucket.getName()); + } + + @Test + public void testGetBucketFailure() { + doThrow(STORAGE_FAILURE) + .when(storageRpcMock) + .get( + Conversions.json().bucketInfo().encode(BucketInfo.of(BUCKET_NAME1)), EMPTY_RPC_OPTIONS); + initializeService(); + try { + storage.get(BUCKET_NAME1); + fail(); + } catch (StorageException e) { + assertEquals(STORAGE_FAILURE, e.getCause()); + } + } + + @Test + public void testGetBlob() { + doReturn(Conversions.json().blobInfo().encode(BLOB_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .get( + Conversions.json().blobId().encode(BlobId.of(BUCKET_NAME1, BLOB_NAME1)), + EMPTY_RPC_OPTIONS); + initializeService(); + Blob blob = storage.get(BUCKET_NAME1, BLOB_NAME1); + assertEquals(expectedBlob1, blob); + } + + @Test + public void testGetBlobWithOptions() { + doReturn(Conversions.json().blobInfo().encode(BLOB_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) 
+ .get( + Conversions.json().blobId().encode(BlobId.of(BUCKET_NAME1, BLOB_NAME1)), + BLOB_GET_OPTIONS); + initializeService(); + Blob blob = storage.get(BUCKET_NAME1, BLOB_NAME1, BLOB_GET_METAGENERATION, BLOB_GET_GENERATION); + assertEquals(expectedBlob1, blob); + } + + @Test + public void testGetBlobWithOptionsFromBlobId() { + doReturn(Conversions.json().blobInfo().encode(BLOB_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .get(Conversions.json().blobId().encode(BLOB_INFO1.getBlobId()), BLOB_GET_OPTIONS); + initializeService(); + Blob blob = + storage.get( + BLOB_INFO1.getBlobId(), BLOB_GET_METAGENERATION, BLOB_GET_GENERATION_FROM_BLOB_ID); + assertEquals(expectedBlob1, blob); + } + + @Test + public void testGetBlobWithSelectedFields() { + ArgumentCaptor> capturedOptions = + ArgumentCaptor.forClass(Map.class); + doReturn(Conversions.json().blobInfo().encode(BLOB_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .get( + Mockito.eq(Conversions.json().blobId().encode(BlobId.of(BUCKET_NAME1, BLOB_NAME1))), + capturedOptions.capture()); + initializeService(); + Blob blob = + storage.get( + BUCKET_NAME1, + BLOB_NAME1, + BLOB_GET_METAGENERATION, + BLOB_GET_GENERATION, + BLOB_GET_FIELDS); + assertEquals( + BLOB_INFO1.getMetageneration(), + capturedOptions.getValue().get(StorageRpc.Option.IF_METAGENERATION_MATCH)); + assertEquals( + BLOB_INFO1.getGeneration(), + capturedOptions.getValue().get(StorageRpc.Option.IF_GENERATION_MATCH)); + String selector = (String) capturedOptions.getValue().get(StorageRpc.Option.FIELDS); + assertTrue(selector.contains("bucket")); + assertTrue(selector.contains("name")); + assertTrue(selector.contains("contentType")); + assertTrue(selector.contains("crc32c")); + assertEquals(30, selector.length()); + assertEquals(expectedBlob1, blob); + } + + @Test + public void testGetBlobWithEmptyFields() { + ArgumentCaptor> capturedOptions = + ArgumentCaptor.forClass(Map.class); + 
doReturn(Conversions.json().blobInfo().encode(BLOB_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .get( + Mockito.eq(Conversions.json().blobId().encode(BlobId.of(BUCKET_NAME1, BLOB_NAME1))), + capturedOptions.capture()); + initializeService(); + Blob blob = + storage.get( + BUCKET_NAME1, + BLOB_NAME1, + BLOB_GET_METAGENERATION, + BLOB_GET_GENERATION, + BLOB_GET_EMPTY_FIELDS); + assertEquals( + BLOB_INFO1.getMetageneration(), + capturedOptions.getValue().get(StorageRpc.Option.IF_METAGENERATION_MATCH)); + assertEquals( + BLOB_INFO1.getGeneration(), + capturedOptions.getValue().get(StorageRpc.Option.IF_GENERATION_MATCH)); + String selector = (String) capturedOptions.getValue().get(StorageRpc.Option.FIELDS); + assertTrue(selector.contains("bucket")); + assertTrue(selector.contains("name")); + assertEquals(11, selector.length()); + assertEquals(expectedBlob1, blob); + } + + @Test + public void testGetBlobFailure() { + doThrow(STORAGE_FAILURE) + .when(storageRpcMock) + .get( + Conversions.json().blobId().encode(BlobId.of(BUCKET_NAME1, BLOB_NAME1)), + EMPTY_RPC_OPTIONS); + initializeService(); + try { + storage.get(BUCKET_NAME1, BLOB_NAME1); + fail(); + } catch (StorageException e) { + assertEquals(STORAGE_FAILURE, e.getCause()); + } + } + + private void verifyCreateBlobCapturedStream(ArgumentCaptor capturedStream) + throws IOException { + ByteArrayInputStream byteStream = capturedStream.getValue(); + byte[] streamBytes = new byte[BLOB_CONTENT.length]; + assertEquals(BLOB_CONTENT.length, byteStream.read(streamBytes)); + assertArrayEquals(BLOB_CONTENT, streamBytes); + assertEquals(-1, byteStream.read(streamBytes)); + } + + @Test + @SuppressWarnings({"unchecked", "deprecation"}) + public void testCreateBlobFromStream() throws IOException { + ArgumentCaptor capturedStream = + ArgumentCaptor.forClass(ByteArrayInputStream.class); + + ByteArrayInputStream fileStream = new ByteArrayInputStream(BLOB_CONTENT); + + // verify that md5 and crc32c are cleared 
if present when calling create + doReturn(Conversions.json().blobInfo().encode(BLOB_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .create( + Mockito.eq(Conversions.json().blobInfo().encode(BLOB_INFO_WITHOUT_HASHES)), + capturedStream.capture(), + Mockito.eq(EMPTY_RPC_OPTIONS)); + initializeService(); + + Blob blob = storage.create(BLOB_INFO_WITH_HASHES, fileStream); + + assertEquals(expectedBlob1, blob); + verifyCreateBlobCapturedStream(capturedStream); + } + + @Test + @SuppressWarnings({"unchecked", "deprecation"}) + public void testCreateBlobFromStreamDisableGzipContent() throws IOException { + ArgumentCaptor capturedStream = + ArgumentCaptor.forClass(ByteArrayInputStream.class); + + // verify that md5 and crc32c are cleared if present when calling create + ByteArrayInputStream fileStream = new ByteArrayInputStream(BLOB_CONTENT); + doReturn(Conversions.json().blobInfo().encode(BLOB_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .create( + Mockito.eq(Conversions.json().blobInfo().encode(BLOB_INFO_WITHOUT_HASHES)), + capturedStream.capture(), + Mockito.eq(BLOB_TARGET_OPTIONS_CREATE_DISABLE_GZIP_CONTENT)); + initializeService(); + + Blob blob = + storage.create( + BLOB_INFO_WITH_HASHES, fileStream, Storage.BlobWriteOption.disableGzipContent()); + + assertEquals(expectedBlob1, blob); + verifyCreateBlobCapturedStream(capturedStream); + } + + @Test + @SuppressWarnings({"unchecked", "deprecation"}) + public void testCreateBlobFromStreamWithEncryptionKey() throws IOException { + ByteArrayInputStream fileStream = new ByteArrayInputStream(BLOB_CONTENT); + + // verify that md5 and crc32c are cleared if present when calling create + doReturn(Conversions.json().blobInfo().encode(BLOB_INFO1)) + .doReturn(Conversions.json().blobInfo().encode(BLOB_INFO1)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .create( + Conversions.json().blobInfo().encode(BLOB_INFO_WITHOUT_HASHES), + fileStream, + 
ENCRYPTION_KEY_OPTIONS); + initializeService(); + Blob blob = + storage.create( + BLOB_INFO_WITH_HASHES, fileStream, Storage.BlobWriteOption.encryptionKey(BASE64_KEY)); + assertEquals(expectedBlob1, blob); + blob = + storage.create( + BLOB_INFO_WITH_HASHES, fileStream, Storage.BlobWriteOption.encryptionKey(BASE64_KEY)); + assertEquals(expectedBlob1, blob); + } + + @Test + @SuppressWarnings({"unchecked", "deprecation"}) + public void testCreateBlobFromStreamRetryableException() throws IOException { + + ByteArrayInputStream fileStream = new ByteArrayInputStream(BLOB_CONTENT); + + // verify that md5 and crc32c are cleared if present when calling create + Exception internalErrorException = new StorageException(500, "internalError"); + doThrow(internalErrorException) + .when(storageRpcMock) + .create( + Conversions.json().blobInfo().encode(BLOB_INFO_WITHOUT_HASHES), + fileStream, + EMPTY_RPC_OPTIONS); + + storage = + options.toBuilder() + .setRetrySettings(ServiceOptions.getDefaultRetrySettings()) + .build() + .getService(); + + // Even though this exception is retryable, storage.create(BlobInfo, InputStream) + // shouldn't retry. 
+ try { + storage.create(BLOB_INFO_WITH_HASHES, fileStream); + fail(); + } catch (StorageException ex) { + assertSame(internalErrorException, ex); + } + } + + @Test + public void testCreateFromDirectory() throws IOException { + initializeService(); + Path dir = Files.createTempDirectory("unit_"); + try { + storage.createFrom(BLOB_INFO1, dir); + fail(); + } catch (StorageException e) { + assertEquals(dir + " is a directory", e.getMessage()); + } + } + + private BlobInfo initializeUpload(byte[] bytes) { + return initializeUpload(bytes, DEFAULT_BUFFER_SIZE, EMPTY_RPC_OPTIONS); + } + + private BlobInfo initializeUpload(byte[] bytes, int bufferSize) { + return initializeUpload(bytes, bufferSize, EMPTY_RPC_OPTIONS); + } + + private BlobInfo initializeUpload( + byte[] bytes, int bufferSize, Map rpcOptions) { + String uploadId = "upload-id"; + byte[] buffer = new byte[bufferSize]; + System.arraycopy(bytes, 0, buffer, 0, bytes.length); + BlobInfo blobInfo = BLOB_INFO1.toBuilder().setMd5(null).setCrc32c(null).build(); + StorageObject storageObject = new StorageObject(); + storageObject.setBucket(BLOB_INFO1.getBucket()); + storageObject.setName(BLOB_INFO1.getName()); + storageObject.setSize(BigInteger.valueOf(bytes.length)); + doReturn(uploadId) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .open(Conversions.json().blobInfo().encode(blobInfo), rpcOptions); + + doReturn(storageObject) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .writeWithResponse(uploadId, buffer, 0, 0L, bytes.length, true); + + initializeService(); + BlobInfo info = Conversions.json().blobInfo().decode(storageObject); + expectedUpdated = info.asBlob(storage); + return blobInfo; + } + + @Test + public void testListBuckets() { + String cursor = "cursor"; + ImmutableList bucketInfoList = ImmutableList.of(BUCKET_INFO1, BUCKET_INFO2); + Tuple> result = + Tuple.of( + cursor, Iterables.transform(bucketInfoList, Conversions.json().bucketInfo()::encode)); + + doReturn(result) + 
.doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .list(EMPTY_RPC_OPTIONS); + + initializeService(); + ImmutableList bucketList = ImmutableList.of(expectedBucket1, expectedBucket2); + Page page = storage.list(); + assertEquals(cursor, page.getNextPageToken()); + assertArrayEquals(bucketList.toArray(), Iterables.toArray(page.getValues(), Bucket.class)); + } + + @Test + public void testListBucketsEmpty() { + doReturn(Tuple.>of(null, null)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .list(EMPTY_RPC_OPTIONS); + + initializeService(); + Page page = storage.list(); + assertNull(page.getNextPageToken()); + assertArrayEquals( + ImmutableList.of().toArray(), Iterables.toArray(page.getValues(), Bucket.class)); + } + + @Test + public void testListBucketsWithOptions() { + String cursor = "cursor"; + ImmutableList bucketInfoList = ImmutableList.of(BUCKET_INFO1, BUCKET_INFO2); + Tuple> result = + Tuple.of( + cursor, Iterables.transform(bucketInfoList, Conversions.json().bucketInfo()::encode)); + + doReturn(result) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .list(BUCKET_LIST_OPTIONS); + + initializeService(); + ImmutableList bucketList = ImmutableList.of(expectedBucket1, expectedBucket2); + Page page = storage.list(BUCKET_LIST_PAGE_SIZE, BUCKET_LIST_PREFIX); + assertEquals(cursor, page.getNextPageToken()); + assertArrayEquals(bucketList.toArray(), Iterables.toArray(page.getValues(), Bucket.class)); + } + + @Test + public void testListBucketsWithException() { + doThrow(STORAGE_FAILURE).when(storageRpcMock).list(EMPTY_RPC_OPTIONS); + initializeService(); + try { + storage.list(); + fail(); + } catch (StorageException e) { + assertEquals(STORAGE_FAILURE.getMessage(), e.getMessage()); + } + } + + @Test + public void testListBlobs() { + String cursor = "cursor"; + ImmutableList blobInfoList = ImmutableList.of(BLOB_INFO1, BLOB_INFO2); + Tuple> result = + Tuple.of(cursor, Iterables.transform(blobInfoList, 
Conversions.json().blobInfo()::encode)); + + doReturn(result) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .list(BUCKET_NAME1, EMPTY_RPC_OPTIONS); + + initializeService(); + ImmutableList blobList = ImmutableList.of(expectedBlob1, expectedBlob2); + Page page = storage.list(BUCKET_NAME1); + assertEquals(cursor, page.getNextPageToken()); + assertArrayEquals(blobList.toArray(), Iterables.toArray(page.getValues(), Blob.class)); + } + + @Test + public void testListBlobsEmpty() { + doReturn( + Tuple.>of( + null, null)) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .list(BUCKET_NAME1, EMPTY_RPC_OPTIONS); + + initializeService(); + Page page = storage.list(BUCKET_NAME1); + assertNull(page.getNextPageToken()); + assertArrayEquals( + ImmutableList.of().toArray(), Iterables.toArray(page.getValues(), Blob.class)); + } + + @Test + public void testListBlobsWithOptions() { + String cursor = "cursor"; + ImmutableList blobInfoList = ImmutableList.of(BLOB_INFO1, BLOB_INFO2); + Tuple> result = + Tuple.of(cursor, Iterables.transform(blobInfoList, Conversions.json().blobInfo()::encode)); + doReturn(result) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .list(BUCKET_NAME1, BLOB_LIST_OPTIONS); + initializeService(); + ImmutableList blobList = ImmutableList.of(expectedBlob1, expectedBlob2); + Page page = + storage.list(BUCKET_NAME1, BLOB_LIST_PAGE_SIZE, BLOB_LIST_PREFIX, BLOB_LIST_VERSIONS); + assertEquals(cursor, page.getNextPageToken()); + assertArrayEquals(blobList.toArray(), Iterables.toArray(page.getValues(), Blob.class)); + } + + @Test + public void testListBlobsCurrentDirectory() { + String cursor = "cursor"; + Map options = ImmutableMap.of(StorageRpc.Option.DELIMITER, "/"); + ImmutableList blobInfoList = ImmutableList.of(BLOB_INFO1, BLOB_INFO2); + Tuple> result = + Tuple.of(cursor, Iterables.transform(blobInfoList, Conversions.json().blobInfo()::encode)); + doReturn(result) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + 
.when(storageRpcMock) + .list(BUCKET_NAME1, options); + + initializeService(); + ImmutableList blobList = ImmutableList.of(expectedBlob1, expectedBlob2); + Page page = storage.list(BUCKET_NAME1, Storage.BlobListOption.currentDirectory()); + assertEquals(cursor, page.getNextPageToken()); + assertArrayEquals(blobList.toArray(), Iterables.toArray(page.getValues(), Blob.class)); + } + + @Test + public void testListBlobsDelimiter() { + String cursor = "cursor"; + String delimiter = "/"; + Map options = ImmutableMap.of(StorageRpc.Option.DELIMITER, delimiter); + ImmutableList blobInfoList = ImmutableList.of(BLOB_INFO1, BLOB_INFO2); + Tuple> result = + Tuple.of(cursor, Iterables.transform(blobInfoList, Conversions.json().blobInfo()::encode)); + doReturn(result) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .list(BUCKET_NAME1, options); + + initializeService(); + ImmutableList blobList = ImmutableList.of(expectedBlob1, expectedBlob2); + Page page = storage.list(BUCKET_NAME1, Storage.BlobListOption.delimiter(delimiter)); + assertEquals(cursor, page.getNextPageToken()); + assertArrayEquals(blobList.toArray(), Iterables.toArray(page.getValues(), Blob.class)); + } + + @Test + public void testListBlobsWithOffset() { + String cursor = "cursor"; + String startOffset = "startOffset"; + String endOffset = "endOffset"; + Map options = + ImmutableMap.of( + StorageRpc.Option.START_OFF_SET, startOffset, StorageRpc.Option.END_OFF_SET, endOffset); + ImmutableList blobInfoList = ImmutableList.of(BLOB_INFO1, BLOB_INFO2); + Tuple> result = + Tuple.of(cursor, Iterables.transform(blobInfoList, Conversions.json().blobInfo()::encode)); + doReturn(result) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .list(BUCKET_NAME1, options); + + initializeService(); + ImmutableList blobList = ImmutableList.of(expectedBlob1, expectedBlob2); + Page page = + storage.list( + BUCKET_NAME1, + Storage.BlobListOption.startOffset(startOffset), + 
Storage.BlobListOption.endOffset(endOffset)); + assertEquals(cursor, page.getNextPageToken()); + assertArrayEquals(blobList.toArray(), Iterables.toArray(page.getValues(), Blob.class)); + } + + @Test + public void testListBlobsMatchGlob() { + String cursor = "cursor"; + String matchGlob = "foo*bar"; + Map options = ImmutableMap.of(StorageRpc.Option.MATCH_GLOB, matchGlob); + ImmutableList blobInfoList = ImmutableList.of(BLOB_INFO1, BLOB_INFO2); + Tuple> result = + Tuple.of(cursor, Iterables.transform(blobInfoList, Conversions.json().blobInfo()::encode)); + doReturn(result) + .doThrow(UNEXPECTED_CALL_EXCEPTION) + .when(storageRpcMock) + .list(BUCKET_NAME1, options); + + initializeService(); + ImmutableList blobList = ImmutableList.of(expectedBlob1, expectedBlob2); + Page page = storage.list(BUCKET_NAME1, Storage.BlobListOption.matchGlob(matchGlob)); + assertEquals(cursor, page.getNextPageToken()); + assertArrayEquals(blobList.toArray(), Iterables.toArray(page.getValues(), Blob.class)); + } + + @Test + public void testListBlobsWithException() { + doThrow(STORAGE_FAILURE).when(storageRpcMock).list(BUCKET_NAME1, EMPTY_RPC_OPTIONS); + initializeService(); + try { + storage.list(BUCKET_NAME1); + fail(); + } catch (StorageException e) { + assertEquals(STORAGE_FAILURE.getMessage(), e.getMessage()); + } + } + + @Test + public void testCreateNotification() { + doReturn(Conversions.json().notificationInfo().encode(NOTIFICATION_INFO_01)) + .when(storageRpcMock) + .createNotification( + BUCKET_NAME1, Conversions.json().notificationInfo().encode(NOTIFICATION_INFO_01)); + initializeService(); + Notification notification = storage.createNotification(BUCKET_NAME1, NOTIFICATION_INFO_01); + verifyBucketNotification(notification); + } + + @Test + public void testGetNotification() { + doReturn(Conversions.json().notificationInfo().encode(NOTIFICATION_INFO_01)) + .when(storageRpcMock) + .getNotification(BUCKET_NAME1, GENERATED_ID); + initializeService(); + Notification notification = 
storage.getNotification(BUCKET_NAME1, GENERATED_ID); + verifyBucketNotification(notification); + } + + @Test + public void testListNotification() { + doReturn( + Arrays.asList( + Conversions.json().notificationInfo().encode(NOTIFICATION_INFO_01), + Conversions.json().notificationInfo().encode(NOTIFICATION_INFO_02))) + .when(storageRpcMock) + .listNotifications(BUCKET_NAME1); + initializeService(); + List notifications = storage.listNotifications(BUCKET_NAME1); + assertEquals(2, notifications.size()); + verifyBucketNotification(notifications.get(0)); + verifyBucketNotification(notifications.get(1)); + } + + @Test + public void testDeleteNotification() { + doReturn(true).when(storageRpcMock).deleteNotification(BUCKET_NAME1, GENERATED_ID); + initializeService(); + Boolean isDeleted = storage.deleteNotification(BUCKET_NAME1, GENERATED_ID); + assertEquals(isDeleted, Boolean.TRUE); + } + + private void verifyBucketNotification(Notification value) { + assertNull(value.getNotificationId()); + assertEquals(CUSTOM_ATTRIBUTES, value.getCustomAttributes()); + assertEquals(ETAG, value.getEtag()); + assertEquals(SELF_LINK, value.getSelfLink()); + assertEquals(OBJECT_NAME_PREFIX, value.getObjectNamePrefix()); + assertEquals(PAYLOAD_FORMAT.name(), value.getPayloadFormat().name()); + assertEquals(TOPIC, value.getTopic()); + assertEquals(Arrays.asList(EVENT_TYPES), value.getEventTypes()); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageOptionsBuilderTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageOptionsBuilderTest.java new file mode 100644 index 000000000000..4601a3b2e8df --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageOptionsBuilderTest.java @@ -0,0 +1,84 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.BufferAllocationStrategy; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.ExecutorSupplier; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartNamingStrategy; +import org.junit.Before; +import org.junit.Test; + +public final class StorageOptionsBuilderTest { + + private MyStorageRetryStrategy storageRetryStrategy; + private BlobWriteSessionConfig bwsc; + + @Before + public void setUp() throws Exception { + storageRetryStrategy = new MyStorageRetryStrategy(); + bwsc = + BlobWriteSessionConfigs.parallelCompositeUpload() + .withBufferAllocationStrategy(BufferAllocationStrategy.simple(256 * 1024)) + .withPartNamingStrategy(PartNamingStrategy.prefix("blahblahblah")) + .withExecutorSupplier(ExecutorSupplier.fixedPool(2)); + } + + @Test + public void http() throws Exception { + HttpStorageOptions base = + HttpStorageOptions.http() + .setStorageRetryStrategy(storageRetryStrategy) + .setBlobWriteSessionConfig(bwsc) + .build(); + + HttpStorageOptions rebuilt = base.toBuilder().build(); + assertAll( + () -> assertThat(rebuilt).isEqualTo(base), + () -> assertThat(rebuilt.hashCode()).isEqualTo(base.hashCode())); + } + + @Test + public void grpc() throws Exception { + GrpcStorageOptions base = + 
GrpcStorageOptions.grpc() + .setStorageRetryStrategy(storageRetryStrategy) + .setBlobWriteSessionConfig(bwsc) + .build(); + + GrpcStorageOptions rebuilt = base.toBuilder().build(); + assertAll( + () -> assertThat(rebuilt).isEqualTo(base), + () -> assertThat(rebuilt.hashCode()).isEqualTo(base.hashCode())); + } + + private static class MyStorageRetryStrategy implements StorageRetryStrategy { + + @Override + public ResultRetryAlgorithm getIdempotentHandler() { + return null; + } + + @Override + public ResultRetryAlgorithm getNonidempotentHandler() { + return null; + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageV2ProtoUtilsTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageV2ProtoUtilsTest.java new file mode 100644 index 000000000000..a6ccc8ffee5b --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageV2ProtoUtilsTest.java @@ -0,0 +1,111 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteRangeSpec.relativeLength; +import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto; +import static com.google.cloud.storage.StorageV2ProtoUtils.seekReadObjectRequest; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.jqwik.StorageArbitraries; +import com.google.storage.v2.ObjectAccessControl; +import com.google.storage.v2.ReadObjectRequest; +import java.util.function.Predicate; +import net.jqwik.api.Arbitraries; +import net.jqwik.api.Arbitrary; +import net.jqwik.api.Combinators; +import net.jqwik.api.Example; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; +import net.jqwik.api.Provide; + +public final class StorageV2ProtoUtilsTest { + + @Example + void objectAclEntityIdOrAltEq() { + String entity = "project-viewer-123123"; + Predicate p = StorageV2ProtoUtils.objectAclEntityOrAltEq(entity); + + ObjectAccessControl inAlt = + ObjectAccessControl.newBuilder().setEntity("something").setEntityAlt(entity).build(); + ObjectAccessControl inPrimary = + ObjectAccessControl.newBuilder().setEntity(entity).setEntityAlt("something-else").build(); + + assertThat(p.test(inAlt)).isTrue(); + assertThat(p.test(inPrimary)).isTrue(); + } + + @Property(tries = 100_000) + void seek(@ForAll("seekCases") SeekCase srr) { + Long offset = srr.offset; + Long limit = srr.limit; + ReadObjectRequest seek = seekReadObjectRequest(srr.req, relativeLength(offset, limit)); + + // If both offset and limit are null, avoid allocating a new instance as we don't have any + // meaningful change to apply + if (offset == null && limit == null) { + assertThat(seek).isSameInstanceAs(srr.req); + } else { + if (offset != null && offset != 0) { + assertThat(seek.getReadOffset()).isEqualTo(offset); + } + if (limit != null && limit != Long.MAX_VALUE) { + assertThat(seek.getReadLimit()).isEqualTo(limit); + } + } + } + + @Provide("seekCases") + Arbitrary 
arbitrarySeekCase() { + return Combinators.combine( + StorageArbitraries.objects().name(), + Arbitraries.longs().greaterOrEqual(0).injectNull(0.6), + Arbitraries.longs().greaterOrEqual(0).injectNull(0.6), + Arbitraries.longs().greaterOrEqual(0).injectNull(0.3), + Arbitraries.longs().greaterOrEqual(0).injectNull(0.3)) + .as(SeekCase::of); + } + + private static final class SeekCase { + private final ReadObjectRequest req; + private final Long offset; + private final Long limit; + + public SeekCase(ReadObjectRequest req, Long offset, Long limit) { + this.req = req; + this.offset = offset; + this.limit = limit; + } + + @Override + public String toString() { + return "SeekCase{" + "req=" + fmtProto(req) + ", offset=" + offset + ", limit=" + limit + '}'; + } + + private static SeekCase of( + String name, Long embedOffset, Long embedLimit, Long offset, Long limit) { + ReadObjectRequest.Builder b = ReadObjectRequest.newBuilder().setObject(name); + if (embedOffset != null) { + b.setReadOffset(embedOffset); + } + if (embedLimit != null) { + b.setReadLimit(embedLimit); + } + return new SeekCase(b.build(), offset, limit); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TestApiClock.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TestApiClock.java new file mode 100644 index 000000000000..d96be753cde2 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TestApiClock.java @@ -0,0 +1,161 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.ApiClock; +import com.google.common.base.MoreObjects; +import java.time.Duration; +import java.util.concurrent.TimeUnit; +import java.util.function.LongUnaryOperator; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** "Test" {@link ApiClock} that allows control of time advancement and by how much. */ +final class TestApiClock implements ApiClock { + + private static final long NANOS_PER_MILLI = TimeUnit.MILLISECONDS.toNanos(1); + private final long beginNs; + private final LongUnaryOperator tick; + + @Nullable private LongUnaryOperator next; + private long prevNs; + + private TestApiClock(long beginNs, LongUnaryOperator tick) { + this.beginNs = beginNs; + this.tick = tick; + this.prevNs = beginNs; + } + + @Override + public long nanoTime() { + final long ret; + if (next != null) { + ret = next.applyAsLong(prevNs); + next = null; + } else { + ret = tick.applyAsLong(prevNs); + } + prevNs = ret; + return ret; + } + + @Override + public long millisTime() { + return nanoTime() / NANOS_PER_MILLI; + } + + public void advance(long nanos) { + next = addExact(nanos); + } + + public void advance(Duration d) { + advance(d.toNanos()); + } + + public void advance(LongUnaryOperator op) { + if (next == null) { + next = op; + } else { + next = next.andThen(op); + } + } + + public void reset() { + prevNs = beginNs; + next = null; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + 
.add("beginNs", Duration.ofNanos(beginNs)) + .add("prevNs", Duration.ofNanos(prevNs)) + .add("tick", tick) + .add("next", next) + .toString(); + } + + public static TestApiClock tickBy(long begin, Duration d) { + return of(begin, addExact(d)); + } + + public static TestApiClock of() { + return of(0L, addExact(1L)); + } + + /** + * @param tick Given the previous nanoseconds of the clock generate the new nanoseconds + */ + public static TestApiClock of(long beginNs, LongUnaryOperator tick) { + return new TestApiClock(beginNs, tick); + } + + static LongUnaryOperator addExact(Duration amountToAdd) { + return new AddExact(amountToAdd.toNanos()); + } + + static LongUnaryOperator addExact(long amountToAdd) { + return new AddExact(amountToAdd); + } + + private static final class AddExact implements LongUnaryOperator { + private final long amountToAdd; + + private AddExact(long amountToAdd) { + this.amountToAdd = amountToAdd; + } + + @Override + public long applyAsLong(long operand) { + return Math.addExact(operand, amountToAdd); + } + + @Override + @NonNull + public LongUnaryOperator andThen(@NonNull LongUnaryOperator after) { + return new AndThen(after); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("amountToAdd", Duration.ofNanos(amountToAdd)) + .toString(); + } + + private final class AndThen implements LongUnaryOperator { + private final LongUnaryOperator then; + + private AndThen(LongUnaryOperator then) { + this.then = then; + } + + @Override + public long applyAsLong(long operand) { + return then.applyAsLong(AddExact.this.applyAsLong(operand)); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("before", AddExact.this) + .add("then", then) + .toString(); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TestClock.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TestClock.java new file mode 
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.time.ZoneId;
import java.util.function.UnaryOperator;

/**
 * Deterministic {@link Clock} for tests: each call to {@link #instant()} returns the current value
 * and then applies {@code next} to compute the value the following call will return.
 *
 * <p>{@link #withZone(ZoneId)} is intentionally unsupported; the zone is fixed to {@code Z}.
 */
final class TestClock extends Clock {

  // First instant the clock reports; retained for debugging/toString-style inspection.
  private final Instant begin;
  // Given the instant just reported, produces the next instant the clock will report.
  private final UnaryOperator<Instant> next;

  private Instant now;

  private TestClock(Instant begin, UnaryOperator<Instant> next) {
    this.begin = begin;
    this.next = next;
    this.now = begin;
  }

  @Override
  public ZoneId getZone() {
    return ZoneId.of("Z");
  }

  @Override
  public Clock withZone(ZoneId zone) {
    throw new UnsupportedOperationException("TestClock.withZone()");
  }

  @Override
  public Instant instant() {
    // Return the current reading, then advance for the next call.
    Instant ret = now;
    now = next.apply(now);
    return ret;
  }

  /** Clock starting at {@code begin} that advances by {@code d} on every read. */
  public static TestClock tickBy(Instant begin, Duration d) {
    return of(begin, i -> i.plus(d));
  }

  /**
   * @param next Given the instant just reported, produce the next instant to report
   */
  public static TestClock of(Instant begin, UnaryOperator<Instant> next) {
    return new TestClock(begin, next);
  }
}
000000000000..01294f41261a --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TestScheduledExecutorService.java @@ -0,0 +1,175 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import java.time.Duration; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.Delayed; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * "Test" {@link ScheduledExecutorService} that integrated with {@link TestApiClock} to provide + * "instant" time advancement for any invocation of {@link #schedule(Runnable, long, TimeUnit)} + * + *

All other methods will throw {@link UnsupportedOperationException} if invoked. + */ +final class TestScheduledExecutorService implements ScheduledExecutorService { + + private final TestApiClock clock; + + TestScheduledExecutorService(TestApiClock clock) { + this.clock = clock; + } + + @Override + public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) { + Duration nanos = Duration.ofNanos(unit.toNanos(delay)); + clock.advance(nanos); + command.run(); + return new ScheduledFuture() { + @Override + public long getDelay(TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public int compareTo(Delayed o) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isCancelled() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isDone() { + return true; + } + + @Override + public Object get() throws InterruptedException, ExecutionException { + return null; + } + + @Override + public Object get(long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + throw new UnsupportedOperationException(); + } + }; + } + + @Override + public ScheduledFuture schedule(Callable callable, long delay, TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public ScheduledFuture scheduleAtFixedRate( + Runnable command, long initialDelay, long period, TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public ScheduledFuture scheduleWithFixedDelay( + Runnable command, long initialDelay, long delay, TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public void shutdown() { + throw new UnsupportedOperationException(); + } + + @Override + public List shutdownNow() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean 
isShutdown() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isTerminated() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { + throw new UnsupportedOperationException(); + } + + @Override + public Future submit(Callable task) { + throw new UnsupportedOperationException(); + } + + @Override + public Future submit(Runnable task, T result) { + throw new UnsupportedOperationException(); + } + + @Override + public Future submit(Runnable task) { + throw new UnsupportedOperationException(); + } + + @Override + public List> invokeAll(Collection> tasks) + throws InterruptedException { + throw new UnsupportedOperationException(); + } + + @Override + public List> invokeAll( + Collection> tasks, long timeout, TimeUnit unit) + throws InterruptedException { + throw new UnsupportedOperationException(); + } + + @Override + public T invokeAny(Collection> tasks) + throws InterruptedException, ExecutionException { + throw new UnsupportedOperationException(); + } + + @Override + public T invokeAny(Collection> tasks, long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + throw new UnsupportedOperationException(); + } + + @Override + public void execute(Runnable command) { + throw new UnsupportedOperationException(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TestUtils.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TestUtils.java new file mode 100644 index 000000000000..5231b8fe002d --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TestUtils.java @@ -0,0 +1,426 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFuture; +import com.google.api.core.NanoClock; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.retrying.BasicResultRetryAlgorithm; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.ErrorDetails; +import com.google.api.gax.rpc.StreamController; +import com.google.cloud.RetryHelper; +import com.google.cloud.RetryHelper.RetryHelperException; +import com.google.cloud.http.BaseHttpServiceException; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.Retrying.DefaultRetrier; +import com.google.cloud.storage.Retrying.HttpRetrier; +import com.google.cloud.storage.Retrying.Retrier; +import com.google.cloud.storage.Retrying.RetryingDependencies; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.io.Files; +import com.google.common.util.concurrent.UncheckedExecutionException; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.rpc.DebugInfo; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.WriteObjectRequest; +import io.grpc.Metadata; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import io.grpc.netty.shaded.io.netty.buffer.ByteBufUtil; +import io.grpc.netty.shaded.io.netty.buffer.Unpooled; +import io.grpc.protobuf.ProtoUtils; 
+import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileVisitResult; +import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Function; +import java.util.function.UnaryOperator; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; +import java.util.zip.GZIPOutputStream; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.function.ThrowingRunnable; +import org.junit.runners.model.MultipleFailureException; + +public final class TestUtils { + + public static final Metadata.Key GRPC_STATUS_DETAILS_KEY = + Metadata.Key.of( + "grpc-status-details-bin", + ProtoUtils.metadataMarshaller(com.google.rpc.Status.getDefaultInstance())); + + private TestUtils() {} + + public static byte[] gzipBytes(byte[] bytes) { + ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + try (OutputStream out = new GZIPOutputStream(byteArrayOutputStream)) { + out.write(bytes); + } catch (IOException ignore) { + // GZIPOutputStream will only throw if the underlying stream throws. 
+ // ByteArrayOutputStream does not throw on write + } + + return byteArrayOutputStream.toByteArray(); + } + + public static ChecksummedData getChecksummedData(ByteString content) { + return ChecksummedData.newBuilder().setContent(content).build(); + } + + public static ChecksummedData getChecksummedData(ByteString content, Hasher hasher) { + ChecksummedData.Builder b = ChecksummedData.newBuilder().setContent(content); + Crc32cLengthKnown hash = hasher.hash(content.asReadOnlyByteBuffer()); + if (hash != null) { + int crc32c = hash.getValue(); + b.setCrc32C(crc32c); + } + return b.build(); + } + + public static ApiException apiException(Code code) { + return apiException(code, ""); + } + + public static ApiException apiException(Code code, String message) { + StatusRuntimeException statusRuntimeException = + code.toStatus().withDescription(message).asRuntimeException(); + DebugInfo debugInfo = DebugInfo.newBuilder().setDetail(message).build(); + ErrorDetails errorDetails = + ErrorDetails.builder().setRawErrorMessages(ImmutableList.of(Any.pack(debugInfo))).build(); + return ApiExceptionFactory.createException( + statusRuntimeException, GrpcStatusCode.of(code), true, errorDetails); + } + + public static ImmutableList subDivide(byte[] bytes, int division) { + int length = bytes.length; + int fullDivisions = length / division; + int x = division * fullDivisions; + int remaining = length - x; + + if ((fullDivisions == 1 && remaining == 0) || (fullDivisions == 0 && remaining == 1)) { + return ImmutableList.of(ByteBuffer.wrap(bytes)); + } else { + return Stream.of( + IntStream.iterate(0, i -> i + division) + .limit(fullDivisions) + .mapToObj(i -> ByteBuffer.wrap(bytes, i, division)), + Stream.of(ByteBuffer.wrap(bytes, x, remaining))) + .flatMap(Function.identity()) + .filter(Buffer::hasRemaining) + .collect(ImmutableList.toImmutableList()); + } + } + + static RetryingDependencies defaultRetryingDeps() { + return RetryingDependencies.simple( + NanoClock.getDefaultClock(), 
StorageOptions.getDefaultRetrySettings()); + } + + static Retrier defaultRetrier() { + return new DefaultRetrier(UnaryOperator.identity(), defaultRetryingDeps()); + } + + /** + * Search {@code t} for an instance of {@code T} either directly or via a cause + * + * @return The found instance of T or null if not found. + */ + @Nullable + public static T findThrowable(Class c, Throwable t) { + T found = null; + Throwable tmp = t; + while (tmp != null) { + if (c.isInstance(tmp)) { + found = c.cast(tmp); + break; + } else { + tmp = tmp.getCause(); + } + } + return found; + } + + public static T retry429s(Callable c, Storage storage) { + try { + return RetryHelper.runWithRetries( + c, + storage.getOptions().getRetrySettings(), + new BasicResultRetryAlgorithm() { + @Override + public boolean shouldRetry(Throwable previousThrowable, Object previousResponse) { + if (previousThrowable instanceof BaseHttpServiceException) { + BaseHttpServiceException httpException = + (BaseHttpServiceException) previousThrowable; + return httpException.getCode() == 429; + } + return false; + } + }, + storage.getOptions().getClock()); + } catch (RetryHelperException e) { + Throwable cause = e.getCause(); + if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } else { + throw e; + } + } + } + + /** + * Return a function which when provided an {@code uploadId} will create a {@link + * WriteObjectRequest} with that {@code uploadId} + */ + @NonNull + public static Function onlyUploadId() { + return uId -> WriteObjectRequest.newBuilder().setUploadId(uId).build(); + } + + public static byte[] snapshotData(ByteBuffer buf) { + ByteBuffer dup = buf.duplicate(); + dup.flip(); + byte[] bytes = new byte[dup.remaining()]; + dup.get(bytes); + return bytes; + } + + public static byte[] slice(byte[] bs, int begin, int end) { + int len = end - begin; + byte[] dst = new byte[len]; + System.arraycopy(bs, begin, dst, 0, len); + return dst; + } + + public static String xxd(byte[] bytes) { + 
return ByteBufUtil.prettyHexDump(Unpooled.wrappedBuffer(bytes)); + } + + public static String xxd(ByteBuffer bytes) { + return xxd(true, bytes); + } + + public static String xxd(ByteString bytes) { + return xxd(false, bytes.asReadOnlyByteBuffer()); + } + + public static String xxd(boolean flip, ByteBuffer bytes) { + ByteBuffer dup = bytes.duplicate(); + if (flip) dup.flip(); + return ByteBufUtil.prettyHexDump(Unpooled.wrappedBuffer(dup)); + } + + public static String xxd(boolean flip, ByteBuffer[] buffers) { + ByteBuffer[] dups = + Arrays.stream(buffers) + .map(ByteBuffer::duplicate) + .peek( + byteBuffer -> { + if (flip) byteBuffer.flip(); + }) + .toArray(ByteBuffer[]::new); + return ByteBufUtil.prettyHexDump(Unpooled.wrappedBuffer(dups)); + } + + public static void assertAll(ThrowingRunnable... trs) throws Exception { + List x = + Arrays.stream(trs) + .map( + tr -> { + try { + tr.run(); + return null; + } catch (Throwable e) { + return e; + } + }) + .filter(Objects::nonNull) + .collect(ImmutableList.toImmutableList()); + MultipleFailureException.assertEmpty(x); + } + + /** ImmutableMap does not allow null values, this method does */ + public static Map<@NonNull K, @Nullable V> hashMapOf(@NonNull K k1, @Nullable V v1) { + requireNonNull(k1, "k1 must be non null"); + HashMap map = new HashMap<>(); + map.put(k1, v1); + return Collections.unmodifiableMap(map); + } + + /** ImmutableMap does not allow null values, this method does */ + public static Map<@NonNull K, @Nullable V> hashMapOf( + @NonNull K k1, @Nullable V v1, @NonNull K k2, @Nullable V v2) { + requireNonNull(k1, "k1 must be non null"); + requireNonNull(k2, "k2 must be non null"); + HashMap map = new HashMap<>(); + map.put(k1, v1); + map.put(k2, v2); + return Collections.unmodifiableMap(map); + } + + // copied with minor modification from + // com.google.api.gax.grpc.InstantiatingGrpcChannelProvider#isOnComputeEngine + public static boolean isOnComputeEngine() { + String osName = 
System.getProperty("os.name"); + if ("Linux".equals(osName)) { + try { + String result = + Files.asCharSource(new File("/sys/class/dmi/id/product_name"), StandardCharsets.UTF_8) + .readFirstLine(); + return result != null && (result.contains("Google") || result.contains("Compute Engine")); + } catch (IOException ignored) { + return false; + } + } + return false; + } + + /** + * GRPC test methods will sometimes use all bucket fields, which can cause an error if any aren't + * supported by GRPC. todo: b/308194853 + */ + public static Storage.BucketField[] filterOutHttpOnlyBucketFields(Storage.BucketField[] fields) { + return Arrays.stream(fields) + .filter(f -> !f.equals(Storage.BucketField.OBJECT_RETENTION)) + .collect(ImmutableSet.toImmutableSet()) + .toArray(new Storage.BucketField[0]); + } + + public static Optional last(List l) { + if (l.isEmpty()) { + return Optional.empty(); + } else { + return Optional.of(l.get(l.size() - 1)); + } + } + + static String messagesToText(Throwable t) { + StringBuilder tmp = new StringBuilder(); + tmp.append(messagesToText(t, "")); + Throwable curr = t; + while ((curr = curr.getCause()) != null) { + tmp.append("\n").append(messagesToText(curr, "")); + } + return tmp.toString(); + } + + static T await(ApiFuture future, long timeout, TimeUnit unit) throws TimeoutException { + try { + return future.get(timeout, unit); + } catch (ExecutionException exception) { + if (exception.getCause() instanceof RuntimeException) { + RuntimeException cause = (RuntimeException) exception.getCause(); + cause.addSuppressed(new AsyncStorageTaskException()); + throw cause; + } + throw new UncheckedExecutionException(exception); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + } + + static Retrier retrierFromStorageOptions(StorageOptions options) { + if (options instanceof HttpStorageOptions) { + HttpStorageOptions httpStorageOptions = (HttpStorageOptions) options; + DefaultRetrier retrier 
= + new DefaultRetrier(UnaryOperator.identity(), httpStorageOptions.asRetryDependencies()); + return new HttpRetrier(retrier); + } else if (options instanceof GrpcStorageOptions) { + GrpcStorageOptions grpcStorageOptions = (GrpcStorageOptions) options; + + return new DefaultRetrier(UnaryOperator.identity(), grpcStorageOptions); + } else { + return Retrier.attemptOnce(); + } + } + + private static String messagesToText(Throwable t, String indent) { + if (t == null) { + return ""; + } + String nextIndent = indent + " "; + return Stream.of( + Stream.of(indent + t.getMessage()), + Arrays.stream(t.getSuppressed()).map(tt -> messagesToText(tt, nextIndent))) + .flatMap(s -> s) + .collect(Collectors.joining("\n")); + } + + public static void rmDashRf(Path path) throws IOException { + java.nio.file.Files.walkFileTree( + path, + new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) + throws IOException { + java.nio.file.Files.deleteIfExists(file); + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException { + java.nio.file.Files.deleteIfExists(dir); + return FileVisitResult.CONTINUE; + } + }); + } + + public static StreamController nullStreamController() { + return NullStreamController.INSTANCE; + } + + static class NullStreamController implements StreamController { + private static final NullStreamController INSTANCE = new NullStreamController(); + + private NullStreamController() {} + + @Override + public void cancel() {} + + @Override + public void disableAutoInboundFlowControl() {} + + @Override + public void request(int count) {} + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ThroughputMovingWindowPropertyTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ThroughputMovingWindowPropertyTest.java new file mode 100644 index 000000000000..1dbcd9e5e336 --- 
/dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ThroughputMovingWindowPropertyTest.java @@ -0,0 +1,425 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._5TiB; +import static com.google.common.truth.Truth.assertWithMessage; +import static java.time.Instant.EPOCH; +import static java.time.Instant.ofEpochSecond; + +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import java.time.Duration; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Locale; +import java.util.stream.Collectors; +import net.jqwik.api.Arbitraries; +import net.jqwik.api.Arbitrary; +import net.jqwik.api.Combinators; +import net.jqwik.api.Example; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; +import net.jqwik.api.Provide; +import net.jqwik.api.Tuple; +import net.jqwik.api.Tuple.Tuple1; +import net.jqwik.time.api.DateTimes; +import net.jqwik.time.api.Times; + +final class ThroughputMovingWindowPropertyTest { + + private static final double TOLERANCE = 0.001; + + @Example + void canned() { + test(CANNED_SCENARIO); + } + + @Example + void twoEntriesSameTimeDifferentThroughput() { + Duration ms = Duration.ofMillis(1); + ScenarioTimeline scenario = 
+ new ScenarioTimeline( + ms, + ImmutableList.of( + new TimelineEntry(EPOCH, Throughput.of(1, ms), 1000.0), + new TimelineEntry(EPOCH, Throughput.of(0, ms), 1000.0))); + test(scenario); + } + + @Property + void test(@ForAll("Scenarios") ScenarioTimeline scenario) { + ThroughputMovingWindow window = ThroughputMovingWindow.of(scenario.d); + for (TimelineEntry timelineEntry : scenario.timelineEntries) { + window.add(timelineEntry.i, timelineEntry.t); + Throughput throughput = window.avg(timelineEntry.i); + assertWithMessage(timelineEntry.toString()) + .that(throughput.toBps()) + .isWithin(TOLERANCE) + .of(timelineEntry.expectedMovingAvgBytesPerSecond); + } + } + + @Provide("Scenarios") + static Arbitrary scenarioTimeline() { + return Times.durations() + .ofPrecision(ChronoUnit.MILLIS) + .between(Duration.ofMillis(1), Duration.ofMinutes(10)) + .flatMap( + d -> + Combinators.combine( + Arbitraries.just(d), + // pick an instant, then generate 1 to 100 values between it and d * 3 + DateTimes.instants() + .ofPrecision(ChronoUnit.MILLIS) + .flatMap( + i -> + DateTimes.instants() + .ofPrecision(ChronoUnit.MILLIS) + .between(i, i.plus(d.multipliedBy(3))) + .flatMap( + ii -> + Combinators.combine( + Arbitraries.just(ii), throughput()) + .as(Tuple::of)) + .list() + .ofMinSize(1) + .ofMaxSize(100))) + .as(Tuple::of)) + .map(ScenarioTimeline::create); + } + + static Arbitrary throughput() { + return Times.durations() + .ofPrecision(ChronoUnit.MILLIS) + .between(Duration.ofMillis(1), Duration.ofMinutes(10)) + .flatMap(d -> Arbitraries.longs().between(0, _5TiB).map(n -> Throughput.of(n, d))); + } + + private static final class ScenarioTimeline { + + private static final Comparator> COMP = + Comparator.comparing(Tuple1::get1); + private final Duration d; + private final List timelineEntries; + + private ScenarioTimeline(Duration d, List timelineEntries) { + this.d = d; + this.timelineEntries = timelineEntries; + } + + @Override + public String toString() { + return 
MoreObjects.toStringHelper(this) + .add("d", d) + .add("timelineEntries", timelineEntries) + .toString(); + } + + static ScenarioTimeline create( + Tuple.Tuple2>> tuples) { + + Duration d = tuples.get1(); + List> pairs = tuples.get2(); + + List> tmp = + pairs.stream().sorted(COMP).collect(Collectors.toList()); + + List>> windows = new ArrayList<>(); + int last = tmp.size() - 1; + for (int i = last; i >= 0; i--) { + List> window = new ArrayList<>(); + Tuple.Tuple2 t = tmp.get(i); + window.add(t); + Instant min = t.get1().minus(d); + for (int j = i - 1; j >= 0; j--) { + Tuple.Tuple2 r = tmp.get(j); + if (r.get1().isAfter(min)) { + window.add(r); + } + } + windows.add(ImmutableList.copyOf(window)); + } + + ImmutableList timelineEntries = + windows.stream() + .map( + w -> { + Tuple.Tuple2 max = w.get(0); + Throughput reduce = + w.stream() + .map(Tuple.Tuple2::get2) + .reduce(Throughput.zero(), Throughput::plus); + return new TimelineEntry( + max.get1(), max.get2(), Throughput.of(reduce.getNumBytes(), d).toBps()); + }) + .collect(ImmutableList.toImmutableList()); + return new ScenarioTimeline(d, timelineEntries.reverse()); + } + } + + private static final class TimelineEntry { + private final Instant i; + private final Throughput t; + private final double expectedMovingAvgBytesPerSecond; + + private TimelineEntry(Instant i, Throughput t, double expectedMovingAvgBytesPerSecond) { + this.i = i; + this.t = t; + this.expectedMovingAvgBytesPerSecond = expectedMovingAvgBytesPerSecond; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("i", i) + .add("t", t) + .add( + "tenSecMovingAvg", + String.format(Locale.US, "%,.03f", expectedMovingAvgBytesPerSecond)) + .toString(); + } + } + + private static final ScenarioTimeline CANNED_SCENARIO = + new ScenarioTimeline( + Duration.ofSeconds(10), + ImmutableList.builder() + .add(new TimelineEntry(ofEpochSecond(1), Throughput.bytesPerSecond(192), 19.2)) + .add(new 
TimelineEntry(ofEpochSecond(2), Throughput.bytesPerSecond(1185), 137.7)) + .add(new TimelineEntry(ofEpochSecond(3), Throughput.bytesPerSecond(1363), 274.)) + .add(new TimelineEntry(ofEpochSecond(4), Throughput.bytesPerSecond(234), 297.4)) + .add(new TimelineEntry(ofEpochSecond(5), Throughput.bytesPerSecond(1439), 441.3)) + .add(new TimelineEntry(ofEpochSecond(6), Throughput.bytesPerSecond(1269), 568.2)) + .add(new TimelineEntry(ofEpochSecond(7), Throughput.bytesPerSecond(692), 637.4)) + .add(new TimelineEntry(ofEpochSecond(8), Throughput.bytesPerSecond(667), 704.1)) + .add(new TimelineEntry(ofEpochSecond(9), Throughput.bytesPerSecond(1318), 835.9)) + .add(new TimelineEntry(ofEpochSecond(10), Throughput.bytesPerSecond(1125), 948.4)) + .add(new TimelineEntry(ofEpochSecond(11), Throughput.bytesPerSecond(1124), 1041.6)) + .add(new TimelineEntry(ofEpochSecond(12), Throughput.bytesPerSecond(3), 923.4)) + .add(new TimelineEntry(ofEpochSecond(13), Throughput.bytesPerSecond(185), 805.6)) + .add(new TimelineEntry(ofEpochSecond(14), Throughput.bytesPerSecond(726), 854.8)) + .add(new TimelineEntry(ofEpochSecond(15), Throughput.bytesPerSecond(630), 773.9)) + .add(new TimelineEntry(ofEpochSecond(16), Throughput.bytesPerSecond(874), 734.4)) + .add(new TimelineEntry(ofEpochSecond(17), Throughput.bytesPerSecond(1401), 805.3)) + .add(new TimelineEntry(ofEpochSecond(18), Throughput.bytesPerSecond(533), 791.9)) + .add(new TimelineEntry(ofEpochSecond(19), Throughput.bytesPerSecond(446), 704.7)) + .add(new TimelineEntry(ofEpochSecond(20), Throughput.bytesPerSecond(801), 672.3)) + .add(new TimelineEntry(ofEpochSecond(21), Throughput.bytesPerSecond(61), 566.)) + .add(new TimelineEntry(ofEpochSecond(22), Throughput.bytesPerSecond(1104), 676.1)) + .add(new TimelineEntry(ofEpochSecond(23), Throughput.bytesPerSecond(972), 754.8)) + .add(new TimelineEntry(ofEpochSecond(24), Throughput.bytesPerSecond(1310), 813.2)) + .add(new TimelineEntry(ofEpochSecond(25), Throughput.bytesPerSecond(408), 
791.)) + .add(new TimelineEntry(ofEpochSecond(26), Throughput.bytesPerSecond(759), 779.5)) + .add(new TimelineEntry(ofEpochSecond(27), Throughput.bytesPerSecond(674), 706.8)) + .add(new TimelineEntry(ofEpochSecond(28), Throughput.bytesPerSecond(314), 684.9)) + .add(new TimelineEntry(ofEpochSecond(29), Throughput.bytesPerSecond(1311), 771.4)) + .add(new TimelineEntry(ofEpochSecond(30), Throughput.bytesPerSecond(449), 736.2)) + .add(new TimelineEntry(ofEpochSecond(31), Throughput.bytesPerSecond(1273), 857.4)) + .add(new TimelineEntry(ofEpochSecond(32), Throughput.bytesPerSecond(228), 769.8)) + .add(new TimelineEntry(ofEpochSecond(33), Throughput.bytesPerSecond(605), 733.1)) + .add(new TimelineEntry(ofEpochSecond(34), Throughput.bytesPerSecond(537), 655.8)) + .add(new TimelineEntry(ofEpochSecond(35), Throughput.bytesPerSecond(1498), 764.8)) + .add(new TimelineEntry(ofEpochSecond(36), Throughput.bytesPerSecond(694), 758.3)) + .add(new TimelineEntry(ofEpochSecond(37), Throughput.bytesPerSecond(155), 706.4)) + .add(new TimelineEntry(ofEpochSecond(38), Throughput.bytesPerSecond(983), 773.3)) + .add(new TimelineEntry(ofEpochSecond(39), Throughput.bytesPerSecond(1359), 778.1)) + .add(new TimelineEntry(ofEpochSecond(40), Throughput.bytesPerSecond(832), 816.4)) + .add(new TimelineEntry(ofEpochSecond(41), Throughput.bytesPerSecond(1041), 793.2)) + .add(new TimelineEntry(ofEpochSecond(42), Throughput.bytesPerSecond(1459), 916.3)) + .add(new TimelineEntry(ofEpochSecond(43), Throughput.bytesPerSecond(1128), 968.6)) + .add(new TimelineEntry(ofEpochSecond(44), Throughput.bytesPerSecond(1318), 1046.7)) + .add(new TimelineEntry(ofEpochSecond(45), Throughput.bytesPerSecond(620), 958.9)) + .add(new TimelineEntry(ofEpochSecond(46), Throughput.bytesPerSecond(1133), 1002.8)) + .add(new TimelineEntry(ofEpochSecond(47), Throughput.bytesPerSecond(568), 1044.1)) + .add(new TimelineEntry(ofEpochSecond(48), Throughput.bytesPerSecond(561), 1001.9)) + .add(new TimelineEntry(ofEpochSecond(49), 
Throughput.bytesPerSecond(1483), 1014.3)) + .add(new TimelineEntry(ofEpochSecond(50), Throughput.bytesPerSecond(1405), 1071.6)) + .add(new TimelineEntry(ofEpochSecond(51), Throughput.bytesPerSecond(435), 1011.)) + .add(new TimelineEntry(ofEpochSecond(52), Throughput.bytesPerSecond(664), 931.5)) + .add(new TimelineEntry(ofEpochSecond(53), Throughput.bytesPerSecond(1330), 951.7)) + .add(new TimelineEntry(ofEpochSecond(54), Throughput.bytesPerSecond(540), 873.9)) + .add(new TimelineEntry(ofEpochSecond(55), Throughput.bytesPerSecond(847), 896.6)) + .add(new TimelineEntry(ofEpochSecond(56), Throughput.bytesPerSecond(1231), 906.4)) + .add(new TimelineEntry(ofEpochSecond(57), Throughput.bytesPerSecond(1331), 982.7)) + .add(new TimelineEntry(ofEpochSecond(58), Throughput.bytesPerSecond(154), 942.)) + .add(new TimelineEntry(ofEpochSecond(59), Throughput.bytesPerSecond(801), 873.8)) + .add(new TimelineEntry(ofEpochSecond(60), Throughput.bytesPerSecond(499), 783.2)) + .add(new TimelineEntry(ofEpochSecond(61), Throughput.bytesPerSecond(766), 816.3)) + .add(new TimelineEntry(ofEpochSecond(62), Throughput.bytesPerSecond(1166), 866.5)) + .add(new TimelineEntry(ofEpochSecond(63), Throughput.bytesPerSecond(1408), 874.3)) + .add(new TimelineEntry(ofEpochSecond(64), Throughput.bytesPerSecond(1145), 934.8)) + .add(new TimelineEntry(ofEpochSecond(65), Throughput.bytesPerSecond(433), 893.4)) + .add(new TimelineEntry(ofEpochSecond(66), Throughput.bytesPerSecond(1256), 895.9)) + .add(new TimelineEntry(ofEpochSecond(67), Throughput.bytesPerSecond(847), 847.5)) + .add(new TimelineEntry(ofEpochSecond(68), Throughput.bytesPerSecond(1421), 974.2)) + .add(new TimelineEntry(ofEpochSecond(69), Throughput.bytesPerSecond(347), 928.8)) + .add(new TimelineEntry(ofEpochSecond(70), Throughput.bytesPerSecond(52), 884.1)) + .add(new TimelineEntry(ofEpochSecond(71), Throughput.bytesPerSecond(19), 809.4)) + .add(new TimelineEntry(ofEpochSecond(72), Throughput.bytesPerSecond(1191), 811.9)) + .add(new 
TimelineEntry(ofEpochSecond(73), Throughput.bytesPerSecond(104), 681.5)) + .add(new TimelineEntry(ofEpochSecond(74), Throughput.bytesPerSecond(640), 631.)) + .add(new TimelineEntry(ofEpochSecond(75), Throughput.bytesPerSecond(535), 641.2)) + .add(new TimelineEntry(ofEpochSecond(76), Throughput.bytesPerSecond(203), 535.9)) + .add(new TimelineEntry(ofEpochSecond(77), Throughput.bytesPerSecond(51), 456.3)) + .add(new TimelineEntry(ofEpochSecond(78), Throughput.bytesPerSecond(1117), 425.9)) + .add(new TimelineEntry(ofEpochSecond(79), Throughput.bytesPerSecond(1390), 530.2)) + .add(new TimelineEntry(ofEpochSecond(80), Throughput.bytesPerSecond(262), 551.2)) + .add(new TimelineEntry(ofEpochSecond(81), Throughput.bytesPerSecond(5), 549.8)) + .add(new TimelineEntry(ofEpochSecond(82), Throughput.bytesPerSecond(802), 510.9)) + .add(new TimelineEntry(ofEpochSecond(83), Throughput.bytesPerSecond(529), 553.4)) + .add(new TimelineEntry(ofEpochSecond(84), Throughput.bytesPerSecond(1261), 615.5)) + .add(new TimelineEntry(ofEpochSecond(85), Throughput.bytesPerSecond(1192), 681.2)) + .add(new TimelineEntry(ofEpochSecond(86), Throughput.bytesPerSecond(276), 688.5)) + .add(new TimelineEntry(ofEpochSecond(87), Throughput.bytesPerSecond(457), 729.1)) + .add(new TimelineEntry(ofEpochSecond(88), Throughput.bytesPerSecond(799), 697.3)) + .add(new TimelineEntry(ofEpochSecond(89), Throughput.bytesPerSecond(443), 602.6)) + .add(new TimelineEntry(ofEpochSecond(90), Throughput.bytesPerSecond(1281), 704.5)) + .add(new TimelineEntry(ofEpochSecond(91), Throughput.bytesPerSecond(97), 713.7)) + .add(new TimelineEntry(ofEpochSecond(92), Throughput.bytesPerSecond(895), 723.)) + .add(new TimelineEntry(ofEpochSecond(93), Throughput.bytesPerSecond(1338), 803.9)) + .add(new TimelineEntry(ofEpochSecond(94), Throughput.bytesPerSecond(554), 733.2)) + .add(new TimelineEntry(ofEpochSecond(95), Throughput.bytesPerSecond(302), 644.2)) + .add(new TimelineEntry(ofEpochSecond(96), Throughput.bytesPerSecond(518), 
668.4)) + .add(new TimelineEntry(ofEpochSecond(97), Throughput.bytesPerSecond(502), 672.9)) + .add(new TimelineEntry(ofEpochSecond(98), Throughput.bytesPerSecond(517), 644.7)) + .add(new TimelineEntry(ofEpochSecond(99), Throughput.bytesPerSecond(172), 617.6)) + .add(new TimelineEntry(ofEpochSecond(100), Throughput.bytesPerSecond(909), 580.4)) + .add(new TimelineEntry(ofEpochSecond(101), Throughput.bytesPerSecond(1233), 694.)) + .add(new TimelineEntry(ofEpochSecond(102), Throughput.bytesPerSecond(189), 623.4)) + .add(new TimelineEntry(ofEpochSecond(103), Throughput.bytesPerSecond(244), 514.)) + .add(new TimelineEntry(ofEpochSecond(104), Throughput.bytesPerSecond(886), 547.2)) + .add(new TimelineEntry(ofEpochSecond(105), Throughput.bytesPerSecond(796), 596.6)) + .add(new TimelineEntry(ofEpochSecond(106), Throughput.bytesPerSecond(1072), 652.)) + .add(new TimelineEntry(ofEpochSecond(107), Throughput.bytesPerSecond(602), 662.)) + .add(new TimelineEntry(ofEpochSecond(108), Throughput.bytesPerSecond(507), 661.)) + .add(new TimelineEntry(ofEpochSecond(109), Throughput.bytesPerSecond(432), 687.)) + .add(new TimelineEntry(ofEpochSecond(110), Throughput.bytesPerSecond(661), 662.2)) + .add(new TimelineEntry(ofEpochSecond(111), Throughput.bytesPerSecond(1085), 647.4)) + .add(new TimelineEntry(ofEpochSecond(112), Throughput.bytesPerSecond(157), 644.2)) + .add(new TimelineEntry(ofEpochSecond(113), Throughput.bytesPerSecond(529), 672.7)) + .add(new TimelineEntry(ofEpochSecond(114), Throughput.bytesPerSecond(31), 587.2)) + .add(new TimelineEntry(ofEpochSecond(115), Throughput.bytesPerSecond(464), 554.)) + .add(new TimelineEntry(ofEpochSecond(116), Throughput.bytesPerSecond(1301), 576.9)) + .add(new TimelineEntry(ofEpochSecond(117), Throughput.bytesPerSecond(787), 595.4)) + .add(new TimelineEntry(ofEpochSecond(118), Throughput.bytesPerSecond(908), 635.5)) + .add(new TimelineEntry(ofEpochSecond(119), Throughput.bytesPerSecond(1316), 723.9)) + .add(new 
TimelineEntry(ofEpochSecond(120), Throughput.bytesPerSecond(764), 734.2)) + .add(new TimelineEntry(ofEpochSecond(121), Throughput.bytesPerSecond(1391), 764.8)) + .add(new TimelineEntry(ofEpochSecond(122), Throughput.bytesPerSecond(819), 831.)) + .add(new TimelineEntry(ofEpochSecond(123), Throughput.bytesPerSecond(219), 800.)) + .add(new TimelineEntry(ofEpochSecond(124), Throughput.bytesPerSecond(601), 857.)) + .add(new TimelineEntry(ofEpochSecond(125), Throughput.bytesPerSecond(1238), 934.4)) + .add(new TimelineEntry(ofEpochSecond(126), Throughput.bytesPerSecond(1392), 943.5)) + .add(new TimelineEntry(ofEpochSecond(127), Throughput.bytesPerSecond(499), 914.7)) + .add(new TimelineEntry(ofEpochSecond(128), Throughput.bytesPerSecond(1153), 939.2)) + .add(new TimelineEntry(ofEpochSecond(129), Throughput.bytesPerSecond(1219), 929.5)) + .add(new TimelineEntry(ofEpochSecond(130), Throughput.bytesPerSecond(519), 905.)) + .add(new TimelineEntry(ofEpochSecond(131), Throughput.bytesPerSecond(337), 799.6)) + .add(new TimelineEntry(ofEpochSecond(132), Throughput.bytesPerSecond(1065), 824.2)) + .add(new TimelineEntry(ofEpochSecond(133), Throughput.bytesPerSecond(789), 881.2)) + .add(new TimelineEntry(ofEpochSecond(134), Throughput.bytesPerSecond(32), 824.3)) + .add(new TimelineEntry(ofEpochSecond(135), Throughput.bytesPerSecond(893), 789.8)) + .add(new TimelineEntry(ofEpochSecond(136), Throughput.bytesPerSecond(1093), 759.9)) + .add(new TimelineEntry(ofEpochSecond(137), Throughput.bytesPerSecond(1218), 831.8)) + .add(new TimelineEntry(ofEpochSecond(138), Throughput.bytesPerSecond(159), 732.4)) + .add(new TimelineEntry(ofEpochSecond(139), Throughput.bytesPerSecond(407), 651.2)) + .add(new TimelineEntry(ofEpochSecond(140), Throughput.bytesPerSecond(615), 660.8)) + .add(new TimelineEntry(ofEpochSecond(141), Throughput.bytesPerSecond(1392), 766.3)) + .add(new TimelineEntry(ofEpochSecond(142), Throughput.bytesPerSecond(1431), 802.9)) + .add(new TimelineEntry(ofEpochSecond(143), 
Throughput.bytesPerSecond(270), 751.)) + .add(new TimelineEntry(ofEpochSecond(144), Throughput.bytesPerSecond(300), 777.8)) + .add(new TimelineEntry(ofEpochSecond(145), Throughput.bytesPerSecond(1402), 828.7)) + .add(new TimelineEntry(ofEpochSecond(146), Throughput.bytesPerSecond(308), 750.2)) + .add(new TimelineEntry(ofEpochSecond(147), Throughput.bytesPerSecond(125), 640.9)) + .add(new TimelineEntry(ofEpochSecond(148), Throughput.bytesPerSecond(467), 671.7)) + .add(new TimelineEntry(ofEpochSecond(149), Throughput.bytesPerSecond(1339), 764.9)) + .add(new TimelineEntry(ofEpochSecond(150), Throughput.bytesPerSecond(1146), 818.)) + .add(new TimelineEntry(ofEpochSecond(151), Throughput.bytesPerSecond(765), 755.3)) + .add(new TimelineEntry(ofEpochSecond(152), Throughput.bytesPerSecond(649), 677.1)) + .add(new TimelineEntry(ofEpochSecond(153), Throughput.bytesPerSecond(1318), 781.9)) + .add(new TimelineEntry(ofEpochSecond(154), Throughput.bytesPerSecond(199), 771.8)) + .add(new TimelineEntry(ofEpochSecond(155), Throughput.bytesPerSecond(923), 723.9)) + .add(new TimelineEntry(ofEpochSecond(156), Throughput.bytesPerSecond(430), 736.1)) + .add(new TimelineEntry(ofEpochSecond(157), Throughput.bytesPerSecond(158), 739.4)) + .add(new TimelineEntry(ofEpochSecond(158), Throughput.bytesPerSecond(187), 711.4)) + .add(new TimelineEntry(ofEpochSecond(159), Throughput.bytesPerSecond(442), 621.7)) + .add(new TimelineEntry(ofEpochSecond(160), Throughput.bytesPerSecond(82), 515.3)) + .add(new TimelineEntry(ofEpochSecond(161), Throughput.bytesPerSecond(951), 533.9)) + .add(new TimelineEntry(ofEpochSecond(162), Throughput.bytesPerSecond(976), 566.6)) + .add(new TimelineEntry(ofEpochSecond(163), Throughput.bytesPerSecond(1371), 571.9)) + .add(new TimelineEntry(ofEpochSecond(164), Throughput.bytesPerSecond(547), 606.7)) + .add(new TimelineEntry(ofEpochSecond(165), Throughput.bytesPerSecond(370), 551.4)) + .add(new TimelineEntry(ofEpochSecond(166), Throughput.bytesPerSecond(247), 533.1)) + 
.add(new TimelineEntry(ofEpochSecond(167), Throughput.bytesPerSecond(660), 583.3)) + .add(new TimelineEntry(ofEpochSecond(168), Throughput.bytesPerSecond(1222), 686.8)) + .add(new TimelineEntry(ofEpochSecond(169), Throughput.bytesPerSecond(130), 655.6)) + .add(new TimelineEntry(ofEpochSecond(170), Throughput.bytesPerSecond(512), 698.6)) + .add(new TimelineEntry(ofEpochSecond(171), Throughput.bytesPerSecond(873), 690.8)) + .add(new TimelineEntry(ofEpochSecond(172), Throughput.bytesPerSecond(18), 595.)) + .add(new TimelineEntry(ofEpochSecond(173), Throughput.bytesPerSecond(817), 539.6)) + .add(new TimelineEntry(ofEpochSecond(174), Throughput.bytesPerSecond(1090), 593.9)) + .add(new TimelineEntry(ofEpochSecond(175), Throughput.bytesPerSecond(1201), 677.)) + .add(new TimelineEntry(ofEpochSecond(176), Throughput.bytesPerSecond(1046), 756.9)) + .add(new TimelineEntry(ofEpochSecond(177), Throughput.bytesPerSecond(1075), 798.4)) + .add(new TimelineEntry(ofEpochSecond(178), Throughput.bytesPerSecond(679), 744.1)) + .add(new TimelineEntry(ofEpochSecond(179), Throughput.bytesPerSecond(1043), 835.4)) + .add(new TimelineEntry(ofEpochSecond(180), Throughput.bytesPerSecond(1206), 904.8)) + .add(new TimelineEntry(ofEpochSecond(181), Throughput.bytesPerSecond(701), 887.6)) + .add(new TimelineEntry(ofEpochSecond(182), Throughput.bytesPerSecond(849), 970.7)) + .add(new TimelineEntry(ofEpochSecond(183), Throughput.bytesPerSecond(457), 934.7)) + .add(new TimelineEntry(ofEpochSecond(184), Throughput.bytesPerSecond(400), 865.7)) + .add(new TimelineEntry(ofEpochSecond(185), Throughput.bytesPerSecond(1157), 861.3)) + .add(new TimelineEntry(ofEpochSecond(186), Throughput.bytesPerSecond(235), 780.2)) + .add(new TimelineEntry(ofEpochSecond(187), Throughput.bytesPerSecond(525), 725.2)) + .add(new TimelineEntry(ofEpochSecond(188), Throughput.bytesPerSecond(1415), 798.8)) + .add(new TimelineEntry(ofEpochSecond(189), Throughput.bytesPerSecond(796), 774.1)) + .add(new 
TimelineEntry(ofEpochSecond(190), Throughput.bytesPerSecond(428), 696.3)) + .add(new TimelineEntry(ofEpochSecond(191), Throughput.bytesPerSecond(417), 667.9)) + .add(new TimelineEntry(ofEpochSecond(192), Throughput.bytesPerSecond(436), 626.6)) + .add(new TimelineEntry(ofEpochSecond(193), Throughput.bytesPerSecond(781), 659.)) + .add(new TimelineEntry(ofEpochSecond(194), Throughput.bytesPerSecond(967), 715.7)) + .add(new TimelineEntry(ofEpochSecond(195), Throughput.bytesPerSecond(398), 639.8)) + .add(new TimelineEntry(ofEpochSecond(196), Throughput.bytesPerSecond(501), 666.4)) + .add(new TimelineEntry(ofEpochSecond(197), Throughput.bytesPerSecond(691), 683.)) + .add(new TimelineEntry(ofEpochSecond(198), Throughput.bytesPerSecond(1492), 690.7)) + .add(new TimelineEntry(ofEpochSecond(199), Throughput.bytesPerSecond(1493), 760.4)) + .add(new TimelineEntry(ofEpochSecond(200), Throughput.bytesPerSecond(5), 718.1)) + .add(new TimelineEntry(ofEpochSecond(201), Throughput.bytesPerSecond(679), 744.3)) + .add(new TimelineEntry(ofEpochSecond(202), Throughput.bytesPerSecond(1027), 803.4)) + .add(new TimelineEntry(ofEpochSecond(203), Throughput.bytesPerSecond(170), 742.3)) + .add(new TimelineEntry(ofEpochSecond(204), Throughput.bytesPerSecond(261), 671.7)) + .add(new TimelineEntry(ofEpochSecond(205), Throughput.bytesPerSecond(309), 662.8)) + .add(new TimelineEntry(ofEpochSecond(206), Throughput.bytesPerSecond(1483), 761.)) + .add(new TimelineEntry(ofEpochSecond(207), Throughput.bytesPerSecond(1154), 807.3)) + .add(new TimelineEntry(ofEpochSecond(208), Throughput.bytesPerSecond(857), 743.8)) + .add(new TimelineEntry(ofEpochSecond(209), Throughput.bytesPerSecond(792), 673.7)) + .add(new TimelineEntry(ofEpochSecond(210), Throughput.bytesPerSecond(819), 755.1)) + .add(new TimelineEntry(ofEpochSecond(211), Throughput.bytesPerSecond(763), 763.5)) + .add(new TimelineEntry(ofEpochSecond(212), Throughput.bytesPerSecond(386), 699.4)) + .add(new TimelineEntry(ofEpochSecond(213), 
Throughput.bytesPerSecond(789), 761.3)) + .add(new TimelineEntry(ofEpochSecond(214), Throughput.bytesPerSecond(1432), 878.4)) + .add(new TimelineEntry(ofEpochSecond(215), Throughput.bytesPerSecond(205), 868.)) + .add(new TimelineEntry(ofEpochSecond(216), Throughput.bytesPerSecond(905), 810.2)) + .add(new TimelineEntry(ofEpochSecond(217), Throughput.bytesPerSecond(1290), 823.8)) + .add(new TimelineEntry(ofEpochSecond(218), Throughput.bytesPerSecond(639), 802.)) + .add(new TimelineEntry(ofEpochSecond(219), Throughput.bytesPerSecond(1246), 847.4)) + .build()); +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ThroughputSinkTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ThroughputSinkTest.java new file mode 100644 index 000000000000..a00740063665 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ThroughputSinkTest.java @@ -0,0 +1,268 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.ThroughputSink.Record; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.GatheringByteChannel; +import java.nio.channels.WritableByteChannel; +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import org.junit.Assert; +import org.junit.Test; + +public final class ThroughputSinkTest { + + @Test + public void tee_record() { + AtomicReference r1 = new AtomicReference<>(null); + AtomicReference r2 = new AtomicReference<>(null); + ThroughputSink test = + ThroughputSink.tee( + new AbstractThroughputSink() { + @Override + public void recordThroughput(Record r) { + r1.compareAndSet(null, r); + } + }, + new AbstractThroughputSink() { + @Override + public void recordThroughput(Record r) { + r2.compareAndSet(null, r); + } + }); + + Record expected = Record.of(10, Instant.EPOCH, Instant.ofEpochSecond(1), false); + test.recordThroughput(expected); + + assertThat(r1.get()).isEqualTo(expected); + assertThat(r2.get()).isEqualTo(expected); + } + + @Test + public void tee_decorate() throws Exception { + AtomicReference b1 = new AtomicReference<>(null); + AtomicReference b2 = new AtomicReference<>(null); + AtomicReference b3 = new AtomicReference<>(null); + ThroughputSink test = + ThroughputSink.tee( + new AbstractThroughputSink() { + @Override + public WritableByteChannel decorate(WritableByteChannel wbc) { + return new WritableByteChannel() { + @Override + public int write(ByteBuffer src) throws IOException { + b1.compareAndSet(null, src.duplicate()); + return wbc.write(src); + } + + @Override + public boolean isOpen() { + return wbc.isOpen(); + } + + @Override + public void close() throws IOException { + wbc.close(); + } + }; + } + }, + new 
AbstractThroughputSink() { + @Override + public WritableByteChannel decorate(WritableByteChannel wbc) { + return new WritableByteChannel() { + @Override + public int write(ByteBuffer src) throws IOException { + ByteBuffer duplicate = src.duplicate(); + duplicate.position(src.limit()); + b2.compareAndSet(null, duplicate); + return wbc.write(src); + } + + @Override + public boolean isOpen() { + return wbc.isOpen(); + } + + @Override + public void close() throws IOException { + wbc.close(); + } + }; + } + }); + + AtomicBoolean callIsOpen = new AtomicBoolean(false); + AtomicBoolean callClose = new AtomicBoolean(false); + WritableByteChannel anon = + new WritableByteChannel() { + @Override + public int write(ByteBuffer src) { + int remaining = src.remaining(); + src.position(src.limit()); + b3.compareAndSet(null, src); + return remaining; + } + + @Override + public boolean isOpen() { + callIsOpen.compareAndSet(false, true); + return true; + } + + @Override + public void close() { + callClose.compareAndSet(false, true); + } + }; + + byte[] bytes = DataGenerator.base64Characters().genBytes(16); + + ByteBuffer expected1 = ByteBuffer.wrap(bytes); + ByteBuffer expected2 = ByteBuffer.wrap(bytes); + expected2.position(16); + + ByteBuffer buf = ByteBuffer.wrap(bytes); + try (WritableByteChannel decorated = test.decorate(anon)) { + if (decorated.isOpen()) { + decorated.write(buf); + } + } + + assertAll( + () -> assertThat(b1.get()).isEqualTo(expected1), + () -> assertThat(b2.get()).isEqualTo(expected2), + () -> assertThat(b3.get()).isSameInstanceAs(buf), + () -> assertThat(b3.get().hasRemaining()).isFalse(), + () -> assertThat(callIsOpen.get()).isTrue(), + () -> assertThat(callClose.get()).isTrue()); + } + + @Test + public void computeThroughput_noError() throws IOException { + // create a clock that will start at Epoch UTC, and will tick in one second increments + TestClock clock = TestClock.tickBy(Instant.EPOCH, Duration.ofSeconds(1)); + AtomicReference actual = new 
AtomicReference<>(null); + + ThroughputSink.computeThroughput( + clock, + new AbstractThroughputSink() { + @Override + public void recordThroughput(Record r) { + actual.compareAndSet(null, r); + } + }, + 300, + () -> {}); + + Record expected = Record.of(300, Instant.EPOCH, Instant.ofEpochSecond(1), false); + assertThat(actual.get()).isEqualTo(expected); + } + + @Test + public void computeThroughput_ioError() { + // create a clock that will start at Epoch UTC, and will tick in one second increments + TestClock clock = TestClock.tickBy(Instant.EPOCH, Duration.ofSeconds(1)); + AtomicReference actual = new AtomicReference<>(null); + + IOException ioException = + Assert.assertThrows( + IOException.class, + () -> + ThroughputSink.computeThroughput( + clock, + new AbstractThroughputSink() { + @Override + public void recordThroughput(Record r) { + actual.compareAndSet(null, r); + } + }, + 300, + () -> { + throw new IOException("kablamo!"); + })); + + Record expected = Record.of(300, Instant.EPOCH, Instant.ofEpochSecond(1), true); + assertThat(actual.get()).isEqualTo(expected); + + assertThat(ioException).hasMessageThat().isEqualTo("kablamo!"); + } + + @Test + public void windowed() throws IOException { + // create a clock that will start at Epoch UTC, and will tick in one second increments + TestClock clock = TestClock.tickBy(Instant.EPOCH, Duration.ofSeconds(1)); + + AtomicReference b3 = new AtomicReference<>(null); + WritableByteChannel anon = + new WritableByteChannel() { + @Override + public int write(ByteBuffer src) { + int remaining = src.remaining(); + src.position(src.limit()); + b3.compareAndSet(null, src); + return remaining; + } + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() {} + }; + + Duration windowDuration = Duration.ofMinutes(1); + ThroughputMovingWindow window = ThroughputMovingWindow.of(windowDuration); + ThroughputSink sink = ThroughputSink.windowed(window, clock); + + int numBytes = 120; + ByteBuffer buf 
= DataGenerator.base64Characters().genByteBuffer(numBytes); + try (WritableByteChannel decorated = sink.decorate(anon)) { + decorated.write(buf); + } + + Throughput avg = window.avg(clock.instant()); + + assertThat(avg).isEqualTo(Throughput.of(numBytes, windowDuration)); + assertThat(avg).isEqualTo(Throughput.bytesPerSecond(2)); + } + + private abstract static class AbstractThroughputSink implements ThroughputSink { + + @Override + public void recordThroughput(Record r) {} + + @Override + public WritableByteChannel decorate(WritableByteChannel wbc) { + return null; + } + + @Override + public GatheringByteChannel decorate(GatheringByteChannel wbc) { + return null; + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ThroughputTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ThroughputTest.java new file mode 100644 index 000000000000..e44d4c1dd52a --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ThroughputTest.java @@ -0,0 +1,35 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; +import static java.time.Duration.ofSeconds; + +import org.junit.Test; + +public final class ThroughputTest { + + @Test + public void a() { + assertThat(Throughput.bytesPerSecond(1).toBps()).isEqualTo(1); + } + + @Test + public void b() { + assertThat(Throughput.of(10, ofSeconds(10)).toBps()).isEqualTo(1); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TimestampCodecPropertyTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TimestampCodecPropertyTest.java new file mode 100644 index 000000000000..c5407134cc7b --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TimestampCodecPropertyTest.java @@ -0,0 +1,55 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.JqwikTest.report; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.Conversions.Codec; +import com.google.cloud.storage.jqwik.StorageArbitraries; +import com.google.protobuf.Timestamp; +import java.time.OffsetDateTime; +import net.jqwik.api.Arbitrary; +import net.jqwik.api.ArbitrarySupplier; +import net.jqwik.api.Example; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; +import net.jqwik.api.providers.TypeUsage; + +final class TimestampCodecPropertyTest { + + @Example + void edgeCases() { + report(TypeUsage.of(Timestamp.class), StorageArbitraries.timestamp()); + } + + @Property + void timestampCodecShouldRoundTrip(@ForAll(supplier = Supp.class) Timestamp ts) { + Codec codec = GrpcConversions.INSTANCE.timestampCodec; + OffsetDateTime decode = codec.decode(ts); + Timestamp encode = codec.encode(decode); + + assertThat(encode).isEqualTo(ts); + } + + private static final class Supp implements ArbitrarySupplier { + @Override + public Arbitrary get() { + return StorageArbitraries.timestamp(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TmpDir.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TmpDir.java new file mode 100644 index 000000000000..4cc29eac84a7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TmpDir.java @@ -0,0 +1,59 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.common.base.MoreObjects; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public final class TmpDir implements AutoCloseable { + private static final Logger LOGGER = LoggerFactory.getLogger(TmpDir.class); + + private final Path path; + + private TmpDir(Path path) { + this.path = path; + } + + public Path getPath() { + return path; + } + + /** Delete the TmpFile this handle is holding */ + @Override + public void close() throws IOException { + TestUtils.rmDashRf(path); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("path", path).toString(); + } + + /** + * Create a temporary file, which will be deleted when close is called on the returned {@link + * TmpDir} + */ + public static TmpDir of(Path baseDir, String prefix) throws IOException { + LOGGER.trace("of(baseDir : {}, prefix : {})", baseDir, prefix); + Path path = Files.createTempDirectory(baseDir, prefix); + return new TmpDir(path); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TmpFile.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TmpFile.java new file mode 100644 index 000000000000..eef1b087d67f --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TmpFile.java @@ -0,0 +1,71 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableSet; +import java.io.IOException; +import java.nio.channels.SeekableByteChannel; +import java.nio.file.Files; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Set; + +public final class TmpFile implements AutoCloseable { + private static final Set writeOps = + ImmutableSet.of(StandardOpenOption.CREATE, StandardOpenOption.WRITE); + private static final Set readOps = ImmutableSet.of(StandardOpenOption.READ); + + private final Path path; + + private TmpFile(Path path) { + this.path = path; + } + + public Path getPath() { + return path; + } + + public SeekableByteChannel reader() throws IOException { + return Files.newByteChannel(path, readOps); + } + + public SeekableByteChannel writer() throws IOException { + return Files.newByteChannel(path, writeOps); + } + + /** Delete the TmpFile this handle is holding */ + @Override + public void close() throws IOException { + Files.delete(path); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("path", path).toString(); + } + + /** + * Create a temporary file, which will be deleted when close is called on the returned {@link + * TmpFile} + */ + public static TmpFile of(Path baseDir, String prefix, String suffix) throws IOException { + Path path = Files.createTempFile(baseDir, prefix, 
suffix); + return new TmpFile(path); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TransportCompatibilityTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TransportCompatibilityTest.java new file mode 100644 index 000000000000..aa6c2a9359e2 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/TransportCompatibilityTest.java @@ -0,0 +1,70 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.NoCredentials; +import com.google.cloud.storage.PostPolicyV4.PostConditionsV4; +import com.google.cloud.storage.PostPolicyV4.PostFieldsV4; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.common.collect.ImmutableList; +import java.util.function.Supplier; +import java.util.stream.Stream; +import org.junit.Test; + +public final class TransportCompatibilityTest { + + @Test + public void verifyUnsupportedMethodsGenerateMeaningfulException() { + GrpcStorageOptions options = + StorageOptions.grpc() + .setProjectId("blank") + .setCredentials(NoCredentials.getInstance()) + .build(); + @SuppressWarnings("resource") + Storage s = + new GrpcStorageImpl( + options, null, null, ResponseContentLifecycleManager.noop(), null, null, Opts.empty()); + ImmutableList messages = + Stream.>of( + s::batch, + () -> s.writer(null), + () -> s.signUrl(null, 0, null), + () -> s.generateSignedPostPolicyV4(null, 0, null, null, null, null), + () -> s.generateSignedPostPolicyV4(null, 0, null, (PostFieldsV4) null), + () -> s.generateSignedPostPolicyV4(null, 0, null, (PostConditionsV4) null), + () -> s.generateSignedPostPolicyV4(null, 0, null)) + .map( + sup -> { + try { + sup.get(); + return null; + } catch (UnsupportedOperationException e) { + return e.getMessage(); + } + }) + .collect(ImmutableList.toImmutableList()); + + for (String message : messages) { + assertThat(message).contains("only supported for " + Transport.HTTP); + assertThat(message) + .contains("Please use StorageOptions.http() to construct a compatible instance"); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/UnifiedOptsGrpcTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/UnifiedOptsGrpcTest.java new file mode 100644 index 
000000000000..38a40fd0546b --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/UnifiedOptsGrpcTest.java @@ -0,0 +1,1422 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.cloud.storage.Storage.PredefinedAcl; +import com.google.cloud.storage.UnifiedOpts.Headers; +import com.google.cloud.storage.UnifiedOpts.Mapper; +import com.google.cloud.storage.UnifiedOpts.NoOpObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; +import com.google.cloud.storage.UnifiedOpts.Opts; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.hash.Hashing; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.GetPolicyOptions; +import com.google.protobuf.ByteString; +import com.google.storage.v2.CommonObjectRequestParams; +import com.google.storage.v2.ComposeObjectRequest; +import com.google.storage.v2.CreateBucketRequest; +import com.google.storage.v2.DeleteBucketRequest; +import com.google.storage.v2.DeleteObjectRequest; +import com.google.storage.v2.GetBucketRequest; +import com.google.storage.v2.GetObjectRequest; +import 
com.google.storage.v2.ListBucketsRequest; +import com.google.storage.v2.ListObjectsRequest; +import com.google.storage.v2.Object; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.RewriteObjectRequest; +import com.google.storage.v2.UpdateBucketRequest; +import com.google.storage.v2.UpdateObjectRequest; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectSpec; +import java.security.SecureRandom; +import javax.crypto.spec.SecretKeySpec; +import org.junit.Test; +import org.junit.experimental.runners.Enclosed; +import org.junit.runner.RunWith; + +@RunWith(Enclosed.class) +public final class UnifiedOptsGrpcTest { + + @RunWith(Enclosed.class) + public static final class Opt { + + public static final class DecryptionKeyTest { + @Test + public void readObject() { + CommonObjectRequestParams rand = randEncryption(); + SecretKeySpec k = + new SecretKeySpec( + rand.getEncryptionKeyBytes().toByteArray(), rand.getEncryptionAlgorithm()); + + ReadObjectRequest expected = + ReadObjectRequest.newBuilder().setCommonObjectRequestParams(rand).build(); + ReadObjectRequest actual = + UnifiedOpts.decryptionKey(k).readObject().apply(ReadObjectRequest.newBuilder()).build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void getObject() { + CommonObjectRequestParams rand = randEncryption(); + SecretKeySpec k = + new SecretKeySpec( + rand.getEncryptionKeyBytes().toByteArray(), rand.getEncryptionAlgorithm()); + + GetObjectRequest expected = + GetObjectRequest.newBuilder().setCommonObjectRequestParams(rand).build(); + GetObjectRequest actual = + UnifiedOpts.decryptionKey(k).getObject().apply(GetObjectRequest.newBuilder()).build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void rewriteObject() { + CommonObjectRequestParams rand = randEncryption(); + SecretKeySpec k = + new SecretKeySpec( + rand.getEncryptionKeyBytes().toByteArray(), rand.getEncryptionAlgorithm()); + + 
RewriteObjectRequest expected = + RewriteObjectRequest.newBuilder() + .setCopySourceEncryptionAlgorithm(rand.getEncryptionAlgorithm()) + .setCopySourceEncryptionKeyBytes(rand.getEncryptionKeyBytes()) + .setCopySourceEncryptionKeySha256Bytes(rand.getEncryptionKeySha256Bytes()) + .build(); + RewriteObjectRequest actual = + UnifiedOpts.decryptionKey(k) + .rewriteObject() + .apply(RewriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class DelimiterTest { + @Test + public void listObjects() { + ListObjectsRequest expected = ListObjectsRequest.newBuilder().setDelimiter("|~|").build(); + + ListObjectsRequest actual = + UnifiedOpts.delimiter("|~|") + .listObjects() + .apply(ListObjectsRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class EncryptionKeyTest { + @Test + public void writeObject() { + CommonObjectRequestParams rand = randEncryption(); + SecretKeySpec k = + new SecretKeySpec( + rand.getEncryptionKeyBytes().toByteArray(), rand.getEncryptionAlgorithm()); + + WriteObjectRequest expected = + WriteObjectRequest.newBuilder().setCommonObjectRequestParams(rand).build(); + WriteObjectRequest actual = + UnifiedOpts.encryptionKey(k) + .writeObject() + .apply(WriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void updateObject() { + CommonObjectRequestParams rand = randEncryption(); + SecretKeySpec k = + new SecretKeySpec( + rand.getEncryptionKeyBytes().toByteArray(), rand.getEncryptionAlgorithm()); + + UpdateObjectRequest expected = + UpdateObjectRequest.newBuilder().setCommonObjectRequestParams(rand).build(); + UpdateObjectRequest actual = + UnifiedOpts.encryptionKey(k) + .updateObject() + .apply(UpdateObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void deleteObject() { + CommonObjectRequestParams rand = randEncryption(); + 
SecretKeySpec k = + new SecretKeySpec( + rand.getEncryptionKeyBytes().toByteArray(), rand.getEncryptionAlgorithm()); + + DeleteObjectRequest expected = + DeleteObjectRequest.newBuilder().setCommonObjectRequestParams(rand).build(); + DeleteObjectRequest actual = + UnifiedOpts.encryptionKey(k) + .deleteObject() + .apply(DeleteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void composeObject() { + CommonObjectRequestParams rand = randEncryption(); + SecretKeySpec k = + new SecretKeySpec( + rand.getEncryptionKeyBytes().toByteArray(), rand.getEncryptionAlgorithm()); + + ComposeObjectRequest expected = + ComposeObjectRequest.newBuilder().setCommonObjectRequestParams(rand).build(); + ComposeObjectRequest actual = + UnifiedOpts.encryptionKey(k) + .composeObject() + .apply(ComposeObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void rewriteObject() { + CommonObjectRequestParams rand = randEncryption(); + SecretKeySpec k = + new SecretKeySpec( + rand.getEncryptionKeyBytes().toByteArray(), rand.getEncryptionAlgorithm()); + + RewriteObjectRequest expected = + RewriteObjectRequest.newBuilder().setCommonObjectRequestParams(rand).build(); + RewriteObjectRequest actual = + UnifiedOpts.encryptionKey(k) + .rewriteObject() + .apply(RewriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class EndOffsetTest { + @Test + public void listObjects() { + ListObjectsRequest expected = + ListObjectsRequest.newBuilder().setLexicographicStart("start").build(); + + ListObjectsRequest actual = + UnifiedOpts.startOffset("start") + .listObjects() + .apply(ListObjectsRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class GenerationMatchTest { + @Test + public void writeObject() { + WriteObjectRequest expected = + WriteObjectRequest.newBuilder() + 
.setWriteObjectSpec(WriteObjectSpec.newBuilder().setIfGenerationMatch(1L)) + .build(); + WriteObjectRequest actual = + UnifiedOpts.generationMatch(1L) + .writeObject() + .apply(WriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void readObject() { + ReadObjectRequest expected = + ReadObjectRequest.newBuilder().setIfGenerationMatch(1L).build(); + ReadObjectRequest actual = + UnifiedOpts.generationMatch(1L) + .readObject() + .apply(ReadObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void getObject() { + GetObjectRequest expected = GetObjectRequest.newBuilder().setIfGenerationMatch(1L).build(); + GetObjectRequest actual = + UnifiedOpts.generationMatch(1L) + .getObject() + .apply(GetObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void updateObject() { + UpdateObjectRequest expected = + UpdateObjectRequest.newBuilder().setIfGenerationMatch(1L).build(); + UpdateObjectRequest actual = + UnifiedOpts.generationMatch(1L) + .updateObject() + .apply(UpdateObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void deleteObject() { + DeleteObjectRequest expected = + DeleteObjectRequest.newBuilder().setIfGenerationMatch(1L).build(); + DeleteObjectRequest actual = + UnifiedOpts.generationMatch(1L) + .deleteObject() + .apply(DeleteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void composeObject() { + ComposeObjectRequest expected = + ComposeObjectRequest.newBuilder().setIfGenerationMatch(1L).build(); + ComposeObjectRequest actual = + UnifiedOpts.generationMatch(1L) + .composeObject() + .apply(ComposeObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void rewriteObject() { + RewriteObjectRequest expected = + 
RewriteObjectRequest.newBuilder().setIfGenerationMatch(1L).build(); + RewriteObjectRequest actual = + UnifiedOpts.generationMatch(1L) + .rewriteObject() + .apply(RewriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class GenerationNotMatchTest { + @Test + public void writeObject() { + WriteObjectRequest expected = + WriteObjectRequest.newBuilder() + .setWriteObjectSpec(WriteObjectSpec.newBuilder().setIfGenerationNotMatch(1L)) + .build(); + WriteObjectRequest actual = + UnifiedOpts.generationNotMatch(1L) + .writeObject() + .apply(WriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void readObject() { + ReadObjectRequest expected = + ReadObjectRequest.newBuilder().setIfGenerationNotMatch(1L).build(); + ReadObjectRequest actual = + UnifiedOpts.generationNotMatch(1L) + .readObject() + .apply(ReadObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void getObject() { + GetObjectRequest expected = + GetObjectRequest.newBuilder().setIfGenerationNotMatch(1L).build(); + GetObjectRequest actual = + UnifiedOpts.generationNotMatch(1L) + .getObject() + .apply(GetObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void updateObject() { + UpdateObjectRequest expected = + UpdateObjectRequest.newBuilder().setIfGenerationNotMatch(1L).build(); + UpdateObjectRequest actual = + UnifiedOpts.generationNotMatch(1L) + .updateObject() + .apply(UpdateObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void deleteObject() { + DeleteObjectRequest expected = + DeleteObjectRequest.newBuilder().setIfGenerationNotMatch(1L).build(); + DeleteObjectRequest actual = + UnifiedOpts.generationNotMatch(1L) + .deleteObject() + .apply(DeleteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + 
@Test + public void rewriteObject() { + RewriteObjectRequest expected = + RewriteObjectRequest.newBuilder().setIfGenerationNotMatch(1L).build(); + RewriteObjectRequest actual = + UnifiedOpts.generationNotMatch(1L) + .rewriteObject() + .apply(RewriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class KmsKeyNameTest { + @Test + public void writeObject() { + WriteObjectRequest expected = + WriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource(Object.newBuilder().setKmsKey("key").build()) + .build()) + .build(); + + WriteObjectRequest actual = + UnifiedOpts.kmsKeyName("key") + .writeObject() + .apply(WriteObjectRequest.newBuilder()) + .build(); + + assertThat(actual).isEqualTo(expected); + } + + @Test + public void composeObject() { + ComposeObjectRequest expected = ComposeObjectRequest.newBuilder().setKmsKey("key").build(); + + ComposeObjectRequest actual = + UnifiedOpts.kmsKeyName("key") + .composeObject() + .apply(ComposeObjectRequest.newBuilder()) + .build(); + + assertThat(actual).isEqualTo(expected); + } + + @Test + public void rewriteObject() { + RewriteObjectRequest expected = + RewriteObjectRequest.newBuilder().setDestinationKmsKey("key").build(); + + RewriteObjectRequest actual = + UnifiedOpts.kmsKeyName("key") + .rewriteObject() + .apply(RewriteObjectRequest.newBuilder()) + .build(); + + assertThat(actual).isEqualTo(expected); + } + } + + public static final class MetagenerationMatchTest { + @Test + public void writeObject() { + WriteObjectRequest expected = + WriteObjectRequest.newBuilder() + .setWriteObjectSpec(WriteObjectSpec.newBuilder().setIfMetagenerationMatch(1L)) + .build(); + WriteObjectRequest actual = + UnifiedOpts.metagenerationMatch(1L) + .writeObject() + .apply(WriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void readObject() { + ReadObjectRequest expected = + 
ReadObjectRequest.newBuilder().setIfMetagenerationMatch(1L).build(); + ReadObjectRequest actual = + UnifiedOpts.metagenerationMatch(1L) + .readObject() + .apply(ReadObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void getObject() { + GetObjectRequest expected = + GetObjectRequest.newBuilder().setIfMetagenerationMatch(1L).build(); + GetObjectRequest actual = + UnifiedOpts.metagenerationMatch(1L) + .getObject() + .apply(GetObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void updateObject() { + UpdateObjectRequest expected = + UpdateObjectRequest.newBuilder().setIfMetagenerationMatch(1L).build(); + UpdateObjectRequest actual = + UnifiedOpts.metagenerationMatch(1L) + .updateObject() + .apply(UpdateObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void deleteObject() { + DeleteObjectRequest expected = + DeleteObjectRequest.newBuilder().setIfMetagenerationMatch(1L).build(); + DeleteObjectRequest actual = + UnifiedOpts.metagenerationMatch(1L) + .deleteObject() + .apply(DeleteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void composeObject() { + ComposeObjectRequest expected = + ComposeObjectRequest.newBuilder().setIfMetagenerationMatch(1L).build(); + ComposeObjectRequest actual = + UnifiedOpts.metagenerationMatch(1L) + .composeObject() + .apply(ComposeObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void rewriteObject() { + RewriteObjectRequest expected = + RewriteObjectRequest.newBuilder().setIfMetagenerationMatch(1L).build(); + RewriteObjectRequest actual = + UnifiedOpts.metagenerationMatch(1L) + .rewriteObject() + .apply(RewriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void updateBucket() { + UpdateBucketRequest expected = + 
UpdateBucketRequest.newBuilder().setIfMetagenerationMatch(1L).build(); + UpdateBucketRequest actual = + UnifiedOpts.metagenerationMatch(1L) + .updateBucket() + .apply(UpdateBucketRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void deleteBucket() { + DeleteBucketRequest expected = + DeleteBucketRequest.newBuilder().setIfMetagenerationMatch(1L).build(); + DeleteBucketRequest actual = + UnifiedOpts.metagenerationMatch(1L) + .deleteBucket() + .apply(DeleteBucketRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void getBucket() { + GetBucketRequest expected = + GetBucketRequest.newBuilder().setIfMetagenerationMatch(1L).build(); + GetBucketRequest actual = + UnifiedOpts.metagenerationMatch(1L) + .getBucket() + .apply(GetBucketRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class MetagenerationNotMatchTest { + @Test + public void writeObject() { + WriteObjectRequest expected = + WriteObjectRequest.newBuilder() + .setWriteObjectSpec(WriteObjectSpec.newBuilder().setIfMetagenerationNotMatch(1L)) + .build(); + WriteObjectRequest actual = + UnifiedOpts.metagenerationNotMatch(1L) + .writeObject() + .apply(WriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void readObject() { + ReadObjectRequest expected = + ReadObjectRequest.newBuilder().setIfMetagenerationNotMatch(1L).build(); + ReadObjectRequest actual = + UnifiedOpts.metagenerationNotMatch(1L) + .readObject() + .apply(ReadObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void getObject() { + GetObjectRequest expected = + GetObjectRequest.newBuilder().setIfMetagenerationNotMatch(1L).build(); + GetObjectRequest actual = + UnifiedOpts.metagenerationNotMatch(1L) + .getObject() + .apply(GetObjectRequest.newBuilder()) + .build(); + 
assertThat(actual).isEqualTo(expected); + } + + @Test + public void updateObject() { + UpdateObjectRequest expected = + UpdateObjectRequest.newBuilder().setIfMetagenerationNotMatch(1L).build(); + UpdateObjectRequest actual = + UnifiedOpts.metagenerationNotMatch(1L) + .updateObject() + .apply(UpdateObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void deleteObject() { + DeleteObjectRequest expected = + DeleteObjectRequest.newBuilder().setIfMetagenerationNotMatch(1L).build(); + DeleteObjectRequest actual = + UnifiedOpts.metagenerationNotMatch(1L) + .deleteObject() + .apply(DeleteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void rewriteObject() { + RewriteObjectRequest expected = + RewriteObjectRequest.newBuilder().setIfMetagenerationNotMatch(1L).build(); + RewriteObjectRequest actual = + UnifiedOpts.metagenerationNotMatch(1L) + .rewriteObject() + .apply(RewriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void updateBucket() { + UpdateBucketRequest expected = + UpdateBucketRequest.newBuilder().setIfMetagenerationNotMatch(1L).build(); + UpdateBucketRequest actual = + UnifiedOpts.metagenerationNotMatch(1L) + .updateBucket() + .apply(UpdateBucketRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void deleteBucket() { + DeleteBucketRequest expected = + DeleteBucketRequest.newBuilder().setIfMetagenerationNotMatch(1L).build(); + DeleteBucketRequest actual = + UnifiedOpts.metagenerationNotMatch(1L) + .deleteBucket() + .apply(DeleteBucketRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void getBucket() { + GetBucketRequest expected = + GetBucketRequest.newBuilder().setIfMetagenerationNotMatch(1L).build(); + GetBucketRequest actual = + UnifiedOpts.metagenerationNotMatch(1L) + .getBucket() + 
.apply(GetBucketRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class PageSizeTest { + @Test + public void listBuckets() { + ListBucketsRequest expected = ListBucketsRequest.newBuilder().setPageSize(5).build(); + + ListBucketsRequest actual = + UnifiedOpts.pageSize(5).listBuckets().apply(ListBucketsRequest.newBuilder()).build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void listObjects() { + ListObjectsRequest expected = ListObjectsRequest.newBuilder().setPageSize(5).build(); + + ListObjectsRequest actual = + UnifiedOpts.pageSize(5).listObjects().apply(ListObjectsRequest.newBuilder()).build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class PageTokenTest { + @Test + public void listBuckets() { + ListBucketsRequest expected = + ListBucketsRequest.newBuilder().setPageToken("pageToken").build(); + + ListBucketsRequest actual = + UnifiedOpts.pageToken("pageToken") + .listBuckets() + .apply(ListBucketsRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void listObjects() { + ListObjectsRequest expected = + ListObjectsRequest.newBuilder().setPageToken("pageToken").build(); + + ListObjectsRequest actual = + UnifiedOpts.pageToken("pageToken") + .listObjects() + .apply(ListObjectsRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class PredefinedAclTest { + @Test + public void writeObject() { + WriteObjectRequest expected = + WriteObjectRequest.newBuilder() + .setWriteObjectSpec(WriteObjectSpec.newBuilder().setPredefinedAcl("private")) + .build(); + WriteObjectRequest actual = + UnifiedOpts.predefinedAcl(PredefinedAcl.PRIVATE) + .writeObject() + .apply(WriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void updateObject() { + UpdateObjectRequest expected = + 
UpdateObjectRequest.newBuilder().setPredefinedAcl("private").build(); + UpdateObjectRequest actual = + UnifiedOpts.predefinedAcl(PredefinedAcl.PRIVATE) + .updateObject() + .apply(UpdateObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void composeObject() { + ComposeObjectRequest expected = + ComposeObjectRequest.newBuilder().setDestinationPredefinedAcl("private").build(); + ComposeObjectRequest actual = + UnifiedOpts.predefinedAcl(PredefinedAcl.PRIVATE) + .composeObject() + .apply(ComposeObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void rewriteObject() { + RewriteObjectRequest expected = + RewriteObjectRequest.newBuilder().setDestinationPredefinedAcl("private").build(); + RewriteObjectRequest actual = + UnifiedOpts.predefinedAcl(PredefinedAcl.PRIVATE) + .rewriteObject() + .apply(RewriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void updateBucket() { + UpdateBucketRequest expected = + UpdateBucketRequest.newBuilder().setPredefinedAcl("private").build(); + UpdateBucketRequest actual = + UnifiedOpts.predefinedAcl(PredefinedAcl.PRIVATE) + .updateBucket() + .apply(UpdateBucketRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void createBucket() { + CreateBucketRequest expected = + CreateBucketRequest.newBuilder().setPredefinedAcl("private").build(); + CreateBucketRequest actual = + UnifiedOpts.predefinedAcl(PredefinedAcl.PRIVATE) + .createBucket() + .apply(CreateBucketRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class PredefinedDefaultObjectAclTest { + @Test + public void createBucket() { + CreateBucketRequest expected = + CreateBucketRequest.newBuilder().setPredefinedDefaultObjectAcl("private").build(); + CreateBucketRequest actual = + 
UnifiedOpts.predefinedDefaultObjectAcl(PredefinedAcl.PRIVATE) + .createBucket() + .apply(CreateBucketRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void updateBucket() { + UpdateBucketRequest expected = + UpdateBucketRequest.newBuilder().setPredefinedDefaultObjectAcl("private").build(); + UpdateBucketRequest actual = + UnifiedOpts.predefinedDefaultObjectAcl(PredefinedAcl.PRIVATE) + .updateBucket() + .apply(UpdateBucketRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class PrefixTest { + @Test + public void listBuckets() { + ListBucketsRequest expected = ListBucketsRequest.newBuilder().setPrefix("prefix").build(); + + ListBucketsRequest actual = + UnifiedOpts.prefix("prefix") + .listBuckets() + .apply(ListBucketsRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void listObjects() { + ListObjectsRequest expected = ListObjectsRequest.newBuilder().setPrefix("prefix").build(); + + ListObjectsRequest actual = + UnifiedOpts.prefix("prefix") + .listObjects() + .apply(ListObjectsRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class SourceGenerationMatchTest { + @Test + public void rewriteObject() { + RewriteObjectRequest expected = + RewriteObjectRequest.newBuilder().setIfSourceGenerationMatch(1L).build(); + RewriteObjectRequest actual = + UnifiedOpts.generationMatch(1L) + .asSource() + .rewriteObject() + .apply(RewriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class SourceGenerationNotMatchTest { + @Test + public void rewriteObject() { + RewriteObjectRequest expected = + RewriteObjectRequest.newBuilder().setIfSourceGenerationNotMatch(1L).build(); + RewriteObjectRequest actual = + UnifiedOpts.generationNotMatch(1L) + .asSource() + .rewriteObject() + 
.apply(RewriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class SourceMetagenerationMatchTest { + @Test + public void rewriteObject() { + RewriteObjectRequest expected = + RewriteObjectRequest.newBuilder().setIfSourceMetagenerationMatch(1L).build(); + RewriteObjectRequest actual = + UnifiedOpts.metagenerationMatch(1L) + .asSource() + .rewriteObject() + .apply(RewriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class SourceMetagenerationNotMatchTest { + @Test + public void rewriteObject() { + RewriteObjectRequest expected = + RewriteObjectRequest.newBuilder().setIfSourceMetagenerationNotMatch(1L).build(); + RewriteObjectRequest actual = + UnifiedOpts.metagenerationNotMatch(1L) + .asSource() + .rewriteObject() + .apply(RewriteObjectRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class RequestedPolicyVersionTest { + @Test + public void getIamPolicy() { + GetIamPolicyRequest expected = + GetIamPolicyRequest.newBuilder() + .setOptions(GetPolicyOptions.newBuilder().setRequestedPolicyVersion(3).build()) + .build(); + GetIamPolicyRequest actual = + UnifiedOpts.requestedPolicyVersion(3) + .getIamPolicy() + .apply(GetIamPolicyRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class SetContentTypeTest { + @Test + public void writeObject() { + WriteObjectRequest expected = + WriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource(Object.newBuilder().setContentType("text/plain").build()) + .build()) + .build(); + + WriteObjectRequest actual = + UnifiedOpts.setContentType("text/plain") + .writeObject() + .apply(WriteObjectRequest.newBuilder()) + .build(); + + assertThat(actual).isEqualTo(expected); + } + + @Test + public void updateObject() { + UpdateObjectRequest expected = + 
UpdateObjectRequest.newBuilder() + .setObject(Object.newBuilder().setContentType("text/plain").build()) + .build(); + + UpdateObjectRequest actual = + UnifiedOpts.setContentType("text/plain") + .updateObject() + .apply(UpdateObjectRequest.newBuilder()) + .build(); + + assertThat(actual).isEqualTo(expected); + } + } + + public static final class StartOffsetTest { + @Test + public void listObjects() { + ListObjectsRequest expected = + ListObjectsRequest.newBuilder().setLexicographicStart("start").build(); + + ListObjectsRequest actual = + UnifiedOpts.startOffset("start") + .listObjects() + .apply(ListObjectsRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class UserProjectTest { + @Test + public void grpcCallContext() { + GrpcCallContext expected = + GrpcCallContext.createDefault() + .withExtraHeaders( + ImmutableMap.of("X-Goog-User-Project", ImmutableList.of("user-project"))); + GrpcCallContext actual = + UnifiedOpts.userProject("user-project") + .getGrpcMetadataMapper() + .apply(GrpcCallContext.createDefault()); + + assertThat(actual.getExtraHeaders()).isEqualTo(expected.getExtraHeaders()); + assertThat(actual).isEqualTo(expected); + } + } + + public static final class VersionsFilterTest { + @Test + public void listObjects() { + ListObjectsRequest expected = ListObjectsRequest.newBuilder().setVersions(true).build(); + + ListObjectsRequest actual = + UnifiedOpts.versionsFilter(true) + .listObjects() + .apply(ListObjectsRequest.newBuilder()) + .build(); + assertThat(actual).isEqualTo(expected); + } + } + } + + @RunWith(Enclosed.class) + public static final class Extractor { + + public static final class Crc32cMatchExtractorTest { + + @Test + public void extractFromBlobInfo_nonNull() { + BlobInfo info = BlobInfo.newBuilder("b", "o").setCrc32c("crc32c").build(); + ObjectTargetOpt opt = UnifiedOpts.crc32cMatchExtractor().extractFromBlobInfo(info); + assertThat(opt).isEqualTo(UnifiedOpts.crc32cMatch("crc32c")); + 
} + + @Test + public void extractFromBlobInfo_null() { + BlobInfo info = BlobInfo.newBuilder("b", "o").build(); + ObjectTargetOpt opt = UnifiedOpts.crc32cMatchExtractor().extractFromBlobInfo(info); + assertThat(opt).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + } + + @Test + public void extractFromBlobId_noop() { + ObjectTargetOpt opt = UnifiedOpts.crc32cMatchExtractor().extractFromBlobId(null); + assertThat(opt).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + } + } + + public static final class DetectContentTypeTest { + + @Test + public void hasExtension() { + BlobInfo info1 = BlobInfo.newBuilder("bucket", "obj.txt").build(); + ObjectTargetOpt opt = UnifiedOpts.detectContentType().extractFromBlobInfo(info1); + + assertThat(opt).isEqualTo(UnifiedOpts.setContentType("text/plain")); + } + + @Test + public void noopIfAlreadySpecified() { + BlobInfo info1 = + BlobInfo.newBuilder("bucket", "obj.txt").setContentType("text/plain").build(); + ObjectTargetOpt opt = UnifiedOpts.detectContentType().extractFromBlobInfo(info1); + + assertThat(opt).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + } + + @Test + public void baseCaseIsApplicationOctetStream() { + BlobInfo info1 = BlobInfo.newBuilder("bucket", "obj").build(); + ObjectTargetOpt opt = UnifiedOpts.detectContentType().extractFromBlobInfo(info1); + + assertThat(opt).isEqualTo(UnifiedOpts.setContentType("application/octet-stream")); + } + } + + public static final class GenerationMatchExtractorTest { + @Test + public void extractFromBlobInfo_nonNull() { + BlobInfo info = BlobInfo.newBuilder("b", "o", 3L).build(); + ObjectTargetOpt opt = UnifiedOpts.generationMatchExtractor().extractFromBlobInfo(info); + assertThat(opt).isEqualTo(UnifiedOpts.generationMatch(3L)); + } + + @Test + public void extractFromBlobInfo_null() { + BlobInfo info = BlobInfo.newBuilder("b", "o").build(); + assertThat( + assertThrows( + IllegalArgumentException.class, + () -> UnifiedOpts.generationMatchExtractor().extractFromBlobInfo(info))) + .hasMessageThat() + 
.contains("ifGenerationMatch"); + } + + @Test + public void extractFromBlobId_nonNull() { + BlobId id = BlobId.of("b", "o", 3L); + ObjectTargetOpt opt = UnifiedOpts.generationMatchExtractor().extractFromBlobId(id); + assertThat(opt).isEqualTo(UnifiedOpts.generationMatch(3L)); + } + + @Test + public void extractFromBlobId_null() { + BlobId id = BlobId.of("b", "o"); + assertThat( + assertThrows( + IllegalArgumentException.class, + () -> UnifiedOpts.generationMatchExtractor().extractFromBlobId(id))) + .hasMessageThat() + .contains("ifGenerationMatch"); + } + } + + public static final class GenerationNotMatchExtractorTest { + @Test + public void extractFromBlobInfo_nonNull() { + BlobInfo info = BlobInfo.newBuilder("b", "o", 3L).build(); + ObjectTargetOpt opt = UnifiedOpts.generationNotMatchExtractor().extractFromBlobInfo(info); + assertThat(opt).isEqualTo(UnifiedOpts.generationNotMatch(3L)); + } + + @Test + public void extractFromBlobInfo_null() { + BlobInfo info = BlobInfo.newBuilder("b", "o").build(); + assertThat( + assertThrows( + IllegalArgumentException.class, + () -> UnifiedOpts.generationNotMatchExtractor().extractFromBlobInfo(info))) + .hasMessageThat() + .contains("ifGenerationNotMatch"); + } + + @Test + public void extractFromBlobId_nonNull() { + BlobId id = BlobId.of("b", "o", 3L); + ObjectTargetOpt opt = UnifiedOpts.generationNotMatchExtractor().extractFromBlobId(id); + assertThat(opt).isEqualTo(UnifiedOpts.generationNotMatch(3L)); + } + + @Test + public void extractFromBlobId_null() { + BlobId id = BlobId.of("b", "o"); + assertThat( + assertThrows( + IllegalArgumentException.class, + () -> UnifiedOpts.generationNotMatchExtractor().extractFromBlobId(id))) + .hasMessageThat() + .contains("ifGenerationNotMatch"); + } + } + + public static final class Md5MatchExtractorTest { + + @Test + public void extractFromBlobInfo_nonNull() { + BlobInfo info = BlobInfo.newBuilder("b", "o").setMd5("md5").build(); + ObjectTargetOpt opt = 
UnifiedOpts.md5MatchExtractor().extractFromBlobInfo(info); + assertThat(opt).isEqualTo(UnifiedOpts.md5Match("md5")); + } + + @Test + public void extractFromBlobInfo_null() { + BlobInfo info = BlobInfo.newBuilder("b", "o").build(); + ObjectTargetOpt opt = UnifiedOpts.md5MatchExtractor().extractFromBlobInfo(info); + assertThat(opt).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + } + + @Test + public void extractFromBlobId_noop() { + ObjectTargetOpt opt = UnifiedOpts.md5MatchExtractor().extractFromBlobId(null); + assertThat(opt).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + } + } + + public static final class MetagenerationMatchExtractorTest { + @Test + public void extractFromBlobInfo_nonNull() { + BlobInfo info = BlobInfo.newBuilder("b", "o").setMetageneration(1L).build(); + ObjectTargetOpt opt = UnifiedOpts.metagenerationMatchExtractor().extractFromBlobInfo(info); + assertThat(opt).isEqualTo(UnifiedOpts.metagenerationMatch(1L)); + } + + @Test + public void extractFromBlobInfo_null() { + BlobInfo info = BlobInfo.newBuilder("b", "o").build(); + assertThat( + assertThrows( + IllegalArgumentException.class, + () -> UnifiedOpts.metagenerationMatchExtractor().extractFromBlobInfo(info))) + .hasMessageThat() + .contains("ifMetagenerationMatch"); + } + + @Test + public void extractFromBlobId_noop() { + ObjectTargetOpt opt = UnifiedOpts.metagenerationMatchExtractor().extractFromBlobId(null); + assertThat(opt).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + } + + @Test + public void extractFromBucketInfo_nonNull() { + BucketInfo info = BucketInfo.newBuilder("b").setMetageneration(1L).build(); + ObjectTargetOpt opt = + UnifiedOpts.metagenerationMatchExtractor().extractFromBucketInfo(info); + assertThat(opt).isEqualTo(UnifiedOpts.metagenerationMatch(1L)); + } + + @Test + public void extractFromBucketInfo_null() { + BucketInfo info = BucketInfo.newBuilder("b").build(); + assertThat( + assertThrows( + IllegalArgumentException.class, + () -> 
UnifiedOpts.metagenerationMatchExtractor().extractFromBucketInfo(info))) + .hasMessageThat() + .contains("ifMetagenerationMatch"); + } + } + + public static final class MetagenerationNotMatchExtractorTest { + @Test + public void extractFromBlobInfo_nonNull() { + BlobInfo info = BlobInfo.newBuilder("b", "o").setMetageneration(1L).build(); + ObjectTargetOpt opt = + UnifiedOpts.metagenerationNotMatchExtractor().extractFromBlobInfo(info); + assertThat(opt).isEqualTo(UnifiedOpts.metagenerationNotMatch(1L)); + } + + @Test + public void extractFromBlobInfo_null() { + BlobInfo info = BlobInfo.newBuilder("b", "o").build(); + assertThat( + assertThrows( + IllegalArgumentException.class, + () -> UnifiedOpts.metagenerationNotMatchExtractor().extractFromBlobInfo(info))) + .hasMessageThat() + .contains("ifMetagenerationNotMatch"); + } + + @Test + public void extractFromBlobId_noop() { + ObjectTargetOpt opt = UnifiedOpts.metagenerationNotMatchExtractor().extractFromBlobId(null); + assertThat(opt).isEqualTo(NoOpObjectTargetOpt.INSTANCE); + } + + @Test + public void extractFromBucketInfo_nonNull() { + BucketInfo info = BucketInfo.newBuilder("b").setMetageneration(1L).build(); + ObjectTargetOpt opt = + UnifiedOpts.metagenerationNotMatchExtractor().extractFromBucketInfo(info); + assertThat(opt).isEqualTo(UnifiedOpts.metagenerationNotMatch(1L)); + } + + @Test + public void extractFromBucketInfo_null() { + BucketInfo info = BucketInfo.newBuilder("b").build(); + assertThat( + assertThrows( + IllegalArgumentException.class, + () -> + UnifiedOpts.metagenerationNotMatchExtractor().extractFromBucketInfo(info))) + .hasMessageThat() + .contains("ifMetagenerationNotMatch"); + } + } + } + + public static final class NoOpObjectTargetOptTest { + @Test + public void grpcMetadataMapper() { + GrpcCallContext ctx1 = GrpcCallContext.createDefault(); + GrpcCallContext ctx2 = NoOpObjectTargetOpt.INSTANCE.getGrpcMetadataMapper().apply(ctx1); + assertThat(ctx2).isEqualTo(ctx1); + 
assertThat(ctx2).isSameInstanceAs(ctx1); + } + + @Test + public void blobInfo() { + BlobInfo.Builder b1 = BlobInfo.newBuilder("b", "o"); + // "checkpoint" our builder to allow for comparison + BlobInfo expected = b1.build(); + BlobInfo.Builder b2 = NoOpObjectTargetOpt.INSTANCE.blobInfo().apply(b1); + assertThat(b2.build()).isEqualTo(expected); + assertThat(b2).isSameInstanceAs(b1); + } + } + + public static final class SourceAndTargetOpt { + @Test + public void rewriteObject_decryptAndEncrypt() { + CommonObjectRequestParams source = randEncryption(); + CommonObjectRequestParams dest = randEncryption(); + SecretKeySpec keySource = + new SecretKeySpec( + source.getEncryptionKeyBytes().toByteArray(), source.getEncryptionAlgorithm()); + SecretKeySpec keyDest = + new SecretKeySpec( + dest.getEncryptionKeyBytes().toByteArray(), dest.getEncryptionAlgorithm()); + + RewriteObjectRequest expected = + RewriteObjectRequest.newBuilder() + .setCopySourceEncryptionAlgorithm(source.getEncryptionAlgorithm()) + .setCopySourceEncryptionKeyBytes(source.getEncryptionKeyBytes()) + .setCopySourceEncryptionKeySha256Bytes(source.getEncryptionKeySha256Bytes()) + .setCommonObjectRequestParams(dest) + .build(); + Opts opts = + Opts.from(UnifiedOpts.decryptionKey(keySource), UnifiedOpts.encryptionKey(keyDest)); + Mapper mapper = opts.rewriteObjectsRequest(); + + RewriteObjectRequest actual = mapper.apply(RewriteObjectRequest.newBuilder()).build(); + assertThat(actual).isEqualTo(expected); + } + } + + @SuppressWarnings("unchecked") + public static final class HeadersOptTest { + + @Test + public void duplicateHeaderValues_firstWins_grpc() { + Opts o1 = Opts.from(UnifiedOpts.extraHeaders(ImmutableMap.of("k", "1"))); + Opts o2 = Opts.from(UnifiedOpts.extraHeaders(ImmutableMap.of("k", "2"))); + + Mapper mapper = o1.grpcMetadataMapper().andThen(o2.grpcMetadataMapper()); + + GrpcCallContext grpcCallContext = mapper.apply(GrpcCallContext.createDefault()); + 
assertThat(grpcCallContext.getExtraHeaders()) + .containsExactlyEntriesIn(ImmutableMap.of("k", ImmutableList.of("1"))); + } + + @Test + public void duplicateHeaderValues_firstWins_rpcOptions() { + Opts o1 = + Opts.from( + UnifiedOpts.extraHeaders(ImmutableMap.of("k", "1", "a", "A")), + UnifiedOpts.extraHeaders(ImmutableMap.of("k", "2", "b", "B"))); + + ImmutableMap rpcOptions = o1.getRpcOptions(); + + ImmutableMap extraHeaders = + (ImmutableMap) rpcOptions.get(StorageRpc.Option.EXTRA_HEADERS); + assertThat(extraHeaders).isNotNull(); + assertThat(extraHeaders) + .containsExactlyEntriesIn(ImmutableMap.of("k", "1", "a", "A", "b", "B")); + } + + @Test + public void duplicateHeaderValues_firstWins_keyComparisonIsCaseInsensitive() { + Opts o1 = + Opts.from( + UnifiedOpts.extraHeaders(ImmutableMap.of("K", "1", "a", "A")), + UnifiedOpts.extraHeaders(ImmutableMap.of("k", "2", "b", "B"))); + + ImmutableMap rpcOptions = o1.getRpcOptions(); + + ImmutableMap extraHeaders = + (ImmutableMap) rpcOptions.get(StorageRpc.Option.EXTRA_HEADERS); + assertThat(extraHeaders).isNotNull(); + assertThat(extraHeaders) + .containsExactlyEntriesIn(ImmutableMap.of("k", "1", "a", "A", "b", "B")); + } + + @Test + public void headersOnBlocklistResultInIllegalArgumentException() { + IllegalArgumentException expected = + assertThrows( + IllegalArgumentException.class, + () -> + UnifiedOpts.extraHeaders( + ImmutableMap.builder() + .put("Accept-Encoding", "a") + .put("Cache-Control", "a") + .put("Connection", "a") + .put("Content-ID", "a") + .put("Content-Length", "a") + .put("Content-Range", "a") + .put("Content-Transfer-Encoding", "a") + .put("Content-Type", "a") + .put("Date", "a") + .put("ETag", "a") + .put("If-Match", "a") + .put("If-None-Match", "a") + .put("Keep-Alive", "a") + .put("Range", "a") + .put("TE", "a") + .put("Trailer", "a") + .put("Transfer-Encoding", "a") + .put("User-Agent", "a") + .put("X-Goog-Api-Client", "a") + .put("X-Goog-Content-Length-Range", "a") + 
.put("X-Goog-Copy-Source-Encryption-Algorithm", "a") + .put("X-Goog-Copy-Source-Encryption-Key", "a") + .put("X-Goog-Copy-Source-Encryption-Key-Sha256", "a") + .put("X-Goog-Encryption-Algorithm", "a") + .put("X-Goog-Encryption-Key", "a") + .put("X-Goog-Encryption-Key-Sha256", "a") + .put("X-Goog-Meta-A", "a") + .put("X-Goog-Request-Params", "a") + .put("X-Goog-User-Project", "a") + .put("X-HTTP-Method-Override", "a") + .put("X-Upload-Content-Length", "a") + .put("X-Upload-Content-Type", "a") + .put("A", "a") + .build())); + assertThat(expected) + .hasMessageThat() + .contains( + "[accept-encoding, cache-control, connection, content-id, content-length," + + " content-range, content-transfer-encoding, content-type, date, etag, if-match," + + " if-none-match, keep-alive, range, te, trailer, transfer-encoding, user-agent," + + " x-goog-api-client, x-goog-content-length-range," + + " x-goog-copy-source-encryption-algorithm, x-goog-copy-source-encryption-key," + + " x-goog-copy-source-encryption-key-sha256, x-goog-encryption-algorithm," + + " x-goog-encryption-key, x-goog-encryption-key-sha256, x-goog-meta-a," + + " x-goog-request-params, x-goog-user-project, x-http-method-override," + + " x-upload-content-length, x-upload-content-type]"); + } + } + + private static CommonObjectRequestParams randEncryption() { + byte[] bytes = new byte[32]; + new SecureRandom().nextBytes(bytes); + return CommonObjectRequestParams.newBuilder() + .setEncryptionAlgorithm("AES256") + .setEncryptionKeyBytes(ByteString.copyFrom(bytes)) + .setEncryptionKeySha256Bytes( + ByteString.copyFrom(Hashing.sha256().hashBytes(bytes).asBytes())) + .build(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/UnifiedOptsTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/UnifiedOptsTest.java new file mode 100644 index 000000000000..e5a38b4365ca --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/UnifiedOptsTest.java @@ -0,0 +1,264 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage;

import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth.assertWithMessage;
import static org.junit.Assert.assertThrows;

import com.google.cloud.storage.UnifiedOpts.Crc32cMatch;
import com.google.cloud.storage.UnifiedOpts.DefaultHasherSelector;
import com.google.cloud.storage.UnifiedOpts.HasherSelector;
import com.google.cloud.storage.UnifiedOpts.Md5Match;
import com.google.cloud.storage.UnifiedOpts.ObjectSourceOpt;
import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt;
import com.google.cloud.storage.UnifiedOpts.Opt;
import com.google.cloud.storage.UnifiedOpts.Opts;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.stream.Collectors;
import javax.crypto.SecretKey;
import org.junit.Test;
import org.junit.runners.model.MultipleFailureException;

/**
 * Unit tests for {@link UnifiedOpts}: duplicate-key validation, reflective null-argument checking
 * of every public option factory method, hasher selection precedence, and projection of target
 * opts to source opts via {@code transformTo}.
 */
public final class UnifiedOptsTest {

  @Test
  public void opts_validation_uniqueKeys() {
    // Two opts carrying the same rpc key must be rejected when the rpc options are materialized.
    IllegalArgumentException iae =
        assertThrows(
            IllegalArgumentException.class,
            () ->
                Opts.from(UnifiedOpts.generationMatch(1), UnifiedOpts.generationMatch(2))
                    .getRpcOptions());

    assertThat(iae).hasMessageThat().contains("GENERATION_MATCH");
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_unifiedOpts() throws Exception {
    validateFactoryMethodEnforceNonNull(UnifiedOpts.class, Opt.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_blobGetOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.BlobGetOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_blobListOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.BlobListOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_blobSourceOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.BlobSourceOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_blobTargetOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.BlobTargetOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_blobWriteOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.BlobWriteOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_bucket_blobTargetOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Bucket.BlobTargetOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_bucket_blobWriteOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Bucket.BlobWriteOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_blob_blobSourceOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Blob.BlobSourceOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_bucketGetOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.BucketGetOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_bucketListOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.BucketListOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_bucketSourceOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.BucketSourceOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_bucket_bucketSourceOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Bucket.BucketSourceOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_bucketTargetOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.BucketTargetOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_createHmacKeyOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.CreateHmacKeyOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_deleteHmacKeyOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.DeleteHmacKeyOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_getHmacKeyOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.GetHmacKeyOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_listHmacKeyOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.ListHmacKeysOption.class);
  }

  @Test
  public void validateFactoryMethodEnforceNonNull_storage_updateHmacKeyOption() throws Exception {
    validateFactoryMethodEnforceNonNull(Storage.UpdateHmacKeyOption.class);
  }

  @Test
  public void getHasher_selectsLastValue() {
    // When multiple HasherSelectors are present, the last one specified wins.
    DefaultHasherSelector first = UnifiedOpts.defaultHasherSelector();
    Md5Match second = UnifiedOpts.md5Match("asdf");
    Crc32cMatch third = UnifiedOpts.crc32cMatch(3);
    Opts<?> hasherOpts = Opts.from(first, second, third);

    HasherSelector actual = hasherOpts.getHasherSelector();
    assertThat(actual).isSameInstanceAs(third);
  }

  @Test
  public void hasher_md5Match_noop() {
    assertThat(UnifiedOpts.md5Match("xyz").getHasher()).isEqualTo(Hasher.noop());
  }

  @Test
  public void hasher_crc32cMatch_noop() {
    assertThat(UnifiedOpts.crc32cMatch(77).getHasher()).isEqualTo(Hasher.noop());
  }

  @Test
  public void transformTo() {
    SecretKey key =
        new SecretKey() {
          @Override
          public String getAlgorithm() {
            return "fake";
          }

          @Override
          public String getFormat() {
            return null;
          }

          @Override
          public byte[] getEncoded() {
            return "fake".getBytes(StandardCharsets.UTF_8);
          }
        };

    Opts<ObjectTargetOpt> targetOpts =
        Opts.from(
            // encryptionKey should project as a decryptionKey
            UnifiedOpts.encryptionKey(key),
            // userProject implements both target and source
            UnifiedOpts.userProject("user-project"),
            // contentType is not a source opt or a ProjectToSource opt, it should be excluded
            UnifiedOpts.setContentType("application/octet-stream"));
    Opts<ObjectSourceOpt> sourceOpts = targetOpts.transformTo(ObjectSourceOpt.class);

    Opts<ObjectSourceOpt> expected =
        Opts.from(UnifiedOpts.decryptionKey(key), UnifiedOpts.userProject("user-project"));
    assertThat(sourceOpts).isEqualTo(expected);
  }

  /** Convenience overload: the searched class is also the expected factory return supertype. */
  private static void validateFactoryMethodEnforceNonNull(Class<?> classToSearch) throws Exception {
    validateFactoryMethodEnforceNonNull(classToSearch, classToSearch);
  }

  /**
   * Invokes each single-argument static factory method of {@code classToSearch} (whose return type
   * is assignable to {@code returnSuperType}) with {@code null} and asserts it throws a
   * NullPointerException whose message contains "must be non null". Failures are collected so that
   * every offending method is reported in a single pass rather than stopping at the first one.
   */
  private static void validateFactoryMethodEnforceNonNull(
      Class<?> classToSearch, Class<?> returnSuperType) throws Exception {
    List<Method> methods = findFactoryMethodsWithNullableParam(classToSearch, returnSuperType);
    assertThat(methods).isNotEmpty();
    List<Throwable> errors =
        methods.stream()
            .map(
                m -> {
                  try {
                    String msg =
                        String.format(
                            Locale.US, "Method %s did not throw expected NullPointerException", m);
                    try {
                      m.invoke(null, new Object[] {null});
                      // No exception at all: record a failure for this method.
                      return new AssertionError(msg);
                    } catch (InvocationTargetException e) {
                      // Reflection wraps the factory's exception; unwrap and verify it is the
                      // expected NPE with the expected message.
                      if (e.getCause() instanceof NullPointerException) {
                        NullPointerException cause = (NullPointerException) e.getCause();
                        assertWithMessage(msg)
                            .that(cause)
                            .hasMessageThat()
                            .contains("must be non null");
                        return null;
                      }
                      return new AssertionError(msg, e);
                    } catch (Throwable t) {
                      return new AssertionError(msg, t);
                    }
                  } catch (Throwable e) {
                    return e;
                  }
                })
            .filter(Objects::nonNull)
            .collect(Collectors.toList());
    MultipleFailureException.assertEmpty(errors);
  }

  /**
   * Returns all static methods of {@code classToSearch} that take exactly one non-primitive
   * parameter (primitives can never be null) and return a {@code returnSuperType}.
   */
  private static List<Method> findFactoryMethodsWithNullableParam(
      Class<?> classToSearch, Class<?> returnSuperType) {
    Method[] methods = classToSearch.getDeclaredMethods();
    return Arrays.stream(methods)
        .filter(
            m -> {
              boolean isStatic = Modifier.isStatic(m.getModifiers());
              boolean isOpt = returnSuperType.isAssignableFrom(m.getReturnType());
              boolean hasParam = m.getParameterCount() == 1;
              boolean isParamNonPrimitive = hasParam && !m.getParameterTypes()[0].isPrimitive();
              return isStatic && isOpt && hasParam && isParamNonPrimitive;
            })
        .collect(Collectors.toList());
  }
}
diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/UploadFailureScenarioTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/UploadFailureScenarioTest.java new file mode 100644 index 000000000000..5d54ad0d2cf9 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/UploadFailureScenarioTest.java @@ -0,0 +1,302 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._256KiB; +import static com.google.cloud.storage.ByteSizeConstants._512KiB; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.UploadFailureScenario.SCENARIO_1; +import static com.google.cloud.storage.UploadFailureScenario.isContinue; +import static com.google.cloud.storage.UploadFailureScenario.isOk; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.client.http.EmptyContent; +import com.google.api.client.http.GenericUrl; +import com.google.api.client.http.HttpRequest; +import com.google.api.client.http.HttpResponse; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.client.testing.http.MockHttpTransport; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.ByteString; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import io.grpc.Metadata; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import io.grpc.internal.GrpcUtil; +import java.io.IOException; +import java.math.BigInteger; +import java.nio.charset.StandardCharsets; +import org.junit.Test; + +public final class UploadFailureScenarioTest { + private static final GsonFactory gson = GsonFactory.getDefaultInstance(); + + 
@Test + public void isOk_200() { + assertThat(isOk(200)).isTrue(); + } + + @Test + public void isOk_201() { + assertThat(isOk(201)).isTrue(); + } + + @Test + public void isContinue_308() { + assertThat(isContinue(308)).isTrue(); + } + + @Test + public void toStorageException_ioExceptionDuringContentResolutionAddedAsSuppressed() + throws IOException { + HttpRequest req = + new MockHttpTransport() + .createRequestFactory() + .buildPutRequest(new GenericUrl("http://localhost:80980"), new EmptyContent()); + req.getHeaders().setContentLength(0L).setContentRange(HttpContentRange.of(0).getHeaderValue()); + + HttpResponse resp = req.execute(); + resp.getHeaders().setContentType("text/plain; charset=utf-8").setContentLength(5L); + + StorageException storageException = + UploadFailureScenario.SCENARIO_1.toStorageException( + "uploadId", + resp, + new Cause(), + () -> { + throw new Kaboom(); + }); + + assertThat(storageException.getCode()).isEqualTo(0); + assertThat(storageException).hasCauseThat().isInstanceOf(Cause.class); + assertThat(storageException.getSuppressed()).isNotEmpty(); + assertThat(storageException.getSuppressed()[0]).isInstanceOf(StorageException.class); + assertThat(storageException.getSuppressed()[0]).hasCauseThat().isInstanceOf(Kaboom.class); + } + + @Test + public void multilineResponseBodyIsProperlyPrefixed() throws Exception { + StorageObject so = new StorageObject(); + so.setName("object-name") + .setSize(BigInteger.ZERO) + .setGeneration(1L) + .setMetageneration(2L) + .setMetadata( + ImmutableMap.of( + "k1", "v1", + "k2", "v2")); + final String json = gson.toPrettyString(so); + + byte[] bytes = json.getBytes(StandardCharsets.UTF_8); + HttpRequest req = + new MockHttpTransport() + .createRequestFactory() + .buildPutRequest(new GenericUrl("http://localhost:80980"), new EmptyContent()); + req.getHeaders().setContentLength(0L); + + HttpResponse resp = req.execute(); + resp.getHeaders() + .setContentType("application/json; charset=utf-8") + 
.setContentLength((long) bytes.length); + + StorageException storageException = + UploadFailureScenario.SCENARIO_0.toStorageException("uploadId", resp, null, () -> json); + + assertThat(storageException.getCode()).isEqualTo(0); + assertThat(storageException).hasMessageThat().contains("\t|< \"generation\": \"1\",\n"); + } + + @Test + public void xGoogStoredHeadersIncludedIfPresent() throws IOException { + HttpRequest req = + new MockHttpTransport() + .createRequestFactory() + .buildPutRequest(new GenericUrl("http://localhost:80980"), new EmptyContent()); + req.getHeaders().setContentLength(0L); + + HttpResponse resp = req.execute(); + resp.getHeaders() + .set("X-Goog-Stored-Content-Length", "5") + .set("x-goog-stored-content-encoding", "identity") + .set("X-GOOG-STORED-SOMETHING", "blah") + .setContentLength(0L); + + StorageException storageException = + UploadFailureScenario.SCENARIO_0.toStorageException("uploadId", resp, null, () -> null); + + assertThat(storageException.getCode()).isEqualTo(0); + assertThat(storageException).hasMessageThat().contains("|< x-goog-stored-content-length: 5"); + assertThat(storageException) + .hasMessageThat() + .contains("|< x-goog-stored-content-encoding: identity"); + assertThat(storageException).hasMessageThat().contains("|< x-goog-stored-something: blah"); + } + + @Test + public void xGoogGcsIdempotencyTokenHeadersIncludedIfPresent() throws IOException { + HttpRequest req = + new MockHttpTransport() + .createRequestFactory() + .buildPutRequest(new GenericUrl("http://localhost:80980"), new EmptyContent()); + req.getHeaders().setContentLength(0L); + + HttpResponse resp = req.execute(); + resp.getHeaders().set("X-Goog-Gcs-Idempotency-Token", "5").setContentLength(0L); + + StorageException storageException = + UploadFailureScenario.SCENARIO_0.toStorageException("uploadId", resp, null, () -> null); + + assertThat(storageException.getCode()).isEqualTo(0); + assertThat(storageException).hasMessageThat().contains("|< 
x-goog-gcs-idempotency-token: 5"); + } + + @Test + public void grpc_response() throws Exception { + ChecksummedTestContent content = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(_256KiB)); + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId("uploadId") + .setChecksummedData( + ChecksummedData.newBuilder() + .setContent( + ByteString.copyFrom(DataGenerator.base64Characters().genBytes(_256KiB))) + .build()) + .build(); + WriteObjectRequest req2 = + WriteObjectRequest.newBuilder() + .setWriteOffset(_256KiB) + .setChecksummedData( + ChecksummedData.newBuilder() + .setContent(ByteString.copyFrom(content.getBytes())) + .setCrc32C(content.getCrc32c()) + .build()) + .build(); + WriteObjectRequest req3 = + WriteObjectRequest.newBuilder() + .setWriteOffset(_512KiB) + .setFinishWrite(true) + .setObjectChecksums( + ObjectChecksums.newBuilder() + .setCrc32C(345) + .setMd5Hash(ByteString.copyFromUtf8("asdf")) + .build()) + .build(); + WriteObjectResponse resp1 = + WriteObjectResponse.newBuilder() + .setResource(Object.newBuilder().setName("obj").setSize(_512KiB).build()) + .build(); + GrpcCallContext context = + GrpcCallContext.createDefault() + .withExtraHeaders( + ImmutableMap.of( + "x-goog-request-params", + ImmutableList.of("bucket=projects/_/bucket/bucket-name"))); + StorageException se = + SCENARIO_1.toStorageException(ImmutableList.of(req1, req2, req3), resp1, context, null); + assertAll( + () -> + assertThat(se) + .hasMessageThat() + .contains("x-goog-request-params: bucket=projects/_/bucket/bucket-name"), + () -> assertThat(se).hasMessageThat().contains("upload_id: "), + () -> assertThat(se).hasMessageThat().contains("0:262144"), + () -> assertThat(se).hasMessageThat().contains(", crc32c: "), // from ChecksummedData + () -> assertThat(se).hasMessageThat().contains("write_offset: "), + () -> assertThat(se).hasMessageThat().contains("finish_write: "), + () -> assertThat(se).hasMessageThat().contains("object_checksums: "), 
+ () -> assertThat(se).hasMessageThat().contains("crc32c: "), // from object_checksums + () -> assertThat(se).hasMessageThat().contains("md5_hash: "), + () -> assertThat(se).hasMessageThat().contains("resource {")); + } + + @Test + public void grpc_apiException() throws Exception { + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId("uploadId") + .setChecksummedData( + ChecksummedData.newBuilder() + .setContent( + ByteString.copyFrom(DataGenerator.base64Characters().genBytes(_256KiB))) + .build()) + .build(); + GrpcCallContext context = Retrying.newCallContext(); + Code code = Code.FAILED_PRECONDITION; + Metadata trailers = new Metadata(); + trailers.put(GrpcUtil.USER_AGENT_KEY, "test-class/"); + StatusRuntimeException statusRuntimeException = + code.toStatus().withDescription("precondition did not hold").asRuntimeException(trailers); + ApiException apiException = + ApiExceptionFactory.createException(statusRuntimeException, GrpcStatusCode.of(code), true); + + StorageException se = + SCENARIO_1.toStorageException(ImmutableList.of(req1), null, context, apiException); + assertAll( + () -> assertThat(se).hasMessageThat().contains("upload_id: "), + () -> assertThat(se).hasMessageThat().contains("0:262144"), + () -> assertThat(se).hasMessageThat().doesNotContain("WriteObjectResponse"), + () -> assertThat(se).hasMessageThat().contains("Status{code=FAILED_PRECONDITION"), + () -> assertThat(se).hasMessageThat().contains("user-agent=test-class/")); + } + + @Test + public void grpc_nonApiException() throws Exception { + WriteObjectRequest req1 = + WriteObjectRequest.newBuilder() + .setUploadId("uploadId") + .setChecksummedData( + ChecksummedData.newBuilder() + .setContent( + ByteString.copyFrom(DataGenerator.base64Characters().genBytes(_256KiB))) + .build()) + .build(); + GrpcCallContext context = Retrying.newCallContext(); + Cause cause = new Cause(); + StorageException se = + SCENARIO_1.toStorageException(ImmutableList.of(req1), null, context, 
cause); + assertAll( + () -> assertThat(se).hasMessageThat().contains("upload_id: "), + () -> assertThat(se).hasMessageThat().contains("0:262144"), + () -> assertThat(se).hasMessageThat().doesNotContain("WriteObjectResponse")); + } + + private static final class Cause extends RuntimeException { + + private Cause() { + super("Cause"); + } + } + + private static final class Kaboom extends IOException { + + private Kaboom() { + super("Kaboom!!!"); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/V4PostPolicyTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/V4PostPolicyTest.java new file mode 100644 index 000000000000..b01a0783f6cc --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/V4PostPolicyTest.java @@ -0,0 +1,244 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import com.google.api.core.ApiClock; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.conformance.storage.v1.PolicyConditions; +import com.google.cloud.conformance.storage.v1.PolicyInput; +import com.google.cloud.conformance.storage.v1.PostPolicyV4Test; +import com.google.cloud.conformance.storage.v1.TestFile; +import com.google.cloud.conformance.storage.v1.UrlStyle; +import com.google.cloud.storage.testing.RemoteStorageHelper; +import com.google.common.base.Charsets; +import com.google.common.io.BaseEncoding; +import com.google.protobuf.Timestamp; +import com.google.protobuf.util.JsonFormat; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class V4PostPolicyTest { + + private static final String SERVICE_ACCOUNT_JSON_RESOURCE = + "com/google/cloud/conformance/storage/v1/test_service_account.not-a-test.json"; + private static final String TEST_DATA_JSON_RESOURCE = + "com/google/cloud/conformance/storage/v1/v4_signatures.json"; + + private static class FakeClock implements ApiClock { + private final AtomicLong currentNanoTime; + + public FakeClock(Timestamp timestamp) { + this.currentNanoTime = + new AtomicLong( + TimeUnit.NANOSECONDS.convert(timestamp.getSeconds(), TimeUnit.SECONDS) + + timestamp.getNanos()); + } + + public long nanoTime() { + return this.currentNanoTime.get(); + } + + public 
long millisTime() { + return TimeUnit.MILLISECONDS.convert(this.nanoTime(), TimeUnit.NANOSECONDS); + } + } + + @Rule public TestName testName = new TestName(); + + private final PostPolicyV4Test testData; + private final ServiceAccountCredentials serviceAccountCredentials; + + /** + * @param testData the serialized test data representing the test case. + * @param serviceAccountCredentials The credentials to use in this test. + * @param description Not used by the test, but used by the parameterized test runner as the name + * of the test. + */ + public V4PostPolicyTest( + PostPolicyV4Test testData, + ServiceAccountCredentials serviceAccountCredentials, + @SuppressWarnings("unused") String description) { + this.testData = testData; + this.serviceAccountCredentials = serviceAccountCredentials; + } + + @Test + public void test() { + Storage storage = + RemoteStorageHelper.create().getOptions().toBuilder() + .setCredentials(serviceAccountCredentials) + .setClock(new FakeClock(testData.getPolicyInput().getTimestamp())) + .build() + .getService(); + + BlobInfo blob = + BlobInfo.newBuilder( + testData.getPolicyInput().getBucket(), testData.getPolicyInput().getObject()) + .build(); + + PolicyInput policyInput = testData.getPolicyInput(); + PostPolicyV4.PostConditionsV4.Builder builder = PostPolicyV4.PostConditionsV4.newBuilder(); + + Map fields = policyInput.getFieldsMap(); + + PolicyConditions conditions = policyInput.getConditions(); + + if (conditions != null) { + if (!conditions.getStartsWithList().isEmpty()) { + builder.addCustomCondition( + PostPolicyV4.ConditionV4Type.STARTS_WITH, + conditions.getStartsWith(0).replace("$", ""), + conditions.getStartsWith(1)); + } + if (!conditions.getContentLengthRangeList().isEmpty()) { + builder.addContentLengthRangeCondition( + conditions.getContentLengthRange(0), conditions.getContentLengthRange(1)); + } + } + + PostPolicyV4.PostFieldsV4 v4Fields = PostPolicyV4.PostFieldsV4.of(fields); + + Storage.PostPolicyV4Option style = 
Storage.PostPolicyV4Option.withPathStyle(); + + if (policyInput.getUrlStyle().equals(UrlStyle.VIRTUAL_HOSTED_STYLE)) { + style = Storage.PostPolicyV4Option.withVirtualHostedStyle(); + } else if (policyInput.getUrlStyle().equals(UrlStyle.PATH_STYLE)) { + style = Storage.PostPolicyV4Option.withPathStyle(); + } else if (policyInput.getUrlStyle().equals(UrlStyle.BUCKET_BOUND_HOSTNAME)) { + style = + Storage.PostPolicyV4Option.withBucketBoundHostname( + policyInput.getBucketBoundHostname(), + Storage.UriScheme.valueOf(policyInput.getScheme().toUpperCase())); + } + + PostPolicyV4 policy = + storage.generateSignedPostPolicyV4( + blob, + testData.getPolicyInput().getExpiration(), + TimeUnit.SECONDS, + v4Fields, + builder.build(), + style); + + String expectedPolicy = testData.getPolicyOutput().getExpectedDecodedPolicy(); + + StringBuilder escapedPolicy = new StringBuilder(); + + // Java automatically unescapes the unicode escapes in the conformance tests, so we need to + // manually re-escape them + char[] expectedPolicyArray = expectedPolicy.toCharArray(); + for (int i = 0; i < expectedPolicyArray.length; i++) { + char c = expectedPolicyArray[i]; + if (c >= 128) { + escapedPolicy.append(String.format(Locale.US, "\\u%04x", (int) c)); + } else { + switch (c) { + case '\\': + // quotes aren't unescaped, so leave any "\"" found alone + if (expectedPolicyArray[i + 1] == '"') { + escapedPolicy.append("\\"); + } else { + escapedPolicy.append("\\\\"); + } + break; + case '\b': + escapedPolicy.append("\\b"); + break; + case '\f': + escapedPolicy.append("\\f"); + break; + case '\n': + escapedPolicy.append("\\n"); + break; + case '\r': + escapedPolicy.append("\\r"); + break; + case '\t': + escapedPolicy.append("\\t"); + break; + case '\u000b': + escapedPolicy.append("\\v"); + break; + default: + escapedPolicy.append(c); + } + } + } + assertEquals(testData.getPolicyOutput().getFieldsMap(), policy.getFields()); + + assertEquals( + escapedPolicy.toString(), + new 
String(BaseEncoding.base64().decode(policy.getFields().get("policy")))); + assertEquals(testData.getPolicyOutput().getUrl(), policy.getUrl()); + } + + /** + * Loads all of the tests and return a {@code Collection} representing the set of tests. + * Each entry in the returned collection is the set of parameters to the constructor of this test + * class. + * + *

The results of this method will then be run by JUnit's Parameterized test runner + */ + @Parameters(name = "{2}") + public static Collection testCases() throws IOException { + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + + InputStream credentialsStream = cl.getResourceAsStream(SERVICE_ACCOUNT_JSON_RESOURCE); + assertNotNull( + String.format( + Locale.US, "Unable to load service account json: %s", SERVICE_ACCOUNT_JSON_RESOURCE), + credentialsStream); + + InputStream dataJson = cl.getResourceAsStream(TEST_DATA_JSON_RESOURCE); + assertNotNull( + String.format(Locale.US, "Unable to load test definition: %s", TEST_DATA_JSON_RESOURCE), + dataJson); + + ServiceAccountCredentials serviceAccountCredentials = + ServiceAccountCredentials.fromStream(credentialsStream); + + InputStreamReader reader = new InputStreamReader(dataJson, Charsets.UTF_8); + TestFile.Builder testBuilder = TestFile.newBuilder(); + JsonFormat.parser().merge(reader, testBuilder); + TestFile testFile = testBuilder.build(); + + List tests = testFile.getPostPolicyV4TestsList(); + ArrayList data = new ArrayList<>(tests.size()); + for (PostPolicyV4Test test : tests) { + data.add(new Object[] {test, serviceAccountCredentials, test.getDescription()}); + } + return data; + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/V4SigningTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/V4SigningTest.java new file mode 100644 index 000000000000..6a9c8d5291dc --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/V4SigningTest.java @@ -0,0 +1,230 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertNotNull; + +import com.google.api.core.ApiClock; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.Tuple; +import com.google.cloud.conformance.storage.v1.SigningV4Test; +import com.google.cloud.conformance.storage.v1.TestFile; +import com.google.cloud.conformance.storage.v1.UrlStyle; +import com.google.cloud.storage.Storage.SignUrlOption; +import com.google.cloud.storage.testing.RemoteStorageHelper; +import com.google.common.base.Charsets; +import com.google.protobuf.Timestamp; +import com.google.protobuf.util.JsonFormat; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class V4SigningTest { + + private static final String SERVICE_ACCOUNT_JSON_RESOURCE = + "com/google/cloud/conformance/storage/v1/test_service_account.not-a-test.json"; + private static final String 
TEST_DATA_JSON_RESOURCE = + "com/google/cloud/conformance/storage/v1/v4_signatures.json"; + + private static class FakeClock implements ApiClock { + private final AtomicLong currentNanoTime; + + public FakeClock(Timestamp timestamp) { + this.currentNanoTime = + new AtomicLong( + TimeUnit.NANOSECONDS.convert(timestamp.getSeconds(), TimeUnit.SECONDS) + + timestamp.getNanos()); + } + + public long nanoTime() { + return this.currentNanoTime.get(); + } + + public long millisTime() { + return TimeUnit.MILLISECONDS.convert(this.nanoTime(), TimeUnit.NANOSECONDS); + } + } + + @Rule public TestName testName = new TestName(); + + private final SigningV4Test testData; + private final ServiceAccountCredentials serviceAccountCredentials; + + /** + * @param testData The serialized test data representing the test case. + * @param serviceAccountCredentials The credentials to use in this test. + * @param description Not used by the test, but used by the parameterized test runner as the name + * of the test. 
+ */ + public V4SigningTest( + SigningV4Test testData, + ServiceAccountCredentials serviceAccountCredentials, + @SuppressWarnings("unused") String description) { + this.testData = testData; + this.serviceAccountCredentials = serviceAccountCredentials; + } + + @Test + public void test() { + Storage storage = + RemoteStorageHelper.create().getOptions().toBuilder() + .setCredentials(serviceAccountCredentials) + .setClock(new FakeClock(testData.getTimestamp())) + .build() + .getService(); + + BlobInfo blob = BlobInfo.newBuilder(testData.getBucket(), testData.getObject()).build(); + + SignUrlOption style = SignUrlOption.withPathStyle(); + + if (testData.getUrlStyle().equals(UrlStyle.VIRTUAL_HOSTED_STYLE)) { + style = SignUrlOption.withVirtualHostedStyle(); + } else if (testData.getUrlStyle().equals(UrlStyle.PATH_STYLE)) { + style = SignUrlOption.withPathStyle(); + } else if (testData.getUrlStyle().equals(UrlStyle.BUCKET_BOUND_HOSTNAME)) { + style = + SignUrlOption.withBucketBoundHostname( + testData.getBucketBoundHostname(), + Storage.UriScheme.valueOf(testData.getScheme().toUpperCase())); + } + + final String signedUrl = + storage + .signUrl( + blob, + testData.getExpiration(), + TimeUnit.SECONDS, + SignUrlOption.httpMethod(HttpMethod.valueOf(testData.getMethod())), + SignUrlOption.withExtHeaders(testData.getHeadersMap()), + SignUrlOption.withV4Signature(), + SignUrlOption.withQueryParams(testData.getQueryParametersMap()), + style) + .toString(); + SmarterUrl expected = SmarterUrl.of(URI.create(testData.getExpectedUrl())); + SmarterUrl actual = SmarterUrl.of(URI.create(signedUrl)); + assertThat(actual).isEqualTo(expected); + } + + /** + * Load all of the tests and return a {@code Collection} representing the set of tests. + * Each entry in the returned collection is the set of parameters to the constructor of this test + * class. + * + *

The results of this method will then be run by JUnit's Parameterized test runner + */ + @Parameters(name = "{2}") + public static Collection testCases() throws IOException { + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + + InputStream credentialsStream = cl.getResourceAsStream(SERVICE_ACCOUNT_JSON_RESOURCE); + assertNotNull( + String.format( + Locale.US, "Unable to load service account json: %s", SERVICE_ACCOUNT_JSON_RESOURCE), + credentialsStream); + + InputStream dataJson = cl.getResourceAsStream(TEST_DATA_JSON_RESOURCE); + assertNotNull( + String.format(Locale.US, "Unable to load test definition: %s", TEST_DATA_JSON_RESOURCE), + dataJson); + + ServiceAccountCredentials serviceAccountCredentials = + ServiceAccountCredentials.fromStream(credentialsStream); + + InputStreamReader reader = new InputStreamReader(dataJson, Charsets.UTF_8); + TestFile.Builder testBuilder = TestFile.newBuilder(); + JsonFormat.parser().merge(reader, testBuilder); + TestFile testFile = testBuilder.build(); + + List tests = testFile.getSigningV4TestsList(); + ArrayList data = new ArrayList<>(tests.size()); + for (SigningV4Test test : tests) { + data.add(new Object[] {test, serviceAccountCredentials, test.getDescription()}); + } + return data; + } + + /** + * Equals on {@link URI} or {@link java.net.URL} perform string comparison on the full query + * string. However, query strings are not order dependent. This class essentially provides a + * smarter equals and hashcode for a url taking into account a query string is not order + * dependent. 
+ */ + private static final class SmarterUrl { + private final String path; + private final Map queryStringParameters; + + private SmarterUrl(String path, Map queryStringParameters) { + this.path = path; + this.queryStringParameters = queryStringParameters; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof SmarterUrl)) { + return false; + } + SmarterUrl that = (SmarterUrl) o; + return Objects.equals(path, that.path) + && Objects.equals(queryStringParameters, that.queryStringParameters); + } + + @Override + public int hashCode() { + return Objects.hash(path, queryStringParameters); + } + + private static SmarterUrl of(URI uri) { + String path = uri.getRawPath(); + String rawQuery = uri.getRawQuery(); + String[] split = rawQuery.split("&"); + Map queryStringParameters = + Arrays.stream(split) + .map( + qp -> { + // use indexOf instead of split, just in case an equals is part of the value + int i = qp.indexOf('='); + String k = qp.substring(0, i); + String v = qp.substring(i + 1); + return Tuple.of(k, v); + }) + .collect(Collectors.toMap(Tuple::x, Tuple::y)); + return new SmarterUrl(path, queryStringParameters); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/XGoogApiClientHeaderProviderTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/XGoogApiClientHeaderProviderTest.java new file mode 100644 index 000000000000..29b3f456ed71 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/XGoogApiClientHeaderProviderTest.java @@ -0,0 +1,112 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.storage.v2.StorageSettings; +import java.util.Map; +import org.junit.Test; + +public final class XGoogApiClientHeaderProviderTest { + + @Test + public void union_ignoreKeyCase() { + ImmutableMap l = ImmutableMap.of("a", "1"); + ImmutableMap r = ImmutableMap.of("A", "2"); + + ImmutableMap union = XGoogApiClientHeaderProvider.union(l, r); + assertThat(union).isEqualTo(ImmutableMap.of("a", "1 2")); + } + + @Test + public void union_diff_full() { + ImmutableMap l = ImmutableMap.of("a", "1"); + ImmutableMap r = ImmutableMap.of("b", "2"); + + ImmutableMap union = XGoogApiClientHeaderProvider.union(l, r); + assertThat(union).isEqualTo(ImmutableMap.of("a", "1", "b", "2")); + } + + @Test + public void union_equal() { + ImmutableMap l = ImmutableMap.of("a", "1"); + ImmutableMap r = ImmutableMap.of("a", "1"); + + ImmutableMap union = XGoogApiClientHeaderProvider.union(l, r); + assertThat(union).isEqualTo(ImmutableMap.of("a", "1")); + } + + @Test + public void union_equal_ignoreCase() { + ImmutableMap l = ImmutableMap.of("a", "1"); + ImmutableMap r = ImmutableMap.of("A", "1"); + + ImmutableMap union = XGoogApiClientHeaderProvider.union(l, r); + assertThat(union).isEqualTo(ImmutableMap.of("a", "1")); + } + + @Test + public void union_mixed() { + ImmutableMap l = ImmutableMap.of("a", "1", 
"b", "5", "d", "22"); + ImmutableMap r = ImmutableMap.of("A", "2", "c", "300", "d", "22"); + + ImmutableMap union = XGoogApiClientHeaderProvider.union(l, r); + assertThat(union).isEqualTo(ImmutableMap.of("a", "1 2", "b", "5", "c", "300", "d", "22")); + } + + @Test + public void getHeaders_sameInstance() { + XGoogApiClientHeaderProvider provider = + XGoogApiClientHeaderProvider.of( + () -> ImmutableMap.of("x-goog-api-client", "java-storage"), + ImmutableList.of("some/thing")); + + Map headers1 = provider.getHeaders(); + Map headers2 = provider.getHeaders(); + assertThat(headers2).isSameInstanceAs(headers1); + + assertThat(headers1).isEqualTo(ImmutableMap.of("x-goog-api-client", "java-storage some/thing")); + } + + @Test + public void emptyAdditionalEntries() { + ImmutableMap base = ImmutableMap.of("x-goog-api-client", "java-storage"); + XGoogApiClientHeaderProvider provider = + XGoogApiClientHeaderProvider.of(() -> base, ImmutableList.of()); + + Map headers1 = provider.getHeaders(); + assertThat(headers1).isSameInstanceAs(base); + } + + @Test + public void apiClientProvider() { + ApiClientHeaderProvider base = StorageSettings.defaultApiClientHeaderProviderBuilder().build(); + XGoogApiClientHeaderProvider provider = + XGoogApiClientHeaderProvider.of(base, ImmutableList.of("some/thing")); + + Map headers = provider.getHeaders(); + assertThat(headers).isNotNull(); + + assertThat(headers).containsKey("x-goog-api-client"); + assertThat(headers.get("x-goog-api-client")).contains("gl-java/"); + assertThat(headers.get("x-goog-api-client")).contains("some/thing"); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/XmlObjectParserTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/XmlObjectParserTest.java new file mode 100644 index 000000000000..f3619e77be55 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/XmlObjectParserTest.java @@ -0,0 +1,181 @@ +/* + * 
Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.fasterxml.jackson.dataformat.xml.XmlMapper; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.google.cloud.storage.multipartupload.model.ListPartsResponse; +import com.google.common.base.MoreObjects; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.StringReader; +import java.nio.charset.StandardCharsets; +import java.util.Objects; +import org.junit.Before; +import org.junit.Test; + +public class XmlObjectParserTest { + + private XmlObjectParser xmlObjectParser; + + @Before + public void setUp() { + xmlObjectParser = new XmlObjectParser(new XmlMapper()); + } + + @Test + public void testParseStringValueEnum() throws IOException { + // language=xml + String xml = + "\n" + " STANDARD" + ""; + InputStream in = new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)); + TestXmlObject2 expected = new TestXmlObject2(StorageClass.STANDARD); + TestXmlObject2 actual = + xmlObjectParser.parseAndClose(in, StandardCharsets.UTF_8, TestXmlObject2.class); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void testParseDoesNotFailOnUnknownFields() throws IOException { + // language=xml + String xml = + "\n" + + " STANDARD" + + " blah" + + ""; + InputStream in = new 
ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)); + TestXmlObject2 expected = new TestXmlObject2(StorageClass.STANDARD); + TestXmlObject2 actual = + xmlObjectParser.parseAndClose(in, StandardCharsets.UTF_8, TestXmlObject2.class); + assertThat(actual).isEqualTo(expected); + } + + @Test + public void testNestedParseStringValueEnum_undefined() throws IOException { + // language=xml + String xml = + "\n" + + " false\n" + + " bucket\n" + + " key\n" + + " \n" + + " 0\n" + + " 0\n" + + " 0\n" + + " false\n" + + " \n" + + " 1\n" + + " etag\n" + + " 33\n" + + " \n" + + " \n" + + ""; + ListPartsResponse listPartsResponse = + xmlObjectParser.parseAndClose(new StringReader(xml), ListPartsResponse.class); + assertThat(listPartsResponse.storageClass()).isNull(); + } + + @Test + public void testNestedParseStringValueEnum_null() throws IOException { + // language=xml + String xml = + "\n" + + " false\n" + + " bucket\n" + + " key\n" + + " \n" + + " 0\n" + + " 0\n" + + " 0\n" + + " false\n" + + " " + + " \n" + + " 1\n" + + " etag\n" + + " 33\n" + + " \n" + + " \n" + + ""; + ListPartsResponse listPartsResponse = + xmlObjectParser.parseAndClose(new StringReader(xml), ListPartsResponse.class); + assertThat(listPartsResponse.storageClass()).isNull(); + } + + @Test + public void testNestedParseStringValueEnum_nonNull() throws IOException { + // language=xml + String xml = + "\n" + + " false\n" + + " bucket\n" + + " key\n" + + " \n" + + " 0\n" + + " 0\n" + + " 0\n" + + " false\n" + + " STANDARD" + + " \n" + + " 1\n" + + " etag\n" + + " 33\n" + + " \n" + + " \n" + + ""; + ListPartsResponse listPartsResponse = + xmlObjectParser.parseAndClose(new StringReader(xml), ListPartsResponse.class); + assertThat(listPartsResponse.storageClass()).isEqualTo(StorageClass.STANDARD); + } + + private static class TestXmlObject {} + + private static final class TestXmlObject2 { + @JacksonXmlProperty(localName = "storageClass") + private StorageClass storageClass; + + private TestXmlObject2() {} + 
+ public TestXmlObject2(StorageClass storageClass) { + this.storageClass = storageClass; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof TestXmlObject2)) { + return false; + } + TestXmlObject2 that = (TestXmlObject2) o; + return Objects.equals(storageClass, that.storageClass); + } + + @Override + public int hashCode() { + return Objects.hashCode(storageClass); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("storageClass", storageClass).toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ZeroCopyMarshallerTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ZeroCopyMarshallerTest.java new file mode 100644 index 000000000000..2cafae7c3664 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/ZeroCopyMarshallerTest.java @@ -0,0 +1,355 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.getChecksummedData; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.storage.GrpcStorageOptions.ZeroCopyResponseMarshaller; +import com.google.cloud.storage.ZeroCopySupport.DisposableByteString; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.collect.ImmutableList; +import com.google.common.hash.Hashing; +import com.google.protobuf.ByteString; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.ContentRange; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.ReadObjectResponse; +import io.grpc.Detachable; +import io.grpc.HasByteBuffer; +import io.grpc.KnownLength; +import io.grpc.StatusRuntimeException; +import io.grpc.internal.ReadableBuffer; +import io.grpc.internal.ReadableBuffers; +import java.io.Closeable; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import org.junit.Test; + +@SuppressWarnings({"rawtypes", "resource", "unchecked"}) +public class ZeroCopyMarshallerTest { + private final byte[] bytes = DataGenerator.base64Characters().genBytes(40); + private final ByteString data = ByteString.copyFrom(bytes, 0, 10); + private final ReadObjectResponse response = + ReadObjectResponse.newBuilder() + .setMetadata( + Object.newBuilder() + .setName("name") + .setGeneration(3L) + .setContentType("application/octet-stream") + .build()) + 
.setContentRange(ContentRange.newBuilder().setStart(0).build()) + .setObjectChecksums( + ObjectChecksums.newBuilder().setCrc32C(Hashing.crc32c().hashBytes(bytes).asInt())) + .setChecksummedData(getChecksummedData(data, Hasher.enabled())) + .build(); + + private ZeroCopyResponseMarshaller createMarshaller() { + return new ZeroCopyResponseMarshaller<>(ReadObjectResponse.getDefaultInstance()); + } + + private byte[] dropLastOneByte(byte[] bytes) { + return Arrays.copyOfRange(bytes, 0, bytes.length - 1); + } + + private IS createInputStream( + byte[] bytes, boolean isZeroCopyable) { + ReadableBuffer buffer = + isZeroCopyable ? ReadableBuffers.wrap(ByteBuffer.wrap(bytes)) : ReadableBuffers.wrap(bytes); + return (IS) ReadableBuffers.openStream(buffer, true); + } + + @Test + public void testParseOnFastPath() throws IOException { + InputStream stream = createInputStream(response.toByteArray(), true); + ZeroCopyResponseMarshaller marshaller = createMarshaller(); + ReadObjectResponse response = marshaller.parse(stream); + assertEquals(response, this.response); + ResponseContentLifecycleHandle stream2 = marshaller.get(response); + assertNotNull(stream2); + stream2.close(); + ResponseContentLifecycleHandle stream3 = marshaller.get(response); + assertNotNull(stream3); + stream3.close(); + } + + @Test + public void testParseOnSlowPath() throws IOException { + InputStream stream = createInputStream(response.toByteArray(), false); + ZeroCopyResponseMarshaller marshaller = createMarshaller(); + ReadObjectResponse response = marshaller.parse(stream); + assertEquals(response, this.response); + ResponseContentLifecycleHandle stream2 = marshaller.get(response); + assertNotNull(stream2); + stream2.close(); + } + + @Test + public void testParseBrokenMessageOnFastPath() { + InputStream stream = createInputStream(dropLastOneByte(response.toByteArray()), true); + ZeroCopyResponseMarshaller marshaller = createMarshaller(); + assertThrows( + StatusRuntimeException.class, + () -> { + 
marshaller.parse(stream); + }); + } + + @Test + public void testParseBrokenMessageOnSlowPath() { + InputStream stream = createInputStream(dropLastOneByte(response.toByteArray()), false); + ZeroCopyResponseMarshaller marshaller = createMarshaller(); + assertThrows( + StatusRuntimeException.class, + () -> { + marshaller.parse(stream); + }); + } + + @Test + public void testResponseContentLifecycleHandle() throws IOException { + AtomicBoolean wasClosedCalled = new AtomicBoolean(false); + Closeable verifyClosed = () -> wasClosedCalled.set(true); + + ResponseContentLifecycleHandle handle = + ResponseContentLifecycleHandle.create(response, verifyClosed); + handle.close(); + + assertTrue(wasClosedCalled.get()); + + ResponseContentLifecycleHandle nullHandle = + ResponseContentLifecycleHandle.create(response, null); + nullHandle.close(); + // No NullPointerException means test passes + } + + @Test + public void testMarshallerClose_clean() throws IOException { + CloseAuditingInputStream stream1 = + CloseAuditingInputStream.of(createInputStream(response.toByteArray(), true)); + CloseAuditingInputStream stream2 = + CloseAuditingInputStream.of(createInputStream(response.toByteArray(), true)); + CloseAuditingInputStream stream3 = + CloseAuditingInputStream.of(createInputStream(response.toByteArray(), true)); + + GrpcUtils.closeAll(ImmutableList.of(stream1, stream2, stream3)); + + assertThat(stream1.closed).isTrue(); + assertThat(stream2.closed).isTrue(); + assertThat(stream3.closed).isTrue(); + } + + @SuppressWarnings("resource") + @Test + public void testMarshallerClose_multipleIoExceptions() { + CloseAuditingInputStream stream1 = + new CloseAuditingInputStream(null) { + @Override + void onClose() throws IOException { + throw new IOException("Kaboom stream1"); + } + }; + CloseAuditingInputStream stream2 = + new CloseAuditingInputStream(null) { + @Override + void onClose() throws IOException { + throw new IOException("Kaboom stream2"); + } + }; + CloseAuditingInputStream stream3 
= + new CloseAuditingInputStream(null) { + @Override + void onClose() throws IOException { + throw new IOException("Kaboom stream3"); + } + }; + + IOException ioException = + assertThrows( + IOException.class, + () -> GrpcUtils.closeAll(ImmutableList.of(stream1, stream2, stream3))); + + assertThat(stream1.closed).isTrue(); + assertThat(stream2.closed).isTrue(); + assertThat(stream3.closed).isTrue(); + + assertThat(ioException).hasMessageThat().isEqualTo("Kaboom stream1"); + List messages = + Arrays.stream(ioException.getSuppressed()) + .map(Throwable::getMessage) + .collect(Collectors.toList()); + assertThat(messages).isEqualTo(ImmutableList.of("Kaboom stream2", "Kaboom stream3")); + } + + @Test + public void refCounting_closingLastBorrowedChildRefShouldCloseHandleWhenHandlePreviouslyClosed() + throws IOException { + try (ZeroCopyResponseMarshaller marshaller = + new ZeroCopyResponseMarshaller<>(ChecksummedData.getDefaultInstance())) { + + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(17)); + + ChecksummedData orig = testContent.asChecksummedData(); + + // load our proto message into the marshaller + byte[] serialized = orig.toByteArray(); + CloseAuditingInputStream inputStream = + CloseAuditingInputStream.of(createInputStream(serialized, true)); + + ChecksummedData parsed = marshaller.parse(inputStream); + assertThat(inputStream.closed).isFalse(); + + // now get the lifecycle management handle for the parsed instance + ResponseContentLifecycleHandle handle = marshaller.get(parsed); + assertThat(inputStream.closed).isFalse(); + + DisposableByteString ref1 = handle.borrow(ChecksummedData::getContent); + DisposableByteString ref2 = handle.borrow(ChecksummedData::getContent); + DisposableByteString ref3 = handle.borrow(ChecksummedData::getContent); + assertThat(inputStream.closed).isFalse(); + handle.close(); + assertThat(inputStream.closed).isFalse(); + ref1.close(); + 
assertThat(inputStream.closed).isFalse(); + ref2.close(); + assertThat(inputStream.closed).isFalse(); + ref3.close(); + assertThat(inputStream.closed).isTrue(); + } + } + + @Test + public void refCounting_mustBeOpenToBorrow() throws IOException { + try (ZeroCopyResponseMarshaller marshaller = + new ZeroCopyResponseMarshaller<>(ChecksummedData.getDefaultInstance())) { + + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(17)); + + ChecksummedData orig = testContent.asChecksummedData(); + + // load our proto message into the marshaller + byte[] serialized = orig.toByteArray(); + CloseAuditingInputStream inputStream = + CloseAuditingInputStream.of(createInputStream(serialized, true)); + + ChecksummedData parsed = marshaller.parse(inputStream); + assertThat(inputStream.closed).isFalse(); + + // now get the lifecycle management handle for the parsed instance + ResponseContentLifecycleHandle handle = marshaller.get(parsed); + handle.close(); + assertThat(inputStream.closed).isTrue(); + + assertThrows(IllegalStateException.class, () -> handle.borrow(ChecksummedData::getContent)); + } + } + + @SuppressWarnings({"rawtypes", "unchecked"}) + @Test + public void refCounting_handleCloseOnlyHappensIfOpen() throws IOException { + try (ZeroCopyResponseMarshaller marshaller = + new ZeroCopyResponseMarshaller<>(ChecksummedData.getDefaultInstance())) { + + AtomicInteger closeCount = new AtomicInteger(0); + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(17)); + + ChecksummedData orig = testContent.asChecksummedData(); + + // load our proto message into the marshaller + byte[] serialized = orig.toByteArray(); + CloseAuditingInputStream inputStream = + new CloseAuditingInputStream(createInputStream(serialized, true)) { + @Override + public void close() throws IOException { + closeCount.getAndIncrement(); + super.close(); + } + }; + + ChecksummedData parsed = 
marshaller.parse(inputStream); + assertThat(inputStream.closed).isFalse(); + + // now get the lifecycle management handle for the parsed instance + ResponseContentLifecycleHandle handle = marshaller.get(parsed); + handle.close(); + assertThat(inputStream.closed).isTrue(); + handle.close(); + assertThat(closeCount.get()).isEqualTo(1); + } + } + + // gRPC doesn't have a public InputStream subclass that implements all of these interfaces + // use generics to constrain things using multiple inheritance notation. Then, our class + // implements the same interfaces to allow use within zero-copy marshaller. + private static class CloseAuditingInputStream< + IS extends InputStream & KnownLength & Detachable & HasByteBuffer> + extends FilterInputStream implements KnownLength, Detachable, HasByteBuffer { + + private boolean closed = false; + private final IS delegate; + + private CloseAuditingInputStream(IS in) { + super(in); + this.delegate = in; + } + + @Override + public InputStream detach() { + return this; + } + + @Override + public boolean byteBufferSupported() { + return delegate.byteBufferSupported(); + } + + @Nullable + @Override + public ByteBuffer getByteBuffer() { + return delegate.getByteBuffer(); + } + + public static + CloseAuditingInputStream of(IS in) { + return new CloseAuditingInputStream<>(in); + } + + @Override + public void close() throws IOException { + closed = true; + onClose(); + super.close(); + } + + void onClose() throws IOException {} + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/CleanupStrategy.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/CleanupStrategy.java new file mode 100644 index 000000000000..60185dff1b12 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/CleanupStrategy.java @@ -0,0 +1,23 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.conformance.retry; + +public enum CleanupStrategy { + ALWAYS, + ONLY_ON_SUCCESS, + NEVER +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/Ctx.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/Ctx.java new file mode 100644 index 000000000000..fa24be23c4a4 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/Ctx.java @@ -0,0 +1,78 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.conformance.retry; + +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.conformance.retry.Functions.EConsumer; +import com.google.cloud.storage.conformance.retry.Functions.EFunction; +import com.google.errorprone.annotations.Immutable; + +/** + * A simple context object used to track an instance of {@link Storage} along with {@link State} and + * provide some convenience methods for creating new instances. + */ +@Immutable +final class Ctx { + + private final Storage storage; + private final State state; + + private Ctx(Storage s, State t) { + this.storage = s; + this.state = t; + } + + /** Create a new instance of {@link Ctx} */ + static Ctx ctx(Storage storage, State state) { + return new Ctx(storage, state); + } + + public Storage getStorage() { + return storage; + } + + public State getState() { + return state; + } + + /** + * Create a new instance of {@link Ctx} by first applying {@code f} to {@code this.storage}. + * {@code this.state} is passed along unchanged. + */ + public Ctx leftMap(EFunction f) throws Throwable { + return new Ctx(f.apply(storage), state); + } + + /** + * Create a new instance of {@link Ctx} by first applying {@code f} to {@code this.state}. {@code + * this.storage} is passed along unchanged. + */ + public Ctx map(EFunction f) throws Throwable { + return new Ctx(storage, f.apply(state)); + } + + /** + * Apply {@code f} by providing {@code this.state}. + * + *

This method is provided as convenience for those methods which have void return. In general + * {@link Ctx#map(EFunction)} should be used. + */ + public Ctx peek(EConsumer f) throws Throwable { + f.consume(state); + return this; + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/CtxFunctions.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/CtxFunctions.java new file mode 100644 index 000000000000..3f3e4942f36e --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/CtxFunctions.java @@ -0,0 +1,298 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.conformance.retry; + +import static com.google.common.collect.Sets.newHashSet; + +import com.google.api.gax.paging.Page; +import com.google.cloud.conformance.storage.v1.Resource; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.HmacKey; +import com.google.cloud.storage.HmacKey.HmacKeyMetadata; +import com.google.cloud.storage.HmacKey.HmacKeyState; +import com.google.cloud.storage.NotificationInfo; +import com.google.cloud.storage.NotificationInfo.PayloadFormat; +import com.google.cloud.storage.ServiceAccount; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.ComposeRequest; +import com.google.cloud.storage.conformance.retry.Functions.CtxFunction; +import com.google.common.base.Joiner; +import com.google.common.collect.ImmutableMap; +import com.google.pubsub.v1.TopicName; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; + +/** + * Define a set of {@link CtxFunction} which are used in mappings as well as general setup/tear down + * of specific tests. + * + *

Functions are grouped into nested classes which try to hint at the area they operate within. + * Client side-only, or performing an RPC, setup or tear down and so on. + * + * @see RpcMethodMapping + * @see RpcMethodMapping.Builder + * @see RpcMethodMappings + */ +final class CtxFunctions { + + static final class Local { + + /** + * Populate a copy destination for the state present in the ctx. + * + * @see State#getCopyDest() + */ + static final CtxFunction blobCopy = + (ctx, c) -> ctx.map(s -> s.withCopyDest(BlobId.of(c.getBucketName2(), c.getObjectName()))); + + /** + * Populate a bucket info for the state present in the ctx. + * + *

this is primarily useful in the case when you want to insert a bucket during the test + * + * @see State#getBucketInfo() + */ + static final CtxFunction bucketInfo = + (ctx, c) -> ctx.map(s -> s.with(BucketInfo.of(c.getBucketName()))); + + /** + * Populate a compose request for the state present in the ctx. + * + * @see State#getComposeRequest() + */ + static final CtxFunction composeRequest = + (ctx, c) -> + ctx.map( + state -> { + Blob blob = state.getBlob(); + String bucket = blob.getBucket(); + final BlobInfo target; + if (c.isPreconditionsProvided()) { + target = BlobInfo.newBuilder(BlobId.of(bucket, "blob-full", 0L)).build(); + } else { + target = BlobInfo.newBuilder(BlobId.of(bucket, "blob-full")).build(); + } + ComposeRequest.Builder builder = + ComposeRequest.newBuilder() + // source bucket is resolved from the target, as compose must be within + // the same bucket + .addSource(blob.getName(), blob.getGeneration()) + .addSource(blob.getName(), blob.getGeneration()) + .setTarget(target); + if (c.isPreconditionsProvided()) { + builder = builder.setTargetOptions(BlobTargetOption.generationMatch()); + } + ComposeRequest r = builder.build(); + return state.with(r); + }); + + private static final CtxFunction blobIdAndBlobInfo = + (ctx, c) -> ctx.map(state -> state.with(BlobInfo.newBuilder(state.getBlobId()).build())); + private static final CtxFunction blobIdWithoutGeneration = + (ctx, c) -> ctx.map(s -> s.with(BlobId.of(c.getBucketName(), c.getObjectName()))); + private static final CtxFunction blobIdWithGenerationZero = + (ctx, c) -> ctx.map(s -> s.with(BlobId.of(c.getBucketName(), c.getObjectName(), 0L))); + + /** + * Populate a blobId and blob info for the state present in the ctx which specifies a null + * generation. Use when a generation value shouldn't be part of a request or other evaluation. 
+ * + * @see State#getBlobId() + * @see State#getBlobInfo() + */ + static final CtxFunction blobInfoWithoutGeneration = + blobIdWithoutGeneration.andThen(blobIdAndBlobInfo); + + /** + * Populate a blobId and blob info for the state present in the ctx which specifies a generation + * of 0 (zero). + * + * @see State#getBlobId() + * @see State#getBlobInfo() + */ + static final CtxFunction blobInfoWithGenerationZero = + blobIdWithGenerationZero.andThen(blobIdAndBlobInfo); + } + + static final class Rpc { + static final CtxFunction createEmptyBlob = + (ctx, c) -> ctx.map(state -> state.with(ctx.getStorage().create(state.getBlobInfo()))); + static final CtxFunction bucketIamPolicy = + (ctx, c) -> + ctx.map( + state -> state.with(ctx.getStorage().getIamPolicy(state.getBucket().getName()))); + } + + static final class ResourceSetup { + private static final CtxFunction bucket = + (ctx, c) -> { + BucketInfo bucketInfo = BucketInfo.newBuilder(c.getBucketName()).build(); + Bucket resolvedBucket = ctx.getStorage().create(bucketInfo); + return ctx.map(s -> s.with(resolvedBucket)); + }; + + /** + * Create a new object in the {@link State#getBucket()} and populate a blobId, blob info and + * blob for the state present in the ctx. + * + *

This method will issue an RPC. + * + * @see State#getBlob() + * @see State#getBlobId() + * @see State#getBlobInfo() + */ + static final CtxFunction object = + (ctx, c) -> { + BlobInfo blobInfo = + BlobInfo.newBuilder(ctx.getState().getBucket().getName(), c.getObjectName()).build(); + Blob resolvedBlob = ctx.getStorage().create(blobInfo, c.getHelloWorldUtf8Bytes()); + return ctx.map( + s -> + s.with(resolvedBlob) + .with((BlobInfo) resolvedBlob) + .with(resolvedBlob.getBlobId())); + }; + + static final CtxFunction serviceAccount = + (ctx, c) -> + ctx.map(s -> s.with(ServiceAccount.of(c.getServiceAccountSigner().getAccount()))); + private static final CtxFunction hmacKey = + (ctx, c) -> + ctx.map( + s -> { + HmacKey hmacKey1 = ctx.getStorage().createHmacKey(s.getServiceAccount()); + return s.withHmacKey(hmacKey1).with(hmacKey1.getMetadata()); + }); + + static final CtxFunction pubsubTopic = + (ctx, c) -> { + String projectId = c.getProjectId(); + TopicName name = TopicName.of(projectId, c.getTopicName()); + return ctx.map(s -> s.with(name)); + }; + + static final CtxFunction notification = + (ctx, c) -> + ctx.map( + state -> { + PayloadFormat format = PayloadFormat.JSON_API_V1; + Map attributes = ImmutableMap.of("label1", "value1"); + NotificationInfo notificationInfo = + NotificationInfo.newBuilder(state.getTopicName().toString()) + .setCustomAttributes(attributes) + .setPayloadFormat(format) + .build(); + return state.with( + ctx.getStorage().createNotification(c.getBucketName(), notificationInfo)); + }); + + private static final CtxFunction processResources = + (ctx, c) -> { + HashSet resources = newHashSet(c.getMethod().getResourcesList()); + CtxFunction f = CtxFunction.identity(); + if (resources.contains(Resource.BUCKET)) { + f = f.andThen(ResourceSetup.bucket); + resources.remove(Resource.BUCKET); + } + + if (resources.contains(Resource.OBJECT)) { + f = f.andThen(ResourceSetup.object); + resources.remove(Resource.OBJECT); + } + + if 
(resources.contains(Resource.HMAC_KEY)) { + f = f.andThen(serviceAccount).andThen(hmacKey); + resources.remove(Resource.HMAC_KEY); + } + + if (resources.contains(Resource.NOTIFICATION)) { + f = f.andThen(pubsubTopic).andThen(notification); + resources.remove(Resource.NOTIFICATION); + } + + if (!resources.isEmpty()) { + throw new IllegalStateException( + String.format( + Locale.US, "Unhandled Method Resource [%s]", Joiner.on(", ").join(resources))); + } + + return f.apply(ctx, c); + }; + + private static final CtxFunction allUsersReaderAcl = + (ctx, c) -> ctx.map(s -> s.with(Acl.of(User.ofAllUsers(), Role.READER))); + + static final CtxFunction defaultSetup = processResources.andThen(allUsersReaderAcl); + + static final CtxFunction pubsubTopicSetup = defaultSetup.andThen(pubsubTopic); + + static final CtxFunction notificationSetup = pubsubTopicSetup.andThen(notification); + } + + static final class ResourceTeardown { + private static final CtxFunction deleteAllObjects = + (ctx, c) -> + ctx.map( + s -> { + Storage storage = ctx.getStorage(); + deleteBucket(storage, c.getBucketName()); + deleteBucket(storage, c.getBucketName2()); + State newState = + s.with((Blob) null) + .with((BlobInfo) null) + .with((BlobId) null) + .with((Bucket) null); + + if (s.hasHmacKeyMetadata()) { + HmacKeyMetadata metadata = s.getHmacKeyMetadata(); + if (metadata.getState() == HmacKeyState.ACTIVE) { + metadata = storage.updateHmacKeyState(metadata, HmacKeyState.INACTIVE); + } + storage.deleteHmacKey(metadata); + newState.with((HmacKeyMetadata) null).withHmacKey(null); + } + + return newState; + }); + + static final CtxFunction defaultTeardown = deleteAllObjects; + + private static void deleteBucket(Storage storage, String bucketName) { + Bucket bucket = storage.get(bucketName); + if (bucket != null) { + emptyBucket(storage, bucketName); + bucket.delete(); + } + } + + private static void emptyBucket(Storage storage, String bucketName) { + Page blobs = storage.list(bucketName); + for (Blob 
blob : blobs.iterateAll()) { + blob.delete(); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/Functions.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/Functions.java new file mode 100644 index 000000000000..47ab7a825122 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/Functions.java @@ -0,0 +1,68 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.conformance.retry; + +/** + * A set of Functional interface types which are used in Retry Conformance tests. + * + *

All functions allow checked exceptions to be thrown, whereas their siblings in {@code + * java.util.function} do not. + */ +final class Functions { + + /** + * A specialized BiFunction which cuts down on boilerplate and provides an {@link + * CtxFunction#andThen(CtxFunction) andThen} which carries through the BiFunction-ness. + */ + @FunctionalInterface + interface CtxFunction { + + Ctx apply(Ctx ctx, TestRetryConformance trc) throws Throwable; + + default CtxFunction andThen(CtxFunction f) { + return (Ctx ctx, TestRetryConformance trc) -> f.apply(apply(ctx, trc), trc); + } + + default CtxFunction compose(CtxFunction f) { + return (Ctx ctx, TestRetryConformance trc) -> apply(f.apply(ctx, trc), trc); + } + + static CtxFunction identity() { + return (ctx, c) -> ctx; + } + } + + /** + * Define a Function which can throw, this simplifies the code where a checked exception is + * declared. These Functions only exist in the context of tests so if a throw happens it will be + * handled at a per-test level. + */ + @FunctionalInterface + interface EFunction { + B apply(A a) throws Throwable; + } + + /** + * Define a Consumer which can throw, this simplifies the code where a checked exception is + * declared. These Consumers only exist in the context of tests so if a throw happens it will be + * handled at a per-test level. 
+ */ + @FunctionalInterface + interface EConsumer { + void consume(A a) throws Throwable; + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/ITRetryConformanceTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/ITRetryConformanceTest.java new file mode 100644 index 000000000000..1cfd80cd692e --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/ITRetryConformanceTest.java @@ -0,0 +1,555 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.conformance.retry; + +import static com.google.cloud.storage.PackagePrivateMethodWorkarounds.blobCopyWithStorage; +import static com.google.cloud.storage.PackagePrivateMethodWorkarounds.bucketCopyWithStorage; +import static com.google.cloud.storage.conformance.retry.Ctx.ctx; +import static com.google.cloud.storage.conformance.retry.State.empty; +import static com.google.common.truth.Truth.assertThat; +import static java.util.Objects.requireNonNull; +import static org.junit.Assert.assertNotNull; + +import com.google.cloud.conformance.storage.v1.InstructionList; +import com.google.cloud.conformance.storage.v1.Method; +import com.google.cloud.conformance.storage.v1.RetryTest; +import com.google.cloud.conformance.storage.v1.RetryTests; +import com.google.cloud.storage.CIUtils; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.conformance.retry.Functions.CtxFunction; +import com.google.cloud.storage.conformance.retry.ITRetryConformanceTest.RetryConformanceParameterProvider; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.ParallelFriendly; +import com.google.cloud.storage.it.runner.annotations.Parameterized; +import com.google.cloud.storage.it.runner.annotations.Parameterized.Parameter; +import com.google.cloud.storage.it.runner.annotations.Parameterized.ParametersProvider; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.registry.TestBench; +import com.google.common.base.Charsets; +import com.google.common.base.Joiner; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.protobuf.util.JsonFormat; +import java.io.IOException; 
+import java.io.InputStream; +import java.io.InputStreamReader; +import java.math.BigInteger; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Locale; +import java.util.Random; +import java.util.Set; +import java.util.function.BiPredicate; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.After; +import org.junit.AssumptionViolatedException; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Load and dynamically generate a series of test cases to verify if the {@link Storage} and + * associated high level classes adhere to expected retry behavior. + * + *

This class dynamically generates test cases based on resources from the + * google-cloud-conformance-tests artifact and a set of defined mappings from {@link + * RpcMethodMappings}. + */ +@RunWith(StorageITRunner.class) +// @CrossRun(transports = Transport.HTTP, backends = Backend.TEST_BENCH) +@SingleBackend(Backend.TEST_BENCH) +@Parameterized(RetryConformanceParameterProvider.class) +@ParallelFriendly +public class ITRetryConformanceTest { + private static final Logger LOGGER = LoggerFactory.getLogger(ITRetryConformanceTest.class); + + private RetryTestFixture retryTestFixture; + + @Parameter public RetryParameter retryParameter; + + private TestRetryConformance testRetryConformance; + private RpcMethodMapping mapping; + private Storage nonTestStorage; + private Storage testStorage; + @Nullable private Ctx ctx; + + @Before + public void setUp() throws Throwable { + LOGGER.trace("Running setup..."); + retryTestFixture = retryParameter.retryTestFixture; + testRetryConformance = retryParameter.testRetryConformance; + mapping = retryParameter.rpcMethodMapping; + retryTestFixture.starting(null); + nonTestStorage = retryTestFixture.getNonTestStorage(); + testStorage = retryTestFixture.getTestStorage(); + // it's important to keep these two ctx assignments separate to allow for teardown to work in + // the case setup fails for some reason + ctx = ctx(nonTestStorage, empty()); + ctx = mapping.getSetup().apply(ctx, testRetryConformance).leftMap(s -> testStorage); + LOGGER.trace("Running setup complete"); + } + + @After + public void tearDown() throws Throwable { + LOGGER.trace("Running teardown..."); + if (ctx != null) { + ctx = ctx.leftMap(s -> nonTestStorage); + getReplaceStorageInObjectsFromCtx() + .andThen(mapping.getTearDown()) + .apply(ctx, testRetryConformance); + } + retryTestFixture.finished(null); + LOGGER.trace("Running teardown complete"); + } + + /** + * Run an individual test case. 1. 
Create two storage clients, one for setup/teardown and one for + * test execution 2. Run setup 3. Run test 4. Run teardown + */ + @Test + public void test() throws Throwable { + LOGGER.trace("Running test..."); + assertThat(ctx).isNotNull(); + try { + ctx = + getReplaceStorageInObjectsFromCtx() + .andThen(mapping.getTest()) + .apply(ctx, testRetryConformance) + .leftMap(s -> nonTestStorage); + retryTestFixture.succeeded(null); + } catch (Throwable e) { + retryTestFixture.failed(e, null); + throw e; + } + LOGGER.trace("Running test complete"); + } + + /** + * Load all of the tests and return a {@code Collection} representing the set of tests. + * Each entry in the returned collection is the set of parameters to the constructor of this test + * class. + * + *

The results of this method will then be run by JUnit's Parameterized test runner + */ + public static final class RetryConformanceParameterProvider implements ParametersProvider { + @Inject public TestBench testBench; + + @Override + public ImmutableList parameters() { + RetryTestCaseResolver resolver = + RetryTestCaseResolver.newBuilder() + .setRetryTestsJsonResourcePath( + "com/google/cloud/conformance/storage/v1/retry_tests.json") + .setMappings(new RpcMethodMappings()) + .setProjectId("conformance-tests") + .setHost(testBench.getBaseUri().replaceAll("https?://", "")) + .setTestAllowFilter(RetryTestCaseResolver.includeAll()) + .build(); + + List retryTestCases; + try { + retryTestCases = resolver.getRetryTestCases(); + } catch (IOException e) { + throw new RuntimeException(e); + } + assertThat(retryTestCases).isNotEmpty(); + return retryTestCases.stream() + .map( + rtc -> + RetryParameter.of( + rtc, + new RetryTestFixture( + CleanupStrategy.ALWAYS, testBench, rtc.testRetryConformance))) + .collect(ImmutableList.toImmutableList()); + } + } + + /** + * When a "higher level object" ({@link com.google.cloud.storage.Bucket}, {@link + * com.google.cloud.storage.Blob}, etc.) is created as part of setup it keeps a reference to the + * instance of {@link Storage} used to create it. When we run our tests we need the instance of + * {@link Storage} to be the instance with the headers to signal the retry test. + * + *

The function returned will inspect the {@link State} and create copies of any "higher level + * objects" which are present replacing the instance of {@link Storage} from the provided ctx. + */ + private static CtxFunction getReplaceStorageInObjectsFromCtx() { + return (ctx, c) -> { + State s = ctx.getState(); + if (s.hasBucket()) { + s = s.with(bucketCopyWithStorage(s.getBucket(), ctx.getStorage())); + } + if (s.hasBlob()) { + s = s.with(blobCopyWithStorage(s.getBlob(), ctx.getStorage())); + } + final State state = s; + return ctx.map(x -> state); + }; + } + + /** + * Helper class which encapsulates all the logic necessary to resolve and create a test case for + * each defined scenario from google-cloud-conformance-tests and our defined {@link + * RpcMethodMappings}. + */ + static final class RetryTestCaseResolver { + private static final String HEX_SHUFFLE_SEED_OVERRIDE = + System.getProperty("HEX_SHUFFLE_SEED_OVERRIDE"); + + private final String retryTestsJsonResourcePath; + private final RpcMethodMappings mappings; + private final BiPredicate testAllowFilter; + private final Random rand; + private final String host; + private final String projectId; + + private RetryTestCaseResolver( + String retryTestsJsonResourcePath, + RpcMethodMappings mappings, + BiPredicate testAllowFilter, + Random rand, + String host, + String projectId) { + this.retryTestsJsonResourcePath = retryTestsJsonResourcePath; + this.mappings = mappings; + this.testAllowFilter = testAllowFilter; + this.rand = rand; + this.host = host; + this.projectId = projectId; + } + + /** Load, permute and generate all RetryTestCases which are to be run in this suite */ + List getRetryTestCases() throws IOException { + RetryTests retryTests = loadRetryTestsDefinition(); + + // sort the defined RetryTest by id, so we have a stable ordering while generating cases. 
+ List retryTestCases = + retryTests.getRetryTestsList().stream() + .sorted(Comparator.comparingInt(RetryTest::getId)) + .collect(Collectors.toList()); + + List testCases = generateTestCases(mappings, retryTestCases); + + // Shuffle our test cases to ensure we don't have any between case ordering weirdness + Collections.shuffle(testCases, rand); + + validateGeneratedTestCases(mappings, testCases); + + return testCases; + } + + /** Load the defined scenarios from google-cloud-conformance-tests */ + private RetryTests loadRetryTestsDefinition() throws IOException { + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + + InputStream dataJson = cl.getResourceAsStream(retryTestsJsonResourcePath); + assertNotNull( + String.format( + Locale.US, "Unable to load test definition: %s", retryTestsJsonResourcePath), + dataJson); + + InputStreamReader reader = new InputStreamReader(dataJson, Charsets.UTF_8); + RetryTests.Builder testBuilder = RetryTests.newBuilder(); + JsonFormat.parser().merge(reader, testBuilder); + return testBuilder.build(); + } + + /** Permute the RetryTest, Instructions and methods with our mappings */ + private List generateTestCases( + RpcMethodMappings rpcMethodMappings, List retryTests) { + + List testCases = new ArrayList<>(); + Transport[] values = Transport.values(); + for (Transport transport : values) { + for (RetryTest testCase : retryTests) { + for (InstructionList instructionList : testCase.getCasesList()) { + for (Method method : testCase.getMethodsList()) { + String methodName = method.getName(); + RpcMethod key = RpcMethod.storage.lookup.get(methodName); + assertNotNull( + String.format( + Locale.US, "Unable to resolve RpcMethod for value '%s'", methodName), + key); + // get all RpcMethodMappings which are defined for key + List mappings = + rpcMethodMappings.get(key).stream() + .sorted(Comparator.comparingInt(RpcMethodMapping::getMappingId)) + .collect(Collectors.toList()); + // if we don't have any mappings defined for the 
provide key, generate a case that + // when + // run reports an ignored test. This is done for the sake of completeness and to be + // aware of a lack of mapping. + if (mappings.isEmpty() && CIUtils.verbose()) { + TestRetryConformance testRetryConformance = + new TestRetryConformance( + transport, + projectId, + host, + testCase.getId(), + method, + instructionList, + testCase.getPreconditionProvided(), + false); + if (testAllowFilter.test(key, testRetryConformance)) { + testCases.add( + new RetryTestCase( + testRetryConformance, RpcMethodMapping.notImplemented(key))); + } + } else { + for (RpcMethodMapping mapping : mappings) { + TestRetryConformance testRetryConformance = + new TestRetryConformance( + transport, + projectId, + host, + testCase.getId(), + method, + instructionList, + testCase.getPreconditionProvided(), + testCase.getExpectSuccess(), + mapping.getMappingId()); + // check that this case is allowed based on the provided filter + if (testAllowFilter.test(key, testRetryConformance)) { + // check that the defined mapping is applicable to the case we've resolved. + // Many mappings are conditionally valid and depend on the defined case. + if (mapping.getApplicable().test(testRetryConformance)) { + testCases.add(new RetryTestCase(testRetryConformance, mapping)); + } else if (CIUtils.verbose()) { + // when the mapping is determined to not be applicable to this case, generate + // a synthetic mapping which will report as an ignored test. This is done for + // the sake of completeness. 
+ RpcMethodMapping build = + mapping.toBuilder() + .withSetup(CtxFunction.identity()) + .withTest( + (s, c) -> { + throw new AssumptionViolatedException( + "applicability predicate evaluated to false"); + }) + .withTearDown(CtxFunction.identity()) + .build(); + testCases.add(new RetryTestCase(testRetryConformance, build)); + } + } + } + } + } + } + } + } + return testCases; + } + + private void validateGeneratedTestCases( + RpcMethodMappings rpcMethodMappings, List data) { + Set unusedMappings = + rpcMethodMappings.differenceMappingIds( + data.stream() + .map(rtc -> rtc.testRetryConformance.getMappingId()) + .collect(Collectors.toSet())); + + if (!unusedMappings.isEmpty()) { + LOGGER.warn( + String.format( + Locale.US, + "Declared but unused mappings with ids: [%s]", + Joiner.on(", ").join(unusedMappings))); + } + } + + static Builder newBuilder() { + return new Builder(); + } + + /** Filtering predicate in which all test cases will be included and run. */ + static BiPredicate includeAll() { + return (m, c) -> true; + } + + /** + * Filtering predicate in which only those test cases which match up to the specified {@code + * mappingIds} will be included and run. + */ + static BiPredicate specificMappings(int... mappingIds) { + ImmutableSet set = + Arrays.stream(mappingIds).boxed().collect(ImmutableSet.toImmutableSet()); + return (m, c) -> set.contains(c.getMappingId()); + } + + static BiPredicate lift(Predicate p) { + return (m, trc) -> p.test(trc); + } + + static Predicate instructionsAre(String... instructions) { + return trc -> + trc.getInstruction().getInstructionsList().equals(ImmutableList.copyOf(instructions)); + } + + static BiPredicate scenarioIdIs(int scenarioId) { + return (m, trc) -> trc.getScenarioId() == scenarioId; + } + + static BiPredicate mappingIdIn(Integer... 
mappingIds) { + ImmutableSet ids = ImmutableSet.copyOf(mappingIds); + return (m, trc) -> ids.contains(trc.getMappingId()); + } + + static final class Builder { + private String retryTestsJsonResourcePath; + private RpcMethodMappings mappings; + private String host; + private BiPredicate testAllowFilter; + private final Random rand; + private String projectId; + + public Builder() { + this.rand = resolveRand(); + } + + /** + * Set the resource path of where to resolve the retry_tests.json from + * google-cloud-conformance-tests + */ + public Builder setRetryTestsJsonResourcePath(String retryTestsJsonResourcePath) { + this.retryTestsJsonResourcePath = retryTestsJsonResourcePath; + return this; + } + + /** Set the defined mappings which are to be used in test generation */ + public Builder setMappings(RpcMethodMappings mappings) { + this.mappings = requireNonNull(mappings, "mappings must be non null"); + return this; + } + + /** Set the host string of where the testbench will be available during a test run */ + public Builder setHost(String host) { + this.host = host; + return this; + } + + public Builder setProjectId(String projectId) { + this.projectId = projectId; + return this; + } + + /** + * Set the allow filter for determining if a particular {@link RpcMethod} and {@link + * TestRetryConformance} should be included in the generated test suite. 
+ */ + public Builder setTestAllowFilter( + BiPredicate testAllowFilter) { + this.testAllowFilter = requireNonNull(testAllowFilter, "testAllowFilter must be non null"); + return this; + } + + public RetryTestCaseResolver build() { + return new RetryTestCaseResolver( + requireNonNull( + retryTestsJsonResourcePath, "retryTestsJsonResourcePath must be non null"), + requireNonNull(mappings, "mappings must be non null"), + requireNonNull(testAllowFilter, "testAllowList must be non null"), + rand, + requireNonNull(host, "host must be non null"), + requireNonNull(projectId, "projectId must be non null")); + } + + /** + * As part of test generation and execution we are shuffling the order to ensure there is no + * ordering dependency between individual cases. Given this fact, we report the seed used for + * performing the shuffle. If an explicit seed is provided via environment variable that will + * take precedence. + */ + private static Random resolveRand() { + try { + long seed; + if (HEX_SHUFFLE_SEED_OVERRIDE != null) { + LOGGER.info( + "Shuffling test order using Random with override seed: " + + HEX_SHUFFLE_SEED_OVERRIDE); + seed = new BigInteger(HEX_SHUFFLE_SEED_OVERRIDE.replace("0x", ""), 16).longValue(); + } else { + seed = + SecureRandom.getInstanceStrong() + .longs(100) + .reduce((first, second) -> second) + .orElseThrow( + () -> { + throw new IllegalStateException("Unable to generate seed"); + }); + String msg = + String.format( + Locale.US, "Shuffling test order using Random with seed: 0x%016X", seed); + LOGGER.info(msg); + } + return new Random(seed); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + } + } + } + + /** + * Simple typed tuple class to bind together a {@link TestRetryConformance} and {@link + * RpcMethodMapping} during resolution. 
+ */ + private static final class RetryTestCase { + private final TestRetryConformance testRetryConformance; + private final RpcMethodMapping rpcMethodMapping; + + RetryTestCase(TestRetryConformance testRetryConformance, RpcMethodMapping rpcMethodMapping) { + this.testRetryConformance = testRetryConformance; + this.rpcMethodMapping = rpcMethodMapping; + } + } + + private static final class RetryParameter { + private final TestRetryConformance testRetryConformance; + private final RpcMethodMapping rpcMethodMapping; + private final RetryTestFixture retryTestFixture; + + private RetryParameter( + TestRetryConformance testRetryConformance, + RpcMethodMapping rpcMethodMapping, + RetryTestFixture retryTestFixture) { + this.testRetryConformance = testRetryConformance; + this.rpcMethodMapping = rpcMethodMapping; + this.retryTestFixture = retryTestFixture; + } + + public static RetryParameter of(RetryTestCase rtc, RetryTestFixture retryTestFixture) { + return new RetryParameter(rtc.testRetryConformance, rtc.rpcMethodMapping, retryTestFixture); + } + + @Override + public String toString() { + return testRetryConformance.toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RetryTestFixture.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RetryTestFixture.java new file mode 100644 index 000000000000..c5a61c79f89e --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RetryTestFixture.java @@ -0,0 +1,208 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.conformance.retry; + +import static org.junit.Assert.assertTrue; + +import com.google.api.client.http.HttpResponseException; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.FixedHeaderProvider; +import com.google.cloud.NoCredentials; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.it.runner.registry.TestBench; +import com.google.cloud.storage.it.runner.registry.TestBench.RetryTestResource; +import com.google.common.collect.ImmutableMap; +import java.io.IOException; +import java.util.Locale; +import org.junit.AssumptionViolatedException; +import org.junit.rules.TestRule; +import org.junit.rules.TestWatcher; +import org.junit.runner.Description; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A JUnit 4 {@link TestRule} which integrates with {@link TestBench} and {@link + * TestRetryConformance} to provide transparent lifecycle integration of setup/validation/cleanup of + * {@code /retry_test} resources. This rule expects to be bound as an {@link org.junit.Rule @Rule} + * field. + * + *

Provides pre-configured instances of {@link Storage} for setup/teardown & test. + */ +final class RetryTestFixture extends TestWatcher { + private static final Logger LOGGER = LoggerFactory.getLogger(RetryTestFixture.class); + private static final int STATUS_CODE_NOT_IMPLEMENTED = 501; + + private final CleanupStrategy cleanupStrategy; + private final TestBench testBench; + private final TestRetryConformance testRetryConformance; + + boolean testSuccess = false; + boolean testSkipped = false; + + private RetryTestResource retryTest; + private Storage nonTestStorage; + private Storage testStorage; + + RetryTestFixture( + CleanupStrategy cleanupStrategy, + TestBench testBench, + TestRetryConformance testRetryConformance) { + this.cleanupStrategy = cleanupStrategy; + this.testBench = testBench; + this.testRetryConformance = testRetryConformance; + } + + public Storage getNonTestStorage() { + if (nonTestStorage == null) { + this.nonTestStorage = newStorage(false); + } + return nonTestStorage; + } + + public Storage getTestStorage() { + if (testStorage == null) { + this.testStorage = newStorage(true); + } + return testStorage; + } + + @Override + protected void starting(Description description) { + LOGGER.trace("Setting up retry_test resource..."); + RetryTestResource retryTestResource = + RetryTestResource.newRetryTestResource( + testRetryConformance.getMethod(), + testRetryConformance.getInstruction(), + testRetryConformance.getTransport().name()); + try { + retryTest = testBench.createRetryTest(retryTestResource); + } catch (HttpResponseException e) { + if (e.getStatusCode() == STATUS_CODE_NOT_IMPLEMENTED) { + AssumptionViolatedException exception = + new AssumptionViolatedException( + "Testbench not yet implemented for " + retryTestResource); + // make skips due to not implemented more terse + // we know where this comes from, we don't need the full stack trace for each of the + // 200+ occurrences. 
+ exception.setStackTrace(new StackTraceElement[0]); + throw exception; + } else { + throw new RuntimeException(e); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + LOGGER.trace("Setting up retry_test resource complete"); + } + + @Override + protected void finished(Description description) { + LOGGER.trace("Verifying end state of retry_test resource..."); + try (Storage ignore1 = nonTestStorage; + Storage ignore2 = testStorage) { // use try-with to shut down grpc resources + try { + if (retryTest != null) { + RetryTestResource postTestState = testBench.getRetryTest(retryTest); + if (testSuccess) { + assertTrue("expected completed to be true, but was false", postTestState.completed); + } + } + } finally { + LOGGER.trace("Verifying end state of retry_test resource complete"); + if ((shouldCleanup(testSuccess, testSkipped)) && retryTest != null) { + testBench.deleteRetryTest(retryTest); + retryTest = null; + } + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + @Override + protected void succeeded(Description description) { + testSuccess = true; + } + + @Override + protected void failed(Throwable e, Description description) { + super.failed(e, description); + } + + @Override + protected void skipped(AssumptionViolatedException e, Description description) { + testSkipped = true; + } + + private boolean shouldCleanup(boolean testSuccess, boolean testSkipped) { + return cleanupStrategy == CleanupStrategy.ALWAYS + || ((testSuccess || testSkipped) && cleanupStrategy == CleanupStrategy.ONLY_ON_SUCCESS); + } + + private Storage newStorage(boolean forTest) { + RetrySettings.Builder retrySettingsBuilder = + StorageOptions.getDefaultRetrySettings().toBuilder(); + if (forTest) { + StorageOptions.Builder builder; + switch (testRetryConformance.getTransport()) { + case HTTP: + builder = StorageOptions.http().setHost(testBench.getBaseUri()); + break; + case GRPC: + builder = + StorageOptions.grpc() + .setHost(testBench.getGRPCBaseUri()) 
+ .setEnableGrpcClientMetrics(false) + .setAttemptDirectPath(false); + break; + default: + throw new IllegalStateException( + "Enum switch exhaustion checking would be nice. Unhandled case: " + + testRetryConformance.getTransport()); + } + builder + .setCredentials(NoCredentials.getInstance()) + .setProjectId(testRetryConformance.getProjectId()) + .setHeaderProvider( + FixedHeaderProvider.create( + ImmutableMap.of( + "x-retry-test-id", retryTest.id, "User-Agent", fmtUserAgent("test")))) + .setRetrySettings(retrySettingsBuilder.setMaxAttempts(3).build()); + return builder.build().getService(); + } else { + return StorageOptions.http() + .setHost(testBench.getBaseUri()) + .setCredentials(NoCredentials.getInstance()) + .setProjectId(testRetryConformance.getProjectId()) + .setHeaderProvider( + FixedHeaderProvider.create(ImmutableMap.of("User-Agent", fmtUserAgent("non-test")))) + .setRetrySettings(retrySettingsBuilder.setMaxAttempts(1).build()) + .build() + .getService(); + } + } + + private String fmtUserAgent(String testDescriptor) { + return String.format( + Locale.US, + "%s/ (%s) java-conformance-tests/", + testDescriptor, + testRetryConformance.getTestName()); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethod.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethod.java new file mode 100644 index 000000000000..4a0f8d50dfb7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethod.java @@ -0,0 +1,167 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.conformance.retry; + +import java.util.Arrays; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +interface RpcMethod { + + String getFullyQualifiedMethodName(); + + /** + * Enumerate the hierarchy of storage rpc methods. + * + *

These class names intentionally do not follow java convention, because they are mapping + * directly to lower level values. + */ + final class storage { + private static String getFullQualifiedMethodName(Enum e) { + return String.format(Locale.US, "storage.%s.%s", e.getClass().getSimpleName(), e.name()); + } + + enum bucket_acl implements RpcMethod { + delete, + get, + insert, + list, + patch, + update; + + @Override + public String getFullyQualifiedMethodName() { + return getFullQualifiedMethodName(this); + } + } + + enum buckets implements RpcMethod { + delete, + get, + insert, + list, + patch, + update, + getIamPolicy, + lockRetentionPolicy, + setIamPolicy, + testIamPermissions; + + @Override + public String getFullyQualifiedMethodName() { + return getFullQualifiedMethodName(this); + } + } + + enum default_object_acl implements RpcMethod { + delete, + get, + insert, + list, + patch, + update; + + @Override + public String getFullyQualifiedMethodName() { + return getFullQualifiedMethodName(this); + } + } + + enum hmacKey implements RpcMethod { + delete, + get, + list, + update, + create; + + @Override + public String getFullyQualifiedMethodName() { + return getFullQualifiedMethodName(this); + } + } + + enum notifications implements RpcMethod { + delete, + get, + insert, + list; + + @Override + public String getFullyQualifiedMethodName() { + return getFullQualifiedMethodName(this); + } + } + + enum object_acl implements RpcMethod { + delete, + get, + insert, + list, + patch, + update; + + @Override + public String getFullyQualifiedMethodName() { + return getFullQualifiedMethodName(this); + } + } + + enum objects implements RpcMethod { + delete, + get, + insert, + list, + patch, + update, + compose, + rewrite, + copy; + + @Override + public String getFullyQualifiedMethodName() { + return getFullQualifiedMethodName(this); + } + } + + enum serviceaccount implements RpcMethod { + get; + + @Override + public String getFullyQualifiedMethodName() { + return 
getFullQualifiedMethodName(this); + } + } + + // create a map, which can be used to do a reverse lookup of an RpcMethod by its associated + // string value. + static final Map lookup = + Stream.>of( + Arrays.stream(bucket_acl.values()), + Arrays.stream(buckets.values()), + Arrays.stream(default_object_acl.values()), + Arrays.stream(hmacKey.values()), + Arrays.stream(notifications.values()), + Arrays.stream(object_acl.values()), + Arrays.stream(objects.values()), + Arrays.stream(serviceaccount.values())) + .flatMap(Function.identity()) // .flatten() + .collect(Collectors.toMap(RpcMethod::getFullyQualifiedMethodName, Function.identity())); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMapping.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMapping.java new file mode 100644 index 000000000000..6ecc0068d03d --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMapping.java @@ -0,0 +1,215 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.conformance.retry; + +import static com.google.common.collect.Sets.newHashSet; +import static java.util.Objects.requireNonNull; +import static org.junit.Assert.fail; + +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.conformance.retry.CtxFunctions.ResourceSetup; +import com.google.cloud.storage.conformance.retry.CtxFunctions.ResourceTeardown; +import com.google.cloud.storage.conformance.retry.Functions.CtxFunction; +import com.google.common.base.Preconditions; +import com.google.errorprone.annotations.Immutable; +import java.util.HashSet; +import java.util.function.Predicate; +import org.junit.AssumptionViolatedException; + +/** + * Immutable class which represents a mapping between an {@link RpcMethod} and a method in the + * public {@code com.google.cloud.storage} API. + * + *

This class defines a semi-declarative way in which mappings can be declared independent of the + * actual environment & state necessary to actually invoke a method. + * + * @see ITRetryConformanceTest#test() + * @see RpcMethodMappings + */ +@Immutable +final class RpcMethodMapping { + private final int mappingId; + private final RpcMethod method; + private final Predicate applicable; + private final CtxFunction setup; + private final CtxFunction test; + private final CtxFunction tearDown; + + RpcMethodMapping( + int mappingId, + RpcMethod method, + Predicate applicable, + CtxFunction setup, + CtxFunction test, + CtxFunction tearDown) { + this.mappingId = mappingId; + this.method = method; + this.applicable = applicable; + this.setup = setup; + this.test = test; + this.tearDown = tearDown; + } + + public int getMappingId() { + return mappingId; + } + + public RpcMethod getMethod() { + return method; + } + + public Predicate getApplicable() { + return applicable; + } + + public CtxFunction getSetup() { + return setup; + } + + public CtxFunction getTest() { + return (ctx, c) -> { + if (c.isExpectSuccess()) { + return test.apply(ctx, c); + } else { + try { + test.apply(ctx, c); + fail("expected failure, but succeeded"); + } catch (StorageException e) { + // We expect an exception to be thrown by mapping and test retry conformance config + // Verify that the exception we received is actually what we expect. 
+ boolean matchExpectedCode = false; + int code = e.getCode(); + HashSet instructions = newHashSet(c.getInstruction().getInstructionsList()); + if (instructions.contains("return-503") && code == 503) { + matchExpectedCode = true; + } + if (instructions.contains("return-400") && code == 400) { + matchExpectedCode = true; + } + if (instructions.contains("return-401") && code == 401) { + matchExpectedCode = true; + } + if (instructions.contains("return-reset-connection") && code == 0) { + matchExpectedCode = true; + } + // testbench resetting the connection is turned into an UNAVAILABLE in grpc, which we then + // map to 503. Add graceful handling here, since we can't disambiguate between reset + // connection and 503 from the service. + if (c.getTransport() == Transport.GRPC + && instructions.contains("return-reset-connection") + && code == 503) { + matchExpectedCode = true; + } + + if (matchExpectedCode) { + return ctx; + } else { + throw e; + } + } + } + throw new IllegalStateException( + "Unable to determine applicability of mapping for provided TestCaseConfig"); + }; + } + + public CtxFunction getTearDown() { + return tearDown; + } + + public Builder toBuilder() { + return new Builder(mappingId, method, applicable, setup, test, tearDown); + } + + static Builder newBuilder(int mappingId, RpcMethod method) { + Preconditions.checkArgument(mappingId >= 1, "mappingId must be >= 1, but was %d", mappingId); + return new Builder(mappingId, method); + } + + static RpcMethodMapping notImplemented(RpcMethod method) { + return new Builder(0, method) + .withSetup( + (s, c) -> { + throw new AssumptionViolatedException("not implemented"); + }) + .withTest(CtxFunction.identity()) + .build(); + } + + static final class Builder { + + private final int mappingId; + private final RpcMethod method; + private final Predicate applicable; + private final CtxFunction setup; + private final CtxFunction test; + private CtxFunction tearDown; + + Builder(int mappingId, RpcMethod method) { + 
this( + mappingId, + method, + x -> true, + ResourceSetup.defaultSetup, + null, + ResourceTeardown.defaultTeardown); + } + + private Builder( + int mappingId, + RpcMethod method, + Predicate applicable, + CtxFunction setup, + CtxFunction test, + CtxFunction tearDown) { + this.mappingId = mappingId; + this.method = method; + this.applicable = applicable; + this.setup = setup; + this.test = test; + this.tearDown = tearDown; + } + + public Builder withApplicable(Predicate applicable) { + return new Builder(mappingId, method, applicable, setup, test, tearDown); + } + + public Builder withSetup(CtxFunction setup) { + return new Builder(mappingId, method, applicable, setup, null, tearDown); + } + + public Builder withTest(CtxFunction test) { + return new Builder(mappingId, method, applicable, setup, test, tearDown); + } + + public Builder withTearDown(CtxFunction tearDown) { + this.tearDown = tearDown; + return this; + } + + public RpcMethodMapping build() { + return new RpcMethodMapping( + mappingId, + requireNonNull(method, "method must be non null"), + requireNonNull(applicable, "applicable must be non null"), + requireNonNull(setup, "setup must be non null"), + requireNonNull(test, "test must be non null"), + requireNonNull(tearDown, "tearDown must be non null")); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMappings.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMappings.java new file mode 100644 index 000000000000..9ed0177e354a --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMappings.java @@ -0,0 +1,2153 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.conformance.retry; + +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.cloud.storage.conformance.retry.CtxFunctions.ResourceSetup.defaultSetup; +import static com.google.cloud.storage.conformance.retry.CtxFunctions.ResourceSetup.notificationSetup; +import static com.google.cloud.storage.conformance.retry.CtxFunctions.ResourceSetup.pubsubTopicSetup; +import static com.google.cloud.storage.conformance.retry.CtxFunctions.ResourceSetup.serviceAccount; +import static com.google.common.base.Predicates.and; +import static com.google.common.base.Predicates.not; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.BaseServiceException; +import com.google.cloud.Binding; +import com.google.cloud.Identity; +import com.google.cloud.Policy; +import com.google.cloud.ReadChannel; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.HmacKey.HmacKeyMetadata; +import com.google.cloud.storage.HmacKey.HmacKeyState; +import com.google.cloud.storage.HttpMethod; +import com.google.cloud.storage.NotificationInfo; +import com.google.cloud.storage.NotificationInfo.PayloadFormat; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobGetOption; +import 
com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.Storage.BucketSourceOption; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.Storage.CopyRequest; +import com.google.cloud.storage.Storage.SignUrlOption; +import com.google.cloud.storage.Storage.UriScheme; +import com.google.cloud.storage.StorageRoles; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.conformance.retry.CtxFunctions.Local; +import com.google.cloud.storage.conformance.retry.CtxFunctions.ResourceSetup; +import com.google.cloud.storage.conformance.retry.CtxFunctions.Rpc; +import com.google.cloud.storage.conformance.retry.Functions.CtxFunction; +import com.google.cloud.storage.conformance.retry.Functions.EConsumer; +import com.google.cloud.storage.conformance.retry.RpcMethod.storage.bucket_acl; +import com.google.cloud.storage.conformance.retry.RpcMethod.storage.buckets; +import com.google.cloud.storage.conformance.retry.RpcMethod.storage.default_object_acl; +import com.google.cloud.storage.conformance.retry.RpcMethod.storage.hmacKey; +import com.google.cloud.storage.conformance.retry.RpcMethod.storage.notifications; +import com.google.cloud.storage.conformance.retry.RpcMethod.storage.object_acl; +import com.google.cloud.storage.conformance.retry.RpcMethod.storage.objects; +import com.google.cloud.storage.conformance.retry.RpcMethod.storage.serviceaccount; +import com.google.cloud.storage.conformance.retry.RpcMethodMappings.Mappings.BucketAcl; +import com.google.cloud.storage.conformance.retry.RpcMethodMappings.Mappings.Buckets; +import com.google.cloud.storage.conformance.retry.RpcMethodMappings.Mappings.DefaultObjectAcl; +import com.google.cloud.storage.conformance.retry.RpcMethodMappings.Mappings.HmacKey; +import 
com.google.cloud.storage.conformance.retry.RpcMethodMappings.Mappings.Notification; +import com.google.cloud.storage.conformance.retry.RpcMethodMappings.Mappings.ObjectAcl; +import com.google.cloud.storage.conformance.retry.RpcMethodMappings.Mappings.Objects; +import com.google.cloud.storage.conformance.retry.RpcMethodMappings.Mappings.ServiceAccount; +import com.google.common.base.Predicate; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ListMultimap; +import com.google.common.collect.Multimap; +import com.google.common.collect.MultimapBuilder; +import com.google.common.collect.Multimaps; +import com.google.common.collect.Sets; +import com.google.common.io.ByteStreams; +import com.google.errorprone.annotations.Immutable; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.WritableByteChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; +import java.util.OptionalInt; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A class which serves to try and organize all of the {@link RpcMethodMapping} for the retry + * conformance tests. + * + *
<p>
Individual mappings are grouped via inner classes corresponding to the {@link RpcMethod} for + * which they are defined. + * + *
<p>
As part of construction mappingIds are enforced to be unique, throwing an error if not. + */ +@Immutable +@SuppressWarnings("Guava") +final class RpcMethodMappings { + private static final Logger LOGGER = LoggerFactory.getLogger(RpcMethodMappings.class); + + private static final Predicate groupIsDownload = + methodGroupIs("storage.objects.download"); + private static final Predicate groupIsResumableUpload = + methodGroupIs("storage.resumable.upload"); + + static final int _2MiB = 2 * 1024 * 1024; + private static final ImmutableMap MODIFY = ImmutableMap.of("a", "b"); + final Multimap funcMap; + + RpcMethodMappings() { + ArrayList a = new ArrayList<>(); + + BucketAcl.delete(a); + BucketAcl.get(a); + BucketAcl.insert(a); + BucketAcl.list(a); + BucketAcl.patch(a); + + Buckets.delete(a); + Buckets.get(a); + Buckets.insert(a); + Buckets.list(a); + Buckets.patch(a); + Buckets.update(a); + Buckets.getIamPolicy(a); + Buckets.lockRetentionPolicy(a); + Buckets.setIamPolicy(a); + Buckets.testIamPermission(a); + + DefaultObjectAcl.delete(a); + DefaultObjectAcl.get(a); + DefaultObjectAcl.insert(a); + DefaultObjectAcl.list(a); + DefaultObjectAcl.patch(a); + DefaultObjectAcl.update(a); + + HmacKey.delete(a); + HmacKey.get(a); + HmacKey.list(a); + HmacKey.update(a); + HmacKey.create(a); + + Notification.delete(a); + Notification.get(a); + Notification.insert(a); + Notification.list(a); + + ObjectAcl.delete(a); + ObjectAcl.get(a); + ObjectAcl.insert(a); + ObjectAcl.list(a); + ObjectAcl.patch(a); + ObjectAcl.update(a); + + Objects.delete(a); + Objects.get(a); + Objects.insert(a); + Objects.list(a); + Objects.patch(a); + Objects.update(a); + Objects.compose(a); + Objects.rewrite(a); + Objects.copy(a); + + ServiceAccount.get(a); + ServiceAccount.put(a); + + validateMappingDefinitions(a); + + funcMap = Multimaps.index(a, RpcMethodMapping::getMethod); + reportMappingSummary(); + } + + public Collection get(RpcMethod key) { + return funcMap.get(key); + } + + public Set 
differenceMappingIds(Set usedMappingIds) { + return Sets.difference( + funcMap.values().stream().map(RpcMethodMapping::getMappingId).collect(Collectors.toSet()), + usedMappingIds); + } + + private void validateMappingDefinitions(ArrayList a) { + ListMultimap idMappings = + MultimapBuilder.hashKeys() + .arrayListValues() + .build(Multimaps.index(a, RpcMethodMapping::getMappingId)); + String duplicateIds = + idMappings.asMap().entrySet().stream() + .filter(e -> e.getValue().size() > 1) + .map(Entry::getKey) + .map(i -> Integer.toString(i)) + .collect(Collectors.joining(", ")); + if (!duplicateIds.isEmpty()) { + String message = "duplicate mapping ids present: [" + duplicateIds + "]"; + throw new IllegalStateException(message); + } + } + + private void reportMappingSummary() { + int mappingCount = funcMap.values().stream().mapToInt(m -> 1).sum(); + LOGGER.info("Current total number of mappings defined: {}", mappingCount); + String counts = + funcMap.asMap().entrySet().stream() + .map( + e -> { + RpcMethod rpcMethod = e.getKey(); + Collection mappings = e.getValue(); + return String.format( + Locale.US, + "\t%s.%s: %d", + rpcMethod + .getClass() + .getName() + .replace("com.google.cloud.storage.conformance.retry.RpcMethod$", "") + .replace("$", "."), + rpcMethod, + mappings.size()); + }) + .sorted() + .collect(Collectors.joining("\n", "\n", "")); + LOGGER.info("Current number of mappings per rpc method: {}", counts); + OptionalInt max = + funcMap.values().stream().map(RpcMethodMapping::getMappingId).mapToInt(i -> i).max(); + if (max.isPresent()) { + LOGGER.info(String.format(Locale.US, "Current max mapping index is: %d%n", max.getAsInt())); + } else { + throw new IllegalStateException("No mappings defined"); + } + } + + private static void withTempFile(String prefix, String suffix, EConsumer f) + throws Throwable { + Path tmpOutFile = Files.createTempFile(prefix, suffix); + try { + f.consume(tmpOutFile); + } finally { + Files.delete(tmpOutFile); + } + } + + static 
final class Mappings { + + static final class BucketAcl { + + private static void delete(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(1, bucket_acl.delete) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean deleteSuccess = + ctx.getStorage().deleteAcl(c.getBucketName(), User.ofAllUsers()); + assertTrue(deleteSuccess); + return state.with(deleteSuccess); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(2, bucket_acl.delete) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = + ctx.getStorage() + .deleteAcl( + c.getBucketName(), + User.ofAllUsers(), + BucketSourceOption.userProject(c.getUserProject())); + assertTrue(success); + return state.with(success); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(87, bucket_acl.delete) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = + state.getBucket().deleteAcl(state.getAcl().getEntity()); + assertTrue(success); + return state.with(success); + })) + .build()); + } + + private static void get(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(3, bucket_acl.get) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage().getAcl(c.getBucketName(), User.ofAllUsers())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(4, bucket_acl.get) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .getAcl( + c.getBucketName(), + User.ofAllUsers(), + BucketSourceOption.userProject(c.getUserProject()))))) + .build()); + a.add( + RpcMethodMapping.newBuilder(88, bucket_acl.get) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with(state.getBucket().getAcl(state.getAcl().getEntity())))) + .build()); + } + + private static void insert(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(5, bucket_acl.insert) + .withTest( + (ctx, c) -> + ctx.map( + state -> + 
state.with( + ctx.getStorage().createAcl(c.getBucketName(), state.getAcl())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(6, bucket_acl.insert) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .createAcl( + c.getBucketName(), + state.getAcl(), + BucketSourceOption.userProject(c.getUserProject()))))) + .build()); + a.add( + RpcMethodMapping.newBuilder(89, bucket_acl.insert) + .withTest( + (ctx, c) -> + ctx.map(state -> state.with(state.getBucket().createAcl(state.getAcl())))) + .build()); + } + + private static void list(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(7, bucket_acl.list) + .withTest( + (ctx, c) -> + ctx.map( + state -> state.withAcls(ctx.getStorage().listAcls(c.getBucketName())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(8, bucket_acl.list) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.withAcls( + ctx.getStorage() + .listAcls( + c.getBucketName(), + BucketSourceOption.userProject(c.getUserProject()))))) + .build()); + a.add( + RpcMethodMapping.newBuilder(90, bucket_acl.list) + .withTest( + (ctx, c) -> ctx.map(state -> state.withAcls(state.getBucket().listAcls()))) + .build()); + } + + private static void patch(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(9, bucket_acl.patch) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage().updateAcl(c.getBucketName(), state.getAcl())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(10, bucket_acl.patch) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .updateAcl( + c.getBucketName(), + state.getAcl(), + BucketSourceOption.userProject(c.getUserProject()))))) + .build()); + a.add( + RpcMethodMapping.newBuilder(91, bucket_acl.patch) + .withTest( + (ctx, c) -> + ctx.map(state -> state.with(state.getBucket().updateAcl(state.getAcl())))) + .build()); + } + } + + static final class Buckets { + private static void delete(ArrayList a) { + a.add( + 
RpcMethodMapping.newBuilder(11, buckets.delete) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = + ctx.getStorage() + .delete( + c.getBucketName(), + BucketSourceOption.userProject(c.getUserProject())); + assertTrue(success); + return state.with(success); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(92, buckets.delete) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = state.getBucket().delete(); + assertTrue(success); + return state.with(success); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(93, buckets.delete) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = + state + .getBucket() + .delete(Bucket.BucketSourceOption.metagenerationMatch()); + assertTrue(success); + return state.with(success); + })) + .build()); + } + + private static void get(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(12, buckets.get) + .withTest( + (ctx, c) -> + ctx.map(state -> state.with(ctx.getStorage().get(c.getBucketName())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(94, buckets.get) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = state.getBucket().exists(); + assertTrue(success); + return state.with(success); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(95, buckets.get) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = + state + .getBucket() + .exists(Bucket.BucketSourceOption.metagenerationMatch()); + assertTrue(success); + return state.with(success); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(96, buckets.get) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest((ctx, c) -> ctx.map(state -> state.with(state.getBucket().reload()))) + .build()); + a.add( + 
RpcMethodMapping.newBuilder(97, buckets.get) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBucket() + .reload(Bucket.BucketSourceOption.metagenerationMatch())))) + .build()); + } + + private static void insert(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(14, buckets.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.bucketInfo)) + .withTest( + (ctx, c) -> + ctx.map( + state -> state.with(ctx.getStorage().create(state.getBucketInfo())))) + .build()); + } + + private static void list(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(15, buckets.list) + .withTest((ctx, c) -> ctx.map(state -> state.consume(ctx.getStorage().list()))) + .build()); + } + + private static void patch(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(17, buckets.patch) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .update( + state.getBucket().toBuilder() + .setLabels(MODIFY) + .build())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(122, buckets.patch) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .update( + state.getBucket().toBuilder().setLabels(MODIFY).build(), + BucketTargetOption.metagenerationMatch())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(101, buckets.patch) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state.getBucket().toBuilder() + .setLabels(MODIFY) + .build() + .update(BucketTargetOption.metagenerationMatch())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(243, buckets.patch) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( 
+ state -> + state.with( + state.getBucket().toBuilder() + .setLabels(MODIFY) + .build() + .update()))) + .build()); + } + + private static void update(ArrayList a) {} + + private static void getIamPolicy(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(13, buckets.getIamPolicy) + .withTest( + (ctx, c) -> + ctx.map( + state -> state.with(ctx.getStorage().getIamPolicy(c.getBucketName())))) + .build()); + } + + private static void lockRetentionPolicy(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(16, buckets.lockRetentionPolicy) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .lockRetentionPolicy( + state.getBucket(), + BucketTargetOption.metagenerationMatch())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(100, buckets.lockRetentionPolicy) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBucket() + .lockRetentionPolicy( + BucketTargetOption.metagenerationMatch())))) + .build()); + } + + private static void setIamPolicy(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(18, buckets.setIamPolicy) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .setIamPolicy( + state.getBucket().getName(), + Policy.newBuilder() + .setVersion(3) + .setBindings( + ImmutableList.of( + Binding.newBuilder() + .setRole( + StorageRoles.legacyBucketOwner() + .toString()) + .setMembers( + ImmutableList.of( + Identity.projectOwner( + c.getProjectId()) + .getValue())) + .build())) + .build())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(240, buckets.setIamPolicy) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withSetup(ResourceSetup.defaultSetup.andThen(Rpc.bucketIamPolicy)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + 
ctx.getStorage() + .setIamPolicy( + state.getBucket().getName(), + Policy.newBuilder() + .setEtag(state.getPolicy().getEtag()) + .setVersion(3) + .setBindings( + ImmutableList.of( + Binding.newBuilder() + .setRole( + StorageRoles.legacyBucketOwner() + .toString()) + .setMembers( + ImmutableList.of( + Identity.projectOwner( + c.getProjectId()) + .getValue())) + .build())) + .build(), + BucketSourceOption.userProject(c.getProjectId()))))) + .build()); + } + + private static void testIamPermission(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(19, buckets.testIamPermissions) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.withTestIamPermissionsResults( + ctx.getStorage() + .testIamPermissions( + c.getBucketName(), + Collections.singletonList("todo: permissions"))))) + .build()); + } + } + + static final class DefaultObjectAcl { + + private static void delete(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(20, default_object_acl.delete) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = + ctx.getStorage() + .deleteDefaultAcl( + c.getBucketName(), state.getAcl().getEntity()); + assertTrue(success); + return state.with(success); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(102, default_object_acl.delete) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = + state.getBucket().deleteDefaultAcl(state.getAcl().getEntity()); + assertTrue(success); + return state.with(success); + })) + .build()); + } + + private static void get(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(21, default_object_acl.get) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .getDefaultAcl( + c.getBucketName(), state.getAcl().getEntity())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(103, default_object_acl.get) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state.getBucket().getDefaultAcl(state.getAcl().getEntity())))) + .build()); + } + + private static void 
insert(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(22, default_object_acl.insert) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .createDefaultAcl(c.getBucketName(), state.getAcl())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(104, default_object_acl.insert) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with(state.getBucket().createDefaultAcl(state.getAcl())))) + .build()); + } + + private static void list(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(23, default_object_acl.list) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.withAcls( + ctx.getStorage().listDefaultAcls(c.getBucketName())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(105, default_object_acl.list) + .withTest( + (ctx, c) -> + ctx.map(state -> state.withAcls(state.getBucket().listDefaultAcls()))) + .build()); + } + + private static void patch(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(24, default_object_acl.patch) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .updateDefaultAcl(c.getBucketName(), state.getAcl())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(106, default_object_acl.patch) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with(state.getBucket().updateDefaultAcl(state.getAcl())))) + .build()); + } + + private static void update(ArrayList a) {} + } + + static final class HmacKey { + + private static void delete(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(26, hmacKey.delete) + .withApplicable(TestRetryConformance.transportIs(Transport.HTTP)) + .withSetup( + defaultSetup.andThen( + (ctx, c) -> + ctx.map( + state -> { + Storage storage = ctx.getStorage(); + HmacKeyMetadata metadata = state.getHmacKey().getMetadata(); + // for delete we're only using the metadata, clear the key that + // was populated in defaultSetup and specify the updated metadata + return state + .withHmacKey(null) + .with( + storage.updateHmacKeyState( + 
metadata, HmacKeyState.INACTIVE)); + }))) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + ctx.getStorage().deleteHmacKey(state.getHmacKeyMetadata()); + // clear the metadata from the state now that we've deleted it + return state.with((HmacKeyMetadata) null); + })) + .build()); + } + + private static void get(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(27, hmacKey.get) + .withApplicable(TestRetryConformance.transportIs(Transport.HTTP)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .getHmacKey( + state.getHmacKey().getMetadata().getAccessId())))) + .build()); + } + + private static void list(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(28, hmacKey.list) + .withApplicable(TestRetryConformance.transportIs(Transport.HTTP)) + .withTest( + (ctx, c) -> ctx.map(state -> state.consume(ctx.getStorage().listHmacKeys()))) + .build()); + } + + private static void update(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(29, hmacKey.update) + .withApplicable( + not(TestRetryConformance::isPreconditionsProvided) + .and(TestRetryConformance.transportIs(Transport.HTTP))) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .updateHmacKeyState( + state.getHmacKey().getMetadata(), + HmacKeyState.ACTIVE)))) + .build()); + } + + private static void create(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(25, hmacKey.create) + .withApplicable(TestRetryConformance.transportIs(Transport.HTTP)) + .withSetup(defaultSetup.andThen(serviceAccount)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.withHmacKey( + ctx.getStorage().createHmacKey(state.getServiceAccount())))) + .build()); + } + } + + static final class Notification { + + private static void delete(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(248, notifications.delete) + .withSetup(notificationSetup) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = + ctx.getStorage() + .deleteNotification( + 
state.getBucket().getName(), + state.getNotification().getNotificationId()); + assertTrue(success); + return state.with(success); + })) + .build()); + } + + private static void get(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(246, notifications.get) + .withSetup(notificationSetup) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + com.google.cloud.storage.Notification notification = + ctx.getStorage() + .getNotification( + state.getBucket().getName(), + state.getNotification().getNotificationId()); + return state.with(notification); + })) + .build()); + } + + private static void insert(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(247, notifications.insert) + .withSetup(pubsubTopicSetup) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + PayloadFormat format = PayloadFormat.JSON_API_V1; + Map attributes = ImmutableMap.of("label1", "value1"); + NotificationInfo info = + NotificationInfo.newBuilder(state.getTopicName().toString()) + .setPayloadFormat(format) + .setCustomAttributes(attributes) + .build(); + com.google.cloud.storage.Notification notification = + ctx.getStorage() + .createNotification(state.getBucket().getName(), info); + return state.with(notification); + })) + .build()); + } + + private static void list(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(249, notifications.list) + .withSetup(pubsubTopicSetup) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + List notifications = + ctx.getStorage().listNotifications(state.getBucket().getName()); + return state.with(notifications); + })) + .build()); + } + } + + static final class ObjectAcl { + + private static void delete(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(30, object_acl.delete) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = + ctx.getStorage() + .deleteAcl( + state.getBlob().getBlobId(), state.getAcl().getEntity()); + assertTrue(success); + return state.with(success); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(62, 
object_acl.delete) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = + state.getBlob().deleteAcl(state.getAcl().getEntity()); + assertTrue(success); + return state.with(success); + })) + .build()); + } + + private static void get(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(31, object_acl.get) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .getAcl( + state.getBlob().getBlobId(), + state.getAcl().getEntity())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(63, object_acl.get) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with(state.getBlob().getAcl(state.getAcl().getEntity())))) + .build()); + } + + private static void insert(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(32, object_acl.insert) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .createAcl(state.getBlob().getBlobId(), state.getAcl())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(64, object_acl.insert) + .withTest( + (ctx, c) -> + ctx.map(state -> state.with(state.getBlob().createAcl(state.getAcl())))) + .build()); + } + + private static void list(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(33, object_acl.list) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.withAcls( + ctx.getStorage().listAcls(state.getBlob().getBlobId())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(65, object_acl.list) + .withTest((ctx, c) -> ctx.map(state -> state.withAcls(state.getBlob().listAcls()))) + .build()); + } + + private static void patch(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(34, object_acl.patch) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .updateAcl(state.getBlob().getBlobId(), state.getAcl())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(66, object_acl.patch) + .withTest( + (ctx, c) -> + ctx.map(state -> state.with(state.getBlob().updateAcl(state.getAcl())))) + .build()); + } + + private static 
void update(ArrayList a) {} + } + + static final class Objects { + + private static void delete(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(36, objects.delete) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + BlobId id = state.getBlob().getBlobId(); + BlobId idWithoutGeneration = BlobId.of(id.getBucket(), id.getName()); + boolean success = ctx.getStorage().delete(idWithoutGeneration); + assertTrue(success); + return state.with(success); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(37, objects.delete) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = + ctx.getStorage() + .delete( + state.getBlob().getBlobId(), + BlobSourceOption.generationMatch()); + assertTrue(success); + return state.with(success); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(38, objects.delete) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + boolean success = + ctx.getStorage() + .delete( + state.getBlob().getBlobId().getBucket(), + state.getBlob().getBlobId().getName(), + BlobSourceOption.generationMatch( + state.getBlob().getGeneration())); + assertTrue(success); + return state.with(success); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(67, objects.delete) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.peek( + state -> { + Blob blob = state.getBlob(); + Blob blobWithoutGeneration = + blob.toBuilder() + .setBlobId(BlobId.of(blob.getBucket(), blob.getName())) + .build(); + blobWithoutGeneration.delete(); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(68, objects.delete) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.peek( + state -> + state.getBlob().delete(Blob.BlobSourceOption.generationMatch()))) + 
.build()); + } + + private static void get(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(39, objects.get) + .withApplicable( + and(not(TestRetryConformance::isPreconditionsProvided), not(groupIsDownload))) + .withSetup(defaultSetup.andThen(Local.blobInfoWithoutGeneration)) + .withTest( + (ctx, c) -> + ctx.map( + state -> state.with(ctx.getStorage().get(state.getBlob().getBlobId())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(239, objects.get) + .withApplicable( + and(TestRetryConformance::isPreconditionsProvided, not(groupIsDownload))) + .withTest( + (ctx, c) -> + ctx.peek(state -> ctx.getStorage().get(state.getBlob().getBlobId()))) + .build()); + a.add( + RpcMethodMapping.newBuilder(40, objects.get) + .withApplicable(not(groupIsDownload)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .get( + state.getBlob().getBlobId(), + BlobGetOption.metagenerationMatch( + state.getBlob().getMetageneration()))))) + .build()); + a.add( + RpcMethodMapping.newBuilder(41, objects.get) + .withApplicable(not(groupIsDownload)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .get( + state.getBlob().getBlobId().getBucket(), + state.getBlob().getBlobId().getName(), + BlobGetOption.metagenerationMatch( + state.getBlob().getMetageneration()))))) + .build()); + a.add( + RpcMethodMapping.newBuilder(42, objects.get) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .readAllBytes( + state.getBlob().getBlobId(), + BlobSourceOption.metagenerationMatch( + state.getBlob().getMetageneration()))))) + .build()); + a.add( + RpcMethodMapping.newBuilder(43, objects.get) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .readAllBytes( + state.getBlob().getBlobId().getBucket(), + state.getBlob().getBlobId().getName(), + BlobSourceOption.metagenerationMatch( + state.getBlob().getMetageneration()))))) + .build()); + a.add( + 
RpcMethodMapping.newBuilder(44, objects.get) + .withTest( + (ctx, c) -> + ctx.peek( + state -> { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (ReadChannel reader = + ctx.getStorage().reader(ctx.getState().getBlob().getBlobId())) { + WritableByteChannel write = Channels.newChannel(baos); + ByteStreams.copy(reader, write); + } catch (IOException e) { + if (e.getCause() instanceof BaseServiceException) { + throw e.getCause(); + } + } + assertThat(xxd(baos.toByteArray())) + .isEqualTo(xxd(c.getHelloWorldUtf8Bytes())); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(45, objects.get) + .withTest( + (ctx, c) -> + ctx.peek( + state -> { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (ReadChannel reader = + ctx.getStorage() + .reader( + ctx.getState().getBlob().getBlobId().getBucket(), + ctx.getState().getBlob().getBlobId().getName())) { + WritableByteChannel write = Channels.newChannel(baos); + ByteStreams.copy(reader, write); + } catch (IOException e) { + if (e.getCause() instanceof BaseServiceException) { + throw e.getCause(); + } + } + + assertThat(xxd(baos.toByteArray())) + .isEqualTo(xxd(c.getHelloWorldUtf8Bytes())); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(250, objects.get) + .withTest( + (ctx, c) -> + ctx.peek( + state -> { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (ReadChannel reader = + ctx.getStorage() + .reader( + ctx.getState().getBlob().getBlobId(), + BlobSourceOption.shouldReturnRawInputStream(false))) { + WritableByteChannel write = Channels.newChannel(baos); + ByteStreams.copy(reader, write); + } catch (IOException e) { + if (e.getCause() instanceof BaseServiceException) { + throw e.getCause(); + } + } + + assertThat(xxd(baos.toByteArray())) + .isEqualTo(xxd(c.getHelloWorldUtf8Bytes())); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(60, objects.get) + .withApplicable( + and(not(TestRetryConformance::isPreconditionsProvided), not(groupIsDownload))) + 
.withTest((ctx, c) -> ctx.peek(state -> assertTrue(state.getBlob().exists()))) + .build()); + a.add( + RpcMethodMapping.newBuilder(61, objects.get) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.peek( + state -> + assertTrue( + state + .getBlob() + .exists(Blob.BlobSourceOption.generationMatch())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(69, objects.get) + .withTest( + (ctx, c) -> + ctx.peek( + state -> + withTempFile( + c.getMethod().getName(), + ".txt", + (tmpOutFile) -> { + state.getBlob().downloadTo(tmpOutFile); + byte[] downloadedBytes = Files.readAllBytes(tmpOutFile); + assertThat(downloadedBytes) + .isEqualTo(c.getHelloWorldUtf8Bytes()); + }))) + .build()); + a.add( + RpcMethodMapping.newBuilder(70, objects.get) + .withTest( + (ctx, c) -> + ctx.peek( + state -> + withTempFile( + c.getMethod().getName(), + ".txt", + (tmpOutFile) -> { + state + .getBlob() + .downloadTo( + tmpOutFile, Blob.BlobSourceOption.generationMatch()); + byte[] downloadedBytes = Files.readAllBytes(tmpOutFile); + assertThat(downloadedBytes) + .isEqualTo(c.getHelloWorldUtf8Bytes()); + }))) + .build()); + a.add( + RpcMethodMapping.newBuilder(71, objects.get) + .withTest( + (ctx, c) -> + ctx.peek( + state -> { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + state.getBlob().downloadTo(baos); + byte[] downloadedBytes = baos.toByteArray(); + assertThat(downloadedBytes).isEqualTo(c.getHelloWorldUtf8Bytes()); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(72, objects.get) + .withTest( + (ctx, c) -> + ctx.peek( + state -> { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + state + .getBlob() + .downloadTo(baos, Blob.BlobSourceOption.generationMatch()); + byte[] downloadedBytes = baos.toByteArray(); + assertThat(downloadedBytes).isEqualTo(c.getHelloWorldUtf8Bytes()); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(73, objects.get) + .withTest( + (ctx, c) -> + ctx.peek( + state -> { + 
byte[] downloadedBytes = state.getBlob().getContent(); + assertThat(downloadedBytes).isEqualTo(c.getHelloWorldUtf8Bytes()); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(74, objects.get) + .withTest( + (ctx, c) -> + ctx.peek( + state -> { + byte[] downloadedBytes = + state + .getBlob() + .getContent(Blob.BlobSourceOption.metagenerationMatch()); + assertThat(downloadedBytes).isEqualTo(c.getHelloWorldUtf8Bytes()); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(75, objects.get) + .withApplicable(not(groupIsDownload)) + .withTest((ctx, c) -> ctx.peek(state -> state.getBlob().reload())) + .build()); + a.add( + RpcMethodMapping.newBuilder(76, objects.get) + .withApplicable(not(groupIsDownload)) + .withTest( + (ctx, c) -> + ctx.peek( + state -> + state + .getBlob() + .reload(Blob.BlobSourceOption.metagenerationMatch()))) + .build()); + a.add( + RpcMethodMapping.newBuilder(107, objects.get) + .withApplicable( + and(not(TestRetryConformance::isPreconditionsProvided), not(groupIsDownload))) + .withTest( + (ctx, c) -> + ctx.map(state -> state.with(state.getBucket().get(c.getObjectName())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(244, objects.get) + .withTest( + (ctx, c) -> + ctx.peek( + state -> + withTempFile( + c.getMethod().getName(), + ".txt", + (tmpOutFile) -> { + ctx.getStorage().downloadTo(state.getBlobId(), tmpOutFile); + byte[] downloadedBytes = Files.readAllBytes(tmpOutFile); + assertThat(downloadedBytes) + .isEqualTo(c.getHelloWorldUtf8Bytes()); + }))) + .build()); + a.add( + RpcMethodMapping.newBuilder(245, objects.get) + .withTest( + (ctx, c) -> + ctx.peek( + state -> + withTempFile( + c.getMethod().getName(), + ".txt", + (tmpOutFile) -> { + FileOutputStream fos = + new FileOutputStream(tmpOutFile.toFile()); + ctx.getStorage().downloadTo(state.getBlobId(), fos); + byte[] downloadedBytes = Files.readAllBytes(tmpOutFile); + assertThat(downloadedBytes) + .isEqualTo(c.getHelloWorldUtf8Bytes()); + }))) + .build()); + } + + private 
static void insert(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(46, objects.insert) + .withApplicable( + and(TestRetryConformance::isPreconditionsProvided, not(groupIsResumableUpload))) + .withSetup(defaultSetup.andThen(Local.blobInfoWithGenerationZero)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .create( + ctx.getState().getBlobInfo(), + c.getHelloWorldUtf8Bytes(), + BlobTargetOption.generationMatch())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(47, objects.insert) + .withApplicable( + and(TestRetryConformance::isPreconditionsProvided, not(groupIsResumableUpload))) + .withSetup(defaultSetup.andThen(Local.blobInfoWithGenerationZero)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .create( + ctx.getState().getBlobInfo(), + c.getHelloWorldUtf8Bytes(), + 0, + c.getHelloWorldUtf8Bytes().length / 2, + BlobTargetOption.generationMatch())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(48, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobInfoWithGenerationZero)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .create( + ctx.getState().getBlobInfo(), + new ByteArrayInputStream(c.getHelloWorldUtf8Bytes()), + BlobWriteOption.generationMatch())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(49, objects.insert) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withSetup(defaultSetup.andThen(Local.blobInfoWithGenerationZero)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .createFrom( + ctx.getState().getBlobInfo(), + new ByteArrayInputStream(c.getHelloWorldUtf8Bytes()), + BlobWriteOption.generationMatch())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(50, objects.insert) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + 
.withSetup(defaultSetup.andThen(Local.blobInfoWithGenerationZero)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .createFrom( + ctx.getState().getBlobInfo(), + c.getHelloWorldFilePath(), + BlobWriteOption.generationMatch())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(51, objects.insert) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withSetup(defaultSetup.andThen(Local.blobInfoWithGenerationZero)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .createFrom( + ctx.getState().getBlobInfo(), + c.getHelloWorldFilePath(), + _2MiB, + BlobWriteOption.generationMatch())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(52, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobInfoWithoutGeneration)) + .withTest( + (ctx, c) -> + ctx.peek( + state -> { + try (WriteChannel writer = + ctx.getStorage().writer(ctx.getState().getBlobInfo())) { + writer.write(ByteBuffer.wrap(c.getHelloWorldUtf8Bytes())); + } + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(53, objects.insert) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withSetup(defaultSetup.andThen(Local.blobInfoWithGenerationZero)) + .withTest( + (ctx, c) -> + ctx.peek( + state -> { + try (WriteChannel writer = + ctx.getStorage() + .writer( + ctx.getState().getBlobInfo(), + BlobWriteOption.generationMatch())) { + writer.write(ByteBuffer.wrap(c.getHelloWorldUtf8Bytes())); + } + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(54, objects.insert) + .withApplicable( + not(TestRetryConformance::isPreconditionsProvided) + .and(trc -> trc.getTransport() == Transport.HTTP)) + .withSetup(defaultSetup.andThen(Local.blobInfoWithoutGeneration)) + .withTest( + (ctx, c) -> + ctx.peek( + state -> { + Storage storage = ctx.getStorage(); + URL signedUrl = + storage.signUrl( + state.getBlobInfo(), + 1, + TimeUnit.HOURS, + 
SignUrlOption.httpMethod(HttpMethod.POST), + // TODO(#1094): Instead of using bucketBoundHostname fix + // Signer to get BaseUri from StorageOptions + // NOTE(frankyn/benwhitehead): testbench expects HTTP scheme + // and we are using a hack to get around the lack of scheme + // manipulation by using bucketBoundHostname to select HTTP + // scheme instead. Bucket name is not present explicitly in + // bucketBoundHostname because it's expected to be referred to + // by the Bucket Bound Hostname so we must append it, being + // the hack, to get around the limitation. + SignUrlOption.withBucketBoundHostname( + c.getHost() + + "/" + + c.getBucketName() + + "/" + + c.getObjectName(), + UriScheme.HTTP), + SignUrlOption.withExtHeaders( + ImmutableMap.of("x-goog-resumable", "start")), + SignUrlOption.signWith(c.getServiceAccountSigner()), + SignUrlOption.withV4Signature()); + try (WriteChannel writer = storage.writer(signedUrl)) { + writer.write(ByteBuffer.wrap(c.getHelloWorldUtf8Bytes())); + } + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(77, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup( + defaultSetup + .andThen(Local.blobInfoWithoutGeneration) + .andThen(Rpc.createEmptyBlob)) + .withTest( + (ctx, c) -> + ctx.peek( + state -> { + try (WriteChannel writer = state.getBlob().writer()) { + writer.write(ByteBuffer.wrap(c.getHelloWorldUtf8Bytes())); + } + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(78, objects.insert) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withSetup( + defaultSetup + .andThen(Local.blobInfoWithoutGeneration) + .andThen(ResourceSetup.object)) + .withTest( + (ctx, c) -> + ctx.peek( + state -> { + try (WriteChannel writer = + state.getBlob().writer(BlobWriteOption.generationMatch())) { + writer.write(ByteBuffer.wrap(c.getHelloWorldUtf8Bytes())); + } + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(108, objects.insert) + 
.withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBucket() + .create(c.getObjectName(), c.getHelloWorldUtf8Bytes())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(109, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBucket() + .create( + c.getObjectName(), + c.getHelloWorldUtf8Bytes(), + "text/plain);charset=utf-8")))) + .build()); + a.add( + RpcMethodMapping.newBuilder(110, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBucket() + .create( + c.getObjectName(), + new ByteArrayInputStream(c.getHelloWorldUtf8Bytes()))))) + .build()); + a.add( + RpcMethodMapping.newBuilder(111, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBucket() + .create( + c.getObjectName(), + new ByteArrayInputStream(c.getHelloWorldUtf8Bytes()), + "text/plain);charset=utf-8")))) + .build()); + a.add( + RpcMethodMapping.newBuilder(112, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobInfoWithoutGeneration)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .create( + ctx.getState().getBlobInfo(), + c.getHelloWorldUtf8Bytes())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(113, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobInfoWithoutGeneration)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .create( + ctx.getState().getBlobInfo(), + c.getHelloWorldUtf8Bytes(), + 0, + c.getHelloWorldUtf8Bytes().length / 2)))) + 
.build()); + a.add( + RpcMethodMapping.newBuilder(114, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobInfoWithoutGeneration)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .create( + ctx.getState().getBlobInfo(), + new ByteArrayInputStream(c.getHelloWorldUtf8Bytes()))))) + .build()); + a.add( + RpcMethodMapping.newBuilder(115, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobInfoWithoutGeneration)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .createFrom( + ctx.getState().getBlobInfo(), + new ByteArrayInputStream(c.getHelloWorldUtf8Bytes()))))) + .build()); + a.add( + RpcMethodMapping.newBuilder(116, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobInfoWithoutGeneration)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .createFrom( + ctx.getState().getBlobInfo(), + c.getHelloWorldFilePath())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(117, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobInfoWithoutGeneration)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .createFrom( + ctx.getState().getBlobInfo(), + c.getHelloWorldFilePath(), + _2MiB)))) + .build()); + a.add( + RpcMethodMapping.newBuilder(118, objects.insert) + .withApplicable( + and(TestRetryConformance::isPreconditionsProvided, not(groupIsResumableUpload))) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBucket() + .create( + c.getObjectName(), + c.getHelloWorldUtf8Bytes(), + Bucket.BlobTargetOption.doesNotExist())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(119, objects.insert) + 
.withApplicable( + and(TestRetryConformance::isPreconditionsProvided, not(groupIsResumableUpload))) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBucket() + .create( + c.getObjectName(), + c.getHelloWorldUtf8Bytes(), + "text/plain;charset=utf-8", + Bucket.BlobTargetOption.doesNotExist())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(120, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBucket() + .create( + c.getObjectName(), + new ByteArrayInputStream(c.getHelloWorldUtf8Bytes()), + Bucket.BlobWriteOption.doesNotExist())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(121, objects.insert) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBucket() + .create( + c.getObjectName(), + new ByteArrayInputStream(c.getHelloWorldUtf8Bytes()), + "text/plain);charset=utf-8", + Bucket.BlobWriteOption.doesNotExist())))) + .build()); + } + + private static void list(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(55, objects.list) + .withTest( + (ctx, c) -> + ctx.map(state -> state.consume(ctx.getStorage().list(c.getBucketName())))) + .build()); + } + + private static void patch(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(56, objects.patch) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .update( + ctx.getState().getBlob().toBuilder() + .setMetadata(MODIFY) + .build())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(57, objects.patch) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + ctx.getStorage() + .update( + ctx.getState().getBlob().toBuilder() + .setMetadata(MODIFY) + .build(), + 
BlobTargetOption.metagenerationMatch())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(79, objects.patch) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.peek( + state -> + state.getBlob().toBuilder().setMetadata(MODIFY).build().update())) + .build()); + a.add( + RpcMethodMapping.newBuilder(80, objects.patch) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state.getBlob().toBuilder() + .setMetadata(MODIFY) + .build() + .update(BlobTargetOption.metagenerationMatch())))) + .build()); + } + + private static void update(ArrayList a) {} + + private static void compose(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(35, objects.compose) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withSetup(defaultSetup.andThen(Local.composeRequest)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with(ctx.getStorage().compose(state.getComposeRequest())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(241, objects.compose) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.composeRequest)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with(ctx.getStorage().compose(state.getComposeRequest())))) + .build()); + } + + private static void rewrite(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(58, objects.rewrite) + .withApplicable(TestRetryConformance::isPreconditionsProvided) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + CopyRequest copyRequest = + CopyRequest.newBuilder() + .setSource(c.getBucketName(), c.getObjectName()) + .setTarget( + BlobId.of(c.getBucketName(), "destination-blob"), + BlobTargetOption.doesNotExist()) + .build(); + return state.with(ctx.getStorage().copy(copyRequest)); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(242, objects.rewrite) + 
.withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withTest( + (ctx, c) -> + ctx.map( + state -> { + CopyRequest copyRequest = + CopyRequest.newBuilder() + .setSource(c.getBucketName(), c.getObjectName()) + .setTarget(BlobId.of(c.getBucketName(), "destination-blob")) + .build(); + return state.with(ctx.getStorage().copy(copyRequest)); + })) + .build()); + a.add( + RpcMethodMapping.newBuilder(81, objects.rewrite) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobCopy)) + .withTest( + (ctx, c) -> + ctx.map(state -> state.with(state.getBlob().copyTo(state.getCopyDest())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(82, objects.rewrite) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobCopy)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBlob() + .copyTo( + state.getCopyDest(), + Blob.BlobSourceOption.metagenerationMatch())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(83, objects.rewrite) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobCopy)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state.getBlob().copyTo(state.getCopyDest().getBucket())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(84, objects.rewrite) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobCopy)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBlob() + .copyTo( + state.getCopyDest().getBucket(), + Blob.BlobSourceOption.metagenerationMatch())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(85, objects.rewrite) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobCopy)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBlob() + 
.copyTo( + state.getCopyDest().getBucket(), + state.getCopyDest().getName())))) + .build()); + a.add( + RpcMethodMapping.newBuilder(86, objects.rewrite) + .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withSetup(defaultSetup.andThen(Local.blobCopy)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with( + state + .getBlob() + .copyTo( + state.getCopyDest().getBucket(), + state.getCopyDest().getName(), + Blob.BlobSourceOption.metagenerationMatch())))) + .build()); + } + + private static void copy(ArrayList a) {} + } + + static final class ServiceAccount { + + private static void get(ArrayList a) { + a.add( + RpcMethodMapping.newBuilder(59, serviceaccount.get) + .withApplicable(TestRetryConformance.transportIs(Transport.HTTP)) + .withTest( + (ctx, c) -> + ctx.map( + state -> + state.with(ctx.getStorage().getServiceAccount(c.getUserProject())))) + .build()); + } + + private static void put(ArrayList a) {} + } + } + + private static Predicate methodGroupIs(String s) { + return (c) -> s.equals(c.getMethod().getGroup()); + } + + private static CtxFunction temporarilySkipMapping( + String message, java.util.function.Predicate p) { + return (ctx, trc) -> { + assumeFalse(message, p.test(trc)); + return ctx; + }; + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/State.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/State.java new file mode 100644 index 000000000000..bf7fcfa9ca03 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/State.java @@ -0,0 +1,396 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.conformance.retry; + +import static java.util.Objects.requireNonNull; + +import com.google.api.gax.paging.Page; +import com.google.cloud.Policy; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.CopyWriter; +import com.google.cloud.storage.HmacKey; +import com.google.cloud.storage.HmacKey.HmacKeyMetadata; +import com.google.cloud.storage.Notification; +import com.google.cloud.storage.ServiceAccount; +import com.google.cloud.storage.Storage.ComposeRequest; +import com.google.common.collect.ImmutableMap; +import com.google.errorprone.annotations.Immutable; +import com.google.pubsub.v1.TopicName; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +/** + * A specialized wrapper around an immutable map allowing for declaration of named get/has/with + * methods without the need for individual fields. + * + *
<p>
Every mutation returns a copy with the mutation result. + * + *
<p>
Over the course of executing an individual test for a specific mapping some fields will be + * updated. + * + *
<p>
This approach was taken after multiple attempts to create more type safe alternatives which + * turned into far too much duplication given the possible permutations of state for various + * mappings. + */ +@Immutable +final class State { + + private static final State EMPTY = new State(); + private static final Key KEY_ACL = new Key<>("acl"); + private static final Key KEY_BLOB = new Key<>("blob"); + private static final Key KEY_BLOB_ID = new Key<>("blobId"); + private static final Key KEY_COPY_DEST = new Key<>("copyDest"); + private static final Key KEY_BLOB_INFO = new Key<>("blobInfo"); + private static final Key KEY_BOOL = new Key<>("bool"); + private static final Key KEY_BUCKET = new Key<>("bucket"); + private static final Key KEY_BUCKET_INFO = new Key<>("bucketInfo"); + private static final Key KEY_COPY = new Key<>("copy"); + private static final Key KEY_HMAC_KEY = new Key<>("hmacKey"); + private static final Key KEY_HMAC_KEY_METADATA = new Key<>("hmacKeyMetadata"); + private static final Key KEY_POLICY = new Key<>("policy"); + private static final Key KEY_SERVICE_ACCOUNT = new Key<>("serviceAccount"); + private static final Key> KEY_LIST_OBJECTS = new Key<>("list"); + private static final Key> KEY_TEST_IAM_PERMISSIONS_RESULTS = + new Key<>("testIamPermissionsResults"); + private static final Key> KEY_ACLS = new Key<>("acls"); + private static final Key KEY_BYTES = new Key<>("bytes"); + private static final Key KEY_COMPOSE_REQUEST = new Key<>("composeRequest"); + private static final Key KEY_PUBSUB_TOPIC = new Key<>("topicName"); + private static final Key KEY_NOTIFICATION = new Key<>("notification"); + private static final Key> KEY_LIST_NOTIFICATION = + new Key<>("lise"); + + private final ImmutableMap, Object> data; + + public State() { + this(ImmutableMap.of()); + } + + public State(ImmutableMap, Object> data) { + this.data = data; + } + + static State empty() { + return EMPTY; + } + + public boolean hasAcl() { + return hasValue(KEY_ACL); + } + + public Acl 
getAcl() { + return getValue(KEY_ACL); + } + + public State with(Acl acl) { + return newStateWith(KEY_ACL, acl); + } + + public boolean hasBlob() { + return hasValue(KEY_BLOB); + } + + public Blob getBlob() { + return getValue(KEY_BLOB); + } + + public State with(Blob blob) { + return newStateWith(KEY_BLOB, blob); + } + + public boolean hasBlobId() { + return hasValue(KEY_BLOB_ID); + } + + public BlobId getBlobId() { + return getValue(KEY_BLOB_ID); + } + + public State with(BlobId blobId) { + return newStateWith(KEY_BLOB_ID, blobId); + } + + public boolean hasCopyDest() { + return hasValue(KEY_COPY_DEST); + } + + public BlobId getCopyDest() { + return getValue(KEY_COPY_DEST); + } + + public State withCopyDest(BlobId copyDest) { + return newStateWith(KEY_COPY_DEST, copyDest); + } + + public boolean hasBlobInfo() { + return hasValue(KEY_BLOB_INFO); + } + + public BlobInfo getBlobInfo() { + return getValue(KEY_BLOB_INFO); + } + + public State with(BlobInfo blobInfo) { + return newStateWith(KEY_BLOB_INFO, blobInfo); + } + + public boolean hasBool() { + return hasValue(KEY_BOOL); + } + + public Boolean getBool() { + return getValue(KEY_BOOL); + } + + public State with(Boolean bool) { + return newStateWith(KEY_BOOL, bool); + } + + public boolean hasBucket() { + return hasValue(KEY_BUCKET); + } + + public Bucket getBucket() { + return getValue(KEY_BUCKET); + } + + public State with(Bucket bucket) { + return newStateWith(KEY_BUCKET, bucket); + } + + public boolean hasBucketInfo() { + return hasValue(KEY_BUCKET_INFO); + } + + public BucketInfo getBucketInfo() { + return getValue(KEY_BUCKET_INFO); + } + + public State with(BucketInfo bucketInfo) { + return newStateWith(KEY_BUCKET_INFO, bucketInfo); + } + + public boolean hasCopy() { + return hasValue(KEY_COPY); + } + + public CopyWriter getCopy() { + return getValue(KEY_COPY); + } + + public State with(CopyWriter copy) { + return newStateWith(KEY_COPY, copy); + } + + public boolean hasHmacKey() { + return 
hasValue(KEY_HMAC_KEY); + } + + public HmacKey getHmacKey() { + return getValue(KEY_HMAC_KEY); + } + + public State withHmacKey(HmacKey hmacKey) { + return newStateWith(KEY_HMAC_KEY, hmacKey); + } + + public boolean hasHmacKeyMetadata() { + return hasValue(KEY_HMAC_KEY_METADATA); + } + + public HmacKeyMetadata getHmacKeyMetadata() { + return getValue(KEY_HMAC_KEY_METADATA); + } + + public State with(HmacKeyMetadata hmacKeyMetadata) { + return newStateWith(KEY_HMAC_KEY_METADATA, hmacKeyMetadata); + } + + public boolean hasPolicy() { + return hasValue(KEY_POLICY); + } + + public Policy getPolicy() { + return getValue(KEY_POLICY); + } + + public State with(Policy policy) { + return newStateWith(KEY_POLICY, policy); + } + + public boolean hasServiceAccount() { + return hasValue(KEY_SERVICE_ACCOUNT); + } + + public ServiceAccount getServiceAccount() { + return getValue(KEY_SERVICE_ACCOUNT); + } + + public State with(ServiceAccount serviceAccount) { + return newStateWith(KEY_SERVICE_ACCOUNT, serviceAccount); + } + + public boolean hasBytes() { + return hasValue(KEY_BYTES); + } + + public byte[] getBytes() { + return getValue(KEY_BYTES); + } + + public State with(byte[] bytes) { + return newStateWith(KEY_BYTES, bytes); + } + + public boolean hasTestIamPermissionsResults() { + return hasValue(KEY_TEST_IAM_PERMISSIONS_RESULTS); + } + + public List getTestIamPermissionsResults() { + return getValue(KEY_TEST_IAM_PERMISSIONS_RESULTS); + } + + public State withTestIamPermissionsResults(List testIamPermissionsResults) { + return newStateWith(KEY_TEST_IAM_PERMISSIONS_RESULTS, testIamPermissionsResults); + } + + public boolean hasAcls() { + return hasValue(KEY_ACLS); + } + + public List getAcls() { + return getValue(KEY_ACLS); + } + + public State withAcls(List acls) { + return newStateWith(KEY_ACLS, acls); + } + + public State consume(Page page) { + List collect = + StreamSupport.stream(page.iterateAll().spliterator(), false).collect(Collectors.toList()); + return 
newStateWith(KEY_LIST_OBJECTS, collect); + } + + public State with(ComposeRequest composeRequest) { + return newStateWith(KEY_COMPOSE_REQUEST, composeRequest); + } + + public ComposeRequest getComposeRequest() { + return getValue(KEY_COMPOSE_REQUEST); + } + + public boolean hasComposeRequest() { + return hasValue(KEY_COMPOSE_REQUEST); + } + + public boolean hasTopicName() { + return hasValue(KEY_PUBSUB_TOPIC); + } + + public TopicName getTopicName() { + return getValue(KEY_PUBSUB_TOPIC); + } + + public State with(TopicName topic) { + return newStateWith(KEY_PUBSUB_TOPIC, topic); + } + + public boolean hasNotification() { + return hasValue(KEY_NOTIFICATION); + } + + public Notification getNotification() { + return getValue(KEY_NOTIFICATION); + } + + public State with(Notification notification) { + return newStateWith(KEY_NOTIFICATION, notification); + } + + public boolean hasNotifications() { + return hasValue(KEY_LIST_NOTIFICATION); + } + + public List getNotifications() { + return getValue(KEY_LIST_NOTIFICATION); + } + + public State with(List notifications) { + return newStateWith(KEY_LIST_NOTIFICATION, notifications); + } + + private T getValue(Key key) { + Object o = data.get(key); + requireNonNull(o, () -> String.format(Locale.US, "%s was not found in state", key.name)); + return key.cast(o); + } + + private boolean hasValue(Key key) { + return data.containsKey(key); + } + + private State newStateWith(Key key, T t) { + Map, Object> tmp = new HashMap<>(data); + if (t != null) { + tmp.put(key, t); + } else { + tmp.remove(key); + } + return new State(ImmutableMap.copyOf(tmp)); + } + + private static final class Key { + + private final String name; + + public Key(String name) { + this.name = requireNonNull(name, "name must be non null"); + } + + T cast(Object t) { + return (T) t; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Key)) { + return false; + } + Key key = (Key) o; + return 
name.equals(key.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/TestRetryConformance.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/TestRetryConformance.java new file mode 100644 index 000000000000..7acfdee15eb5 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/TestRetryConformance.java @@ -0,0 +1,333 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.conformance.retry; + +import static com.google.common.truth.Truth.assertThat; +import static java.util.Objects.requireNonNull; +import static org.junit.Assert.assertNotNull; + +import com.google.auth.ServiceAccountSigner; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.conformance.storage.v1.InstructionList; +import com.google.cloud.conformance.storage.v1.Method; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.common.base.Joiner; +import com.google.common.io.ByteStreams; +import com.google.errorprone.annotations.Immutable; +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.util.List; +import java.util.Locale; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +/** + * An individual resolved test case correlating config from {@link + * com.google.cloud.conformance.storage.v1.RetryTest}s: the specific rpc method being tested, the + * instructions and the corresponding mapping id. + * + *
<p>
Generates some unique values for use in parallel test execution such as bucket names, object + * names, etc. + */ +@Immutable +final class TestRetryConformance { + static final String BASE_ID; + + static { + Instant now = Clock.systemUTC().instant(); + DateTimeFormatter formatter = + DateTimeFormatter.ISO_LOCAL_TIME.withZone(ZoneId.from(ZoneOffset.UTC)); + BASE_ID = formatter.format(now).replaceAll("[:]", "").substring(0, 6); + } + + private static final int _512KiB = 512 * 1024; + private static final int _8MiB = 8 * 1024 * 1024; + + private final Transport transport; + private final String projectId; + private final String bucketName; + private final String bucketName2; + private final String userProject; + private final String objectName; + private final String topicName; + + private final Supplier lazyHelloWorldUtf8Bytes; + private final Supplier helloWorldFilePath; + private final ServiceAccountCredentials serviceAccountCredentials; + + private final String host; + + private final int scenarioId; + private final Method method; + private final InstructionList instruction; + private final boolean preconditionsProvided; + private final boolean expectSuccess; + private final int mappingId; + + TestRetryConformance( + Transport transport, + String projectId, + String host, + int scenarioId, + Method method, + InstructionList instruction, + boolean preconditionsProvided, + boolean expectSuccess) { + this( + transport, + projectId, + host, + scenarioId, + method, + instruction, + preconditionsProvided, + expectSuccess, + 0); + } + + TestRetryConformance( + Transport transport, + String projectId, + String host, + int scenarioId, + Method method, + InstructionList instruction, + boolean preconditionsProvided, + boolean expectSuccess, + int mappingId) { + this.transport = transport; + this.projectId = projectId; + this.host = host; + this.scenarioId = scenarioId; + this.method = requireNonNull(method, "method must be non null"); + this.instruction = 
requireNonNull(instruction, "instruction must be non null"); + this.preconditionsProvided = preconditionsProvided; + this.expectSuccess = expectSuccess; + this.mappingId = mappingId; + String instructionsString = + this.instruction.getInstructionsList().stream() + .map(s -> s.replace("return-", "")) + .collect(Collectors.joining("_")); + char transportTag = transport.name().toLowerCase().charAt(0); + this.bucketName = + String.format( + Locale.US, + "%s_s%03d-%s-m%03d_bkt1_%s", + BASE_ID, + scenarioId, + instructionsString.toLowerCase(), + mappingId, + transportTag); + this.bucketName2 = + String.format( + Locale.US, + "%s_s%03d-%s-m%03d_bkt2_%s", + BASE_ID, + scenarioId, + instructionsString.toLowerCase(), + mappingId, + transportTag); + this.userProject = + String.format( + Locale.US, + "%s_s%03d-%s-m%03d_prj1_%s", + BASE_ID, + scenarioId, + instructionsString.toLowerCase(), + mappingId, + transportTag); + this.objectName = + String.format( + Locale.US, + "%s_s%03d-%s-m%03d_obj1_%s", + BASE_ID, + scenarioId, + instructionsString.toLowerCase(), + mappingId, + transportTag); + this.topicName = + String.format( + Locale.US, + "%s_s%03d-%s-m%03d_top1_%s", + BASE_ID, + scenarioId, + instructionsString.toLowerCase(), + mappingId, + transportTag); + // define a lazy supplier for bytes. 
+ this.lazyHelloWorldUtf8Bytes = + () -> genBytes(this.method, this.instruction.getInstructionsList()); + this.helloWorldFilePath = + resolvePathForResource(objectName, method, this.instruction.getInstructionsList()); + this.serviceAccountCredentials = resolveServiceAccountCredentials(); + } + + public String getProjectId() { + return projectId; + } + + public String getHost() { + return host; + } + + public String getBucketName() { + return bucketName; + } + + public String getBucketName2() { + return bucketName2; + } + + public String getUserProject() { + return userProject; + } + + public String getObjectName() { + return objectName; + } + + public byte[] getHelloWorldUtf8Bytes() { + return lazyHelloWorldUtf8Bytes.get(); + } + + public Path getHelloWorldFilePath() { + return helloWorldFilePath.get(); + } + + public int getScenarioId() { + return scenarioId; + } + + public Method getMethod() { + return method; + } + + public InstructionList getInstruction() { + return instruction; + } + + public boolean isPreconditionsProvided() { + return preconditionsProvided; + } + + public boolean isExpectSuccess() { + return expectSuccess; + } + + public int getMappingId() { + return mappingId; + } + + public ServiceAccountSigner getServiceAccountSigner() { + return serviceAccountCredentials; + } + + public String getTestName() { + String instructionsDesc = Joiner.on("_").join(instruction.getInstructionsList()); + return String.format( + Locale.US, + "TestRetryConformance/%s-%d-[%s]-%s-%d", + transport.name().toLowerCase(), + scenarioId, + instructionsDesc, + method.getName(), + mappingId); + } + + public Transport getTransport() { + return transport; + } + + @Override + public String toString() { + return getTestName(); + } + + public static Predicate transportIs(Transport transport) { + return trc -> trc.getTransport() == transport; + } + + private static Supplier resolvePathForResource( + String objectName, Method method, List instructionList) { + return () -> { + try { + 
File tempFile = File.createTempFile(objectName, ""); + tempFile.deleteOnExit(); + + byte[] bytes = genBytes(method, instructionList); + try (ByteArrayInputStream in = new ByteArrayInputStream(bytes); + FileOutputStream out = new FileOutputStream(tempFile)) { + long copy = ByteStreams.copy(in, out); + assertThat(copy).isEqualTo(bytes.length); + } + + return tempFile.toPath(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + } + + private static ServiceAccountCredentials resolveServiceAccountCredentials() { + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + InputStream inputStream = + cl.getResourceAsStream( + "com/google/cloud/conformance/storage/v1/test_service_account.not-a-test.json"); + assertNotNull(inputStream); + try { + return ServiceAccountCredentials.fromStream(inputStream); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public String getTopicName() { + return topicName; + } + + private static byte[] genBytes(Method method, List instructionsList) { + // Not all tests need data for an object, though some tests - resumable upload - needs + // more than 8MiB. + // We want to avoid allocating 8.1MiB for each test unnecessarily, especially since we + // instantiate all permuted test cases. ~1000 * 8.1MiB ~~ > 8GiB. 
+ switch (method.getName()) { + case "storage.objects.insert": + boolean after8m = instructionsList.stream().anyMatch(s -> s.endsWith("after-8192K")); + if (after8m) { + return DataGenerator.base64Characters().genBytes(_8MiB * 2 + _512KiB); + } else { + return DataGenerator.base64Characters().genBytes(_512KiB); + } + case "storage.objects.get": + return DataGenerator.base64Characters().genBytes(_512KiB); + default: + return "Hello, World!".getBytes(StandardCharsets.UTF_8); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/AssertRequestHeaders.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/AssertRequestHeaders.java new file mode 100644 index 000000000000..a9053ea8b553 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/AssertRequestHeaders.java @@ -0,0 +1,35 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import com.google.common.truth.IterableSubject; + +public interface AssertRequestHeaders { + + void clear(); + + default IterableSubject assertRequestHeader(String headerName) { + return assertRequestHeader(headerName, FilteringPolicy.DISTINCT); + } + + IterableSubject assertRequestHeader(String headerName, FilteringPolicy filteringPolicy); + + enum FilteringPolicy { + DISTINCT, + NO_FILTER + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/BucketCleaner.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/BucketCleaner.java new file mode 100644 index 000000000000..67c8f65d4be3 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/BucketCleaner.java @@ -0,0 +1,252 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.paging.Page; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.FailedPreconditionException; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobField; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BucketSourceOption; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.storage.control.v2.BucketName; +import com.google.storage.control.v2.DeleteFolderRequest; +import com.google.storage.control.v2.DeleteManagedFolderRequest; +import com.google.storage.control.v2.Folder; +import com.google.storage.control.v2.GetStorageLayoutRequest; +import com.google.storage.control.v2.ListFoldersRequest; +import com.google.storage.control.v2.ListManagedFoldersRequest; +import com.google.storage.control.v2.StorageControlClient; +import com.google.storage.control.v2.StorageLayout; +import com.google.storage.control.v2.StorageLayoutName; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Locale; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public final class BucketCleaner { + + private static final Logger LOGGER = LoggerFactory.getLogger(BucketCleaner.class); + + public static void doCleanup(String bucketName, Storage s) { + LOGGER.trace("Starting bucket cleanup..."); + String projectId = s.getOptions().getProjectId(); + try { + // TODO: probe bucket existence, a bad test could have deleted the bucket + Page page1 = + s.list(bucketName, BlobListOption.userProject(projectId), BlobListOption.versions(true)); + + List 
deleteResults = + StreamSupport.stream(page1.iterateAll().spliterator(), false) + .map( + b -> + new DeleteResult( + b.getName(), + s.delete(b.getBlobId(), BlobSourceOption.userProject(projectId)))) + .collect(Collectors.toList()); + boolean anyFailedObjectDeletes = getIfAnyFailedAndReport(bucketName, deleteResults, "object"); + + if (!anyFailedObjectDeletes) { + s.delete(bucketName, BucketSourceOption.userProject(projectId)); + } else { + LOGGER.warn("Unable to delete bucket due to previous failed object deletes"); + } + LOGGER.trace("Bucket cleanup complete"); + } catch (Exception e) { + LOGGER.error("Error during bucket cleanup."); + } + } + + public static void doCleanup(String bucketName, Storage s, StorageControlClient ctrl) { + LOGGER.warn("Starting bucket cleanup: {}", bucketName); + String projectId = s.getOptions().getProjectId(); + try { + // TODO: probe bucket existence, a bad test could have deleted the bucket + Page page1 = + s.list( + bucketName, + BlobListOption.userProject(projectId), + BlobListOption.versions(true), + BlobListOption.fields(BlobField.NAME)); + + List objectResults = + StreamSupport.stream(page1.iterateAll().spliterator(), false) + .map( + b -> + new DeleteResult( + b.getName(), + s.delete(b.getBlobId(), BlobSourceOption.userProject(projectId)))) + .collect(Collectors.toList()); + boolean anyFailedObjectDelete = getIfAnyFailedAndReport(bucketName, objectResults, "object"); + boolean anyFailedFolderDelete = false; + boolean anyFailedManagedFolderDelete = false; + + GrpcCallContext grpcCallContext = + GrpcCallContext.createDefault() + .withExtraHeaders( + ImmutableMap.of("x-goog-user-project", ImmutableList.of(projectId))); + if (!anyFailedObjectDelete) { + BucketName parent = BucketName.of("_", bucketName); + StorageLayout storageLayout = + ctrl.getStorageLayoutCallable() + .call( + GetStorageLayoutRequest.newBuilder() + .setName( + StorageLayoutName.of(parent.getProject(), parent.getBucket()) + .toString()) + .build(), + 
grpcCallContext); + + List folderDeletes; + if (storageLayout.hasHierarchicalNamespace() + && storageLayout.getHierarchicalNamespace().getEnabled()) { + folderDeletes = + StreamSupport.stream( + ctrl.listFoldersPagedCallable() + .call( + ListFoldersRequest.newBuilder().setParent(parent.toString()).build(), + grpcCallContext) + .iterateAll() + .spliterator(), + false) + .collect(Collectors.toList()) + .stream() + .sorted(Collections.reverseOrder(Comparator.comparing(Folder::getName))) + .map( + folder -> { + String formatted = + String.format(Locale.US, "folder = %s", folder.getName()); + LOGGER.warn(formatted); + boolean success = true; + try { + ctrl.deleteFolderCallable() + .call( + DeleteFolderRequest.newBuilder() + .setName(folder.getName()) + .build(), + grpcCallContext); + } catch (ApiException e) { + success = false; + } + return new DeleteResult(folder.getName(), success); + }) + .collect(Collectors.toList()); + } else { + folderDeletes = ImmutableList.of(); + } + + List managedFolderDeletes; + try { + managedFolderDeletes = + StreamSupport.stream( + ctrl.listManagedFoldersPagedCallable() + .call( + ListManagedFoldersRequest.newBuilder() + .setParent(parent.toString()) + .build(), + grpcCallContext) + .iterateAll() + .spliterator(), + false) + .map( + managedFolder -> { + String formatted = + String.format(Locale.US, "managedFolder = %s", managedFolder.getName()); + LOGGER.warn(formatted); + boolean success = true; + try { + ctrl.deleteManagedFolderCallable() + .call( + DeleteManagedFolderRequest.newBuilder() + .setName(managedFolder.getName()) + .build(), + grpcCallContext); + } catch (ApiException e) { + success = false; + } + return new DeleteResult(managedFolder.getName(), success); + }) + .collect(Collectors.toList()); + } catch (FailedPreconditionException fpe) { + // FAILED_PRECONDITION: Uniform bucket-level access is required to be enabled on the + // bucket in order to perform this operation. 
Read more at + // https://cloud.google.com/storage/docs/uniform-bucket-level-access + managedFolderDeletes = ImmutableList.of(); + } + + anyFailedFolderDelete = getIfAnyFailedAndReport(bucketName, folderDeletes, "folder"); + anyFailedManagedFolderDelete = + getIfAnyFailedAndReport(bucketName, managedFolderDeletes, "managed folder"); + } + + List failed = + Stream.of( + anyFailedObjectDelete ? "object" : "", + anyFailedFolderDelete ? "folder" : "", + anyFailedManagedFolderDelete ? "managed folder" : "") + .filter(ss -> !ss.isEmpty()) + .collect(Collectors.toList()); + + if (!anyFailedObjectDelete && !anyFailedFolderDelete && !anyFailedManagedFolderDelete) { + s.delete(bucketName, BucketSourceOption.userProject(projectId)); + } else { + LOGGER.warn( + String.format( + Locale.US, + "Unable to delete bucket %s due to previous failed %s deletes", + bucketName, + failed)); + } + + LOGGER.warn("Bucket cleanup complete: {}", bucketName); + } catch (Exception e) { + LOGGER.error("Error during bucket cleanup."); + } + } + + private static boolean getIfAnyFailedAndReport( + String bucketName, List deleteResults, String resourceType) { + List failedDeletes = + deleteResults.stream().filter(r -> !r.success).collect(Collectors.toList()); + failedDeletes.forEach( + r -> + LOGGER.warn( + String.format( + Locale.US, "Failed to delete %s %s/%s", resourceType, bucketName, r.name))); + return !failedDeletes.isEmpty(); + } + + private static final class DeleteResult { + private final String name; + private final boolean success; + + DeleteResult(String name, boolean success) { + this.name = name; + this.success = success; + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/CSEKSupport.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/CSEKSupport.java new file mode 100644 index 000000000000..fb62fa6eba6b --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/CSEKSupport.java @@ -0,0 +1,132 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import com.google.common.hash.Hashing; +import java.security.Key; +import java.security.SecureRandom; +import java.util.Base64; +import java.util.Objects; +import javax.crypto.SecretKey; + +/** Supporting class for Customer Supplied Encryption Key related state */ +final class CSEKSupport { + + private final byte[] keyBytes; + private final EncryptionKeyTuple tuple; + private final Key key; + + private CSEKSupport(byte[] keyBytes, EncryptionKeyTuple tuple) { + this.keyBytes = keyBytes; + this.tuple = tuple; + this.key = + new SecretKey() { + @Override + public String getAlgorithm() { + return tuple.algorithm; + } + + @Override + public String getFormat() { + return null; + } + + @Override + public byte[] getEncoded() { + return keyBytes; + } + }; + } + + byte[] getKeyBytes() { + return keyBytes; + } + + EncryptionKeyTuple getTuple() { + return tuple; + } + + Key getKey() { + return key; + } + + static CSEKSupport create() { + byte[] bytes = new byte[32]; + new SecureRandom().nextBytes(bytes); + String encode = Base64.getEncoder().encodeToString(bytes); + String sha256 = Base64.getEncoder().encodeToString(Hashing.sha256().hashBytes(bytes).asBytes()); + return new CSEKSupport(bytes, new EncryptionKeyTuple("AES256", 
encode, sha256)); + } + + static final class EncryptionKeyTuple { + private final String algorithm; + private final String key; + private final String keySha256; + + EncryptionKeyTuple(String algorithm, String key, String keySha256) { + this.algorithm = algorithm; + this.key = key; + this.keySha256 = keySha256; + } + + String getAlgorithm() { + return algorithm; + } + + String getKey() { + return key; + } + + String getKeySha256() { + return keySha256; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof EncryptionKeyTuple)) { + return false; + } + EncryptionKeyTuple that = (EncryptionKeyTuple) o; + return Objects.equals(algorithm, that.algorithm) + && Objects.equals(key, that.key) + && Objects.equals(keySha256, that.keySha256); + } + + @Override + public int hashCode() { + return Objects.hash(algorithm, key, keySha256); + } + + @Override + public String toString() { + return "EncryptionKeyTuple{" + + "algorithm='" + + algorithm + + '\'' + + ", key='" + + key + + '\'' + + ", keySha256='" + + keySha256 + + '\'' + + '}'; + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ChecksummedTestContent.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ChecksummedTestContent.java new file mode 100644 index 000000000000..7050638c0534 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ChecksummedTestContent.java @@ -0,0 +1,165 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/**
 * Immutable test content paired with its precomputed CRC32C and MD5 checksums.
 *
 * <p>Checksums are computed once at construction so tests can compare server-reported values
 * without re-hashing.
 */
public final class ChecksummedTestContent {

  private final byte[] bytes;
  private final int crc32c;
  private final String md5Base64;

  private ChecksummedTestContent(byte[] bytes, int crc32c, String md5Base64) {
    this.bytes = bytes;
    this.crc32c = crc32c;
    this.md5Base64 = md5Base64;
  }

  /** The raw content. NOTE(review): returns the internal array without copying. */
  public byte[] getBytes() {
    return bytes;
  }

  /** Number of bytes of content. */
  public int length() {
    return bytes.length;
  }

  /** Copy of the content from {@code beginIndex} (inclusive) to the end. */
  public byte[] getBytes(int beginIndex) {
    return UnsafeByteOperations.unsafeWrap(bytes).substring(beginIndex).toByteArray();
  }

  /** Copy of {@code length} bytes of content starting at {@code beginIndex}. */
  public byte[] getBytes(int beginIndex, int length) {
    return UnsafeByteOperations.unsafeWrap(bytes)
        .substring(beginIndex, beginIndex + length)
        .toByteArray();
  }

  /** CRC32C of the content as a (possibly negative) signed int. */
  public int getCrc32c() {
    return crc32c;
  }

  /** Raw MD5 digest bytes, decoded from the stored base64 form. */
  public ByteString getMd5Bytes() {
    return ByteString.copyFrom(BaseEncoding.base64().decode(md5Base64));
  }

  /** Base64-encoded MD5 digest of the content. */
  public String getMd5Base64() {
    return md5Base64;
  }

  /** Base64 of the CRC32C value encoded as 4 big-endian bytes (the wire format GCS uses). */
  public String getCrc32cBase64() {
    return Base64.getEncoder().encodeToString(Ints.toByteArray(crc32c));
  }

  /** Content with {@code c} (narrowed to a byte) appended; returns a new array. */
  public byte[] concat(char c) {
    return concat((byte) c);
  }

  /** Content with {@code b} appended; returns a new array, checksums are not recomputed. */
  public byte[] concat(byte b) {
    int lenOrig = bytes.length;
    int lenNew = lenOrig + 1;
    byte[] newBytes = Arrays.copyOf(bytes, lenNew);
    newBytes[lenOrig] = b;
    return newBytes;
  }

  /** New instance whose content is this content followed by {@code ctc}'s, re-checksummed. */
  public ChecksummedTestContent concat(ChecksummedTestContent ctc) {
    byte[] newBytes = new byte[this.length() + ctc.length()];
    System.arraycopy(bytes, 0, newBytes, 0, bytes.length);
    System.arraycopy(ctc.bytes, 0, newBytes, bytes.length, ctc.length());
    return ChecksummedTestContent.of(newBytes);
  }

  /** The content as a fresh stream. */
  public ByteArrayInputStream bytesAsInputStream() {
    return new ByteArrayInputStream(bytes);
  }

  /** The content and CRC32C as the gRPC {@link ChecksummedData} message. */
  public ChecksummedData asChecksummedData() {
    return ChecksummedData.newBuilder()
        .setContent(ByteString.copyFrom(bytes))
        .setCrc32C(crc32c)
        .build();
  }

  /** Re-checksummed sub-range; {@code length} is clamped to the remaining bytes. */
  public ChecksummedTestContent slice(int begin, int length) {
    return of(bytes, begin, Math.min(length, bytes.length - begin));
  }

  /** Split into consecutive chunks of at most {@code chunkSize} bytes, each re-checksummed. */
  public List<ChecksummedTestContent> chunkup(int chunkSize) {
    List<ChecksummedTestContent> elements = new ArrayList<>();
    for (int i = 0; i < bytes.length; i += chunkSize) {
      elements.add(slice(i, chunkSize));
    }
    return ImmutableList.copyOf(elements);
  }

  /** Builder for a bidi write request carrying this content as checksummed data. */
  public BidiWriteObjectRequest.Builder asBidiWrite() {
    return BidiWriteObjectRequest.newBuilder().setChecksummedData(asChecksummedData());
  }

  /** The content wrapped (not copied) in a ByteBuffer. */
  public ByteBuffer asByteBuffer() {
    return ByteBuffer.wrap(bytes);
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this)
        .add("byteCount", bytes.length)
        .add("crc32c", Integer.toUnsignedString(crc32c))
        .toString();
  }

  /** Content from the UTF-8 encoding of {@code content}. */
  public static ChecksummedTestContent of(String content) {
    byte[] bytes = content.getBytes(StandardCharsets.UTF_8);
    return of(bytes);
  }

  /** Deterministically generated content of exactly {@code length} bytes. */
  public static ChecksummedTestContent gen(int length) {
    byte[] bytes1 = DataGenerator.base64Characters().genBytes(length);
    return of(bytes1);
  }

  /** Wrap {@code bytes}, computing CRC32C and MD5. NOTE(review): the array is not copied. */
  public static ChecksummedTestContent of(byte[] bytes) {
    int crc32c = Hashing.crc32c().hashBytes(bytes).asInt();
    String md5Base64 = Base64.getEncoder().encodeToString(Hashing.md5().hashBytes(bytes).asBytes());
    return new ChecksummedTestContent(bytes, crc32c, md5Base64);
  }

  /**
   * Copy {@code length} bytes of {@code bytes} starting at {@code from} and checksum the copy.
   *
   * @throws IllegalArgumentException if {@code length} is negative
   * @throws IndexOutOfBoundsException if the range falls outside {@code bytes}
   */
  public static ChecksummedTestContent of(byte[] bytes, int from, int length) {
    checkArgument(length >= 0, "length >= 0 (%s >= 0)", length);
    checkPositionIndexes(from, from + length, bytes.length);
    return of(Arrays.copyOfRange(bytes, from, from + length));
  }
}
/**
 * Tests for the {@code dedupe} static factories on the various storage Option classes.
 *
 * <p>Dedupe semantics under test: later options win over earlier options of the same kind, while
 * options of different kinds are all retained.
 */
public final class DedupeOptionTest {

  // Later userProject("xyz") replaces earlier userProject("abc"); metagenerationMatch survives.
  @Test
  public void dedupe_varargs() {
    BucketTargetOption[] dedupe =
        BucketTargetOption.dedupe(
            BucketTargetOption.userProject("abc"),
            BucketTargetOption.metagenerationMatch(),
            BucketTargetOption.userProject("xyz"));

    assertThat(dedupe)
        .asList()
        .containsExactly(
            BucketTargetOption.metagenerationMatch(), BucketTargetOption.userProject("xyz"));
  }

  // Same semantics when the base options come in as a Collection plus varargs.
  @Test
  public void dedupe_collection_varargs() {
    BucketTargetOption[] dedupe =
        BucketTargetOption.dedupe(
            ImmutableList.of(
                BucketTargetOption.userProject("abc"), BucketTargetOption.metagenerationMatch()),
            BucketTargetOption.userProject("xyz"));

    assertThat(dedupe)
        .asList()
        .containsExactly(
            BucketTargetOption.metagenerationMatch(), BucketTargetOption.userProject("xyz"));
  }

  // Same semantics when the base options come in as an array plus varargs.
  @Test
  public void dedupe_array_varargs() {
    BucketTargetOption[] dedupe =
        BucketTargetOption.dedupe(
            new BucketTargetOption[] {
              BucketTargetOption.userProject("abc"), BucketTargetOption.metagenerationMatch()
            },
            BucketTargetOption.userProject("xyz"));

    assertThat(dedupe)
        .asList()
        .containsExactly(
            BucketTargetOption.metagenerationMatch(), BucketTargetOption.userProject("xyz"));
  }

  // Smoke test: every Option class exposes a zero-arg varargs dedupe overload.
  @Test
  public void allClasses_varargs() {
    Storage.BlobGetOption.dedupe();
    Storage.BlobListOption.dedupe();
    Storage.BlobSourceOption.dedupe();
    Storage.BlobTargetOption.dedupe();
    Storage.BlobWriteOption.dedupe();
    Storage.BucketGetOption.dedupe();
    Storage.BucketListOption.dedupe();
    Storage.BucketSourceOption.dedupe();
    Storage.BucketTargetOption.dedupe();
    Storage.CreateHmacKeyOption.dedupe();
    Storage.DeleteHmacKeyOption.dedupe();
    Storage.GetHmacKeyOption.dedupe();
    Storage.ListHmacKeysOption.dedupe();
    Storage.UpdateHmacKeyOption.dedupe();

    Bucket.BlobTargetOption.dedupe();
    Bucket.BlobWriteOption.dedupe();
    Bucket.BucketSourceOption.dedupe();

    Blob.BlobSourceOption.dedupe();
  }

  // Smoke test: every Option class exposes a (Collection, varargs) dedupe overload.
  @Test
  public void allClasses_collection_varargs() {
    Storage.BlobGetOption.dedupe(ImmutableList.of());
    Storage.BlobListOption.dedupe(ImmutableList.of());
    Storage.BlobSourceOption.dedupe(ImmutableList.of());
    Storage.BlobTargetOption.dedupe(ImmutableList.of());
    Storage.BlobWriteOption.dedupe(ImmutableList.of());
    Storage.BucketGetOption.dedupe(ImmutableList.of());
    Storage.BucketListOption.dedupe(ImmutableList.of());
    Storage.BucketSourceOption.dedupe(ImmutableList.of());
    Storage.BucketTargetOption.dedupe(ImmutableList.of());
    Storage.CreateHmacKeyOption.dedupe(ImmutableList.of());
    Storage.DeleteHmacKeyOption.dedupe(ImmutableList.of());
    Storage.GetHmacKeyOption.dedupe(ImmutableList.of());
    Storage.ListHmacKeysOption.dedupe(ImmutableList.of());
    Storage.UpdateHmacKeyOption.dedupe(ImmutableList.of());

    Bucket.BlobTargetOption.dedupe(ImmutableList.of());
    Bucket.BlobWriteOption.dedupe(ImmutableList.of());
    Bucket.BucketSourceOption.dedupe(ImmutableList.of());

    Blob.BlobSourceOption.dedupe(ImmutableList.of());
  }

  // Smoke test: every Option class exposes an (array, varargs) dedupe overload.
  @Test
  public void allClasses_array_varargs() {
    String p = "proj";
    Storage.BlobGetOption.dedupe(
        new Storage.BlobGetOption[0], Storage.BlobGetOption.userProject(p));
    Storage.BlobListOption.dedupe(
        new Storage.BlobListOption[0], Storage.BlobListOption.userProject(p));
    Storage.BlobSourceOption.dedupe(
        new Storage.BlobSourceOption[0], Storage.BlobSourceOption.userProject(p));
    Storage.BlobTargetOption.dedupe(
        new Storage.BlobTargetOption[0], Storage.BlobTargetOption.userProject(p));
    Storage.BlobWriteOption.dedupe(
        new Storage.BlobWriteOption[0], Storage.BlobWriteOption.userProject(p));
    Storage.BucketGetOption.dedupe(
        new Storage.BucketGetOption[0], Storage.BucketGetOption.userProject(p));
    Storage.BucketListOption.dedupe(
        new Storage.BucketListOption[0], Storage.BucketListOption.userProject(p));
    Storage.BucketSourceOption.dedupe(
        new Storage.BucketSourceOption[0], Storage.BucketSourceOption.userProject(p));
    Storage.BucketTargetOption.dedupe(
        new Storage.BucketTargetOption[0], Storage.BucketTargetOption.userProject(p));
    Storage.CreateHmacKeyOption.dedupe(
        new Storage.CreateHmacKeyOption[0], Storage.CreateHmacKeyOption.userProject(p));
    Storage.DeleteHmacKeyOption.dedupe(
        new Storage.DeleteHmacKeyOption[0], Storage.DeleteHmacKeyOption.userProject(p));
    Storage.GetHmacKeyOption.dedupe(
        new Storage.GetHmacKeyOption[0], Storage.GetHmacKeyOption.userProject(p));
    Storage.ListHmacKeysOption.dedupe(
        new Storage.ListHmacKeysOption[0], Storage.ListHmacKeysOption.userProject(p));
    Storage.UpdateHmacKeyOption.dedupe(
        new Storage.UpdateHmacKeyOption[0], Storage.UpdateHmacKeyOption.userProject(p));

    Bucket.BlobTargetOption.dedupe(
        new Bucket.BlobTargetOption[0], Bucket.BlobTargetOption.userProject(p));
    Bucket.BlobWriteOption.dedupe(
        new Bucket.BlobWriteOption[0], Bucket.BlobWriteOption.userProject(p));
    Bucket.BucketSourceOption.dedupe(
        new Bucket.BucketSourceOption[0], Bucket.BucketSourceOption.userProject(p));

    Blob.BlobSourceOption.dedupe(
        new Blob.BlobSourceOption[0], Blob.BlobSourceOption.userProject(p));
  }
}
Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.cloud.storage.PackagePrivateMethodWorkarounds; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.Any; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Message; +import com.google.protobuf.TextFormat; +import com.google.protobuf.UnsafeByteOperations; +import com.google.rpc.DebugInfo; +import com.google.rpc.ErrorInfo; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Client side interceptor which will log gRPC request, headers, response, status and 
trailers in + * plain text, rather than the byte encoded text + * io.grpc.netty.shaded.io.grpc.netty.NettyClientHandler does. + * + *

This interceptor does not include the other useful information that NettyClientHandler + * provides such as method names, peers etc. + */ +public final class GrpcPlainRequestLoggingInterceptor implements ClientInterceptor { + + private static final Logger LOGGER = + LoggerFactory.getLogger(GrpcPlainRequestLoggingInterceptor.class); + + private static final GrpcPlainRequestLoggingInterceptor INSTANCE = + new GrpcPlainRequestLoggingInterceptor(); + + private static final Metadata.Key X_GOOG_REQUEST_PARAMS = + Metadata.Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER); + private static final Metadata.Key X_RETRY_TEST_ID = + Metadata.Key.of("x-retry-test-id", Metadata.ASCII_STRING_MARSHALLER); + + /** + * Define a map of message types we want to try to unpack from an {@link Any}. + * + *

The keys are the {@code type_url}, and the values are the default instances of each message. + */ + private static final Map anyParsers = + Stream.of( + com.google.rpc.ErrorInfo.getDefaultInstance(), + com.google.rpc.DebugInfo.getDefaultInstance(), + com.google.rpc.QuotaFailure.getDefaultInstance(), + com.google.rpc.PreconditionFailure.getDefaultInstance(), + com.google.rpc.BadRequest.getDefaultInstance(), + com.google.rpc.Help.getDefaultInstance(), + com.google.storage.v2.BidiReadObjectError.getDefaultInstance(), + com.google.storage.v2.BidiReadObjectRedirectedError.getDefaultInstance(), + com.google.storage.v2.BidiWriteObjectRedirectedError.getDefaultInstance()) + // take the stream of Message default instances and collect them to map entries + .collect( + Collectors.toMap( + // resolve the type_url of the message + m -> Any.pack(m).getTypeUrl(), + // return the message default instance as is for the value + Function.identity())); + + private GrpcPlainRequestLoggingInterceptor() {} + + public static GrpcPlainRequestLoggingInterceptor getInstance() { + return INSTANCE; + } + + public static GrpcInterceptorProvider getInterceptorProvider() { + return InterceptorProvider.INSTANCE; + } + + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + ClientCall call = next.newCall(method, callOptions); + return new SimpleForwardingClientCall(call) { + @Override + public void start(Listener responseListener, Metadata headers) { + if (headers.containsKey(X_GOOG_REQUEST_PARAMS) || headers.containsKey(X_RETRY_TEST_ID)) { + LOGGER.atDebug().log(() -> String.format(">>> headers = %s", headers)); + } + SimpleForwardingClientCallListener listener = + new SimpleForwardingClientCallListener(responseListener) { + @Override + public void onMessage(RespT message) { + LOGGER.atDebug().log( + () -> + String.format( + Locale.US, + "<<< %s{%n%s}", + message.getClass().getSimpleName(), + fmtProto(message))); + 
super.onMessage(message); + } + + @Override + public void onClose(Status status, Metadata trailers) { + LOGGER.atDebug().log(lazyOnCloseLogString(status, trailers)); + super.onClose(status, trailers); + } + }; + super.start(listener, headers); + } + + @Override + public void sendMessage(ReqT message) { + LOGGER.atDebug().log( + () -> + String.format( + Locale.US, + ">>> %s{%n%s}", + message.getClass().getSimpleName(), + fmtProto(message))); + super.sendMessage(message); + } + }; + } + + @NonNull + public static String fmtProto(@NonNull Object obj) { + return PackagePrivateMethodWorkarounds.fmtProto(obj, TextFormat.printer()::printToString); + } + + // Suppress DataFlowIssue warnings for this method. + // While the declared return type of trailers.get is @Nullable T, we're always calling get with a + // key we know to be present because we found the key name by calling trailers.keys(). + @SuppressWarnings("DataFlowIssue") + @VisibleForTesting + public static @NonNull Supplier lazyOnCloseLogString(Status status, Metadata trailers) { + return () -> { + final StringBuilder sb = new StringBuilder(); + String description = status.getDescription(); + sb.append("<<< status = {").append("\n code[4]=").append(status.getCode()); + if (description != null) { + sb.append(",\n description[") + .append(description.getBytes(StandardCharsets.US_ASCII).length) + .append("]='") + .append(description) + .append("'"); + } + sb.append("\n},\ntrailers = {"); + Set keys = trailers.keys(); + for (String key : keys) { + sb.append("\n ").append(key); + if (key.endsWith("-bin")) { + byte[] bytes = trailers.get(Metadata.Key.of(key, Metadata.BINARY_BYTE_MARSHALLER)); + sb.append("[").append(bytes.length).append("]").append(": "); + if (key.equals("grpc-status-details-bin")) { + com.google.rpc.Status s; + try { + s = com.google.rpc.Status.parseFrom(bytes); + } catch (InvalidProtocolBufferException e) { + sb.append(TextFormat.escapeBytes(UnsafeByteOperations.unsafeWrap(bytes))); + continue; + } + 
sb.append(com.google.rpc.Status.getDescriptor().getFullName()).append("{"); + s.getDetailsList() + .forEach( + a -> { + Message maybeParseAs = anyParsers.get(a.getTypeUrl()); + Message m = maybeParseAs == null ? a : unpack(a, maybeParseAs); + // base indentation, single uppercase variable name to make easier to read in + // the following code block + String I = " "; + sb.append("\n"); + sb.append(I).append("details {\n"); + sb.append(I).append(" type_url: ").append(a.getTypeUrl()).append("\n"); + sb.append(I).append(" value: {\n "); + sb.append(I).append(" ").append(fmtDetails(m, I)).append("\n"); + sb.append(I).append(" }\n"); + sb.append(I).append("}"); + }); + if (!s.getDetailsList().isEmpty()) { + sb.append("\n"); + } + sb.append(" }"); + } else if (key.contains("debuginfo")) { + sb.append("{") + .append(parseBytesAsMessage(DebugInfo.getDefaultInstance(), bytes)) + .append("}"); + } else if (key.contains("errorinfo")) { + sb.append("{") + .append(parseBytesAsMessage(ErrorInfo.getDefaultInstance(), bytes)) + .append("}"); + } else { + sb.append("{").append(parseBytesAsMessage(Any.getDefaultInstance(), bytes)).append("}"); + } + } else { + String asciiStr = trailers.get(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER)); + sb.append("[") + .append(asciiStr.getBytes(StandardCharsets.US_ASCII).length) + .append("]") + .append(": "); + sb.append(asciiStr); + } + } + if (!keys.isEmpty()) { + sb.append("\n"); + } + sb.append("}"); + return sb.toString(); + }; + } + + private static String fmtDetails(Message m, String baseIndentation) { + String fmt = fmtProto(m); + return fmt.substring(0, fmt.length() - 1).replace("\n", "\n" + baseIndentation + " "); + } + + private static String parseBytesAsMessage(M m, byte[] bytes) { + boolean targetAny = m instanceof Any; + try { + Message parsed = m.getParserForType().parseFrom(bytes); + return fmtProto(parsed); + } catch (InvalidProtocolBufferException e) { + if (!targetAny) { + return 
parseBytesAsMessage(Any.getDefaultInstance(), bytes); + } else { + return TextFormat.escapeBytes(UnsafeByteOperations.unsafeWrap(bytes)); + } + } + } + + /** + * Helper method to unpack an Any. This is unsafe based on the contract of Any.unpack, however the + * Any we are unpacking here is limited to a set of known types which we have already checked, and + * the Any is already packed in a Status message, so we know it is already deserializable. + */ + private static M unpack(Any any, M m) { + try { + return any.unpackSameTypeAs(m); + } catch (InvalidProtocolBufferException e) { + throw new RuntimeException(e); + } + } + + private static final class InterceptorProvider implements GrpcInterceptorProvider { + private static final InterceptorProvider INSTANCE = new InterceptorProvider(); + + private final List interceptors; + + private InterceptorProvider() { + this.interceptors = ImmutableList.of(GrpcPlainRequestLoggingInterceptor.INSTANCE); + } + + @Override + public List getInterceptors() { + return interceptors; + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptorTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptorTest.java new file mode 100644 index 000000000000..6ade93ac1c22 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptorTest.java @@ -0,0 +1,108 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.GRPC_STATUS_DETAILS_KEY; +import static com.google.common.truth.Truth.assertThat; + +import com.google.common.io.Resources; +import com.google.protobuf.Any; +import com.google.protobuf.TextFormat; +import com.google.rpc.DebugInfo; +import com.google.storage.v2.BidiReadObjectError; +import com.google.storage.v2.BidiReadObjectRequest; +import com.google.storage.v2.BidiReadObjectSpec; +import com.google.storage.v2.ReadRange; +import com.google.storage.v2.ReadRangeError; +import io.grpc.Metadata; +import io.grpc.Status; +import java.io.IOException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.Locale; +import java.util.Optional; +import java.util.function.Supplier; +import org.junit.Test; + +public final class GrpcPlainRequestLoggingInterceptorTest { + + @Test + public void lazyOnCloseLogStringGolden() throws IOException { + BidiReadObjectRequest request = + BidiReadObjectRequest.newBuilder() + .setReadObjectSpec( + BidiReadObjectSpec.newBuilder() + .setBucket("projects/_/buckets/b") + .setObject("o") + .setGeneration(1) + .build()) + .addReadRanges(ReadRange.newBuilder().setReadId(3).setReadOffset(39).build()) + .build(); + + Optional readRange = request.getReadRangesList().stream().findFirst(); + String message = + String.format( + Locale.US, + "OUT_OF_RANGE read_offset = %d", + readRange.map(ReadRange::getReadOffset).orElse(0L)); + long readId = readRange.map(ReadRange::getReadId).orElse(0L); + + BidiReadObjectError err2 = + BidiReadObjectError.newBuilder() + .addReadRangeErrors( + ReadRangeError.newBuilder() + .setReadId(readId) + .setStatus( + com.google.rpc.Status.newBuilder() + .setCode(com.google.rpc.Code.OUT_OF_RANGE_VALUE) + .build()) + .build()) + .build(); + + com.google.rpc.Status grpcStatusDetails = + 
com.google.rpc.Status.newBuilder() + .setCode(com.google.rpc.Code.UNAVAILABLE_VALUE) + .setMessage("fail read_id: " + readId) + .addDetails(Any.pack(err2)) + .addDetails( + Any.pack( + DebugInfo.newBuilder() + .setDetail(message) + .addStackEntries(TextFormat.printer().shortDebugString(request)) + .build())) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + Supplier supplier = + GrpcPlainRequestLoggingInterceptor.lazyOnCloseLogString(Status.OUT_OF_RANGE, trailers); + String actual = supplier.get(); + + String expected = + loadGoldenFile( + "com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptor/golden/OUT_OF_RANGE.txt"); + assertThat(actual).isEqualTo(expected); + } + + private static String loadGoldenFile(String resourcePath) throws IOException { + URL url = + GrpcPlainRequestLoggingInterceptorTest.class.getClassLoader().getResource(resourcePath); + assertThat(url).isNotNull(); + + return Resources.toString(url, StandardCharsets.UTF_8).replace(System.lineSeparator(), "\n"); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcRequestAuditing.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcRequestAuditing.java new file mode 100644 index 000000000000..08a68d3d4335 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcRequestAuditing.java @@ -0,0 +1,109 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.common.collect.ImmutableList; +import com.google.common.truth.IterableSubject; +import io.grpc.Attributes; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ClientStreamTracer; +import io.grpc.ClientStreamTracer.StreamInfo; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Stream; +import org.checkerframework.checker.nullness.qual.NonNull; + +public final class GrpcRequestAuditing + implements ClientInterceptor, AssertRequestHeaders, GrpcInterceptorProvider { + + private final List requestHeaders; + + public GrpcRequestAuditing() { + requestHeaders = Collections.synchronizedList(new ArrayList<>()); + } + + public void clear() { + requestHeaders.clear(); + } + + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + CallOptions withStreamTracerFactory = callOptions.withStreamTracerFactory(new Factory()); + return next.newCall(method, withStreamTracerFactory); + } + + @Override + public IterableSubject assertRequestHeader(String headerName, FilteringPolicy filteringPolicy) { + Metadata.Key key = Metadata.Key.of(headerName, Metadata.ASCII_STRING_MARSHALLER); + Function, Stream> filter; + switch (filteringPolicy) { + case DISTINCT: + filter = Stream::distinct; + break; + case NO_FILTER: + filter = Function.identity(); + break; + default: + throw new IllegalStateException("Unhandled enum value: " + filteringPolicy); + } + return 
getIterableSubject(key, filter); + } + + public IterableSubject assertRequestHeader(Metadata.Key key) { + return getIterableSubject(key, Stream::distinct); + } + + private @NonNull IterableSubject getIterableSubject( + Metadata.Key key, Function, Stream> f) { + Stream stream = requestHeaders.stream().map(m -> m.get(key)).filter(Objects::nonNull); + ImmutableList actual = f.apply(stream).collect(ImmutableList.toImmutableList()); + return assertWithMessage(String.format(Locale.US, "Headers %s", key.name())).that(actual); + } + + @Override + public List getInterceptors() { + return ImmutableList.of(this); + } + + private final class Factory extends ClientStreamTracer.Factory { + @Override + public ClientStreamTracer newClientStreamTracer(StreamInfo info, Metadata headers) { + return new Tracer(); + } + } + + private final class Tracer extends ClientStreamTracer { + + @Override + public void streamCreated(Attributes transportAttrs, Metadata headers) { + requestHeaders.add(headers); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITAccessTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITAccessTest.java new file mode 100644 index 000000000000..69a417ce944a --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITAccessTest.java @@ -0,0 +1,713 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.retry429s; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.cloud.Policy; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.Acl.Project.ProjectRole; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.IamConfiguration; +import com.google.cloud.storage.BucketInfo.PublicAccessPrevention; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BucketField; +import com.google.cloud.storage.Storage.BucketGetOption; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TestUtils; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import java.time.Duration; +import java.util.Collections; +import java.util.List; +import 
java.util.Optional; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = {Backend.PROD}) +public class ITAccessTest { + + private static final Long RETENTION_PERIOD = 5L; + private static final Duration RETENTION_PERIOD_DURATION = Duration.ofSeconds(5); + + @Inject public Storage storage; + + @Inject public Transport transport; + + @Inject public BucketInfo bucket; + + @Inject public Generator generator; + + @Test + public void bucket_defaultAcl_get() { + String bucketName = bucket.getName(); + // lookup an entity from the bucket which is known to exist + Bucket bucketWithAcls = + storage.get( + bucketName, BucketGetOption.fields(BucketField.ACL, BucketField.DEFAULT_OBJECT_ACL)); + + Acl actual = bucketWithAcls.getDefaultAcl().iterator().next(); + + Acl acl = retry429s(() -> storage.getDefaultAcl(bucketName, actual.getEntity()), storage); + + assertThat(acl).isEqualTo(actual); + } + + /** When a bucket does exist, but an acl for the specified entity is not defined return null */ + @Test + public void bucket_defaultAcl_get_notFoundReturnsNull() { + Acl acl = retry429s(() -> storage.getDefaultAcl(bucket.getName(), User.ofAllUsers()), storage); + + assertThat(acl).isNull(); + } + + /** When a bucket doesn't exist, return null for the acl value */ + @Test + public void bucket_defaultAcl_get_bucket404() { + Acl acl = + retry429s(() -> storage.getDefaultAcl(bucket.getName() + "x", User.ofAllUsers()), storage); + + assertThat(acl).isNull(); + } + + @Test + public void bucket_defaultAcl_list() { + String bucketName = bucket.getName(); + // lookup an entity from the bucket which is known to exist + Bucket bucketWithAcls = + storage.get( + bucketName, BucketGetOption.fields(BucketField.ACL, BucketField.DEFAULT_OBJECT_ACL)); + + Acl actual = 
bucketWithAcls.getDefaultAcl().iterator().next(); + + List acls = retry429s(() -> storage.listDefaultAcls(bucketName), storage); + + assertThat(acls).contains(actual); + } + + @Test + public void bucket_defaultAcl_list_bucket404() { + StorageException storageException = + assertThrows( + StorageException.class, + () -> retry429s(() -> storage.listDefaultAcls(bucket.getName() + "x"), storage)); + + assertThat(storageException.getCode()).isEqualTo(404); + } + + @Test + @Ignore("TODO: fix b/468377909 to enable test again") + public void bucket_defaultAcl_create() throws Exception { + BucketInfo bucketInfo = BucketInfo.newBuilder(generator.randomBucketName()).build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo bucket = tempB.getBucket(); + + Acl readAll = Acl.of(User.ofAllAuthenticatedUsers(), Role.READER); + Acl actual = retry429s(() -> storage.createDefaultAcl(bucket.getName(), readAll), storage); + + assertThat(actual.getEntity()).isEqualTo(readAll.getEntity()); + assertThat(actual.getRole()).isEqualTo(readAll.getRole()); + assertThat(actual.getEtag()).isNotEmpty(); + + Bucket bucketUpdated = + storage.get( + bucket.getName(), + BucketGetOption.fields( + TestUtils.filterOutHttpOnlyBucketFields(BucketField.values()))); + assertThat(bucketUpdated.getMetageneration()).isNotEqualTo(bucket.getMetageneration()); + + // etags change when updates happen, drop before our comparison + List expectedAcls = dropEtags(bucket.getDefaultAcl()); + List actualAcls = dropEtags(bucketUpdated.getDefaultAcl()); + assertThat(actualAcls).containsAtLeastElementsIn(expectedAcls); + assertThat(actualAcls).contains(readAll); + } + } + + @Test + public void bucket_defaultAcl_create_bucket404() { + Acl readAll = Acl.of(User.ofAllAuthenticatedUsers(), Role.READER); + StorageException storageException = + assertThrows( + StorageException.class, + () -> + retry429s( + () -> 
storage.createDefaultAcl(bucket.getName() + "x", readAll), storage)); + + assertThat(storageException.getCode()).isEqualTo(404); + } + + @Test + public void bucket_defaultAcl_update() throws Exception { + BucketInfo bucketInfo = BucketInfo.newBuilder(generator.randomBucketName()).build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo bucket = tempB.getBucket(); + + List defaultAcls = bucket.getDefaultAcl(); + assertThat(defaultAcls).isNotEmpty(); + + Predicate isProjectEditor = hasProjectRole(ProjectRole.EDITORS); + + //noinspection OptionalGetWithoutIsPresent + Acl projectEditorAsOwner = + defaultAcls.stream().filter(hasRole(Role.OWNER).and(isProjectEditor)).findFirst().get(); + + // lower the privileges of project editors to writer from owner + Entity entity = projectEditorAsOwner.getEntity(); + Acl projectEditorAsReader = Acl.of(entity, Role.READER); + + Acl actual = + retry429s( + () -> storage.updateDefaultAcl(bucket.getName(), projectEditorAsReader), storage); + + assertThat(actual.getEntity()).isEqualTo(projectEditorAsReader.getEntity()); + assertThat(actual.getRole()).isEqualTo(projectEditorAsReader.getRole()); + assertThat(actual.getEtag()).isNotEmpty(); + + Bucket bucketUpdated = + storage.get( + bucket.getName(), + BucketGetOption.fields( + TestUtils.filterOutHttpOnlyBucketFields(BucketField.values()))); + assertThat(bucketUpdated.getMetageneration()).isNotEqualTo(bucket.getMetageneration()); + + // etags change when updates happen, drop before our comparison + List expectedAcls = + dropEtags( + bucket.getDefaultAcl().stream() + .filter(isProjectEditor.negate()) + .collect(Collectors.toList())); + List actualAcls = dropEtags(bucketUpdated.getDefaultAcl()); + assertThat(actualAcls).containsAtLeastElementsIn(expectedAcls); + assertThat(actualAcls).doesNotContain(projectEditorAsOwner); + assertThat(actualAcls).contains(projectEditorAsReader); + } + } + + @Test + public 
void bucket_defaultAcl_update_bucket404() { + Acl readAll = Acl.of(User.ofAllAuthenticatedUsers(), Role.READER); + StorageException storageException = + assertThrows( + StorageException.class, + () -> + retry429s( + () -> storage.updateDefaultAcl(bucket.getName() + "x", readAll), storage)); + + assertThat(storageException.getCode()).isEqualTo(404); + } + + @Test + public void bucket_defaultAcl_delete() throws Exception { + BucketInfo bucketInfo = BucketInfo.newBuilder(generator.randomBucketName()).build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo bucket = tempB.getBucket(); + + List defaultAcls = bucket.getDefaultAcl(); + assertThat(defaultAcls).isNotEmpty(); + + Predicate isProjectEditor = hasProjectRole(ProjectRole.VIEWERS); + + //noinspection OptionalGetWithoutIsPresent + Acl projectViewerAsReader = + defaultAcls.stream().filter(hasRole(Role.READER).and(isProjectEditor)).findFirst().get(); + + Entity entity = projectViewerAsReader.getEntity(); + + boolean actual = retry429s(() -> storage.deleteDefaultAcl(bucket.getName(), entity), storage); + + assertThat(actual).isTrue(); + + Bucket bucketUpdated = + storage.get( + bucket.getName(), + BucketGetOption.fields( + TestUtils.filterOutHttpOnlyBucketFields(BucketField.values()))); + assertThat(bucketUpdated.getMetageneration()).isNotEqualTo(bucket.getMetageneration()); + + // etags change when deletes happen, drop before our comparison + List expectedAcls = + dropEtags( + bucket.getDefaultAcl().stream() + .filter(isProjectEditor.negate()) + .collect(Collectors.toList())); + List actualAcls = dropEtags(bucketUpdated.getDefaultAcl()); + assertThat(actualAcls).containsAtLeastElementsIn(expectedAcls); + Optional search = + actualAcls.stream().map(Acl::getEntity).filter(e -> e.equals(entity)).findAny(); + assertThat(search.isPresent()).isFalse(); + } + } + + @Test + public void bucket_defaultAcl_delete_bucket404() { + boolean actual 
= + retry429s( + () -> storage.deleteDefaultAcl(bucket.getName() + "x", User.ofAllUsers()), storage); + + assertThat(actual).isEqualTo(false); + } + + @Test + public void bucket_defaultAcl_delete_noExistingAcl() throws Exception { + BucketInfo bucketInfo = BucketInfo.newBuilder(generator.randomBucketName()).build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo bucket = tempB.getBucket(); + boolean actual = + retry429s(() -> storage.deleteDefaultAcl(bucket.getName(), User.ofAllUsers()), storage); + + assertThat(actual).isEqualTo(false); + } + } + + /** Validate legacy deprecated field is redirected correctly */ + @Test + @SuppressWarnings("deprecation") + public void testBucketWithBucketPolicyOnlyEnabled() throws Exception { + doTestUniformBucketLevelAccessAclImpact( + BucketInfo.IamConfiguration.newBuilder().setIsBucketPolicyOnlyEnabled(true).build()); + } + + @Test + public void testBucketWithUniformBucketLevelAccessEnabled() throws Exception { + doTestUniformBucketLevelAccessAclImpact( + BucketInfo.IamConfiguration.newBuilder() + .setIsUniformBucketLevelAccessEnabled(true) + .build()); + } + + private void doTestUniformBucketLevelAccessAclImpact(IamConfiguration iamConfiguration) + throws Exception { + String bucketName = generator.randomBucketName(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder() + .setBucketInfo( + Bucket.newBuilder(bucketName).setIamConfiguration(iamConfiguration).build()) + .setStorage(storage) + .build()) { + BucketInfo bucket = tempB.getBucket(); + + assertTrue(bucket.getIamConfiguration().isUniformBucketLevelAccessEnabled()); + assertNotNull( + bucket.getIamConfiguration().getUniformBucketLevelAccessLockedTimeOffsetDateTime()); + + if (transport == Transport.HTTP) { + StorageException listAclsError = + assertThrows(StorageException.class, () -> storage.listAcls(bucketName)); + assertAll( + () -> 
assertThat(listAclsError.getCode()).isEqualTo(400), + () -> assertThat(listAclsError.getReason()).isEqualTo("invalid")); + + StorageException listDefaultAclsError = + assertThrows(StorageException.class, () -> storage.listDefaultAcls(bucketName)); + assertAll( + () -> assertThat(listDefaultAclsError.getCode()).isEqualTo(400), + () -> assertThat(listDefaultAclsError.getReason()).isEqualTo("invalid")); + } else if (transport == Transport.GRPC) { + assertThat(storage.listAcls(bucketName)).isEmpty(); + assertThat(storage.listDefaultAcls(bucketName)).isEmpty(); + } + } + } + + @Test + @Ignore("TODO: fix b/468377909 to enable test again") + public void testEnableAndDisableUniformBucketLevelAccessOnExistingBucket() throws Exception { + String bpoBucket = generator.randomBucketName(); + BucketInfo.IamConfiguration ublaDisabledIamConfiguration = + BucketInfo.IamConfiguration.newBuilder() + .setIsUniformBucketLevelAccessEnabled(false) + .build(); + BucketInfo bucketInfo = + Bucket.newBuilder(bpoBucket) + .setIamConfiguration(ublaDisabledIamConfiguration) + .setAcl(ImmutableList.of(Acl.of(User.ofAllAuthenticatedUsers(), Role.READER))) + .setDefaultAcl(ImmutableList.of(Acl.of(User.ofAllAuthenticatedUsers(), Role.READER))) + .build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + // BPO is disabled by default. 
+ BucketInfo bucket = tempB.getBucket(); + assertThat(bucket.getIamConfiguration().isUniformBucketLevelAccessEnabled()).isFalse(); + + storage.update( + bucket.toBuilder() + .setAcl(null) + .setDefaultAcl(null) + .setIamConfiguration( + ublaDisabledIamConfiguration.toBuilder() + .setIsUniformBucketLevelAccessEnabled(true) + .build()) + .build(), + BucketTargetOption.metagenerationMatch()); + + Bucket remoteBucket = + storage.get( + bpoBucket, + Storage.BucketGetOption.fields( + BucketField.IAMCONFIGURATION, BucketField.METAGENERATION)); + + assertTrue(remoteBucket.getIamConfiguration().isUniformBucketLevelAccessEnabled()); + assertNotNull(remoteBucket.getIamConfiguration().getUniformBucketLevelAccessLockedTime()); + + remoteBucket.toBuilder() + .setIamConfiguration(ublaDisabledIamConfiguration) + .build() + .update(BucketTargetOption.metagenerationMatch()); + + remoteBucket = + storage.get( + bpoBucket, + Storage.BucketGetOption.fields( + BucketField.IAMCONFIGURATION, BucketField.ACL, BucketField.DEFAULT_OBJECT_ACL)); + + assertFalse(remoteBucket.getIamConfiguration().isUniformBucketLevelAccessEnabled()); + assertEquals(User.ofAllAuthenticatedUsers(), remoteBucket.getDefaultAcl().get(0).getEntity()); + assertEquals(Role.READER, remoteBucket.getDefaultAcl().get(0).getRole()); + assertEquals(User.ofAllAuthenticatedUsers(), remoteBucket.getAcl().get(0).getEntity()); + assertEquals(Role.READER, remoteBucket.getAcl().get(0).getRole()); + } + } + + @Test + public void testEnforcedPublicAccessPreventionOnBucket() throws Exception { + String papBucket = generator.randomBucketName(); + BucketInfo bucketInfo = + BucketInfo.newBuilder(papBucket) + .setIamConfiguration( + IamConfiguration.newBuilder() + .setPublicAccessPrevention(PublicAccessPrevention.ENFORCED) + .build()) + .build(); + + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo bucket = tempB.getBucket(); + // Making bucket public should 
fail. + try { + storage.setIamPolicy( + papBucket, + Policy.newBuilder() + .setVersion(3) + .setBindings( + ImmutableList.of( + com.google.cloud.Binding.newBuilder() + .setRole("roles/storage.objectViewer") + .addMembers("allUsers") + .build())) + .build()); + fail("pap: expected adding allUsers policy to bucket should fail"); + } catch (StorageException storageException) { + // Creating a bucket with roles/storage.objectViewer is not + // allowed when publicAccessPrevention is enabled. + assertEquals(storageException.getCode(), 412); + } + + // Making object public via ACL should fail. + try { + // Create a public object + storage.create( + BlobInfo.newBuilder(bucket, "pap-test-object").build(), + BlobTargetOption.predefinedAcl(Storage.PredefinedAcl.PUBLIC_READ)); + fail("pap: expected adding allUsers ACL to object should fail"); + } catch (StorageException storageException) { + // Creating an object with allUsers roles/storage.viewer permission + // is not allowed. When Public Access Prevention is enabled. + assertEquals(storageException.getCode(), 412); + } + } + } + + @Test + @Ignore("TODO: fix b/468377909 to enable test again") + public void testUnspecifiedPublicAccessPreventionOnBucket() throws Exception { + String papBucket = generator.randomBucketName(); + BucketInfo bucketInfo = + BucketInfo.newBuilder(papBucket) + .setIamConfiguration( + IamConfiguration.newBuilder() + .setPublicAccessPrevention(PublicAccessPrevention.INHERITED) + .build()) + .build(); + + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo bucket = tempB.getBucket(); + + // Now, making object public or making bucket public should succeed. 
+ try { + // Create a public object + storage.create( + BlobInfo.newBuilder(bucket, "pap-test-object").build(), + BlobTargetOption.predefinedAcl(Storage.PredefinedAcl.PUBLIC_READ)); + } catch (StorageException storageException) { + fail("pap: expected adding allUsers ACL to object to succeed"); + } + + // Now, making bucket public should succeed. + try { + storage.setIamPolicy( + papBucket, + Policy.newBuilder() + .setVersion(3) + .setBindings( + ImmutableList.of( + com.google.cloud.Binding.newBuilder() + .setRole("roles/storage.objectViewer") + .addMembers("allUsers") + .build())) + .build()); + } catch (StorageException storageException) { + fail("pap: expected adding allUsers policy to bucket to succeed"); + } + } + } + + @Test + public void changingPAPDoesNotAffectUBLA() throws Exception { + String bucketName = generator.randomBucketName(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder() + .setBucketInfo( + BucketInfo.newBuilder(bucketName) + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setPublicAccessPrevention(PublicAccessPrevention.INHERITED) + .setIsUniformBucketLevelAccessEnabled(false) + .build()) + .build()) + .setStorage(storage) + .build()) { + BucketInfo bucket = tempB.getBucket(); + assertEquals( + bucket.getIamConfiguration().getPublicAccessPrevention(), + BucketInfo.PublicAccessPrevention.INHERITED); + assertFalse(bucket.getIamConfiguration().isUniformBucketLevelAccessEnabled()); + assertFalse(bucket.getIamConfiguration().isBucketPolicyOnlyEnabled()); + + IamConfiguration iamConfiguration1 = + bucket.getIamConfiguration().toBuilder() + .setPublicAccessPrevention(PublicAccessPrevention.ENFORCED) + .build(); + // Update PAP setting to ENFORCED and should not affect UBLA setting. 
+ storage.update( + bucket.toBuilder().setIamConfiguration(iamConfiguration1).build(), + BucketTargetOption.metagenerationMatch()); + Bucket bucket2 = + storage.get(bucketName, Storage.BucketGetOption.fields(BucketField.IAMCONFIGURATION)); + assertEquals( + bucket2.getIamConfiguration().getPublicAccessPrevention(), + BucketInfo.PublicAccessPrevention.ENFORCED); + assertFalse(bucket2.getIamConfiguration().isUniformBucketLevelAccessEnabled()); + assertFalse(bucket2.getIamConfiguration().isBucketPolicyOnlyEnabled()); + } + } + + @Test + public void changingUBLADoesNotAffectPAP() throws Exception { + String bucketName = generator.randomBucketName(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder() + .setBucketInfo( + BucketInfo.newBuilder(bucketName) + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setPublicAccessPrevention(PublicAccessPrevention.INHERITED) + .setIsUniformBucketLevelAccessEnabled(false) + .build()) + .build()) + .setStorage(storage) + .build()) { + BucketInfo bucket = tempB.getBucket(); + assertEquals( + bucket.getIamConfiguration().getPublicAccessPrevention(), + PublicAccessPrevention.INHERITED); + assertFalse(bucket.getIamConfiguration().isUniformBucketLevelAccessEnabled()); + assertFalse(bucket.getIamConfiguration().isBucketPolicyOnlyEnabled()); + + IamConfiguration iamConfiguration1 = + bucket.getIamConfiguration().toBuilder() + .setIsUniformBucketLevelAccessEnabled(true) + .build(); + // Updating UBLA should not affect PAP setting. 
+ Bucket bucket2 = + storage.update( + bucket.toBuilder() + .setIamConfiguration(iamConfiguration1) + // clear out ACL related config in conjunction with enabling UBLA + .setAcl(Collections.emptyList()) + .setDefaultAcl(Collections.emptyList()) + .build(), + BucketTargetOption.metagenerationMatch()); + assertEquals( + bucket2.getIamConfiguration().getPublicAccessPrevention(), + PublicAccessPrevention.INHERITED); + assertTrue(bucket2.getIamConfiguration().isUniformBucketLevelAccessEnabled()); + assertTrue(bucket2.getIamConfiguration().isBucketPolicyOnlyEnabled()); + } + } + + @Test + public void testRetentionPolicyNoLock() throws Exception { + String bucketName = generator.randomBucketName(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder() + .setBucketInfo( + BucketInfo.newBuilder(bucketName).setRetentionPeriod(RETENTION_PERIOD).build()) + .setStorage(storage) + .build()) { + BucketInfo remoteBucket = tempB.getBucket(); + + assertThat(remoteBucket.getRetentionPeriod()).isEqualTo(RETENTION_PERIOD); + assertThat(remoteBucket.getRetentionPeriodDuration()).isEqualTo(RETENTION_PERIOD_DURATION); + assertNotNull(remoteBucket.getRetentionEffectiveTime()); + assertThat(remoteBucket.retentionPolicyIsLocked()).isAnyOf(null, false); + + Bucket remoteBucket2 = + storage.get( + bucketName, + Storage.BucketGetOption.fields( + BucketField.RETENTION_POLICY, BucketField.METAGENERATION)); + assertEquals(RETENTION_PERIOD, remoteBucket2.getRetentionPeriod()); + assertThat(remoteBucket2.getRetentionPeriodDuration()).isEqualTo(RETENTION_PERIOD_DURATION); + assertNotNull(remoteBucket2.getRetentionEffectiveTime()); + assertThat(remoteBucket2.retentionPolicyIsLocked()).isAnyOf(null, false); + + String blobName = generator.randomObjectName(); + BlobInfo blobInfo = BlobInfo.newBuilder(bucketName, blobName).build(); + Blob remoteBlob = storage.create(blobInfo); + assertNotNull(remoteBlob.getRetentionExpirationTime()); + + Bucket remoteBucket3 = + remoteBucket2.toBuilder() + 
.setRetentionPeriod(null) + .build() + .update(BucketTargetOption.metagenerationMatch()); + assertNull(remoteBucket3.getRetentionPeriod()); + } + } + + @Test + @Ignore("TODO: fix b/468377909 to enable test again.") + @SuppressWarnings({"unchecked", "deprecation"}) + public void testEnableAndDisableBucketPolicyOnlyOnExistingBucket() throws Exception { + String bpoBucket = generator.randomBucketName(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder() + .setBucketInfo( + Bucket.newBuilder(bpoBucket) + .setAcl(ImmutableList.of(Acl.of(User.ofAllAuthenticatedUsers(), Role.READER))) + .setDefaultAcl( + ImmutableList.of(Acl.of(User.ofAllAuthenticatedUsers(), Role.READER))) + .build()) + .setStorage(storage) + .build()) { + // BPO is disabled by default. + BucketInfo bucket = tempB.getBucket(); + assertThat(bucket.getIamConfiguration().isBucketPolicyOnlyEnabled()).isFalse(); + + BucketInfo.IamConfiguration bpoEnabledIamConfiguration = + BucketInfo.IamConfiguration.newBuilder().setIsBucketPolicyOnlyEnabled(true).build(); + storage.update( + bucket.toBuilder() + .setAcl(null) + .setDefaultAcl(null) + .setIamConfiguration(bpoEnabledIamConfiguration) + .build(), + BucketTargetOption.metagenerationMatch()); + + Bucket remoteBucket = storage.get(bpoBucket); + + assertTrue(remoteBucket.getIamConfiguration().isBucketPolicyOnlyEnabled()); + assertNotNull(remoteBucket.getIamConfiguration().getBucketPolicyOnlyLockedTime()); + + remoteBucket.toBuilder() + .setIamConfiguration( + bpoEnabledIamConfiguration.toBuilder().setIsBucketPolicyOnlyEnabled(false).build()) + .build() + .update(BucketTargetOption.metagenerationMatch()); + + remoteBucket = + storage.get( + bpoBucket, + Storage.BucketGetOption.fields( + BucketField.IAMCONFIGURATION, BucketField.ACL, BucketField.DEFAULT_OBJECT_ACL)); + + assertFalse(remoteBucket.getIamConfiguration().isBucketPolicyOnlyEnabled()); + assertEquals(User.ofAllAuthenticatedUsers(), remoteBucket.getDefaultAcl().get(0).getEntity()); + 
assertEquals(Role.READER, remoteBucket.getDefaultAcl().get(0).getRole()); + assertEquals(User.ofAllAuthenticatedUsers(), remoteBucket.getAcl().get(0).getEntity()); + assertEquals(Role.READER, remoteBucket.getAcl().get(0).getRole()); + } + } + + static ImmutableList dropEtags(List defaultAcls) { + return defaultAcls.stream() + .map(ITAccessTest::dropEtag) + .collect(ImmutableList.toImmutableList()); + } + + static Acl dropEtag(Acl acl) { + return Acl.of(acl.getEntity(), acl.getRole()); + } + + static Predicate hasRole(Acl.Role expected) { + return acl -> acl.getRole().equals(expected); + } + + static Predicate hasProjectRole(Acl.Project.ProjectRole expected) { + return acl -> { + Entity entity = acl.getEntity(); + if (entity.getType().equals(Entity.Type.PROJECT)) { + return ((Acl.Project) entity).getProjectRole().equals(expected); + } + return false; + }; + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBatchTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBatchTest.java new file mode 100644 index 000000000000..b5948e719ebb --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBatchTest.java @@ -0,0 +1,270 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.StorageBatch; +import com.google.cloud.storage.StorageBatchResult; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableMap; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.time.Clock; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.time.temporal.ChronoUnit; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +public class ITBatchTest { + private static final String CONTENT_TYPE = "text/plain"; + + @Inject + @StorageFixture(Transport.HTTP) + public Storage 
storage; + + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + private String bucketName; + + @Before + public void setUp() throws Exception { + bucketName = bucket.getName(); + } + + @Test + public void testBatchRequest() { + String sourceBlobName1 = "test-batch-request-blob-1"; + String sourceBlobName2 = "test-batch-request-blob-2"; + BlobInfo sourceBlob1 = BlobInfo.newBuilder(bucketName, sourceBlobName1).build(); + BlobInfo sourceBlob2 = BlobInfo.newBuilder(bucketName, sourceBlobName2).build(); + assertNotNull(storage.create(sourceBlob1)); + assertNotNull(storage.create(sourceBlob2)); + + // Batch update request + BlobInfo updatedBlob1 = sourceBlob1.toBuilder().setContentType(CONTENT_TYPE).build(); + BlobInfo updatedBlob2 = sourceBlob2.toBuilder().setContentType(CONTENT_TYPE).build(); + StorageBatch updateBatch = storage.batch(); + StorageBatchResult updateResult1 = updateBatch.update(updatedBlob1); + StorageBatchResult updateResult2 = updateBatch.update(updatedBlob2); + updateBatch.submit(); + Blob remoteUpdatedBlob1 = updateResult1.get(); + Blob remoteUpdatedBlob2 = updateResult2.get(); + assertEquals(sourceBlob1.getBucket(), remoteUpdatedBlob1.getBucket()); + assertEquals(sourceBlob1.getName(), remoteUpdatedBlob1.getName()); + assertEquals(sourceBlob2.getBucket(), remoteUpdatedBlob2.getBucket()); + assertEquals(sourceBlob2.getName(), remoteUpdatedBlob2.getName()); + assertEquals(updatedBlob1.getContentType(), remoteUpdatedBlob1.getContentType()); + assertEquals(updatedBlob2.getContentType(), remoteUpdatedBlob2.getContentType()); + + // Batch get request + StorageBatch getBatch = storage.batch(); + StorageBatchResult getResult1 = getBatch.get(bucketName, sourceBlobName1); + StorageBatchResult getResult2 = getBatch.get(bucketName, sourceBlobName2); + getBatch.submit(); + Blob remoteBlob1 = getResult1.get(); + Blob remoteBlob2 = getResult2.get(); + assertEquals(remoteUpdatedBlob1, remoteBlob1); + assertEquals(remoteUpdatedBlob2, 
remoteBlob2); + + // Batch delete request + StorageBatch deleteBatch = storage.batch(); + StorageBatchResult deleteResult1 = deleteBatch.delete(bucketName, sourceBlobName1); + StorageBatchResult deleteResult2 = deleteBatch.delete(bucketName, sourceBlobName2); + deleteBatch.submit(); + assertTrue(deleteResult1.get()); + assertTrue(deleteResult2.get()); + } + + @Test + public void testBatchRequestManyOperations() throws Exception { + // define some object ids for use in the batch operations + BlobId id1 = BlobId.of(bucketName, generator.randomObjectName()); + BlobId id2 = BlobId.of(bucketName, generator.randomObjectName()); + BlobId id3 = BlobId.of(bucketName, generator.randomObjectName()); + BlobId id4 = BlobId.of(bucketName, generator.randomObjectName()); + BlobId id5 = BlobId.of(bucketName, generator.randomObjectName()); + + ImmutableMap ka = ImmutableMap.of("k", "a"); + ImmutableMap kB = ImmutableMap.of("k", "B"); + + // Create objects which exist before the batch operations + BlobInfo info1 = BlobInfo.newBuilder(id1).setMetadata(ka).build(); + BlobInfo info2 = BlobInfo.newBuilder(id2).setMetadata(ka).build(); + BlobInfo info3 = BlobInfo.newBuilder(id3).setMetadata(ka).build(); + Blob obj1 = storage.create(info1, BlobTargetOption.doesNotExist()); + Blob obj2 = storage.create(info2, BlobTargetOption.doesNotExist()); + Blob obj3 = storage.create(info3, BlobTargetOption.doesNotExist()); + + // Define our batch operations + StorageBatch batch = storage.batch(); + + StorageBatchResult get1Success = batch.get(id1); + StorageBatchResult update2Success = + batch.update( + obj2.toBuilder().setMetadata(kB).build(), BlobTargetOption.metagenerationMatch()); + StorageBatchResult delete3Success = batch.delete(id3); + StorageBatchResult get4Error = batch.get(id4); + StorageBatchResult delete5Error = batch.delete(id5); + + // submit the batch + batch.submit(); + + // verify our expected results + assertAll( + () -> { + Blob blob = get1Success.get(); + 
assertThat(blob.getBucket()).isEqualTo(bucketName); + assertThat(blob.getName()).isEqualTo(id1.getName()); + assertThat(blob.getMetadata()).isEqualTo(ka); + }, + () -> { + Blob blob = update2Success.get(); + assertThat(blob.getBucket()).isEqualTo(bucketName); + assertThat(blob.getName()).isEqualTo(id2.getName()); + assertThat(blob.getMetadata()).isEqualTo(kB); + }, + () -> assertThat(delete3Success.get()).isTrue(), + () -> assertThat(get4Error.get()).isNull(), + () -> assertThat(delete5Error.get()).isFalse()); + } + + @Test + public void testBatchRequestFail() { + String blobName = "test-batch-request-blob-fail"; + BlobInfo blob = BlobInfo.newBuilder(bucketName, blobName).build(); + Blob remoteBlob = storage.create(blob); + assertNotNull(remoteBlob); + BlobInfo updatedBlob = BlobInfo.newBuilder(bucketName, blobName, -1L).build(); + StorageBatch batch = storage.batch(); + StorageBatchResult updateResult = + batch.update(updatedBlob, Storage.BlobTargetOption.generationMatch()); + StorageBatchResult deleteResult1 = + batch.delete(bucketName, blobName, Storage.BlobSourceOption.generationMatch(-1L)); + StorageBatchResult deleteResult2 = batch.delete(BlobId.of(bucketName, blobName, -1L)); + StorageBatchResult getResult1 = + batch.get(bucketName, blobName, Storage.BlobGetOption.generationMatch(-1L)); + StorageBatchResult getResult2 = batch.get(BlobId.of(bucketName, blobName, -1L)); + batch.submit(); + try { + updateResult.get(); + fail("Expected StorageException"); + } catch (StorageException ex) { + // expected + } + try { + deleteResult1.get(); + fail("Expected StorageException"); + } catch (StorageException ex) { + // expected + } + try { + deleteResult2.get(); + fail("Expected an 'Invalid argument' exception"); + } catch (StorageException e) { + assertThat(e.getMessage()).contains("Invalid argument"); + } + try { + getResult1.get(); + fail("Expected StorageException"); + } catch (StorageException ex) { + // expected + } + try { + getResult2.get(); + fail("Expected an 
'Invalid argument' exception"); + } catch (StorageException e) { + assertThat(e.getMessage()).contains("Invalid argument"); + } + } + + @Test + public void batchSuccessiveUpdatesWork() { + byte[] bytes = DataGenerator.base64Characters().genBytes(137); + + List blobs = + IntStream.range(0, 2) + .mapToObj( + i -> { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + try (WriteChannel writer = storage.writer(info, BlobWriteOption.doesNotExist())) { + writer.write(ByteBuffer.wrap(bytes)); + } catch (IOException e) { + throw new RuntimeException(e); + } + return info.getBlobId(); + }) + .collect(Collectors.toList()); + + OffsetDateTime now1 = + Clock.systemUTC().instant().atOffset(ZoneOffset.UTC).truncatedTo(ChronoUnit.MILLIS); + + List update1 = + storage.update( + blobs.stream() + .map(id -> BlobInfo.newBuilder(id).setCustomTimeOffsetDateTime(now1).build()) + .collect(Collectors.toList())); + + OffsetDateTime now2 = + Clock.systemUTC().instant().atOffset(ZoneOffset.UTC).truncatedTo(ChronoUnit.MILLIS); + List update2 = + storage.update( + blobs.stream() + .map(id -> BlobInfo.newBuilder(id).setCustomTimeOffsetDateTime(now2).build()) + .collect(Collectors.toList())); + + assertThat( + update2.stream() + .filter( + b -> + !now2.equals( + b.getCustomTimeOffsetDateTime().truncatedTo(ChronoUnit.MILLIS))) + .map(BlobInfo::getBlobId) + .map(BlobId::toGsUtilUriWithGeneration) + .collect(Collectors.toList())) + .isEmpty(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelTest.java new file mode 100644 index 000000000000..cd6812087f15 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelTest.java @@ -0,0 +1,659 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + 
* you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.fail; + +import com.google.cloud.ReadChannel; +import com.google.cloud.RestorableState; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.CrossRun.Exclude; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import 
com.google.cloud.storage.it.runner.registry.ObjectsFixture; +import com.google.cloud.storage.it.runner.registry.ObjectsFixture.ObjectAndContent; +import com.google.common.collect.ImmutableMap; +import com.google.common.io.BaseEncoding; +import com.google.common.io.ByteStreams; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.FileChannel; +import java.nio.channels.WritableByteChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.util.Arrays; +import java.util.Locale; +import java.util.zip.GZIPInputStream; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = Backend.PROD) +public final class ITBlobReadChannelTest { + + private static final int _16MiB = 16 * 1024 * 1024; + private static final int _256KiB = 256 * 1024; + private static final String BLOB_STRING_CONTENT = "Hello Google Cloud Storage!"; + private static final byte[] COMPRESSED_CONTENT = + BaseEncoding.base64() + .decode("H4sIAAAAAAAAAPNIzcnJV3DPz0/PSVVwzskvTVEILskvSkxPVQQA/LySchsAAAA="); + + @Rule public final TemporaryFolder tmp = new TemporaryFolder(); + + @Inject public Storage storage; + @Inject public BucketInfo bucket; + @Inject public ObjectsFixture objectsFixture; + @Inject public Generator generator; + + @Test + public void testLimit_smallerThanOneChunk() throws IOException { + int srcContentSize = _256KiB; + int rangeBegin = 57; + int rangeEnd = 2384; + int chunkSize = _16MiB; + doLimitTest(srcContentSize, rangeBegin, rangeEnd, chunkSize); + } + + 
@Test + public void testLimit_noSeek() throws IOException { + int srcContentSize = 16; + int rangeBegin = 0; + int rangeEnd = 10; + int chunkSize = _256KiB; + doLimitTest(srcContentSize, rangeBegin, rangeEnd, chunkSize); + } + + @Test + public void testLimit_pastEndOfBlob() throws IOException { + int srcContentSize = _256KiB; + int rangeBegin = _256KiB - 20; + int rangeEnd = _256KiB + 20; + int chunkSize = _16MiB; + doLimitTest(srcContentSize, rangeBegin, rangeEnd, chunkSize); + } + + @Test + public void testLimit_endBeforeBegin() throws IOException { + int srcContentSize = _256KiB; + int rangeBegin = 4; + int rangeEnd = 3; + int chunkSize = _16MiB; + doLimitTest(srcContentSize, rangeBegin, rangeEnd, chunkSize); + } + + @Test + public void testLimit_largerThanOneChunk() throws IOException { + int srcContentSize = _16MiB + (_256KiB * 3); + int rangeBegin = 384; + int rangeEnd = rangeBegin + _16MiB; + int chunkSize = _16MiB; + + doLimitTest(srcContentSize, rangeBegin, rangeEnd, chunkSize); + } + + @Test + public void testLimit_downloadToFile() throws IOException { + String blobName = String.format(Locale.US, "%s/src", generator.randomObjectName()); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + ByteBuffer content = DataGenerator.base64Characters().genByteBuffer(108); + try (WriteChannel writer = storage.writer(BlobInfo.newBuilder(blobId).build())) { + writer.write(content); + } + + File file = tmp.newFile(); + String destFileName = file.getAbsolutePath(); + byte[] expectedBytes = new byte[37 - 14]; + ByteBuffer duplicate = content.duplicate(); + duplicate.position(14); + duplicate.limit(37); + duplicate.get(expectedBytes); + String xxdExpected = xxd(expectedBytes); + + try { + Path path = Paths.get(destFileName); + try (ReadChannel from = storage.reader(blobId); + FileChannel to = FileChannel.open(path, StandardOpenOption.WRITE)) { + from.seek(14); + from.limit(37); + + ByteStreams.copy(from, to); + } + + byte[] readBytes = Files.readAllBytes(path); + 
String xxdActual = xxd(readBytes); + assertThat(xxdActual).isEqualTo(xxdExpected); + } finally { + file.delete(); + } + } + + @Test + public void + testReadChannel_preconditionFailureResultsInIOException_metagenerationMatch_specified() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + Blob remoteBlob = storage.create(blob); + assertNotNull(remoteBlob); + try (ReadChannel reader = + storage.reader(blob.getBlobId(), BlobSourceOption.metagenerationMatch(-1L))) { + reader.read(ByteBuffer.allocate(42)); + fail("IOException was expected"); + } catch (IOException ex) { + // expected + } + } + + @Test + public void testReadChannel_preconditionFailureResultsInIOException_generationMatch_specified() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + Blob remoteBlob = storage.create(blob); + assertNotNull(remoteBlob); + try (ReadChannel reader = + storage.reader(blob.getBlobId(), BlobSourceOption.generationMatch(-1L))) { + reader.read(ByteBuffer.allocate(42)); + fail("IOException was expected"); + } catch (IOException ex) { + // expected + } + } + + @Test + public void testReadChannel_preconditionFailureResultsInIOException_generationMatch_extractor() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + Blob remoteBlob = storage.create(blob); + assertNotNull(remoteBlob); + BlobId blobIdWrongGeneration = BlobId.of(bucket.getName(), blobName, -1L); + try (ReadChannel reader = + storage.reader(blobIdWrongGeneration, BlobSourceOption.generationMatch())) { + reader.read(ByteBuffer.allocate(42)); + fail("IOException was expected"); + } catch (IOException ex) { + // expected + } + } + + @Test + public void ensureReaderReturnsCompressedBytesByDefault() throws IOException { + String blobName = generator.randomObjectName(); + BlobInfo blobInfo = + BlobInfo.newBuilder(bucket, blobName) + 
.setContentType("text/plain") + .setContentEncoding("gzip") + .build(); + Blob blob = storage.create(blobInfo, COMPRESSED_CONTENT); + try (ByteArrayOutputStream output = new ByteArrayOutputStream()) { + try (ReadChannel reader = storage.reader(BlobId.of(bucket.getName(), blobName))) { + reader.setChunkSize(8); + ByteStreams.copy(reader, Channels.newChannel(output)); + } + assertArrayEquals( + BLOB_STRING_CONTENT.getBytes(UTF_8), + storage.readAllBytes( + bucket.getName(), blobName, BlobSourceOption.shouldReturnRawInputStream(false))); + assertArrayEquals(COMPRESSED_CONTENT, output.toByteArray()); + try (GZIPInputStream zipInput = + new GZIPInputStream(new ByteArrayInputStream(output.toByteArray()))) { + assertArrayEquals(BLOB_STRING_CONTENT.getBytes(UTF_8), ByteStreams.toByteArray(zipInput)); + } + } + } + + @Test + public void ensureReaderCanAutoDecompressWhenReturnRawInputStream_false() throws IOException { + String blobName = generator.randomObjectName(); + BlobInfo blobInfo = + BlobInfo.newBuilder(bucket, blobName) + .setContentType("text/plain") + .setContentEncoding("gzip") + .build(); + Blob blob = storage.create(blobInfo, COMPRESSED_CONTENT); + try (ByteArrayOutputStream output = new ByteArrayOutputStream()) { + try (ReadChannel reader = + storage.reader( + BlobId.of(bucket.getName(), blobName), + BlobSourceOption.shouldReturnRawInputStream(false))) { + reader.setChunkSize(8); + ByteStreams.copy(reader, Channels.newChannel(output)); + } + assertArrayEquals(BLOB_STRING_CONTENT.getBytes(UTF_8), output.toByteArray()); + } + } + + @Test + public void returnRawInputStream_true() throws IOException { + String blobName = generator.randomObjectName(); + BlobInfo blobInfo = + BlobInfo.newBuilder(bucket, blobName) + .setContentType("text/plain") + .setContentEncoding("gzip") + .build(); + Blob blob = storage.create(blobInfo, COMPRESSED_CONTENT); + try (ByteArrayOutputStream output = new ByteArrayOutputStream()) { + try (ReadChannel reader = + storage.reader( + 
BlobId.of(bucket.getName(), blobName), + BlobSourceOption.shouldReturnRawInputStream(true))) { + reader.setChunkSize(8); + ByteStreams.copy(reader, Channels.newChannel(output)); + } + assertArrayEquals( + BLOB_STRING_CONTENT.getBytes(UTF_8), + storage.readAllBytes( + bucket.getName(), blobName, BlobSourceOption.shouldReturnRawInputStream(false))); + assertArrayEquals(COMPRESSED_CONTENT, output.toByteArray()); + try (GZIPInputStream zipInput = + new GZIPInputStream(new ByteArrayInputStream(output.toByteArray()))) { + assertArrayEquals(BLOB_STRING_CONTENT.getBytes(UTF_8), ByteStreams.toByteArray(zipInput)); + } + } + } + + @Test + @Exclude(transports = Transport.GRPC) + public void channelIsConsideredOpenUponConstruction() { + ReadChannel reader = storage.reader(objectsFixture.getInfo1().getBlobId()); + assertThat(reader.isOpen()).isTrue(); + reader.close(); + } + + @Test + public void optionsWork() { + byte[] bytes1 = "A".getBytes(UTF_8); + + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + Blob gen1 = storage.create(info, bytes1, BlobTargetOption.doesNotExist()); + + // attempt to read generation=1 && ifGenerationNotMatch=1 + try (ReadChannel r = + storage.reader( + gen1.getBlobId(), BlobSourceOption.generationNotMatch(gen1.getGeneration()))) { + r.read(ByteBuffer.allocate(1)); + } catch (IOException e) { + assertThat(e).hasCauseThat().isInstanceOf(StorageException.class); + StorageException se = (StorageException) e.getCause(); + // b/261214971 for differing response code + assertThat(se.getCode()).isAnyOf(/*json*/ 304, /*grpc*/ 412); + } + } + + @Test + @Exclude(transports = Transport.GRPC) + public void captureAndRestore_position_Limit() throws IOException { + captureAndRestoreTest(26, 51); + } + + @Test + @Exclude(transports = Transport.GRPC) + public void captureAndRestore_position_noLimit() throws IOException { + captureAndRestoreTest(26, null); + } + + @Test + @Exclude(transports = Transport.GRPC) + public void 
captureAndRestore_noPosition_limit() throws IOException { + captureAndRestoreTest(null, 51); + } + + @Test + @Exclude(transports = Transport.GRPC) + public void captureAndRestore_noPosition_noLimit() throws IOException { + captureAndRestoreTest(null, null); + } + + @Test + public void seekAfterReadWorks() throws IOException { + ObjectAndContent obj512KiB = objectsFixture.getObj512KiB(); + BlobInfo gen1 = obj512KiB.getInfo(); + byte[] bytes = obj512KiB.getContent().getBytes(); + + byte[] expected1 = Arrays.copyOfRange(bytes, 0, 4); + byte[] expected2 = Arrays.copyOfRange(bytes, 8, 13); + + String xxdExpected1 = xxd(expected1); + String xxdExpected2 = xxd(expected2); + try (ReadChannel reader = storage.reader(gen1.getBlobId())) { + // read some bytes + byte[] bytes1 = new byte[expected1.length]; + reader.read(ByteBuffer.wrap(bytes1)); + String xxd1 = xxd(bytes1); + assertThat(xxd1).isEqualTo(xxdExpected1); + + // seek forward to a new offset + reader.seek(8); + + // read again + byte[] bytes2 = new byte[expected2.length]; + reader.read(ByteBuffer.wrap(bytes2)); + String xxd2 = xxd(bytes2); + assertThat(xxd2).isEqualTo(xxdExpected2); + } + } + + @Test + public void seekBackToStartAfterReachingEndOfObjectWorks() throws IOException { + ObjectAndContent obj512KiB = objectsFixture.getObj512KiB(); + BlobInfo gen1 = obj512KiB.getInfo(); + byte[] bytes = obj512KiB.getContent().getBytes(); + + int from = bytes.length - 5; + byte[] expected1 = Arrays.copyOfRange(bytes, from, bytes.length); + + String xxdExpected1 = xxd(expected1); + String xxdExpected2 = xxd(bytes); + try (ReadChannel reader = storage.reader(gen1.getBlobId())) { + // seek forward to a new offset + reader.seek(from); + + try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); + WritableByteChannel out = Channels.newChannel(baos)) { + ByteStreams.copy(reader, out); + String xxd = xxd(baos.toByteArray()); + assertThat(xxd).isEqualTo(xxdExpected1); + } + + // seek back to the beginning + reader.seek(0); + 
// read again + try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); + WritableByteChannel out = Channels.newChannel(baos)) { + ByteStreams.copy(reader, out); + String xxd = xxd(baos.toByteArray()); + assertThat(xxd).isEqualTo(xxdExpected2); + } + } + } + + @Test + public void limitAfterReadWorks() throws IOException { + ObjectAndContent obj512KiB = objectsFixture.getObj512KiB(); + BlobInfo gen1 = obj512KiB.getInfo(); + byte[] bytes = obj512KiB.getContent().getBytes(); + + byte[] expected1 = Arrays.copyOfRange(bytes, 0, 4); + byte[] expected2 = Arrays.copyOfRange(bytes, 4, 10); + + String xxdExpected1 = xxd(expected1); + String xxdExpected2 = xxd(expected2); + try (ReadChannel reader = storage.reader(gen1.getBlobId())) { + // read some bytes + byte[] bytes1 = new byte[expected1.length]; + reader.read(ByteBuffer.wrap(bytes1)); + String xxd1 = xxd(bytes1); + assertThat(xxd1).isEqualTo(xxdExpected1); + + // change the limit + reader.limit(10); + + // read again + byte[] bytes2 = new byte[expected2.length]; + reader.read(ByteBuffer.wrap(bytes2)); + String xxd2 = xxd(bytes2); + assertThat(xxd2).isEqualTo(xxdExpected2); + } + } + + @Test + public void readingLastByteReturnsOneByte_seekOnly() throws IOException { + int length = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(length); + + BlobInfo info1 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + Blob gen1 = storage.create(info1, bytes, BlobTargetOption.doesNotExist()); + + byte[] expected1 = Arrays.copyOfRange(bytes, 9, 10); + String xxdExpected1 = xxd(expected1); + try (ReadChannel reader = storage.reader(gen1.getBlobId()); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + WritableByteChannel writer = Channels.newChannel(baos)) { + reader.seek(length - 1); + ByteStreams.copy(reader, writer); + byte[] bytes1 = baos.toByteArray(); + String xxd1 = xxd(bytes1); + assertThat(xxd1).isEqualTo(xxdExpected1); + } + } + + @Test + public void 
readingLastByteReturnsOneByte_seekOnly_negativeOffset() throws IOException { + int length = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(length); + + BlobInfo info1 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + Blob gen1 = storage.create(info1, bytes, BlobTargetOption.doesNotExist()); + + byte[] expected1 = Arrays.copyOfRange(bytes, 9, 10); + String xxdExpected1 = xxd(expected1); + try (ReadChannel reader = storage.reader(gen1.getBlobId()); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + WritableByteChannel writer = Channels.newChannel(baos)) { + reader.seek(-1); + ByteStreams.copy(reader, writer); + byte[] bytes1 = baos.toByteArray(); + String xxd1 = xxd(bytes1); + assertThat(xxd1).isEqualTo(xxdExpected1); + } + } + + @Test + public void readingLastByteReturnsOneByte_seekAndLimit() throws IOException { + int length = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(length); + + BlobInfo info1 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + Blob gen1 = storage.create(info1, bytes, BlobTargetOption.doesNotExist()); + + byte[] expected1 = Arrays.copyOfRange(bytes, 9, 10); + String xxdExpected1 = xxd(expected1); + try (ReadChannel reader = storage.reader(gen1.getBlobId()); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + WritableByteChannel writer = Channels.newChannel(baos)) { + reader.seek(length - 1); + reader.limit(length); + ByteStreams.copy(reader, writer); + byte[] bytes1 = baos.toByteArray(); + String xxd1 = xxd(bytes1); + assertThat(xxd1).isEqualTo(xxdExpected1); + } + } + + /** + * This is specifically in place for compatibility with BlobReadChannelV1. + * + *

This is behavior is a bug, and should be fixed at the next major version + */ + @Test + @Exclude(transports = Transport.GRPC) + public void responseWith416ReturnsZeroAndLeavesTheChannelOpen() throws IOException { + int length = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(length); + + BlobInfo info1 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + Blob gen1 = storage.create(info1, bytes, BlobTargetOption.doesNotExist()); + + try (ReadChannel reader = storage.reader(gen1.getBlobId())) { + reader.seek(length); + ByteBuffer buf = ByteBuffer.allocate(1); + int read = reader.read(buf); + assertThat(read).isEqualTo(-1); + assertThat(reader.isOpen()).isTrue(); + int read2 = reader.read(buf); + assertThat(read2).isEqualTo(-1); + } + } + + @Test + public void responseWith416AttemptingToReadStartingPastTheEndOfTheObjectIsTerminallyEOF() + throws IOException { + int length = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(length); + + BlobInfo info1 = + BlobInfo.newBuilder(bucket, generator.randomObjectName()) + .setMetadata(ImmutableMap.of("gen", "1")) + .build(); + Blob gen1 = storage.create(info1, bytes, BlobTargetOption.doesNotExist()); + + try (ReadChannel reader = storage.reader(gen1.getBlobId())) { + reader.seek(length + 1); + ByteBuffer buf = ByteBuffer.allocate(1); + assertThat(reader.read(buf)).isEqualTo(-1); + assertThat(reader.read(buf)).isEqualTo(-1); + + BlobInfo update = gen1.toBuilder().setMetadata(ImmutableMap.of("gen", "2")).build(); + BlobInfo gen2 = + storage.create( + update, + DataGenerator.base64Characters().genBytes(length + 2), + BlobTargetOption.generationMatch()); + + assertThat(reader.read(buf)).isEqualTo(-1); + assertThat(reader.read(buf)).isEqualTo(-1); + } + } + + /** Read channel does not consider itself closed once it returns {@code -1} from read. 
*/ + @Test + public void readChannelIsAlwaysOpen_willReturnNegative1UntilExplicitlyClosed() throws Exception { + int length = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(length); + + BlobInfo info1 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + Blob gen1 = storage.create(info1, bytes, BlobTargetOption.doesNotExist()); + + try (ReadChannel reader = storage.reader(gen1.getBlobId())) { + ByteBuffer buf = ByteBuffer.allocate(length * 2); + int read = reader.read(buf); + assertAll( + () -> assertThat(read).isEqualTo(length), () -> assertThat(reader.isOpen()).isTrue()); + int read2 = reader.read(buf); + assertAll(() -> assertThat(read2).isEqualTo(-1), () -> assertThat(reader.isOpen()).isTrue()); + int read3 = reader.read(buf); + assertAll(() -> assertThat(read3).isEqualTo(-1), () -> assertThat(reader.isOpen()).isTrue()); + reader.close(); + assertThrows(ClosedChannelException.class, () -> reader.read(buf)); + } + } + + private void captureAndRestoreTest(@Nullable Integer position, @Nullable Integer endOffset) + throws IOException { + ObjectAndContent obj512KiB = objectsFixture.getObj512KiB(); + BlobInfo gen1 = obj512KiB.getInfo(); + byte[] bytes = obj512KiB.getContent().getBytes(); + + String xxdExpected1; + String xxdExpected2; + { + int begin = position != null ? position : 0; + int end = endOffset != null ? 
endOffset : bytes.length; + byte[] expected1 = Arrays.copyOfRange(bytes, begin, begin + 10); + byte[] expected2 = Arrays.copyOfRange(bytes, begin, end); + xxdExpected1 = xxd(expected1); + xxdExpected2 = xxd(expected2); + } + + ReadChannel reader = storage.reader(gen1.getBlobId()); + if (position != null) { + reader.seek(position); + } + if (endOffset != null) { + reader.limit(endOffset); + } + + ByteBuffer buf = ByteBuffer.allocate(bytes.length); + buf.limit(10); + + int read1 = reader.read(buf); + assertThat(read1).isEqualTo(10); + String xxd1 = xxd(buf); + assertThat(xxd1).isEqualTo(xxdExpected1); + buf.limit(buf.capacity()); + + RestorableState capture = reader.capture(); + reader.close(); + + try (ReadChannel restore = capture.restore()) { + restore.read(buf); + String xxd2 = xxd(buf); + assertThat(xxd2).isEqualTo(xxdExpected2); + } + } + + private void doLimitTest(int srcContentSize, int rangeBegin, int rangeEnd, int chunkSize) + throws IOException { + String blobName = String.format(Locale.US, "%s/src", generator.randomObjectName()); + BlobInfo src = BlobInfo.newBuilder(bucket, blobName).build(); + ByteBuffer content = DataGenerator.base64Characters().genByteBuffer(srcContentSize); + ByteBuffer dup = content.duplicate(); + dup.position(rangeBegin); + int newLimit = Math.min(dup.capacity(), rangeEnd); + dup.limit(newLimit); + byte[] expectedSubContent = new byte[dup.remaining()]; + dup.get(expectedSubContent); + + try (WriteChannel writer = storage.writer(src)) { + writer.write(content); + } + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (ReadChannel reader = storage.reader(src.getBlobId()); + WritableByteChannel writer = Channels.newChannel(baos)) { + reader.setChunkSize(chunkSize); + if (rangeBegin > 0) { + reader.seek(rangeBegin); + } + reader.limit(rangeEnd); + ByteStreams.copy(reader, writer); + } + + byte[] actual = baos.toByteArray(); + assertThat(xxd(actual)).isEqualTo(xxd(expectedSubContent)); + } +} diff --git 
a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelV2RetryTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelV2RetryTest.java new file mode 100644 index 000000000000..1526d9fa983e --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelV2RetryTest.java @@ -0,0 +1,252 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.slice; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.client.http.HttpRequest; +import com.google.api.gax.rpc.FixedHeaderProvider; +import com.google.cloud.ReadChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TestUtils; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.it.runner.registry.TestBench; +import com.google.cloud.storage.it.runner.registry.TestBench.RetryTestResource; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.io.ByteStreams; +import com.google.gson.JsonArray; +import com.google.gson.JsonObject; +import java.io.ByteArrayOutputStream; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.WritableByteChannel; +import java.nio.charset.StandardCharsets; +import java.util.Locale; +import java.util.Random; +import org.junit.Test; +import 
org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.TEST_BENCH) +public final class ITBlobReadChannelV2RetryTest { + + private static final int _512KiB = 512 * 1024; + + @Inject public TestBench testBench; + + @Inject + @StorageFixture(Transport.HTTP) + public Storage storage; + + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + @Test + public void generationIsLockedForRetries() throws Exception { + + StorageOptions baseOptions = storage.getOptions(); + byte[] bytes = DataGenerator.base64Characters().genBytes(_512KiB); + + BlobId id = BlobId.of(bucket.getName(), generator.randomObjectName()); + Blob gen1 = + storage.create(BlobInfo.newBuilder(id).build(), bytes, BlobTargetOption.doesNotExist()); + + byte[] slice1 = slice(bytes, 0, 10); + byte[] slice2 = slice(bytes, 10, bytes.length); + + String xxdExpected1 = xxd(slice1); + String xxdExpected2 = xxd(slice2); + + JsonObject instructions = new JsonObject(); + JsonArray value = new JsonArray(); + value.add("return-broken-stream-after-256K"); + instructions.add("storage.objects.get", value); + RetryTestResource retryTestResource = new RetryTestResource(instructions); + RetryTestResource retryTest = testBench.createRetryTest(retryTestResource); + + ImmutableMap headers = ImmutableMap.of("x-retry-test-id", retryTest.id); + + RequestAuditing requestAuditing = new RequestAuditing(); + StorageOptions testStorageOptions = + baseOptions.toBuilder() + .setTransportOptions(requestAuditing) + .setHeaderProvider(FixedHeaderProvider.create(headers)) + .build(); + + ByteBuffer buf1 = ByteBuffer.allocate(10); + ByteBuffer buf2 = ByteBuffer.allocate(_512KiB); + try (Storage testStorage = testStorageOptions.getService(); + // explicitly use id rather than gen1, we want to start the read without the generation + // present + ReadChannel r = testStorage.reader(id)) { + r.setChunkSize(16); + // perform a read to open the first socket against gen1 + // This should leave the 
socket open with bytes left to read since we've set our 'chunkSize' + // to 16, we won't read far enough into the object yet to trigger the + // 'broken-stream-after-256K'. + r.read(buf1); + String xxd1 = xxd(buf1); + assertThat(xxd1).isEqualTo(xxdExpected1); + // verify no generation was passed + requestAuditing.assertQueryParam("generation", ImmutableList.of()); + + // now that the socket is open, modify the object so that it will get a new generation + Blob gen2 = + storage.create( + gen1, "A".getBytes(StandardCharsets.UTF_8), BlobTargetOption.generationMatch()); + + // Now try and read the rest of the object. + // after reaching the 256Kth byte the stream should break causing the ReadChannel to try and + // resume the download + r.read(buf2); + String xxd2 = xxd(buf2); + assertThat(xxd2).isEqualTo(xxdExpected2); + requestAuditing.assertQueryParam("generation", gen1.getGeneration(), Long::new); + } + } + + @Test + public void restartingAStreamForGzipContentIsAtTheCorrectOffset() throws Exception { + + StorageOptions baseOptions = storage.getOptions(); + Random rand = new Random(918273645); + + ChecksummedTestContent uncompressed; + ChecksummedTestContent gzipped; + { + // must use random strategy, base64 characters compress too well. 
512KiB uncompressed becomes + // ~1600 bytes which is smaller than our 'return-broken-stream-after-256K' rule + byte[] bytes = DataGenerator.rand(rand).genBytes(_512KiB); + uncompressed = ChecksummedTestContent.of(bytes); + gzipped = ChecksummedTestContent.of(TestUtils.gzipBytes(bytes)); + } + BlobId id = BlobId.of(bucket.getName(), generator.randomObjectName()); + BlobInfo info = + BlobInfo.newBuilder(id) + .setCrc32c(gzipped.getCrc32cBase64()) + .setContentType("application/vnd.j.bytes") + .setContentEncoding("gzip") + .build(); + Blob gen1 = storage.create(info, gzipped.getBytes(), BlobTargetOption.doesNotExist()); + + JsonObject instructions = new JsonObject(); + JsonArray value = new JsonArray(); + value.add("return-broken-stream-after-256K"); + instructions.add("storage.objects.get", value); + RetryTestResource retryTestResource = new RetryTestResource(instructions); + RetryTestResource retryTest = testBench.createRetryTest(retryTestResource); + + ImmutableMap headers = ImmutableMap.of("x-retry-test-id", retryTest.id); + + RequestAuditing requestAuditing = new RequestAuditing(); + StorageOptions testStorageOptions = + baseOptions.toBuilder() + .setTransportOptions(requestAuditing) + .setHeaderProvider(FixedHeaderProvider.create(headers)) + .build(); + + String expected = xxd(uncompressed.getBytes()); + + // explicitly set reader to decompress + BlobSourceOption option = BlobSourceOption.shouldReturnRawInputStream(false); + try (Storage testStorage = testStorageOptions.getService(); + ReadChannel r = testStorage.reader(gen1.getBlobId(), option); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + WritableByteChannel w = Channels.newChannel(baos)) { + long copy = ByteStreams.copy(r, w); + String actual = xxd(baos.toByteArray()); + ImmutableList requests = requestAuditing.getRequests(); + assertAll( + () -> assertThat(copy).isEqualTo(uncompressed.getBytes().length), + () -> assertThat(actual).isEqualTo(expected), + () -> 
assertThat(requests.get(0).getHeaders().get("range")).isNull(), + () -> + assertThat(requests.get(1).getHeaders().get("range")) + .isEqualTo(ImmutableList.of(String.format(Locale.US, "bytes=%d-", 256 * 1024)))); + } + } + + @Test + public void resumeFromCorrectOffsetWhenPartialReadSuccess() throws Exception { + StorageOptions baseOptions = storage.getOptions(); + Random rand = new Random(918273645); + + ChecksummedTestContent uncompressed; + { + // must use random strategy, base64 characters compress too well. 512KiB uncompressed becomes + // ~1600 bytes which is smaller than our 'return-broken-stream-after-256K' rule + byte[] bytes = DataGenerator.rand(rand).genBytes(_512KiB); + // byte[] bytes = DataGenerator.base64Characters().genBytes(_512KiB); + uncompressed = ChecksummedTestContent.of(bytes); + } + BlobId id = BlobId.of(bucket.getName(), generator.randomObjectName()); + BlobInfo info = BlobInfo.newBuilder(id).build(); + Blob gen1 = storage.create(info, uncompressed.getBytes(), BlobTargetOption.doesNotExist()); + + JsonObject instructions = new JsonObject(); + JsonArray value = new JsonArray(); + value.add("return-broken-stream-after-256K"); + instructions.add("storage.objects.get", value); + RetryTestResource retryTestResource = new RetryTestResource(instructions); + RetryTestResource retryTest = testBench.createRetryTest(retryTestResource); + + ImmutableMap headers = ImmutableMap.of("x-retry-test-id", retryTest.id); + + RequestAuditing requestAuditing = new RequestAuditing(); + StorageOptions testStorageOptions = + baseOptions.toBuilder() + .setTransportOptions(requestAuditing) + .setHeaderProvider(FixedHeaderProvider.create(headers)) + .build(); + + String expected = xxd(uncompressed.getBytes()); + + try (Storage testStorage = testStorageOptions.getService(); + ReadChannel r = testStorage.reader(gen1.getBlobId()); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + WritableByteChannel w = Channels.newChannel(baos)) { + long copy = 
ByteStreams.copy(r, w); + String actual = xxd(baos.toByteArray()); + ImmutableList requests = requestAuditing.getRequests(); + assertAll( + () -> assertThat(copy).isEqualTo(uncompressed.getBytes().length), + () -> assertThat(actual).isEqualTo(expected), + () -> assertThat(requests.get(0).getHeaders().get("range")).isNull(), + () -> + assertThat(requests.get(1).getHeaders().get("range")) + .isEqualTo(ImmutableList.of(String.format(Locale.US, "bytes=%d-", 256 * 1024)))); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadMaskTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadMaskTest.java new file mode 100644 index 000000000000..5c76995a57d3 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadMaskTest.java @@ -0,0 +1,218 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobField; +import com.google.cloud.storage.Storage.BlobGetOption; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.ITBlobReadMaskTest.BlobReadMaskTestParameters; +import com.google.cloud.storage.it.ReadMaskTestUtils.Args; +import com.google.cloud.storage.it.ReadMaskTestUtils.LazyAssertion; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.ParallelFriendly; +import com.google.cloud.storage.it.runner.annotations.Parameterized; +import com.google.cloud.storage.it.runner.annotations.Parameterized.Parameter; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.cloud.storage.it.runner.registry.ObjectsFixture; +import com.google.common.collect.ImmutableList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@Parameterized(BlobReadMaskTestParameters.class) +@SingleBackend(Backend.PROD) +@ParallelFriendly +public final class ITBlobReadMaskTest { + + @Inject + @StorageFixture(Transport.HTTP) + public Storage sh; + + @Inject + @StorageFixture(Transport.GRPC) + public Storage sg; + + @Inject public BucketInfo bucket; + + @Inject 
public ObjectsFixture objectsFixture; + + @Parameter public Args args; + + private BlobField field; + private LazyAssertion assertion; + + @Before + public void setUp() throws Exception { + field = args.getField(); + assertion = args.getAssertion(); + } + + @Test + public void get() { + BlobInfo blobJson = getBlob(sh); + BlobInfo blobGrpc = getBlob(sg); + + assertion.validate(blobJson, blobGrpc); + } + + @Test + public void list() { + List blobsJson = listBlobs(sh); + List blobsGrpc = listBlobs(sg); + + assertion.pairwiseList().validate(blobsJson, blobsGrpc); + } + + private BlobInfo getBlob(Storage s) { + return s.get(objectsFixture.getInfo1().getBlobId(), BlobGetOption.fields(field)).asBlobInfo(); + } + + private List listBlobs(Storage s) { + Page p = + s.list( + bucket.getName(), + BlobListOption.prefix(ReadMaskTestUtils.class.getSimpleName()), + BlobListOption.fields(field)); + return StreamSupport.stream(p.iterateAll().spliterator(), false) + .map(Blob::asBlobInfo) + .collect(Collectors.toList()); + } + + public static final class BlobReadMaskTestParameters implements Parameterized.ParametersProvider { + + @Override + public ImmutableList parameters() { + ImmutableList> args = + ImmutableList.of( + new Args<>(BlobField.ACL, LazyAssertion.equal()), + new Args<>(BlobField.BUCKET, LazyAssertion.equal()), + new Args<>( + BlobField.CACHE_CONTROL, + LazyAssertion.apiaryNullGrpcDefault("", BlobInfo::getCacheControl)), + // for non-composed objects, json and grpc differ in their resulting values. For json, + // a null will be returned whereas for grpc we will get the type default value which + // in this case is 0. The only possible way we could guard against this would be if + // the proto changed component_count to proto3_optional forcing it to generate a + // hasComponentCount. 
+ new Args<>( + BlobField.COMPONENT_COUNT, + (jsonT, grpcT) -> { + if (grpcT.getComponentCount() == 0) { + assertThat(jsonT.getComponentCount()).isNull(); + } else { + assertThat(grpcT.getComponentCount()).isEqualTo(jsonT.getComponentCount()); + } + }), + new Args<>( + BlobField.CONTENT_DISPOSITION, + LazyAssertion.apiaryNullGrpcDefault("", BlobInfo::getContentDisposition)), + new Args<>( + BlobField.CONTENT_ENCODING, + LazyAssertion.apiaryNullGrpcDefault("", BlobInfo::getContentEncoding)), + new Args<>( + BlobField.CONTENT_LANGUAGE, + LazyAssertion.apiaryNullGrpcDefault("", BlobInfo::getContentLanguage)), + // we'd expect this to follow the pattern of the other Content-* headers, however via + // the json api GCS will default null contentType to application/octet-stream. Note, + // however it doesn't carry this forward to composed objects so a composed object can + // have a null/empty content-type. + new Args<>( + BlobField.CONTENT_TYPE, + (jsonT, grpcT) -> { + assertThat(jsonT.getContentType()).isAnyOf("application/octet-stream", null); + assertThat(grpcT.getContentType()).isAnyOf("application/octet-stream", ""); + }), + new Args<>(BlobField.CRC32C, LazyAssertion.equal()), + new Args<>(BlobField.CUSTOMER_ENCRYPTION, LazyAssertion.equal()), + new Args<>(BlobField.CUSTOM_TIME, LazyAssertion.equal()), + new Args<>(BlobField.ETAG, LazyAssertion.equal()), + new Args<>( + BlobField.EVENT_BASED_HOLD, + LazyAssertion.apiaryNullGrpcDefault(false, BlobInfo::getEventBasedHold)), + new Args<>(BlobField.GENERATION, LazyAssertion.equal()), + new Args<>( + BlobField.ID, + (jsonT, grpcT) -> { + assertThat(jsonT.getGeneratedId()).isNotEmpty(); + assertThat(grpcT.getGeneratedId()).isNull(); + }), + new Args<>( + BlobField.KIND, + (jsonT, grpcT) -> { + // pass - we don't expose kind in the public surface + }), + new Args<>(BlobField.KMS_KEY_NAME, LazyAssertion.equal()), + new Args<>(BlobField.MD5HASH, LazyAssertion.equal()), + new Args<>( + BlobField.MEDIA_LINK, + (jsonT, grpcT) ->
{ + assertThat(jsonT.getMediaLink()).isNotEmpty(); + assertThat(grpcT.getMediaLink()).isNull(); + }), + new Args<>(BlobField.METADATA, LazyAssertion.equal()), + new Args<>(BlobField.METAGENERATION, LazyAssertion.equal()), + new Args<>(BlobField.NAME, LazyAssertion.equal()), + new Args<>(BlobField.OWNER, LazyAssertion.equal()), + new Args<>(BlobField.RETENTION_EXPIRATION_TIME, LazyAssertion.equal()), + new Args<>( + BlobField.SELF_LINK, + (jsonT, grpcT) -> { + assertThat(jsonT.getSelfLink()).isNotEmpty(); + assertThat(grpcT.getSelfLink()).isNull(); + }), + new Args<>(BlobField.SIZE, LazyAssertion.equal()), + new Args<>(BlobField.STORAGE_CLASS, LazyAssertion.equal()), + new Args<>( + BlobField.TEMPORARY_HOLD, + LazyAssertion.apiaryNullGrpcDefault(false, BlobInfo::getTemporaryHold)), + new Args<>(BlobField.TIME_CREATED, LazyAssertion.equal()), + new Args<>(BlobField.TIME_DELETED, LazyAssertion.equal()), + new Args<>(BlobField.TIME_STORAGE_CLASS_UPDATED, LazyAssertion.equal()), + new Args<>(BlobField.UPDATED, LazyAssertion.equal()), + new Args<>( + BlobField.RETENTION, + LazyAssertion.skip("TODO: jesse fill in buganizer bug here")), + new Args<>(BlobField.OBJECT_CONTEXTS, LazyAssertion.equal()), + new Args<>(BlobField.SOFT_DELETE_TIME, LazyAssertion.equal()), + new Args<>(BlobField.HARD_DELETE_TIME, LazyAssertion.equal())); + List argsDefined = + args.stream().map(Args::getField).map(Enum::name).sorted().collect(Collectors.toList()); + + List definedFields = + Arrays.stream(BlobField.values()).map(Enum::name).sorted().collect(Collectors.toList()); + + assertThat(argsDefined).containsExactlyElementsIn(definedFields); + return args; + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobWriteChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobWriteChannelTest.java new file mode 100644 index 000000000000..d910a68defe8 --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobWriteChannelTest.java @@ -0,0 +1,250 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.api.client.json.JsonParser; +import com.google.api.gax.rpc.FixedHeaderProvider; +import com.google.cloud.NoCredentials; +import com.google.cloud.RestorableState; +import com.google.cloud.WriteChannel; +import com.google.cloud.conformance.storage.v1.InstructionList; +import com.google.cloud.conformance.storage.v1.Method; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.HttpStorageOptions; +import com.google.cloud.storage.PackagePrivateMethodWorkarounds; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobWriteOption; +import 
com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.it.runner.registry.TestBench; +import com.google.cloud.storage.it.runner.registry.TestBench.RetryTestResource; +import com.google.common.collect.ImmutableMap; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.util.Arrays; +import java.util.Locale; +import java.util.Optional; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.TEST_BENCH) +public final class ITBlobWriteChannelTest { + + private static final String NOW_STRING; + private static final String BLOB_STRING_CONTENT = "Hello Google Cloud Storage!"; + + static { + Instant now = Clock.systemUTC().instant(); + DateTimeFormatter formatter = + DateTimeFormatter.ISO_LOCAL_DATE_TIME.withZone(ZoneId.from(ZoneOffset.UTC)); + NOW_STRING = formatter.format(now); + } + + @Inject public TestBench testBench; + + @Inject + @StorageFixture(Transport.HTTP) + public Storage storage; + + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + /** + * Test for unexpected EOF at the beginning of trying to read the json response. + * + *

The error of this case shows up as an IllegalArgumentException rather than a json parsing + * error which comes from {@link JsonParser}{@code #startParsing()} which fails to find a node to + * start parsing. + */ + @Test + public void testJsonEOF_0B() throws IOException { + int contentSize = 512 * 1024; + int cappedByteCount = 0; + + doJsonUnexpectedEOFTest(contentSize, cappedByteCount); + } + + /** Test for unexpected EOF 10 bytes into the json response */ + @Test + public void testJsonEOF_10B() throws IOException { + int contentSize = 512 * 1024; + int cappedByteCount = 10; + + doJsonUnexpectedEOFTest(contentSize, cappedByteCount); + } + + @Test + public void testWriteChannelExistingBlob() throws IOException { + HttpStorageOptions baseStorageOptions = + StorageOptions.http() + .setCredentials(NoCredentials.getInstance()) + .setHost(testBench.getBaseUri()) + .setProjectId("test-project-id") + .build(); + Storage storage = baseStorageOptions.getService(); + Instant now = Clock.systemUTC().instant(); + DateTimeFormatter formatter = + DateTimeFormatter.ISO_LOCAL_DATE_TIME.withZone(ZoneId.from(ZoneOffset.UTC)); + String nowString = formatter.format(now); + BucketInfo bucketInfo = BucketInfo.of(generator.randomBucketName()); + String blobPath = + String.format(Locale.US, "%s/%s/blob", generator.randomObjectName(), nowString); + BlobId blobId = BlobId.of(bucketInfo.getName(), blobPath); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build(); + storage.create(bucketInfo); + storage.create(blobInfo); + byte[] stringBytes; + try (WriteChannel writer = storage.writer(blobInfo)) { + stringBytes = BLOB_STRING_CONTENT.getBytes(UTF_8); + writer.write(ByteBuffer.wrap(stringBytes)); + } + assertArrayEquals(stringBytes, storage.readAllBytes(blobInfo.getBlobId())); + assertTrue(storage.delete(bucketInfo.getName(), blobInfo.getName())); + } + + @Test + public void changeChunkSizeAfterWrite() throws IOException { + BlobInfo info = BlobInfo.newBuilder(bucket, 
generator.randomObjectName()).build(); + + int _512KiB = 512 * 1024; + byte[] bytes = DataGenerator.base64Characters().genBytes(_512KiB + 13); + try (WriteChannel writer = storage.writer(info, BlobWriteOption.doesNotExist())) { + writer.setChunkSize(2 * 1024 * 1024); + writer.write(ByteBuffer.wrap(bytes, 0, _512KiB)); + assertThrows(IllegalStateException.class, () -> writer.setChunkSize(768 * 1024)); + } + } + + @Test + public void restoreProperlyPlumbsBeginOffset() throws IOException { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + int _256KiB = 256 * 1024; + + byte[] bytes1 = DataGenerator.base64Characters().genBytes(_256KiB); + byte[] bytes2 = DataGenerator.base64Characters().genBytes(73); + + int allLength = bytes1.length + bytes2.length; + byte[] expected = Arrays.copyOf(bytes1, allLength); + System.arraycopy(bytes2, 0, expected, bytes1.length, bytes2.length); + String xxdExpected = xxd(expected); + + RestorableState capture; + { + WriteChannel writer = storage.writer(info, BlobWriteOption.doesNotExist()); + writer.setChunkSize(_256KiB); + writer.write(ByteBuffer.wrap(bytes1)); + // explicitly do not close writer, it will finalize the session + capture = writer.capture(); + } + + assertThat(capture).isNotNull(); + WriteChannel restored = capture.restore(); + restored.write(ByteBuffer.wrap(bytes2)); + restored.close(); + + byte[] readAllBytes = storage.readAllBytes(info.getBlobId()); + assertThat(readAllBytes).hasLength(expected.length); + String xxdActual = xxd(readAllBytes); + assertThat(xxdActual).isEqualTo(xxdExpected); + } + + private void doJsonUnexpectedEOFTest(int contentSize, int cappedByteCount) throws IOException { + String blobPath = + String.format(Locale.US, "%s/%s/blob", generator.randomObjectName(), NOW_STRING); + + BucketInfo bucketInfo = BucketInfo.of(generator.randomBucketName()); + BlobInfo blobInfoGen0 = BlobInfo.newBuilder(bucketInfo, blobPath, 0L).build(); + + RetryTestResource retryTestResource = + 
RetryTestResource.newRetryTestResource( + Method.newBuilder().setName("storage.objects.insert").build(), + InstructionList.newBuilder() + .addInstructions( + String.format( + Locale.US, "return-broken-stream-final-chunk-after-%dB", cappedByteCount)) + .build(), + Transport.HTTP.name()); + RetryTestResource retryTest = testBench.createRetryTest(retryTestResource); + + StorageOptions baseOptions = + StorageOptions.http() + .setCredentials(NoCredentials.getInstance()) + .setHost(testBench.getBaseUri()) + .setProjectId("project-id") + .setHeaderProvider( + FixedHeaderProvider.create(ImmutableMap.of("x-retry-test-id", retryTest.id))) + .build(); + + Storage testStorage = baseOptions.getService(); + + testStorage.create(bucketInfo); + + ByteBuffer content = DataGenerator.base64Characters().genByteBuffer(contentSize); + // create a duplicate to preserve the initial offset and limit for assertion later + ByteBuffer expected = content.duplicate(); + + WriteChannel w = testStorage.writer(blobInfoGen0, BlobWriteOption.generationMatch()); + w.write(content); + w.close(); + + RetryTestResource postRunState = testBench.getRetryTest(retryTest); + assertTrue(postRunState.completed); + + Optional optionalStorageObject = + PackagePrivateMethodWorkarounds.maybeGetBlobInfoFunction().apply(w); + + assertThat(optionalStorageObject.isPresent()).isTrue(); + BlobInfo internalInfo = optionalStorageObject.get(); + assertThat(internalInfo.getName()).isEqualTo(blobInfoGen0.getName()); + + // construct a new blob id, without a generation, so we get the latest when we perform a get + BlobId blobIdGen1 = BlobId.of(internalInfo.getBucket(), internalInfo.getName()); + Blob blobGen2 = testStorage.get(blobIdGen1); + assertEquals(contentSize, (long) blobGen2.getSize()); + assertNotEquals(blobInfoGen0.getGeneration(), blobGen2.getGeneration()); + ByteArrayOutputStream actualData = new ByteArrayOutputStream(); + blobGen2.downloadTo(actualData); + ByteBuffer actual = 
ByteBuffer.wrap(actualData.toByteArray()); + assertEquals(expected, actual); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobWriteSessionCommonSemanticsTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobWriteSessionCommonSemanticsTest.java new file mode 100644 index 000000000000..663a3b1de20d --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobWriteSessionCommonSemanticsTest.java @@ -0,0 +1,270 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BlobWriteSession; +import com.google.cloud.storage.BlobWriteSessionConfig; +import com.google.cloud.storage.BlobWriteSessionConfigs; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.BufferAllocationStrategy; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.ExecutorSupplier; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartCleanupStrategy; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartMetadataFieldDecorator; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartNamingStrategy; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.ITBlobWriteSessionCommonSemanticsTest.ParamsProvider; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.Parameterized; +import com.google.cloud.storage.it.runner.annotations.Parameterized.Parameter; +import 
com.google.cloud.storage.it.runner.annotations.Parameterized.ParametersProvider; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.WritableByteChannel; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.PROD}, + transports = {Transport.HTTP, Transport.GRPC}) +@Parameterized(ParamsProvider.class) +public final class ITBlobWriteSessionCommonSemanticsTest { + + @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); + + @Inject public Storage injectedStorage; + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + @Parameter public Params params; + + private Storage storage; + + @Before + public void setUp() throws Exception { + Path tmpDir = temporaryFolder.newFolder().toPath(); + BlobWriteSessionConfig config = params.ctor.apply(tmpDir); + + StorageOptions originalOptions = injectedStorage.getOptions(); + StorageOptions newOptions = null; + try { + newOptions = originalOptions.toBuilder().setBlobWriteSessionConfig(config).build(); + } catch (IllegalArgumentException e) { + assertThat(e).hasMessageThat().contains("not compatible with this"); + assumeTrue(false); + } + storage = newOptions.getService(); + } + + @After + public void tearDown() throws Exception { + if (storage != null) { + storage.close(); + } + } + + @Test + public void closingAnOpenedSessionWithoutCallingWriteShouldMakeAnEmptyObject() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + BlobInfo 
info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobWriteSession session = storage.blobWriteSession(info, BlobWriteOption.doesNotExist()); + + WritableByteChannel open = session.open(); + open.close(); + BlobInfo gen1 = session.getResult().get(1, TimeUnit.SECONDS); + + // sometimes testbench will not define `.size = 0`, default it here if we get null + Long size = gen1.getSize(); + if (size == null) { + size = 0L; + } + assertThat(size).isEqualTo(0); + } + + @Test + public void attemptingToUseASessionWhichResultsInFailureShouldThrowAStorageException() { + // attempt to write to a bucket which we have not created + String badBucketName = bucket.getName() + "x"; + BlobInfo info = BlobInfo.newBuilder(badBucketName, generator.randomObjectName()).build(); + + BlobWriteSession session = storage.blobWriteSession(info, BlobWriteOption.doesNotExist()); + StorageException se = + assertThrows( + StorageException.class, + () -> { + WritableByteChannel open = session.open(); + open.close(); + }); + + assertThat(se.getCode()).isEqualTo(404); + } + + @Test + public void callingOpenIsOnlyAllowedOnce() throws Exception { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobWriteSession session = storage.blobWriteSession(info, BlobWriteOption.doesNotExist()); + + WritableByteChannel open = session.open(); + IllegalStateException se = assertThrows(IllegalStateException.class, session::open); + + assertAll(() -> assertThat(se.getMessage()).contains("already open")); + } + + @Test + public void getResultErrorsWhenTheSessionErrors() throws Exception { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + byte[] helloWorld = "Hello World".getBytes(StandardCharsets.UTF_8); + storage.create(info, helloWorld, BlobTargetOption.doesNotExist()); + + BlobWriteSession session = + storage.blobWriteSession( + info, + // this precondition will result in failure + BlobWriteOption.doesNotExist()); + + 
try (WritableByteChannel open = session.open()) { + open.write(ByteBuffer.wrap(helloWorld)); + } catch (StorageException se) { + assertThat(se.getCode()).isEqualTo(412); + } catch (IOException ioe) { + assertThat(ioe).hasCauseThat().isInstanceOf(StorageException.class); + StorageException se = (StorageException) ioe.getCause(); + assertThat(se.getCode()).isEqualTo(412); + } + + ExecutionException resultSe = + assertThrows(ExecutionException.class, () -> session.getResult().get(10, TimeUnit.SECONDS)); + + assertAll( + () -> assertThat(resultSe).hasCauseThat().isInstanceOf(StorageException.class), + () -> assertThat(((StorageException) resultSe.getCause()).getCode()).isEqualTo(412)); + } + + @Test + public void userProvidedCrc32cValueIsRespected() throws IOException { + assumeFalse("b/226975500", params.desc.startsWith("p")); + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(5 * 1024 * 1024 + 17)); + ChecksummedTestContent missingAByte = testContent.slice(0, testContent.getBytes().length - 1); + + BlobInfo info = + BlobInfo.newBuilder(bucket, generator.randomObjectName()) + .setCrc32c(missingAByte.getCrc32cBase64()) + .build(); + BlobWriteSession session = + storage.blobWriteSession( + info, BlobWriteOption.crc32cMatch(), BlobWriteOption.doesNotExist()); + WritableByteChannel open = session.open(); + try { + open.write(ByteBuffer.wrap(testContent.getBytes())); + } finally { + StorageException se = assertThrows(StorageException.class, () -> open.close()); + assertThat(se.getCode()).isEqualTo(400); + } + } + + @Test + public void userProvidedMd5ValueIsRespected() throws IOException { + assumeFalse("b/226975500", params.desc.startsWith("p")); + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(5 * 1024 * 1024 + 17)); + ChecksummedTestContent missingAByte = testContent.slice(0, testContent.getBytes().length - 1); + + BlobInfo info = + 
BlobInfo.newBuilder(bucket, generator.randomObjectName()) + .setMd5(missingAByte.getMd5Base64()) + .build(); + BlobWriteSession session = + storage.blobWriteSession(info, BlobWriteOption.md5Match(), BlobWriteOption.doesNotExist()); + WritableByteChannel open = session.open(); + try { + open.write(ByteBuffer.wrap(testContent.getBytes())); + } finally { + StorageException se = assertThrows(StorageException.class, () -> open.close()); + assertThat(se.getCode()).isEqualTo(400); + } + } + + public static final class ParamsProvider implements ParametersProvider { + @Override + public ImmutableList parameters() { + final int _2MiB = 2 * 1024 * 1024; + final int _4MiB = 4 * 1024 * 1024; + return ImmutableList.of( + new Params("default", p -> BlobWriteSessionConfigs.getDefault()), + new Params("c!c.2MiB", p -> BlobWriteSessionConfigs.getDefault().withChunkSize(_2MiB)), + new Params("b!p.1", BlobWriteSessionConfigs::bufferToDiskThenUpload), + new Params("j!p.1", p -> BlobWriteSessionConfigs.journaling(ImmutableList.of(p))), + new Params( + "p!t.c&b.s*&p.4MiB&c.n&m.n", + p -> + BlobWriteSessionConfigs.parallelCompositeUpload() + .withExecutorSupplier(ExecutorSupplier.cachedPool()) + .withPartNamingStrategy(PartNamingStrategy.noPrefix()) + .withBufferAllocationStrategy(BufferAllocationStrategy.simple(_4MiB)) + .withPartCleanupStrategy(PartCleanupStrategy.never()) + .withPartMetadataFieldDecorator(PartMetadataFieldDecorator.noOp())), + new Params("d!c.2MiB", p -> BlobWriteSessionConfigs.bidiWrite().withBufferSize(_2MiB))); + } + } + + public interface ParamsCtor { + BlobWriteSessionConfig apply(Path p) throws IOException; + } + + public static final class Params { + private final String desc; + private final ParamsCtor ctor; + + public Params(String desc, ParamsCtor ctor) { + this.desc = desc; + this.ctor = ctor; + } + + @Override + public String toString() { + return desc; + } + } +} diff --git 
a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobWriteSessionTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobWriteSessionTest.java new file mode 100644 index 000000000000..adc3763a59e8 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobWriteSessionTest.java @@ -0,0 +1,137 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BlobWriteSession; +import com.google.cloud.storage.BlobWriteSessionConfigs; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.HttpStorageOptions; +import com.google.cloud.storage.JournalingBlobWriteSessionConfig; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import java.nio.ByteBuffer; +import java.nio.channels.WritableByteChannel; +import java.nio.file.Path; +import java.util.concurrent.TimeUnit; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = {Backend.PROD}) +public final class ITBlobWriteSessionTest { + + @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); + + @Inject public Storage storage; + + @Inject public BucketInfo bucket; + + @Inject public Generator generator; + + @Test + public void allDefaults() throws Exception { + doTest(storage); + } + + @Test + public void bufferToTempDirThenUpload() throws Exception { + Path path = temporaryFolder.newFolder().toPath(); + StorageOptions options = + storage.getOptions().toBuilder() + 
.setBlobWriteSessionConfig(BlobWriteSessionConfigs.bufferToDiskThenUpload(path)) + .build(); + try (Storage s = options.getService()) { + doTest(s); + } + } + + @Test + @CrossRun.Exclude(transports = Transport.GRPC) + public void journalingNotSupportedByHttp() { + HttpStorageOptions.Builder builder = ((HttpStorageOptions) storage.getOptions()).toBuilder(); + + Path rootPath = temporaryFolder.getRoot().toPath(); + JournalingBlobWriteSessionConfig journaling = + BlobWriteSessionConfigs.journaling(ImmutableList.of(rootPath)); + + IllegalArgumentException iae = + assertThrows( + IllegalArgumentException.class, () -> builder.setBlobWriteSessionConfig(journaling)); + + assertThat(iae).hasMessageThat().contains("HTTP transport"); + } + + @Test + public void overrideDefaultBufferSize() throws Exception { + StorageOptions options = + (storage.getOptions()) + .toBuilder() + .setBlobWriteSessionConfig( + BlobWriteSessionConfigs.getDefault().withChunkSize(256 * 1024)) + .build(); + + try (Storage s = options.getService()) { + doTest(s); + } + } + + @Test + @CrossRun.Exclude(transports = Transport.HTTP) + public void bidiTest() throws Exception { + StorageOptions options = + (storage.getOptions()) + .toBuilder().setBlobWriteSessionConfig(BlobWriteSessionConfigs.bidiWrite()).build(); + try (Storage s = options.getService()) { + doTest(s); + } + } + + private void doTest(Storage underTest) throws Exception { + BlobWriteSession sess = + underTest.blobWriteSession( + BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(), + BlobWriteOption.doesNotExist()); + + byte[] bytes = DataGenerator.base64Characters().genBytes(512 * 1024); + try (WritableByteChannel w = sess.open()) { + w.write(ByteBuffer.wrap(bytes)); + } + + BlobInfo gen1 = sess.getResult().get(10, TimeUnit.SECONDS); + + byte[] allBytes = storage.readAllBytes(gen1.getBlobId()); + + assertThat(allBytes).isEqualTo(bytes); + } +} diff --git 
a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketAclTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketAclTest.java new file mode 100644 index 000000000000..47be90d8eae9 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketAclTest.java @@ -0,0 +1,311 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.retry429s; +import static com.google.cloud.storage.it.ITAccessTest.dropEtags; +import static com.google.cloud.storage.it.ITAccessTest.hasProjectRole; +import static com.google.cloud.storage.it.ITAccessTest.hasRole; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.Acl.Project.ProjectRole; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketField; +import com.google.cloud.storage.Storage.BucketGetOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TestUtils; +import 
com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.ParallelFriendly; +import com.google.cloud.storage.it.runner.registry.Generator; +import java.util.List; +import java.util.Optional; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = {Backend.PROD}) +@ParallelFriendly +public final class ITBucketAclTest { + @Inject public Storage storage; + + @Inject public BucketInfo bucket; + + @Inject public Generator generator; + + @Test + public void bucket_acl_get() { + String bucketName = bucket.getName(); + // lookup an entity from the bucket which is known to exist + Bucket bucketWithAcls = storage.get(bucketName, BucketGetOption.fields(BucketField.ACL)); + + Acl actual = bucketWithAcls.getAcl().iterator().next(); + + Acl acl = retry429s(() -> storage.getAcl(bucketName, actual.getEntity()), storage); + + assertThat(acl).isEqualTo(actual); + } + + /** When a bucket does exist, but an acl for the specified entity is not defined return null */ + @Test + public void bucket_acl_get_notFoundReturnsNull() { + Acl acl = retry429s(() -> storage.getAcl(bucket.getName(), User.ofAllUsers()), storage); + + assertThat(acl).isNull(); + } + + /** When a bucket doesn't exist, return null for the acl value */ + @Test + public void bucket_acl_get_bucket404() { + Acl acl = retry429s(() -> storage.getAcl(bucket.getName() + "x", User.ofAllUsers()), storage); + + assertThat(acl).isNull(); + } + + @Test + public void bucket_acl_list() { + String bucketName = 
bucket.getName(); + // lookup an entity from the bucket which is known to exist + Bucket bucketWithAcls = storage.get(bucketName, BucketGetOption.fields(BucketField.ACL)); + + Acl actual = bucketWithAcls.getAcl().iterator().next(); + + List acls = retry429s(() -> storage.listAcls(bucketName), storage); + + assertThat(acls).contains(actual); + } + + @Test + public void bucket_acl_list_bucket404() { + StorageException storageException = + assertThrows( + StorageException.class, + () -> retry429s(() -> storage.listAcls(bucket.getName() + "x"), storage)); + + assertThat(storageException.getCode()).isEqualTo(404); + } + + @Test + @Ignore("TODO: fix b/468377909 to enable test again") + public void bucket_acl_create() throws Exception { + BucketInfo bucketInfo = BucketInfo.newBuilder(generator.randomBucketName()).build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo bucket = tempB.getBucket(); + + Acl readAll = Acl.of(User.ofAllAuthenticatedUsers(), Role.READER); + Acl actual = retry429s(() -> storage.createAcl(bucket.getName(), readAll), storage); + + assertThat(actual.getEntity()).isEqualTo(readAll.getEntity()); + assertThat(actual.getRole()).isEqualTo(readAll.getRole()); + assertThat(actual.getEtag()).isNotEmpty(); + + Bucket bucketUpdated = + storage.get( + bucket.getName(), + BucketGetOption.fields( + TestUtils.filterOutHttpOnlyBucketFields(BucketField.values()))); + assertThat(bucketUpdated.getMetageneration()).isNotEqualTo(bucket.getMetageneration()); + + // etags change when updates happen, drop before our comparison + List expectedAcls = dropEtags(bucket.getAcl()); + List actualAcls = dropEtags(bucketUpdated.getAcl()); + assertThat(actualAcls).containsAtLeastElementsIn(expectedAcls); + assertThat(actualAcls).contains(readAll); + } + } + + @Test + public void bucket_acl_create_bucket404() { + Acl readAll = Acl.of(User.ofAllAuthenticatedUsers(), Role.READER); + StorageException 
storageException = + assertThrows( + StorageException.class, + () -> retry429s(() -> storage.createAcl(bucket.getName() + "x", readAll), storage)); + + assertThat(storageException.getCode()).isEqualTo(404); + } + + @Test + public void bucket_acl_update() throws Exception { + BucketInfo bucketInfo = BucketInfo.newBuilder(generator.randomBucketName()).build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo bucket = tempB.getBucket(); + + List acls = bucket.getAcl(); + assertThat(acls).isNotEmpty(); + + Predicate isProjectEditor = hasProjectRole(ProjectRole.EDITORS); + + //noinspection OptionalGetWithoutIsPresent + Acl projectEditorAsOwner = + acls.stream().filter(hasRole(Role.OWNER).and(isProjectEditor)).findFirst().get(); + + // lower the privileges of project editors to writer from owner + Entity entity = projectEditorAsOwner.getEntity(); + Acl projectEditorAsReader = Acl.of(entity, Role.READER); + + Acl actual = + retry429s(() -> storage.updateAcl(bucket.getName(), projectEditorAsReader), storage); + + assertThat(actual.getEntity()).isEqualTo(projectEditorAsReader.getEntity()); + assertThat(actual.getRole()).isEqualTo(projectEditorAsReader.getRole()); + assertThat(actual.getEtag()).isNotEmpty(); + + Bucket bucketUpdated = + storage.get( + bucket.getName(), + BucketGetOption.fields( + TestUtils.filterOutHttpOnlyBucketFields(BucketField.values()))); + assertThat(bucketUpdated.getMetageneration()).isNotEqualTo(bucket.getMetageneration()); + + // etags change when updates happen, drop before our comparison + List expectedAcls = + dropEtags( + bucket.getAcl().stream() + .filter(isProjectEditor.negate()) + .collect(Collectors.toList())); + List actualAcls = dropEtags(bucketUpdated.getAcl()); + assertThat(actualAcls).containsAtLeastElementsIn(expectedAcls); + assertThat(actualAcls).doesNotContain(projectEditorAsOwner); + assertThat(actualAcls).contains(projectEditorAsReader); + } + } + + 
@Test + public void bucket_acl_update_bucket404() { + Acl readAll = Acl.of(User.ofAllAuthenticatedUsers(), Role.READER); + StorageException storageException = + assertThrows( + StorageException.class, + () -> retry429s(() -> storage.updateAcl(bucket.getName() + "x", readAll), storage)); + + assertThat(storageException.getCode()).isEqualTo(404); + } + + /** Update of an acl that doesn't exist should create it */ + @Test + @Ignore("TODO: fix b/468377909 to enable test again") + public void bucket_acl_404_acl_update() throws Exception { + BucketInfo bucketInfo = BucketInfo.newBuilder(generator.randomBucketName()).build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo mgen1 = tempB.getBucket(); + Acl readAll = Acl.of(User.ofAllAuthenticatedUsers(), Role.READER); + Acl actual = + // todo: json non-idempotent? + retry429s(() -> storage.updateAcl(mgen1.getName(), readAll), storage); + + assertThat(actual.getEntity()).isEqualTo(readAll.getEntity()); + assertThat(actual.getRole()).isEqualTo(readAll.getRole()); + assertThat(actual.getEtag()).isNotEmpty(); + + Bucket updated = + storage.get( + mgen1.getName(), + BucketGetOption.fields( + TestUtils.filterOutHttpOnlyBucketFields(BucketField.values()))); + assertThat(updated.getMetageneration()).isNotEqualTo(bucket.getMetageneration()); + + // etags change when updates happen, drop before our comparison + List expectedAcls = dropEtags(mgen1.getAcl()); + List actualAcls = dropEtags(updated.getAcl()); + assertThat(actualAcls).containsAtLeastElementsIn(expectedAcls); + assertThat(actualAcls).contains(readAll); + } + } + + @Test + public void bucket_acl_delete() throws Exception { + BucketInfo bucketInfo = BucketInfo.newBuilder(generator.randomBucketName()).build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo bucket = tempB.getBucket(); + + List acls = 
bucket.getAcl(); + assertThat(acls).isNotEmpty(); + + Predicate<Acl> isProjectEditor = hasProjectRole(ProjectRole.VIEWERS); + + //noinspection OptionalGetWithoutIsPresent + Acl projectViewerAsReader = + acls.stream().filter(hasRole(Role.READER).and(isProjectEditor)).findFirst().get(); + + Entity entity = projectViewerAsReader.getEntity(); + + boolean actual = retry429s(() -> storage.deleteAcl(bucket.getName(), entity), storage); + + assertThat(actual).isTrue(); + + Bucket bucketUpdated = + storage.get( + bucket.getName(), + BucketGetOption.fields( + TestUtils.filterOutHttpOnlyBucketFields(BucketField.values()))); + assertThat(bucketUpdated.getMetageneration()).isNotEqualTo(bucket.getMetageneration()); + + // etags change when deletes happen, drop before our comparison + List<Acl> expectedAcls = + dropEtags( + bucket.getAcl().stream() + .filter(isProjectEditor.negate()) + .collect(Collectors.toList())); + List<Acl> actualAcls = dropEtags(bucketUpdated.getAcl()); + assertThat(actualAcls).containsAtLeastElementsIn(expectedAcls); + Optional<Entity> search = + actualAcls.stream().map(Acl::getEntity).filter(e -> e.equals(entity)).findAny(); + assertThat(search.isPresent()).isFalse(); + } + } + + @Test + public void bucket_acl_delete_bucket404() { + boolean actual = + retry429s(() -> storage.deleteAcl(bucket.getName() + "x", User.ofAllUsers()), storage); + + assertThat(actual).isEqualTo(false); + } + + @Test + public void bucket_acl_delete_noExistingAcl() throws Exception { + BucketInfo bucketInfo = BucketInfo.newBuilder(generator.randomBucketName()).build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo bucket = tempB.getBucket(); + boolean actual = + retry429s(() -> storage.deleteAcl(bucket.getName(), User.ofAllUsers()), storage); + + assertThat(actual).isEqualTo(false); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketIamPolicyTest.java
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketIamPolicyTest.java new file mode 100644 index 000000000000..ded7e96b8b93 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketIamPolicyTest.java @@ -0,0 +1,225 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.Binding; +import com.google.cloud.Condition; +import com.google.cloud.Identity; +import com.google.cloud.Policy; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketSourceOption; +import com.google.cloud.storage.StorageRoles; +import com.google.cloud.storage.TestUtils; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.ParallelFriendly; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import 
java.util.List; +import java.util.stream.Collector; +import java.util.stream.Collectors; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = {Backend.PROD}) +@ParallelFriendly +public final class ITBucketIamPolicyTest { + @Inject public Storage storage; + + @Inject public BucketInfo bucketInfo; + + @Inject public Generator generator; + + private Identity projectOwner; + private Identity projectEditor; + private Identity projectViewer; + + @Before + public void setUp() throws Exception { + String projectId = storage.getOptions().getProjectId(); + projectOwner = Identity.projectOwner(projectId); + projectEditor = Identity.projectEditor(projectId); + projectViewer = Identity.projectViewer(projectId); + } + + /** + * In order to define an IAM Condition, policy version 3 and Uniform Bucket Level Access must both + * be used. + * + *

Define a policy with a condition and verify it can be read back and decoded equivalently. + */ + @Test + public void iamPolicyWithCondition() throws Exception { + BucketSourceOption opt = BucketSourceOption.requestedPolicyVersion(3); + Policy policy = + Policy.newBuilder() + .setVersion(3) + .setBindings( + ImmutableList.of( + Binding.newBuilder() + .setRole(StorageRoles.legacyBucketReader().toString()) + .setMembers(ImmutableList.of(projectViewer.strValue())) + .build(), + Binding.newBuilder() + .setRole(StorageRoles.legacyBucketOwner().toString()) + .setMembers( + ImmutableList.of(projectEditor.strValue(), projectOwner.strValue())) + .build(), + Binding.newBuilder() + .setRole(StorageRoles.legacyObjectReader().toString()) + .setMembers( + ImmutableList.of( + "serviceAccount:storage-python@spec-test-ruby-samples.iam.gserviceaccount.com")) + .setCondition( + Condition.newBuilder() + .setTitle("Title") + .setDescription("Description") + .setExpression( + "resource.name.startsWith(\"projects/_/buckets/bucket-name/objects/prefix-a-\")") + .build()) + .build())) + .build(); + + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder() + .setBucketInfo( + // create a bucket with UBLA set to true + BucketInfo.newBuilder(generator.randomBucketName()) + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setIsUniformBucketLevelAccessEnabled(true) + .build()) + .build()) + .setStorage(storage) + .build()) { + BucketInfo bucket = tempB.getBucket(); + String bucketName = bucket.getName(); + + // Set the policy on the bucket + Policy setResult = + storage.setIamPolicy( + bucketName, + policy, + BucketSourceOption.metagenerationMatch(bucket.getMetageneration()), + opt); + assertPolicyEqual(policy, setResult); + + Policy actual = storage.getIamPolicy(bucketName, opt); + assertPolicyEqual(policy, actual); + } + } + + @Test + public void iamPolicyWithoutCondition() throws Exception { + BucketSourceOption opt = BucketSourceOption.requestedPolicyVersion(1); + 
Policy policy = + Policy.newBuilder() + .setVersion(1) + .setBindings( + ImmutableMap.of( + StorageRoles.legacyBucketOwner(), + ImmutableSet.of(projectOwner, projectEditor), + StorageRoles.legacyBucketReader(), + ImmutableSet.of(projectViewer))) + .build(); + + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder() + .setBucketInfo( + // create a bucket without UBLA + BucketInfo.newBuilder(generator.randomBucketName()) + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setIsUniformBucketLevelAccessEnabled(false) + .build()) + .build()) + .setStorage(storage) + .build()) { + BucketInfo bucket = tempB.getBucket(); + String bucketName = bucket.getName(); + + // Set the policy on the bucket + Policy setResult = + storage.setIamPolicy( + bucketName, + policy, + BucketSourceOption.metagenerationMatch(bucket.getMetageneration()), + opt); + assertPolicyEqual(policy, setResult); + + Policy actual = storage.getIamPolicy(bucketName, opt); + assertPolicyEqual(policy, actual); + } + } + + @Test + public void testIamPermissions() { + List<Boolean> expectedResult = ImmutableList.of(true, true); + ImmutableList<String> permissions = + ImmutableList.of("storage.buckets.getIamPolicy", "storage.buckets.setIamPolicy"); + List<Boolean> actual = storage.testIamPermissions(bucketInfo.getName(), permissions); + assertThat(actual).isEqualTo(expectedResult); + } + + private static void assertPolicyEqual(Policy expected, Policy actual) throws Exception { + TestUtils.assertAll( + () -> assertThat(actual.getVersion()).isEqualTo(expected.getVersion()), + () -> assertBindingsEqual(expected.getBindingsList(), actual.getBindingsList())); + } + + private static void assertBindingsEqual(List<Binding> expected, List<Binding> actual) { + + // pre-stringify the value to be compared to make it easier to diff if there is a failure + // ordering is not necessarily maintained across RPCs, after stringification sort before + // comparison + String e = stringifyBindings(expected); + String a = stringifyBindings(actual); + 
assertThat(a).isEqualTo(e); + } + + private static String stringifyBindings(List<Binding> bindings) { + Collector<CharSequence, ?, String> joining = Collectors.joining(",\n\t", "[\n\t", "\n]"); + // ordering is not necessarily maintained across RPCs + // Sort any lists before stringification + return bindings.stream() + .map( + b -> { + Binding.Builder builder = b.toBuilder(); + builder.setRole(b.getRole()); + builder.setCondition(b.getCondition()); + builder.setMembers( + b.getMembers().stream().sorted().collect(ImmutableList.toImmutableList())); + return builder.build(); + }) + .map(Object::toString) + .sorted() + .collect(joining); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketLifecycleRulesTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketLifecycleRulesTest.java new file mode 100644 index 000000000000..d7f14d8282d2 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketLifecycleRulesTest.java @@ -0,0 +1,143 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.LifecycleRule; +import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleAction; +import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleCondition; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = {Backend.PROD}) +public final class ITBucketLifecycleRulesTest { + + @Inject public Generator generator; + + @Inject public Storage storage; + + @Test + public void deleteRule_addingALabelToABucketWithASingleDeleteRuleOnlyModifiesTheLabels() + throws Exception { + LifecycleRule d1 = + new LifecycleRule( + LifecycleAction.newDeleteAction(), + LifecycleCondition.newBuilder() + .setMatchesPrefix(ImmutableList.of("pre")) + .setMatchesSuffix(ImmutableList.of("suf")) + .setAge(50) + .build()); + BucketInfo info = baseInfo().setLifecycleRules(ImmutableList.of(d1)).build(); + + try (TemporaryBucket tmp = + TemporaryBucket.newBuilder().setBucketInfo(info).setStorage(storage).build()) { + BucketInfo bucket = tmp.getBucket(); + assertThat(bucket.getLabels()).isNull(); + + ImmutableMap labels = ImmutableMap.of("label1", "val1"); + BucketInfo 
withLabels = bucket.toBuilder().setLabels(labels).build(); + Bucket update = storage.update(withLabels, BucketTargetOption.metagenerationMatch()); + assertThat(update.getLabels()).isEqualTo(labels); + assertThat(update.getLifecycleRules()).isEqualTo(ImmutableList.of(d1)); + } + } + + @Test + public void condition_ageDays_0_shouldWork() throws Exception { + LifecycleRule d1 = + new LifecycleRule( + LifecycleAction.newAbortIncompleteMPUploadAction(), + LifecycleCondition.newBuilder().setAge(0).build()); + BucketInfo info = baseInfo().setLifecycleRules(ImmutableList.of(d1)).build(); + + try (TemporaryBucket tmp = + TemporaryBucket.newBuilder().setBucketInfo(info).setStorage(storage).build()) { + BucketInfo bucket = tmp.getBucket(); + Bucket update = storage.get(bucket.getName()); + assertThat(update.getLifecycleRules()).isEqualTo(ImmutableList.of(d1)); + } + } + + @Test + public void deleteRule_modifyingLifecycleRulesMatchesLastOperation() throws Exception { + BucketInfo info; + { + LifecycleRule d1 = + new LifecycleRule( + LifecycleAction.newDeleteAction(), + LifecycleCondition.newBuilder() + .setMatchesPrefix(ImmutableList.of("pre")) + .setMatchesSuffix(ImmutableList.of("suf")) + .setAge(50) + .build()); + info = baseInfo().setLifecycleRules(ImmutableList.of(d1)).build(); + } + + try (TemporaryBucket tmp = + TemporaryBucket.newBuilder().setBucketInfo(info).setStorage(storage).build()) { + BucketInfo bucket = tmp.getBucket(); + + ImmutableList newRules = + bucket.getLifecycleRules().stream() + .map( + r -> { + if (r.getAction().equals(LifecycleAction.newDeleteAction())) { + LifecycleCondition condition = r.getCondition(); + LifecycleCondition.Builder b = condition.toBuilder(); + b.setMatchesPrefix( + ImmutableList.builder() + .addAll(condition.getMatchesPrefix()) + .add("a") + .build()); + b.setMatchesSuffix( + ImmutableList.builder() + .addAll(condition.getMatchesSuffix()) + .add("z") + .build()); + return new LifecycleRule(LifecycleAction.newDeleteAction(), 
b.build()); + } else { + return r; + } + }) + .collect(ImmutableList.toImmutableList()); + + BucketInfo modifiedRules = bucket.toBuilder().setLifecycleRules(newRules).build(); + Bucket update = storage.update(modifiedRules, BucketTargetOption.metagenerationMatch()); + assertThat(update.getLifecycleRules()).isEqualTo(newRules); + } + } + + private BucketInfo.Builder baseInfo() { + return BucketInfo.newBuilder(generator.randomBucketName()); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketLifecycleTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketLifecycleTest.java new file mode 100644 index 000000000000..7ffbbbf156dc --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketLifecycleTest.java @@ -0,0 +1,146 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; + +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.LifecycleRule; +import com.google.cloud.storage.BucketInfo.LifecycleRule.AbortIncompleteMPUAction; +import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleAction; +import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleCondition; +import com.google.cloud.storage.BucketInfo.LifecycleRule.SetStorageClassLifecycleAction; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageClass; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import java.time.OffsetDateTime; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = {Backend.PROD}) +public class ITBucketLifecycleTest { + + private static final LifecycleRule LIFECYCLE_RULE_1 = + new LifecycleRule( + LifecycleAction.newSetStorageClassAction(StorageClass.COLDLINE), + LifecycleCondition.newBuilder() + .setAge(1) + .setNumberOfNewerVersions(3) + .setIsLive(false) + .setMatchesStorageClass(ImmutableList.of(StorageClass.COLDLINE)) + .build()); + private static final LifecycleRule LIFECYCLE_RULE_2 = + new LifecycleRule( + LifecycleAction.newDeleteAction(), LifecycleCondition.newBuilder().setAge(1).build()); + private static final ImmutableList LIFECYCLE_RULES = + 
ImmutableList.of(LIFECYCLE_RULE_1, LIFECYCLE_RULE_2); + + @Inject public Storage storage; + @Inject public Generator generator; + + @Test + public void testGetBucketLifecycleRules() throws Exception { + String lifecycleTestBucketName = generator.randomBucketName(); + BucketInfo bucketInfo = + BucketInfo.newBuilder(lifecycleTestBucketName) + .setLocation("us") + .setLifecycleRules( + ImmutableList.of( + new LifecycleRule( + LifecycleAction.newSetStorageClassAction(StorageClass.COLDLINE), + LifecycleCondition.newBuilder() + .setAge(1) + .setNumberOfNewerVersions(3) + .setIsLive(false) + .setCreatedBeforeOffsetDateTime(OffsetDateTime.now()) + .setMatchesStorageClass(ImmutableList.of(StorageClass.COLDLINE)) + .setDaysSinceNoncurrentTime(30) + .setNoncurrentTimeBeforeOffsetDateTime(OffsetDateTime.now()) + .setCustomTimeBeforeOffsetDateTime(OffsetDateTime.now()) + .setDaysSinceCustomTime(30) + .build()))) + .build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo remoteBucket = tempB.getBucket(); + LifecycleRule lifecycleRule = remoteBucket.getLifecycleRules().get(0); + assertThat(lifecycleRule.getAction().getActionType()) + .isEqualTo(SetStorageClassLifecycleAction.TYPE); + assertEquals(3, lifecycleRule.getCondition().getNumberOfNewerVersions().intValue()); + assertNotNull(lifecycleRule.getCondition().getCreatedBeforeOffsetDateTime()); + assertFalse(lifecycleRule.getCondition().getIsLive()); + assertEquals(1, lifecycleRule.getCondition().getAge().intValue()); + assertEquals(1, lifecycleRule.getCondition().getMatchesStorageClass().size()); + assertEquals(30, lifecycleRule.getCondition().getDaysSinceNoncurrentTime().intValue()); + assertNotNull(lifecycleRule.getCondition().getNoncurrentTimeBeforeOffsetDateTime()); + assertEquals(30, lifecycleRule.getCondition().getDaysSinceCustomTime().intValue()); + assertNotNull(lifecycleRule.getCondition().getCustomTimeBeforeOffsetDateTime()); + } + } 
+ + @Test + public void testGetBucketAbortMPULifecycle() throws Exception { + String lifecycleTestBucketName = generator.randomBucketName(); + BucketInfo bucketInfo = + BucketInfo.newBuilder(lifecycleTestBucketName) + .setLocation("us") + .setLifecycleRules( + ImmutableList.of( + new LifecycleRule( + LifecycleAction.newAbortIncompleteMPUploadAction(), + LifecycleCondition.newBuilder().setAge(1).build()))) + .build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo remoteBucket = tempB.getBucket(); + LifecycleRule lifecycleRule = remoteBucket.getLifecycleRules().get(0); + assertEquals(AbortIncompleteMPUAction.TYPE, lifecycleRule.getAction().getActionType()); + assertEquals(1, lifecycleRule.getCondition().getAge().intValue()); + } + } + + @Test + public void testDeleteLifecycleRules() throws Exception { + String bucketName = generator.randomBucketName(); + BucketInfo bucketInfo = + BucketInfo.newBuilder(bucketName) + .setLocation("us") + .setLifecycleRules(LIFECYCLE_RULES) + .build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo bucket = tempB.getBucket(); + assertThat(bucket.getLifecycleRules()).isNotNull(); + assertThat(bucket.getLifecycleRules()).hasSize(2); + BucketInfo updatedBucket = bucket.toBuilder().deleteLifecycleRules().build(); + storage.update(updatedBucket); + assertThat(updatedBucket.getLifecycleRules()).hasSize(0); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketReadMaskTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketReadMaskTest.java new file mode 100644 index 000000000000..cb9b5515c233 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketReadMaskTest.java @@ -0,0 +1,186 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed 
under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketField; +import com.google.cloud.storage.Storage.BucketGetOption; +import com.google.cloud.storage.Storage.BucketListOption; +import com.google.cloud.storage.TestUtils; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.ITBucketReadMaskTest.BucketReadMaskTestParameters; +import com.google.cloud.storage.it.ReadMaskTestUtils.Args; +import com.google.cloud.storage.it.ReadMaskTestUtils.LazyAssertion; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.ParallelFriendly; +import com.google.cloud.storage.it.runner.annotations.Parameterized; +import com.google.cloud.storage.it.runner.annotations.Parameterized.Parameter; +import com.google.cloud.storage.it.runner.annotations.Parameterized.ParametersProvider; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import 
com.google.common.collect.ImmutableList; +import java.time.temporal.ChronoUnit; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@Parameterized(BucketReadMaskTestParameters.class) +@SingleBackend(value = Backend.PROD) +@ParallelFriendly +public final class ITBucketReadMaskTest { + + @Inject + @StorageFixture(Transport.HTTP) + public Storage sh; + + @Inject + @StorageFixture(Transport.GRPC) + public Storage sg; + + @Inject public BucketInfo bucket; + + @Parameter public Args args; + + private BucketField field; + private LazyAssertion assertion; + + @Before + public void setUp() throws Exception { + field = args.getField(); + assertion = args.getAssertion(); + } + + @Test + public void get() { + BucketInfo bucketJson = getBucket(sh); + BucketInfo bucketGrpc = getBucket(sg); + + assertion.validate(bucketJson, bucketGrpc); + } + + @Test + public void list() { + List bucketsJson = listBuckets(sh); + List bucketsGrpc = listBuckets(sg); + + assertion.pairwiseList().validate(bucketsJson, bucketsGrpc); + } + + public static final class BucketReadMaskTestParameters implements ParametersProvider { + + @Override + public ImmutableList parameters() { + ImmutableList> args = + ImmutableList.of( + new Args<>(BucketField.PROJECT, LazyAssertion.equal()), + new Args<>(BucketField.ACL, LazyAssertion.equal()), + new Args<>(BucketField.AUTOCLASS, LazyAssertion.equal()), + new Args<>(BucketField.BILLING, LazyAssertion.equal()), + new Args<>(BucketField.CORS, LazyAssertion.equal()), + new Args<>(BucketField.CUSTOM_PLACEMENT_CONFIG, LazyAssertion.equal()), + new Args<>( + BucketField.DEFAULT_EVENT_BASED_HOLD, + (jsonT, grpcT) -> { + assertThat(jsonT.getDefaultEventBasedHold()).isNull(); + assertThat(grpcT.getDefaultEventBasedHold()).isFalse(); + }), + new Args<>(BucketField.DEFAULT_OBJECT_ACL, 
LazyAssertion.equal()), + new Args<>(BucketField.ENCRYPTION, LazyAssertion.equal()), + new Args<>(BucketField.ETAG, LazyAssertion.equal()), + new Args<>(BucketField.IAMCONFIGURATION, LazyAssertion.equal()), + new Args<>(BucketField.ID, LazyAssertion.equal()), + new Args<>(BucketField.IP_FILTER, LazyAssertion.equal()), + new Args<>(BucketField.LABELS, LazyAssertion.equal()), + new Args<>(BucketField.LIFECYCLE, LazyAssertion.equal()), + new Args<>(BucketField.LOCATION, LazyAssertion.equal()), + new Args<>(BucketField.LOCATION_TYPE, LazyAssertion.equal()), + new Args<>(BucketField.LOGGING, LazyAssertion.equal()), + new Args<>(BucketField.METAGENERATION, LazyAssertion.equal()), + new Args<>(BucketField.NAME, LazyAssertion.equal()), + new Args<>(BucketField.OWNER, LazyAssertion.equal()), + new Args<>(BucketField.RETENTION_POLICY, LazyAssertion.equal()), + new Args<>(BucketField.RPO, LazyAssertion.equal()), + new Args<>( + BucketField.SELF_LINK, + (jsonT, grpcT) -> { + assertThat(jsonT.getSelfLink()).isNotEmpty(); + assertThat(grpcT.getSelfLink()).isNull(); + }), + new Args<>(BucketField.STORAGE_CLASS, LazyAssertion.equal()), + new Args<>(BucketField.TIME_CREATED, LazyAssertion.equal()), + new Args<>(BucketField.UPDATED, LazyAssertion.equal()), + new Args<>(BucketField.VERSIONING, LazyAssertion.equal()), + new Args<>(BucketField.WEBSITE, LazyAssertion.equal()), + new Args<>( + BucketField.SOFT_DELETE_POLICY, + (jsonT, grpcT) -> { + assertThat( + jsonT + .getSoftDeletePolicy() + .getRetentionDuration() + .equals(grpcT.getSoftDeletePolicy().getRetentionDuration())); + assertThat( + jsonT + .getSoftDeletePolicy() + .getEffectiveTime() + .truncatedTo(ChronoUnit.SECONDS) + .equals( + grpcT + .getSoftDeletePolicy() + .getEffectiveTime() + .truncatedTo(ChronoUnit.SECONDS))); + }), + new Args<>(BucketField.HIERARCHICAL_NAMESPACE, LazyAssertion.equal())); + + List argsDefined = + args.stream().map(Args::getField).map(Enum::name).sorted().collect(Collectors.toList()); + + List 
definedFields = + Arrays.stream(TestUtils.filterOutHttpOnlyBucketFields(BucketField.values())) + .map(Enum::name) + .sorted() + .collect(Collectors.toList()); + + assertThat(argsDefined).containsExactlyElementsIn(definedFields); + return args; + } + } + + private BucketInfo getBucket(Storage s) { + return s.get(bucket.getName(), BucketGetOption.fields(field)).asBucketInfo(); + } + + private List listBuckets(Storage s) { + Page p = + s.list(BucketListOption.prefix(bucket.getName()), BucketListOption.fields(field)); + return StreamSupport.stream(p.iterateAll().spliterator(), false) + .map(Bucket::asBucketInfo) + .collect(Collectors.toList()); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketTest.java new file mode 100644 index 000000000000..7cf2f7f4eecf --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBucketTest.java @@ -0,0 +1,656 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.paging.Page; +import com.google.api.services.storage.model.Folder; +import com.google.cloud.Policy; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.Autoclass; +import com.google.cloud.storage.BucketInfo.CustomPlacementConfig; +import com.google.cloud.storage.BucketInfo.ObjectRetention.Mode; +import com.google.cloud.storage.Cors; +import com.google.cloud.storage.HttpMethod; +import com.google.cloud.storage.Rpo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobField; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BucketField; +import com.google.cloud.storage.Storage.BucketGetOption; +import com.google.cloud.storage.Storage.BucketListOption; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.StorageClass; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TestUtils; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.BucketFixture; +import com.google.cloud.storage.it.runner.annotations.BucketType; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import 
com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.spi.v1.HttpStorageRpc; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import java.time.Duration; +import java.time.OffsetDateTime; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.StreamSupport; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = {Backend.PROD}) +public class ITBucketTest { + + private static final byte[] BLOB_BYTE_CONTENT = {0xD, 0xE, 0xA, 0xD}; + private static final Map BUCKET_LABELS = ImmutableMap.of("label1", "value1"); + private static final Long RETENTION_PERIOD = 5L; + private static final Duration RETENTION_DURATION = Duration.ofSeconds(5); + + @Inject + @BucketFixture(BucketType.DEFAULT) + public BucketInfo bucket; + + @Inject + @BucketFixture(BucketType.REQUESTER_PAYS) + public BucketInfo requesterPaysBucket; + + @Inject public Storage storage; + @Inject public Generator generator; + + @Test + public void testListBuckets() { + Page page = + storage.list( + BucketListOption.prefix(bucket.getName()), BucketListOption.fields(BucketField.NAME)); + ImmutableList bucketNames = + StreamSupport.stream(page.iterateAll().spliterator(), false) + .map(BucketInfo::getName) + .collect(ImmutableList.toImmutableList()); + assertThat(bucketNames).contains(bucket.getName()); + } + + @Test + public void testGetBucketSelectedFields() { + Bucket remoteBucket = + storage.get(bucket.getName(), Storage.BucketGetOption.fields(BucketField.ID)); + assertEquals(bucket.getName(), remoteBucket.getName()); + assertNull(remoteBucket.getCreateTime()); + assertNotNull(remoteBucket.getGeneratedId()); + } + + @Test + public void testGetBucketAllSelectedFields() { + Bucket remoteBucket = + storage.get( + 
bucket.getName(), + Storage.BucketGetOption.fields( + TestUtils.filterOutHttpOnlyBucketFields(BucketField.values()))); + assertEquals(bucket.getName(), remoteBucket.getName()); + assertNotNull(remoteBucket.getCreateTime()); + } + + @Test + public void testBucketLocationType() throws Exception { + String bucketName = generator.randomBucketName(); + BucketInfo bucketInfo = BucketInfo.newBuilder(bucketName).setLocation("us").build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo bucket = tempB.getBucket(); + + assertEquals("multi-region", bucket.getLocationType()); + } + } + + @Test + public void testBucketCustomPlacmentConfigDualRegion() throws Exception { + String bucketName = generator.randomBucketName(); + List locations = new ArrayList<>(); + locations.add("US-EAST1"); + locations.add("US-WEST1"); + CustomPlacementConfig customPlacementConfig = + CustomPlacementConfig.newBuilder().setDataLocations(locations).build(); + BucketInfo bucketInfo = + BucketInfo.newBuilder(bucketName) + .setCustomPlacementConfig(customPlacementConfig) + .setLocation("us") + .build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + BucketInfo bucket = tempB.getBucket(); + assertTrue(bucket.getCustomPlacementConfig().getDataLocations().contains("US-EAST1")); + assertTrue(bucket.getCustomPlacementConfig().getDataLocations().contains("US-WEST1")); + assertTrue(bucket.getLocation().equalsIgnoreCase("us")); + } + } + + @Test + public void testBucketLogging() throws Exception { + String logsBucketName = generator.randomBucketName(); + String loggingBucketName = generator.randomBucketName(); + + BucketInfo logsBucketInfo = BucketInfo.newBuilder(logsBucketName).setLocation("us").build(); + BucketInfo loggingBucketInfo = + BucketInfo.newBuilder(loggingBucketName) + .setLocation("us") + .setLogging( + BucketInfo.Logging.newBuilder() + 
.setLogBucket(logsBucketName) + .setLogObjectPrefix("test-logs") + .build()) + .build(); + + try (TemporaryBucket tempLogsB = + TemporaryBucket.newBuilder().setBucketInfo(logsBucketInfo).setStorage(storage).build(); + TemporaryBucket tempLoggingB = + TemporaryBucket.newBuilder() + .setBucketInfo(loggingBucketInfo) + .setStorage(storage) + .build(); ) { + BucketInfo logsBucket = tempLogsB.getBucket(); + BucketInfo loggingBucket = tempLoggingB.getBucket(); + assertNotNull(logsBucket); + + Policy policy = storage.getIamPolicy(logsBucketName); + assertNotNull(policy); + assertEquals(logsBucketName, loggingBucket.getLogging().getLogBucket()); + assertEquals("test-logs", loggingBucket.getLogging().getLogObjectPrefix()); + + // Disable bucket logging. + Bucket updatedBucket = + storage.update( + loggingBucket.toBuilder().setLogging(null).build(), + BucketTargetOption.metagenerationMatch()); + assertNull(updatedBucket.getLogging()); + } + } + + @Test + public void testRemoveBucketCORS() { + String bucketName = generator.randomBucketName(); + List origins = ImmutableList.of(Cors.Origin.of("http://cloud.google.com")); + List httpMethods = ImmutableList.of(HttpMethod.GET); + List responseHeaders = ImmutableList.of("Content-Type"); + try { + Cors cors = + Cors.newBuilder() + .setOrigins(origins) + .setMethods(httpMethods) + .setResponseHeaders(responseHeaders) + .setMaxAgeSeconds(100) + .build(); + // GRPC creation bug + storage.create(BucketInfo.newBuilder(bucketName).setCors(ImmutableList.of(cors)).build()); + + // case-1 : Cors are set and field selector is selected then returns not-null. 
+ Bucket remoteBucket = + storage.get(bucketName, Storage.BucketGetOption.fields(BucketField.CORS)); + assertThat(remoteBucket.getCors()).isNotNull(); + assertThat(remoteBucket.getCors().get(0).getMaxAgeSeconds()).isEqualTo(100); + assertThat(remoteBucket.getCors().get(0).getMethods()).isEqualTo(httpMethods); + assertThat(remoteBucket.getCors().get(0).getOrigins()).isEqualTo(origins); + assertThat(remoteBucket.getCors().get(0).getResponseHeaders()).isEqualTo(responseHeaders); + + // case-2 : Cors are set but field selector isn't selected then returns not-null. + remoteBucket = storage.get(bucketName); + assertThat(remoteBucket.getCors()).isNotNull(); + + // Remove CORS configuration from the bucket. + Bucket updatedBucket = remoteBucket.toBuilder().setCors(null).build().update(); + assertThat(updatedBucket.getCors()).isNull(); + + // case-3 : Cors are not set and field selector is selected then returns null. + updatedBucket = storage.get(bucketName, Storage.BucketGetOption.fields(BucketField.CORS)); + + assertThat(updatedBucket.getCors()).isNull(); + + // case-4 : Cors are not set and field selector isn't selected then returns null. 
+ updatedBucket = storage.get(bucketName); + assertThat(updatedBucket.getCors()).isNull(); + + } finally { + BucketCleaner.doCleanup(bucketName, storage); + } + } + + @Test + public void testRpoConfig() { + String rpoBucket = generator.randomBucketName(); + try { + Bucket bucket = + storage.create( + BucketInfo.newBuilder(rpoBucket).setLocation("NAM4").setRpo(Rpo.ASYNC_TURBO).build()); + assertEquals("ASYNC_TURBO", bucket.getRpo().toString()); + + bucket.toBuilder().setRpo(Rpo.DEFAULT).build().update(); + + assertEquals("DEFAULT", storage.get(rpoBucket).getRpo().toString()); + } finally { + BucketCleaner.doCleanup(rpoBucket, storage); + } + } + + @Test + public void testRetentionPolicyLock() { + retentionPolicyLockRequesterPays(false); + } + + @Test + public void testRetentionPolicyLockRequesterPays() { + retentionPolicyLockRequesterPays(true); + } + + private void retentionPolicyLockRequesterPays(boolean requesterPays) { + String projectId = storage.getOptions().getProjectId(); + String bucketName = generator.randomBucketName(); + BucketInfo bucketInfo; + if (requesterPays) { + bucketInfo = + BucketInfo.newBuilder(bucketName) + .setRetentionPeriod(RETENTION_PERIOD) + .setRequesterPays(true) + .build(); + } else { + bucketInfo = BucketInfo.newBuilder(bucketName).setRetentionPeriod(RETENTION_PERIOD).build(); + } + Bucket remoteBucket = storage.create(bucketInfo); + assertThat(remoteBucket.getRetentionPeriod()).isEqualTo(RETENTION_PERIOD); + assertThat(remoteBucket.getRetentionPeriodDuration()).isEqualTo(RETENTION_DURATION); + try { + // in json if the bucket retention policy is not locked null is possible, however in grpc + // there is no distinguishment between unset and false. 
+ assertThat(remoteBucket.retentionPolicyIsLocked()).isAnyOf(null, false); + assertNotNull(remoteBucket.getRetentionEffectiveTime()); + assertNotNull(remoteBucket.getMetageneration()); + if (requesterPays) { + remoteBucket = + storage.lockRetentionPolicy( + remoteBucket, + Storage.BucketTargetOption.metagenerationMatch(), + Storage.BucketTargetOption.userProject(projectId)); + } else { + remoteBucket = + storage.lockRetentionPolicy( + remoteBucket, Storage.BucketTargetOption.metagenerationMatch()); + } + assertTrue(remoteBucket.retentionPolicyIsLocked()); + assertNotNull(remoteBucket.getRetentionEffectiveTime()); + } finally { + if (requesterPays) { + bucketInfo = bucketInfo.toBuilder().setRequesterPays(false).build(); + Bucket updateBucket = + storage.update(bucketInfo, Storage.BucketTargetOption.userProject(projectId)); + assertFalse(updateBucket.requesterPays()); + } + BucketCleaner.doCleanup(bucketName, storage); + } + } + + @Test + public void testEnableDisableBucketDefaultEventBasedHold() { + String bucketName = generator.randomBucketName(); + Bucket remoteBucket = + storage.create(BucketInfo.newBuilder(bucketName).setDefaultEventBasedHold(true).build()); + try { + assertTrue(remoteBucket.getDefaultEventBasedHold()); + remoteBucket = + storage.get( + bucketName, + Storage.BucketGetOption.fields( + BucketField.DEFAULT_EVENT_BASED_HOLD, BucketField.METAGENERATION)); + assertTrue(remoteBucket.getDefaultEventBasedHold()); + String blobName = generator.randomObjectName(); + BlobInfo blobInfo = BlobInfo.newBuilder(bucketName, blobName).build(); + Blob remoteBlob = storage.create(blobInfo, BlobTargetOption.doesNotExist()); + assertTrue(remoteBlob.getEventBasedHold()); + remoteBlob = + storage.get( + blobInfo.getBlobId(), + Storage.BlobGetOption.fields(BlobField.EVENT_BASED_HOLD, BlobField.METAGENERATION)); + assertTrue(remoteBlob.getEventBasedHold()); + remoteBlob = + remoteBlob.toBuilder() + .setEventBasedHold(false) + .build() + 
.update(BlobTargetOption.metagenerationMatch()); + assertFalse(remoteBlob.getEventBasedHold()); + remoteBucket = + remoteBucket.toBuilder() + .setDefaultEventBasedHold(false) + .build() + .update(BucketTargetOption.metagenerationMatch()); + assertFalse(remoteBucket.getDefaultEventBasedHold()); + } finally { + BucketCleaner.doCleanup(bucketName, storage); + } + } + + @Test + public void testCreateBucketWithAutoclass() { + String bucketName = generator.randomBucketName(); + storage.create( + BucketInfo.newBuilder(bucketName) + .setAutoclass(Autoclass.newBuilder().setEnabled(true).build()) + .build()); + try { + Bucket remoteBucket = storage.get(bucketName); + + assertNotNull(remoteBucket.getAutoclass()); + assertTrue(remoteBucket.getAutoclass().getEnabled()); + OffsetDateTime time = remoteBucket.getAutoclass().getToggleTime(); + assertNotNull(time); + + remoteBucket.toBuilder() + .setAutoclass(Autoclass.newBuilder().setEnabled(false).build()) + .build() + .update(); + + remoteBucket = storage.get(bucketName); + assertNotNull(remoteBucket.getAutoclass()); + assertFalse(remoteBucket.getAutoclass().getEnabled()); + assertNotNull(remoteBucket.getAutoclass().getToggleTime()); + assertNotEquals(time, remoteBucket.getAutoclass().getToggleTime()); + } finally { + BucketCleaner.doCleanup(bucketName, storage); + } + } + + @Test + @CrossRun.Exclude(transports = Transport.GRPC) + public void testObjectRetention() { + String bucketName = generator.randomBucketName(); + + // Create a bucket with object retention enabled + storage.create( + BucketInfo.newBuilder(bucketName).build(), BucketTargetOption.enableObjectRetention(true)); + + try { + Bucket remoteBucket = storage.get(bucketName); + assertNotNull(remoteBucket.getObjectRetention()); + assertEquals(Mode.ENABLED, remoteBucket.getObjectRetention().getMode()); + + OffsetDateTime now = OffsetDateTime.now(); + + // Create an object with a retention policy configured + storage.create( + BlobInfo.newBuilder(bucketName, 
"retentionObject") + .setRetention( + BlobInfo.Retention.newBuilder() + .setMode(BlobInfo.Retention.Mode.UNLOCKED) + .setRetainUntilTime(now.plusDays(2)) + .build()) + .build()); + + Blob remoteBlob = storage.get(bucketName, "retentionObject"); + assertNotNull(remoteBlob.getRetention()); + assertEquals(BlobInfo.Retention.Mode.UNLOCKED, remoteBlob.getRetention().getMode()); + + // Reduce the retainUntilTime of an object's retention policy + remoteBlob.toBuilder() + .setRetention( + BlobInfo.Retention.newBuilder() + .setMode(BlobInfo.Retention.Mode.UNLOCKED) + .setRetainUntilTime(now.plusHours(1)) + .build()) + .build() + .update( + Storage.BlobTargetOption.overrideUnlockedRetention(true), + BlobTargetOption.metagenerationMatch()); + + remoteBlob = storage.get(bucketName, "retentionObject"); + assertEquals( + now.plusHours(1).toInstant().truncatedTo(ChronoUnit.SECONDS), + remoteBlob + .getRetention() + .getRetainUntilTime() + .toInstant() + .truncatedTo(ChronoUnit.SECONDS)); + + // Remove an unlocked retention policy + remoteBlob.toBuilder() + .setRetention(null) + .build() + .update( + Storage.BlobTargetOption.overrideUnlockedRetention(true), + BlobTargetOption.metagenerationMatch()); + + remoteBlob = storage.get(bucketName, "retentionObject"); + assertNull(remoteBlob.getRetention()); + } finally { + BucketCleaner.doCleanup(bucketName, storage); + } + } + + public void testCreateBucketWithAutoclass_ARCHIVE() throws Exception { + String bucketName = generator.randomBucketName(); + Autoclass autoclass = + Autoclass.newBuilder() + .setEnabled(true) + .setTerminalStorageClass(StorageClass.ARCHIVE) + .build(); + BucketInfo info = BucketInfo.newBuilder(bucketName).setAutoclass(autoclass).build(); + try (TemporaryBucket tmpb = + TemporaryBucket.newBuilder().setStorage(storage).setBucketInfo(info).build()) { + BucketInfo remoteBucket = tmpb.getBucket(); + + Autoclass remoteBucketAutoclass = remoteBucket.getAutoclass(); + assertThat(remoteBucketAutoclass).isNotNull(); + 
assertThat(remoteBucketAutoclass.getEnabled()).isTrue(); + assertThat(remoteBucketAutoclass.getToggleTime()).isNotNull(); + assertThat(remoteBucketAutoclass.getTerminalStorageClassUpdateTime()).isNotNull(); + assertThat(remoteBucketAutoclass.getTerminalStorageClass()).isEqualTo(StorageClass.ARCHIVE); + + Page bucketPage = storage.list(BucketListOption.prefix(bucketName)); + ImmutableList buckets = ImmutableList.copyOf(bucketPage.iterateAll()); + + Optional first = + buckets.stream().filter(b -> bucketName.equals(b.getName())).findFirst(); + + assertThat(first.isPresent()).isTrue(); + assertThat(first.get().getAutoclass().getTerminalStorageClass()) + .isEqualTo(StorageClass.ARCHIVE); + + BucketInfo disabled = + remoteBucket.toBuilder() + .setAutoclass(Autoclass.newBuilder().setEnabled(false).build()) + .build(); + Bucket updated = storage.update(disabled, BucketTargetOption.metagenerationMatch()); + + Autoclass updatedAutoclass = updated.getAutoclass(); + assertThat(updatedAutoclass.getEnabled()).isFalse(); + assertThat(updatedAutoclass.getTerminalStorageClass()).isNull(); + + assertThat(updatedAutoclass).isNotEqualTo(remoteBucketAutoclass); + } + } + + @Test + public void testUpdateBucket_noModification() throws Exception { + String bucketName = generator.randomBucketName(); + BucketInfo bucketInfo = BucketInfo.newBuilder(bucketName).build(); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + // in grpc, create will return acls but update does not. 
re-get the metadata with default + // fields + BucketInfo bucket = tempB.getBucket(); + Bucket gen1 = + storage.get( + bucket.getName(), BucketGetOption.metagenerationMatch(bucket.getMetageneration())); + + Bucket gen2 = storage.update(gen1, BucketTargetOption.metagenerationMatch()); + assertThat(gen2).isEqualTo(gen1); + } + } + + @Test + public void nonExistentBucketReturnsNull() { + Bucket bucket = storage.get(generator.randomBucketName()); + assertThat(bucket).isNull(); + } + + @Test + public void testSoftDeletePolicy() { + String bucketName = generator.randomBucketName(); + BucketInfo bucketInfo = + BucketInfo.newBuilder(bucketName) + .setSoftDeletePolicy( + BucketInfo.SoftDeletePolicy.newBuilder() + .setRetentionDuration(Duration.ofDays(10)) + .build()) + .build(); + try { + storage.create(bucketInfo); + + Bucket remoteBucket = storage.get(bucketName); + assertEquals(Duration.ofDays(10), remoteBucket.getSoftDeletePolicy().getRetentionDuration()); + assertNotNull(remoteBucket.getSoftDeletePolicy().getEffectiveTime()); + + String softDelBlobName = "softdelblob"; + remoteBucket.create(softDelBlobName, BLOB_BYTE_CONTENT); + + Blob blob = remoteBucket.get(softDelBlobName); + long gen = blob.getGeneration(); + + assertNull(blob.getSoftDeleteTime()); + assertNull(blob.getHardDeleteTime()); + + blob.delete(); + + assertNull(remoteBucket.get(softDelBlobName)); + + ImmutableList softDeletedBlobs = + ImmutableList.copyOf( + remoteBucket.list(Storage.BlobListOption.softDeleted(true)).iterateAll()); + assertThat(softDeletedBlobs.size() > 0).isTrue(); + + Blob softDeletedBlob = + remoteBucket.get(softDelBlobName, gen, Storage.BlobGetOption.softDeleted(true)); + + assertNotNull(softDeletedBlob); + assertNotNull(softDeletedBlob.getSoftDeleteTime()); + assertNotNull(softDeletedBlob.getHardDeleteTime()); + + assertNotNull(storage.restore(softDeletedBlob.getBlobId())); + + remoteBucket.toBuilder() + .setSoftDeletePolicy( + BucketInfo.SoftDeletePolicy.newBuilder() + 
.setRetentionDuration(Duration.ofDays(20)) + .build()) + .build() + .update(BucketTargetOption.metagenerationMatch()); + + assertEquals( + Duration.ofDays(20), + storage.get(bucketName).getSoftDeletePolicy().getRetentionDuration()); + } finally { + BucketCleaner.doCleanup(bucketName, storage); + } + } + + @Test + public void createBucketWithHierarchicalNamespace() { + String bucketName = generator.randomBucketName(); + storage.create( + BucketInfo.newBuilder(bucketName) + .setHierarchicalNamespace( + BucketInfo.HierarchicalNamespace.newBuilder().setEnabled(true).build()) + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setIsUniformBucketLevelAccessEnabled(true) + .build()) + .build()); + try { + Bucket remoteBucket = storage.get(bucketName); + assertNotNull(remoteBucket.getHierarchicalNamespace()); + assertTrue(remoteBucket.getHierarchicalNamespace().getEnabled()); + } finally { + BucketCleaner.doCleanup(bucketName, storage); + } + } + + @Test + public void testListObjectsWithFolders() throws Exception { + String bucketName = generator.randomBucketName(); + storage.create( + BucketInfo.newBuilder(bucketName) + .setHierarchicalNamespace( + BucketInfo.HierarchicalNamespace.newBuilder().setEnabled(true).build()) + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setIsUniformBucketLevelAccessEnabled(true) + .build()) + .build()); + try { + com.google.api.services.storage.Storage apiaryStorage = + new HttpStorageRpc(StorageOptions.getDefaultInstance()).getStorage(); + apiaryStorage + .folders() + .insert(bucketName, new Folder().setName("F").setBucket(bucketName)) + .execute(); + + Page blobs = + storage.list( + bucketName, + Storage.BlobListOption.delimiter("/"), + Storage.BlobListOption.includeFolders(false)); + + boolean found = false; + for (Blob blob : blobs.iterateAll()) { + if (blob.getName().equals("F/")) { + found = true; + } + } + assertFalse(found); + + blobs = + storage.list( + bucketName, + 
Storage.BlobListOption.delimiter("/"), + Storage.BlobListOption.includeFolders(true)); + + for (Blob blob : blobs.iterateAll()) { + if (blob.getName().equals("F/")) { + found = true; + } + } + assert (found); + + } finally { + BucketCleaner.doCleanup(bucketName, storage); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITCustomJsonFactoryTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITCustomJsonFactoryTest.java new file mode 100644 index 000000000000..d009a89c88df --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITCustomJsonFactoryTest.java @@ -0,0 +1,76 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.verify; + +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.HttpStorageOptions; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketListOption; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.spi.v1.HttpStorageRpc; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +/** + * Explicitly configure an alternative {@link com.google.api.client.json.JsonFactory} for use by an + * instance of {@link com.google.cloud.storage.Storage} + */ +@RunWith(MockitoJUnitRunner.class) +public final class ITCustomJsonFactoryTest { + + @Spy GsonFactory gsonFactory = new GsonFactory(); + + @Test + public void customJsonFactoryConfigurableViaStorageOptions() { + Storage s = + StorageOptions.newBuilder() + .setServiceRpcFactory( + options -> { + if (options instanceof HttpStorageOptions) { + HttpStorageOptions httpStorageOptions = (HttpStorageOptions) options; + return new HttpStorageRpc(httpStorageOptions, gsonFactory); + } else { + throw new IllegalArgumentException("Only HttpStorageOptions supported"); + } + }) + .build() + .getService(); + + Page bucketPage = s.list(BucketListOption.pageSize(10)); + List buckets = + StreamSupport.stream(bucketPage.iterateAll().spliterator(), false) + .collect(Collectors.toList()); + // if we reach here, we're good the call didn't fail, and we parsed some json. 
+ + verify(gsonFactory, atLeastOnce()) + .createJsonParser(any(InputStream.class), eq(StandardCharsets.UTF_8)); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITDefaultProjectionCompatibilityTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITDefaultProjectionCompatibilityTest.java new file mode 100644 index 000000000000..32d0f31d9c29 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITDefaultProjectionCompatibilityTest.java @@ -0,0 +1,166 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.Storage.BucketListOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.cloud.storage.it.runner.registry.ObjectsFixture; +import com.google.common.base.MoreObjects; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +public final class ITDefaultProjectionCompatibilityTest { + + @Inject + @StorageFixture(Transport.HTTP) + public Storage http; + + @Inject + @StorageFixture(Transport.GRPC) + public Storage grpc; + + @Inject public BucketInfo bucket; + + @Inject public ObjectsFixture objectsFixture; + + @Test + public void objectMetadata_includesAcls() { + Blob httpBlob = http.get(objectsFixture.getInfo1().getBlobId()); + Blob grpcBlob = grpc.get(objectsFixture.getInfo1().getBlobId()); + + assertThat(extractFromBlob(grpcBlob)).isEqualTo(extractFromBlob(httpBlob)); + } + + @Test + public void listObjectMetadata_includesAcls() { + String bucketName = bucket.getName(); + BlobListOption prefix = 
BlobListOption.prefix(objectsFixture.getInfo1().getBlobId().getName()); + List httpBlob = http.list(bucketName, prefix).streamAll().collect(Collectors.toList()); + List grpcBlob = grpc.list(bucketName, prefix).streamAll().collect(Collectors.toList()); + + List a = extractFromBlobs(httpBlob); + List b = extractFromBlobs(grpcBlob); + + assertThat(a).isEqualTo(b); + } + + @Test + public void bucketMetadata_includesAcls() { + Bucket httpBucket = http.get(bucket.getName()); + Bucket grpcBucket = grpc.get(bucket.getName()); + + assertThat(extractFromBucket(httpBucket)).isEqualTo(extractFromBucket(grpcBucket)); + } + + @Test + public void listBucketMetadata_includesAcls() { + BucketListOption prefix = BucketListOption.prefix(bucket.getName()); + List httpBucket = http.list(prefix).streamAll().collect(Collectors.toList()); + List grpcBucket = grpc.list(prefix).streamAll().collect(Collectors.toList()); + + List a = extractFromBuckets(httpBucket); + List b = extractFromBuckets(grpcBucket); + + assertThat(a).isEqualTo(b); + } + + @NonNull + private static List extractFromBlobs(List httpBlob) { + return httpBlob.stream() + .map(ITDefaultProjectionCompatibilityTest::extractFromBlob) + .collect(Collectors.toList()); + } + + @NonNull + private static AclRelatedFields extractFromBlob(Blob b) { + return new AclRelatedFields(b.getOwner(), b.getAcl(), null); + } + + @NonNull + private static List extractFromBuckets(List httpBucket) { + return httpBucket.stream() + .map(ITDefaultProjectionCompatibilityTest::extractFromBucket) + .collect(Collectors.toList()); + } + + @NonNull + private static AclRelatedFields extractFromBucket(Bucket b) { + return new AclRelatedFields(b.getOwner(), b.getAcl(), null); + } + + private static final class AclRelatedFields { + @Nullable private final Entity owner; + @Nullable private final List acls; + @Nullable private final List defaultAcls; + + private AclRelatedFields( + @Nullable Entity owner, @Nullable List acls, @Nullable List defaultAcls) { + 
this.owner = owner; + this.acls = acls; + this.defaultAcls = defaultAcls; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof AclRelatedFields)) { + return false; + } + AclRelatedFields that = (AclRelatedFields) o; + return Objects.equals(owner, that.owner) + && Objects.equals(acls, that.acls) + && Objects.equals(defaultAcls, that.defaultAcls); + } + + @Override + public int hashCode() { + return Objects.hash(owner, acls, defaultAcls); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("owner", owner) + .add("acls", acls) + .add("defaultAcls", defaultAcls) + .toString(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITDownloadBlobWithoutAuth.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITDownloadBlobWithoutAuth.java new file mode 100644 index 000000000000..3febdab092f6 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITDownloadBlobWithoutAuth.java @@ -0,0 +1,104 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import java.util.Iterator; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun(transports = Transport.HTTP, backends = Backend.PROD) +public class ITDownloadBlobWithoutAuth { + private static final boolean IS_VPC_TEST = + System.getenv("GOOGLE_CLOUD_TESTS_IN_VPCSC") != null + && System.getenv("GOOGLE_CLOUD_TESTS_IN_VPCSC").equalsIgnoreCase("true"); + + @Inject public Storage storage; + + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + @Test + public void testDownloadPublicBlobWithoutAuthentication() { + assumeFalse(IS_VPC_TEST); + String bucketName = bucket.getName(); + // create an unauthorized user + Storage unauthorizedStorage = StorageOptions.getUnauthenticatedInstance().getService(); + + // try to download blobs from a public bucket + String landsatBucket = "gcp-public-data-landsat"; + String landsatPrefix = "LC08/01/001/002/LC08_L1GT_001002_20160817_20170322_01_T2/"; + String landsatBlob = landsatPrefix + "LC08_L1GT_001002_20160817_20170322_01_T2_ANG.txt"; + byte[] bytes = unauthorizedStorage.readAllBytes(landsatBucket, landsatBlob); + + 
assertThat(bytes.length).isEqualTo(117255); + int numBlobs = 0; + Iterator blobIterator = + unauthorizedStorage + .list(landsatBucket, Storage.BlobListOption.prefix(landsatPrefix)) + .iterateAll() + .iterator(); + while (blobIterator.hasNext()) { + numBlobs++; + blobIterator.next(); + } + assertThat(numBlobs).isEqualTo(14); + + // try to download blobs from a bucket that requires authentication + // authenticated client will succeed + // unauthenticated client will receive an exception + String sourceBlobName = generator.randomObjectName(); + BlobInfo sourceBlob = BlobInfo.newBuilder(bucketName, sourceBlobName).build(); + assertThat(storage.create(sourceBlob)).isNotNull(); + assertThat(storage.readAllBytes(bucketName, sourceBlobName)).isNotNull(); + try { + unauthorizedStorage.readAllBytes(bucketName, sourceBlobName); + fail("Expected StorageException"); + } catch (StorageException ex) { + // expected + } + assertThat(storage.get(sourceBlob.getBlobId()).delete()).isTrue(); + + // try to upload blobs to a bucket that requires authentication + // authenticated client will succeed + // unauthenticated client will receive an exception + assertThat(storage.create(sourceBlob)).isNotNull(); + try { + unauthorizedStorage.create(sourceBlob); + fail("Expected StorageException"); + } catch (StorageException ex) { + // expected + } + assertThat(storage.get(sourceBlob.getBlobId()).delete()).isTrue(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITDownloadToTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITDownloadToTest.java new file mode 100644 index 000000000000..097609a57ba3 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITDownloadToTest.java @@ -0,0 +1,87 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.fail; + +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.TestUtils; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = {Backend.PROD}) +public final class ITDownloadToTest { + + private static final byte[] helloWorldTextBytes = "hello world".getBytes(); + private static final byte[] helloWorldGzipBytes = TestUtils.gzipBytes(helloWorldTextBytes); + + @Inject public Storage storage; + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + private BlobId blobId; + + @Before + public void before() { + String objectString = generator.randomObjectName(); + blobId = BlobId.of(bucket.getName(), 
objectString); + BlobInfo blobInfo = + BlobInfo.newBuilder(blobId).setContentEncoding("gzip").setContentType("text/plain").build(); + storage.create(blobInfo, helloWorldGzipBytes); + } + + @Test + public void downloadTo_returnRawInputStream_yes() throws IOException { + Path helloWorldTxtGz = File.createTempFile(blobId.getName(), ".txt.gz").toPath(); + storage.downloadTo( + blobId, helloWorldTxtGz, Storage.BlobSourceOption.shouldReturnRawInputStream(true)); + + byte[] actualTxtGzBytes = Files.readAllBytes(helloWorldTxtGz); + if (Arrays.equals(actualTxtGzBytes, helloWorldTextBytes)) { + fail("expected gzipped bytes, but got un-gzipped bytes"); + } + assertThat(actualTxtGzBytes).isEqualTo(helloWorldGzipBytes); + } + + @Test + public void downloadTo_returnRawInputStream_no() throws IOException { + Path helloWorldTxt = File.createTempFile(blobId.getName(), ".txt").toPath(); + storage.downloadTo( + blobId, helloWorldTxt, Storage.BlobSourceOption.shouldReturnRawInputStream(false)); + byte[] actualTxtBytes = Files.readAllBytes(helloWorldTxt); + assertThat(actualTxtBytes).isEqualTo(helloWorldTextBytes); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITFoldersTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITFoldersTest.java new file mode 100644 index 000000000000..1eceea6ce08b --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITFoldersTest.java @@ -0,0 +1,103 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.MoveBlobRequest; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.BucketFixture; +import com.google.cloud.storage.it.runner.annotations.BucketType; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableMap; +import com.google.storage.control.v2.BucketName; +import com.google.storage.control.v2.CreateFolderRequest; +import com.google.storage.control.v2.Folder; +import com.google.storage.control.v2.FolderName; +import com.google.storage.control.v2.StorageControlClient; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = Backend.PROD, + 
transports = {Transport.HTTP, Transport.GRPC}) +public class ITFoldersTest { + + @Inject public StorageControlClient ctrl; + @Inject public Storage storage; + + @Inject + @BucketFixture(BucketType.HNS) + public BucketInfo bucket; + + @Inject public Generator generator; + + @Test + public void createFolder() throws Exception { + String folderId = generator.randomObjectName() + "/"; + String bucketName = bucket.getName(); + Folder folder = + ctrl.createFolder( + CreateFolderRequest.newBuilder() + .setParent(BucketName.format("_", bucketName)) + .setFolderId(folderId) + .build()); + assertAll( + () -> assertThat(folder.getName()).isEqualTo(FolderName.format("_", bucketName, folderId)), + () -> assertThat(folder.getMetageneration()).isGreaterThan(0)); + } + + @Test + public void moveObject() throws Exception { + ChecksummedTestContent testContent = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(5286)); + + BlobId id1 = BlobId.of(bucket.getName(), generator.randomObjectName()); + BlobId id2 = BlobId.of(bucket.getName(), generator.randomObjectName()); + + ImmutableMap metadata = ImmutableMap.of("a", "b", "c", "d"); + BlobInfo info1 = BlobInfo.newBuilder(id1).setMetadata(metadata).build(); + Blob blob1 = storage.create(info1, testContent.getBytes(), BlobTargetOption.doesNotExist()); + + Blob blob2 = + storage.moveBlob( + MoveBlobRequest.newBuilder() + .setSource(blob1.getBlobId()) + .setTarget(id2) + .setSourceOptions(BlobSourceOption.generationMatch()) + .setTargetOptions(BlobTargetOption.doesNotExist()) + .build()); + + assertAll( + () -> assertThat(blob2.getCrc32c()).isEqualTo(testContent.getCrc32cBase64()), + () -> assertThat(blob2.getMetadata()).isEqualTo(metadata)); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITGrpcIdempotencyTokenTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITGrpcIdempotencyTokenTest.java new file mode 100644 index 
000000000000..f6cfb5a35ef0 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITGrpcIdempotencyTokenTest.java @@ -0,0 +1,170 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.paging.Page; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import 
com.google.common.truth.IterableSubject; +import io.grpc.Metadata; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +public final class ITGrpcIdempotencyTokenTest { + + private static final Metadata.Key X_GOOG_GCS_IDEMPOTENCY_TOKEN = + Metadata.Key.of("x-goog-gcs-idempotency-token", Metadata.ASCII_STRING_MARSHALLER); + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + private Storage storage; + private GrpcRequestAuditing requestAuditing; + + @Before + public void setUp() throws Exception { + requestAuditing = new GrpcRequestAuditing(); + storage = + StorageOptions.grpc() + .setGrpcInterceptorProvider(() -> ImmutableList.of(requestAuditing)) + .setEnableGrpcClientMetrics(false) + .setAttemptDirectPath(false) + .build() + .getService(); + } + + @After + public void tearDown() throws Exception { + if (storage != null) { + storage.close(); + } + } + + @Test + public void simpleUnary() throws Exception { + Bucket gen1 = storage.get(bucket.getName()); + + IterableSubject subject = requestAuditing.assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN); + assertAll(() -> subject.hasSize(1)); + } + + @Test + public void pageObjects() throws Exception { + String baseName = generator.randomObjectName(); + Blob blob1 = storage.create(BlobInfo.newBuilder(bucket, baseName + "1").build()); + Blob blob2 = storage.create(BlobInfo.newBuilder(bucket, baseName + "2").build()); + + requestAuditing.clear(); + ImmutableList expectedNamess = ImmutableList.of(blob1.getName(), blob2.getName()); + Page page = + storage.list(bucket.getName(), BlobListOption.prefix(baseName), BlobListOption.pageSize(1)); + + List collect = page.streamAll().map(BlobInfo::getName).collect(Collectors.toList()); + IterableSubject subject = 
requestAuditing.assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN); + assertAll( + () -> assertThat(collect).hasSize(2), + () -> assertThat(collect).containsExactlyElementsIn(expectedNamess), + () -> subject.hasSize(2)); + } + + @Test + public void readObject() throws Exception { + byte[] expected = DataGenerator.base64Characters().genBytes(512 * 1024 + 45); + String expectedXxd = xxd(expected); + + Blob gen1 = + storage.create( + BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(), + expected, + BlobTargetOption.doesNotExist()); + + requestAuditing.clear(); + byte[] actual = storage.readAllBytes(gen1.getBlobId(), BlobSourceOption.generationMatch()); + IterableSubject subject = requestAuditing.assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN); + String actualXxd = xxd(actual); + + assertAll(() -> subject.hasSize(1), () -> assertThat(actualXxd).isEqualTo(expectedXxd)); + } + + @Test + public void directUpload() throws Exception { + byte[] expected = DataGenerator.base64Characters().genBytes(512 * 1024 + 45); + String expectedXxd = xxd(expected); + + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + requestAuditing.clear(); + Blob gen1 = storage.create(info, expected, BlobTargetOption.doesNotExist()); + IterableSubject subject = requestAuditing.assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN); + + byte[] actual = storage.readAllBytes(gen1.getBlobId(), BlobSourceOption.generationMatch()); + String actualXxd = xxd(actual); + + assertAll(() -> subject.hasSize(1), () -> assertThat(actualXxd).isEqualTo(expectedXxd)); + } + + @Test + public void resumableUpload() throws Exception { + byte[] expected = DataGenerator.base64Characters().genBytes(512 * 1024 + 45); + String expectedXxd = xxd(expected); + + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + try (WriteChannel writer = storage.writer(info, BlobWriteOption.doesNotExist())) { + writer.setChunkSize(256 * 1024); + 
writer.write(ByteBuffer.wrap(Arrays.copyOfRange(expected, 0, 256 * 1024))); + writer.write(ByteBuffer.wrap(Arrays.copyOfRange(expected, 256 * 1024, 512 * 1024))); + writer.write(ByteBuffer.wrap(Arrays.copyOfRange(expected, 512 * 1024, expected.length))); + } + IterableSubject subject = requestAuditing.assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN); + + byte[] actual = storage.readAllBytes(info.getBlobId()); + String actualXxd = xxd(actual); + + // We expect 4 distinct requests: + // 1. start resumable session + // 2. PUT first 256KiB + // 3. PUT second 256KiB + // 4. Finalize session and put final 45B + assertAll(() -> subject.hasSize(4), () -> assertThat(actualXxd).isEqualTo(expectedXxd)); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITGrpcInterceptorTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITGrpcInterceptorTest.java new file mode 100644 index 000000000000..3ad6592bd3ae --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITGrpcInterceptorTest.java @@ -0,0 +1,121 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.GrpcStorageOptions; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketListOption; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.common.collect.ImmutableList; +import io.grpc.Attributes; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ClientStreamTracer; +import io.grpc.ClientStreamTracer.StreamInfo; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.stream.Collectors; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +public class ITGrpcInterceptorTest { + private static final Metadata.Key X_GOOG_REQUEST_PARAMS = + Metadata.Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER); + + @Inject + @StorageFixture(Transport.GRPC) + public Storage storage; + + @Inject public BucketInfo bucket; + + @Test + public void grpcStorageOptions_allowSpecifyingInterceptor() throws Exception { + TracerFactory factory = new TracerFactory(); + Interceptor interceptor = new Interceptor(factory); + StorageOptions options = + ((GrpcStorageOptions) storage.getOptions()) + 
.toBuilder().setGrpcInterceptorProvider(() -> ImmutableList.of(interceptor)).build(); + + try (Storage storage = options.getService()) { + Page page = storage.list(BucketListOption.prefix(bucket.getName())); + List bucketNames = + page.streamAll().map(BucketInfo::getName).collect(Collectors.toList()); + assertThat(bucketNames).contains(bucket.getName()); + } + + assertThat(factory.metadatas).isNotEmpty(); + List requestParams = + factory.metadatas.stream() + .map(m -> m.get(X_GOOG_REQUEST_PARAMS)) + .collect(Collectors.toList()); + + String expected = String.format(Locale.US, "project=projects/%s", options.getProjectId()); + String expectedEncoded = + String.format(Locale.US, "project=projects%%2F%s", options.getProjectId()); + assertThat(requestParams).containsAnyOf(expected, expectedEncoded); + } + + private static final class Interceptor implements ClientInterceptor { + + private final TracerFactory factory; + + public Interceptor(TracerFactory factory) { + this.factory = factory; + } + + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + CallOptions callOptions1 = callOptions.withStreamTracerFactory(factory); + return next.newCall(method, callOptions1); + } + } + + private static final class TracerFactory extends ClientStreamTracer.Factory { + + private final List metadatas = Collections.synchronizedList(new ArrayList<>()); + + @Override + public ClientStreamTracer newClientStreamTracer(StreamInfo info, Metadata headers) { + return new ClientStreamTracer() { + @Override + public void streamCreated(Attributes transportAttrs, Metadata headers) { + metadatas.add(headers); + } + }; + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITGrpcTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITGrpcTest.java new file mode 100644 index 000000000000..26a788ac0966 --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITGrpcTest.java @@ -0,0 +1,201 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.paging.Page; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.CopyWriter; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.Storage.CopyRequest; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import 
com.google.common.collect.ImmutableList; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Locale; +import java.util.stream.IntStream; +import java.util.stream.StreamSupport; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.TEST_BENCH}, + transports = {Transport.GRPC}) +public final class ITGrpcTest { + + @Inject public Storage storage; + + @Inject public BucketInfo bucketInfo; + + @Inject public Generator generator; + + @Test + public void testCreateBucket() { + String bucketName = generator.randomBucketName(); + Bucket bucket = storage.create(BucketInfo.of(bucketName)); + assertThat(bucket.getName()).isEqualTo(bucketName); + } + + @Test + public void listBlobs() { + byte[] content = "Hello, World!".getBytes(StandardCharsets.UTF_8); + String prefix = generator.randomObjectName(); + List blobs = + IntStream.rangeClosed(1, 10) + .mapToObj(i -> String.format(Locale.US, "%s/%02d", prefix, i)) + .map(n -> BlobInfo.newBuilder(bucketInfo, n).build()) + .map(info -> storage.create(info, content, BlobTargetOption.doesNotExist())) + .collect(ImmutableList.toImmutableList()); + + List expected = + blobs.stream().map(Blob::getName).collect(ImmutableList.toImmutableList()); + + Page list = storage.list(bucketInfo.getName(), BlobListOption.prefix(prefix)); + ImmutableList actual = + StreamSupport.stream(list.iterateAll().spliterator(), false) + .map(Blob::getName) + .collect(ImmutableList.toImmutableList()); + + assertThat(actual).isEqualTo(expected); + } + + @Test + public void listBuckets() { + Page list = storage.list(); + ImmutableList bucketNames = + StreamSupport.stream(list.iterateAll().spliterator(), false) + .map(Bucket::getName) + .collect(ImmutableList.toImmutableList()); + + assertThat(bucketNames).contains(bucketInfo.getName()); + } + + @Test + public void 
object_writeGetRead() { + Storage s = storage; + BlobInfo info = BlobInfo.newBuilder(bucketInfo, "writeGetRead").build(); + byte[] content = "hello, world".getBytes(StandardCharsets.UTF_8); + s.create(info, content, BlobTargetOption.doesNotExist()); + + Blob blob = s.get(info.getBlobId()); + + byte[] actualContent = blob.getContent(); + assertThat(actualContent).isEqualTo(content); + } + + @Test + public void objectWrite_storage_create() { + BlobInfo info = BlobInfo.newBuilder(bucketInfo, generator.randomObjectName()).build(); + byte[] content = "Hello, World!".getBytes(StandardCharsets.UTF_8); + Blob blob = storage.create(info, content, BlobTargetOption.doesNotExist()); + byte[] actual = blob.getContent(); + assertThat(actual).isEqualTo(content); + } + + @Test + public void objectWrite_storage_create_stream() { + BlobInfo info = BlobInfo.newBuilder(bucketInfo, generator.randomObjectName()).build(); + byte[] content = "Hello, World!".getBytes(StandardCharsets.UTF_8); + Blob blob = + storage.create(info, new ByteArrayInputStream(content), BlobWriteOption.doesNotExist()); + byte[] actual = blob.getContent(); + assertThat(actual).isEqualTo(content); + } + + @Test + public void objectWrite_storage_writer() throws IOException { + BlobInfo info = BlobInfo.newBuilder(bucketInfo, generator.randomObjectName()).build(); + byte[] content = "Hello, World!".getBytes(StandardCharsets.UTF_8); + try (WriteChannel c = storage.writer(info, BlobWriteOption.doesNotExist())) { + c.write(ByteBuffer.wrap(content)); + } + byte[] actual = storage.readAllBytes(info.getBlobId()); + assertThat(actual).isEqualTo(content); + } + + @Test + public void storageCopy() { + Storage s = storage; + + byte[] expected = "Hello, World!".getBytes(StandardCharsets.UTF_8); + + BlobInfo info = + BlobInfo.newBuilder(bucketInfo, generator.randomObjectName() + "copy/src").build(); + Blob cpySrc = s.create(info, expected, BlobTargetOption.doesNotExist()); + + BlobInfo dst = + BlobInfo.newBuilder(bucketInfo, 
generator.randomObjectName() + "copy/dst").build(); + + CopyRequest copyRequest = + CopyRequest.newBuilder() + .setSource(cpySrc.getBlobId()) + .setSourceOptions(BlobSourceOption.generationMatch(cpySrc.getGeneration())) + .setTarget(dst, BlobTargetOption.doesNotExist()) + .build(); + + CopyWriter copyWriter = s.copy(copyRequest); + Blob result = copyWriter.getResult(); + + byte[] actualBytes = s.readAllBytes(result.getBlobId()); + assertThat(actualBytes).isEqualTo(expected); + } + + @Test + public void lockBucketRetentionPolicy() { + Storage s = storage; + + Bucket bucket = s.create(BucketInfo.of(generator.randomBucketName())); + + Bucket locked = bucket.lockRetentionPolicy(BucketTargetOption.metagenerationMatch()); + try { + assertThat(locked.retentionPolicyIsLocked()).isTrue(); + } finally { + s.delete(bucket.getName()); + } + } + + @Test + public void testGrpcUniverseDomainMatchesHost() throws Exception { + StorageOptions storageOptions = + StorageOptions.grpc().setUniverseDomain("my-universe-domain.com").build(); + assertAll( + () -> assertThat(storageOptions.getUniverseDomain().equals("my-universe-domain.com")), + () -> + assertThat(storageOptions.getHost().equals("https://storage.my-universe-domain.com"))); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITGzipTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITGzipTest.java new file mode 100644 index 000000000000..5603762c4771 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITGzipTest.java @@ -0,0 +1,102 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.TestUtils; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.PROD}, + transports = {Transport.HTTP, Transport.GRPC}) +public final class ITGzipTest { + + private static final ChecksummedTestContent CHECKSUMMED_TEST_CONTENT = + ChecksummedTestContent.of("Hello, to the world!!!"); + private static final byte[] GZIPPED_CONTENT = + TestUtils.gzipBytes(CHECKSUMMED_TEST_CONTENT.getBytes()); + + @Inject public Storage storage; + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + @Test + public void noMetadata_noOption() { + String name = generator.randomObjectName(); + BlobInfo info = 
BlobInfo.newBuilder(bucket, name).build(); + Blob gen1 = storage.create(info, CHECKSUMMED_TEST_CONTENT.getBytes()); + assertThat(gen1.getContentEncoding()).isAnyOf(null, ""); // json null, grpc "" + byte[] actual = + storage.readAllBytes(gen1.getBlobId(), BlobSourceOption.shouldReturnRawInputStream(true)); + assertThat(xxd(actual)).isEqualTo(xxd(CHECKSUMMED_TEST_CONTENT.getBytes())); + } + + @Test + public void yesMetadata_noOption() { + String name = generator.randomObjectName(); + BlobInfo info = BlobInfo.newBuilder(bucket, name).setContentEncoding("gzip").build(); + Blob gen1 = storage.create(info, GZIPPED_CONTENT); + assertThat(gen1.getContentEncoding()).isEqualTo("gzip"); + byte[] actual = + storage.readAllBytes(gen1.getBlobId(), BlobSourceOption.shouldReturnRawInputStream(true)); + assertThat(xxd(actual)).isEqualTo(xxd(GZIPPED_CONTENT)); + } + + @Test + public void noMetadata_yesOption() { + String name = generator.randomObjectName(); + BlobInfo info = BlobInfo.newBuilder(bucket, name).build(); + Blob gen1 = + storage.create( + info, CHECKSUMMED_TEST_CONTENT.getBytes(), BlobTargetOption.disableGzipContent()); + assertThat(gen1.getContentEncoding()).isAnyOf(null, ""); // json null, grpc "" + byte[] actual = + storage.readAllBytes(gen1.getBlobId(), BlobSourceOption.shouldReturnRawInputStream(true)); + assertThat(xxd(actual)).isEqualTo(xxd(CHECKSUMMED_TEST_CONTENT.getBytes())); + } + + @Test + public void yesMetadata_yesOption() { + String name = generator.randomObjectName(); + BlobInfo info = BlobInfo.newBuilder(bucket, name).setContentEncoding("gzip").build(); + Blob gen1 = storage.create(info, GZIPPED_CONTENT, BlobTargetOption.disableGzipContent()); + assertThat(gen1.getContentEncoding()).isEqualTo("gzip"); + byte[] actual = + storage.readAllBytes(gen1.getBlobId(), BlobSourceOption.shouldReturnRawInputStream(true)); + assertThat(xxd(actual)).isEqualTo(xxd(GZIPPED_CONTENT)); + + byte[] actualUncompressed = + storage.readAllBytes(gen1.getBlobId(), 
BlobSourceOption.shouldReturnRawInputStream(false)); + assertThat(xxd(actualUncompressed)).isEqualTo(xxd(CHECKSUMMED_TEST_CONTENT.getBytes())); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITHmacTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITHmacTest.java new file mode 100644 index 000000000000..191514570e52 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITHmacTest.java @@ -0,0 +1,130 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.HmacKey; +import com.google.cloud.storage.HmacKey.HmacKeyMetadata; +import com.google.cloud.storage.HmacKey.HmacKeyState; +import com.google.cloud.storage.ServiceAccount; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import java.time.Duration; +import java.time.Instant; +import java.util.stream.StreamSupport; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP}, + backends = {Backend.TEST_BENCH}) +public class ITHmacTest { + + @Inject public Storage storage; + + // when modifying this test or {@link #cleanUpHmacKeys} be sure to remember multiple simultaneous + // runs of the integration suite can run with the same service account. Be sure to not clobber + // any possible run state for the other run. 
+ @Test + public void testHmacKey() { + String serviceAccountEmail = System.getenv("IT_SERVICE_ACCOUNT_EMAIL"); + assertNotNull("Unable to determine service account email", serviceAccountEmail); + ServiceAccount serviceAccount = ServiceAccount.of(serviceAccountEmail); + cleanUpHmacKeys(serviceAccount); + + HmacKey hmacKey = storage.createHmacKey(serviceAccount); + String secretKey = hmacKey.getSecretKey(); + assertNotNull(secretKey); + HmacKey.HmacKeyMetadata metadata = hmacKey.getMetadata(); + String accessId = metadata.getAccessId(); + + assertNotNull(accessId); + assertNotNull(metadata.getEtag()); + assertNotNull(metadata.getId()); + assertEquals(storage.getOptions().getProjectId(), metadata.getProjectId()); + assertEquals(serviceAccount.getEmail(), metadata.getServiceAccount().getEmail()); + assertEquals(HmacKey.HmacKeyState.ACTIVE, metadata.getState()); + assertNotNull(metadata.getCreateTime()); + assertNotNull(metadata.getUpdateTime()); + + Page metadatas = + storage.listHmacKeys(Storage.ListHmacKeysOption.serviceAccount(serviceAccount)); + boolean createdInList = + StreamSupport.stream(metadatas.iterateAll().spliterator(), false) + .map(HmacKey.HmacKeyMetadata::getAccessId) + .anyMatch(accessId::equals); + + assertWithMessage("Created an HMAC key but it didn't show up in list()") + .that(createdInList) + .isTrue(); + + HmacKey.HmacKeyMetadata getResult = storage.getHmacKey(accessId); + assertEquals(metadata, getResult); + + storage.updateHmacKeyState(metadata, HmacKey.HmacKeyState.INACTIVE); + + storage.deleteHmacKey(metadata); + + metadatas = storage.listHmacKeys(Storage.ListHmacKeysOption.serviceAccount(serviceAccount)); + boolean deletedInList = + StreamSupport.stream(metadatas.iterateAll().spliterator(), false) + .map(HmacKey.HmacKeyMetadata::getAccessId) + .anyMatch(accessId::equals); + + assertWithMessage("Deleted an HMAC key but it showed up in list()") + .that(deletedInList) + .isFalse(); + } + + private void cleanUpHmacKeys(ServiceAccount 
serviceAccount) { + Instant now = Instant.now(); + Instant yesterday = now.minus(Duration.ofDays(1L)); + + Page metadatas = + storage.listHmacKeys(Storage.ListHmacKeysOption.serviceAccount(serviceAccount)); + for (HmacKey.HmacKeyMetadata hmacKeyMetadata : metadatas.iterateAll()) { + Instant updated = Instant.ofEpochMilli(hmacKeyMetadata.getUpdateTime()); + if (updated.isBefore(yesterday)) { + + if (hmacKeyMetadata.getState() == HmacKeyState.ACTIVE) { + hmacKeyMetadata = storage.updateHmacKeyState(hmacKeyMetadata, HmacKeyState.INACTIVE); + } + + if (hmacKeyMetadata.getState() == HmacKeyState.INACTIVE) { + try { + storage.deleteHmacKey(hmacKeyMetadata); + } catch (StorageException e) { + // attempted to delete concurrently, if the other succeeded swallow the error + if (!(e.getReason().equals("invalid") && e.getMessage().contains("deleted"))) { + throw e; + } + } + } + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITHttpIdempotencyTokenTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITHttpIdempotencyTokenTest.java new file mode 100644 index 000000000000..8ac22bc98871 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITHttpIdempotencyTokenTest.java @@ -0,0 +1,220 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.paging.Page; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.Storage.BucketListOption; +import com.google.cloud.storage.StorageBatch; +import com.google.cloud.storage.StorageBatchResult; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import com.google.common.truth.IterableSubject; +import java.nio.ByteBuffer; +import java.time.Clock; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.time.temporal.ChronoUnit; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +public final class ITHttpIdempotencyTokenTest { + + private static final String X_GOOG_GCS_IDEMPOTENCY_TOKEN = "x-goog-gcs-idempotency-token"; + @Inject public 
BucketInfo bucket; + @Inject public Generator generator; + + private Storage storage; + private RequestAuditing requestAuditing; + + @Before + public void setUp() throws Exception { + requestAuditing = new RequestAuditing(); + storage = StorageOptions.http().setTransportOptions(requestAuditing).build().getService(); + } + + @After + public void tearDown() throws Exception { + if (storage != null) { + storage.close(); + } + } + + @Test + public void simpleUnary() throws Exception { + Bucket gen1 = storage.get(bucket.getName()); + + IterableSubject subject = requestAuditing.assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN); + assertAll(() -> subject.hasSize(1)); + } + + @Test + public void pageObjects() throws Exception { + String baseName = generator.randomObjectName(); + Blob blob1 = storage.create(BlobInfo.newBuilder(bucket, baseName + "1").build()); + Blob blob2 = storage.create(BlobInfo.newBuilder(bucket, baseName + "2").build()); + + requestAuditing.clear(); + ImmutableList expectedNamess = ImmutableList.of(blob1.getName(), blob2.getName()); + Page page = + storage.list(bucket.getName(), BlobListOption.prefix(baseName), BlobListOption.pageSize(1)); + + List collect = page.streamAll().map(BlobInfo::getName).collect(Collectors.toList()); + IterableSubject subject = requestAuditing.assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN); + assertAll( + () -> assertThat(collect).hasSize(2), + () -> assertThat(collect).containsExactlyElementsIn(expectedNamess), + () -> subject.hasSize(2)); + } + + @Test + public void pageBucket() throws Exception { + String baseName = generator.randomBucketName(); + BucketInfo info1 = BucketInfo.of(baseName + "1"); + BucketInfo info2 = BucketInfo.of(baseName + "2"); + try (TemporaryBucket tmp1 = + TemporaryBucket.newBuilder().setBucketInfo(info1).setStorage(storage).build(); + TemporaryBucket tmp2 = + TemporaryBucket.newBuilder().setBucketInfo(info2).setStorage(storage).build()) { + requestAuditing.clear(); + Page page = + 
storage.list(BucketListOption.prefix(baseName), BucketListOption.pageSize(1)); + + List collect = page.streamAll().collect(Collectors.toList()); + IterableSubject subject = requestAuditing.assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN); + assertAll(() -> assertThat(collect).hasSize(2), () -> subject.hasSize(2)); + } + } + + @Test + public void readObject() throws Exception { + byte[] expected = DataGenerator.base64Characters().genBytes(512 * 1024 + 45); + String expectedXxd = xxd(expected); + + Blob gen1 = + storage.create( + BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(), + expected, + BlobTargetOption.doesNotExist()); + + requestAuditing.clear(); + byte[] actual = storage.readAllBytes(gen1.getBlobId(), BlobSourceOption.generationMatch()); + IterableSubject subject = requestAuditing.assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN); + String actualXxd = xxd(actual); + + assertAll(() -> subject.hasSize(1), () -> assertThat(actualXxd).isEqualTo(expectedXxd)); + } + + @Test + public void directUpload() throws Exception { + byte[] expected = DataGenerator.base64Characters().genBytes(512 * 1024 + 45); + String expectedXxd = xxd(expected); + + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + requestAuditing.clear(); + Blob gen1 = storage.create(info, expected, BlobTargetOption.doesNotExist()); + IterableSubject subject = requestAuditing.assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN); + + byte[] actual = storage.readAllBytes(gen1.getBlobId(), BlobSourceOption.generationMatch()); + String actualXxd = xxd(actual); + + assertAll(() -> subject.hasSize(1), () -> assertThat(actualXxd).isEqualTo(expectedXxd)); + } + + @Test + public void resumableUpload() throws Exception { + byte[] expected = DataGenerator.base64Characters().genBytes(512 * 1024 + 45); + String expectedXxd = xxd(expected); + + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + try (WriteChannel writer = 
storage.writer(info, BlobWriteOption.doesNotExist())) { + writer.setChunkSize(256 * 1024); + writer.write(ByteBuffer.wrap(Arrays.copyOfRange(expected, 0, 256 * 1024))); + writer.write(ByteBuffer.wrap(Arrays.copyOfRange(expected, 256 * 1024, 512 * 1024))); + writer.write(ByteBuffer.wrap(Arrays.copyOfRange(expected, 512 * 1024, expected.length))); + } + IterableSubject subject = requestAuditing.assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN); + + byte[] actual = storage.readAllBytes(info.getBlobId()); + String actualXxd = xxd(actual); + + // We expect 4 distinct requests: + // 1. start resumable session + // 2. PUT first 256KiB + // 3. PUT second 256KiB + // 4. Finalize session and put final 45B + assertAll(() -> subject.hasSize(4), () -> assertThat(actualXxd).isEqualTo(expectedXxd)); + } + + @Test + public void batch() throws Exception { + BlobInfo info1 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobInfo info2 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobInfo info3 = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + storage.create(info1, BlobTargetOption.doesNotExist()); + storage.create(info2, BlobTargetOption.doesNotExist()); + storage.create(info3, BlobTargetOption.doesNotExist()); + + requestAuditing.clear(); + OffsetDateTime now = Clock.systemUTC().instant().atOffset(ZoneOffset.UTC); + + StorageBatch batch = storage.batch(); + StorageBatchResult r1 = batch.get(info1.getBlobId()); + StorageBatchResult r2 = + batch.update(info2.toBuilder().setCustomTimeOffsetDateTime(now).build()); + StorageBatchResult r3 = batch.delete(info3.getBlobId()); + + batch.submit(); + assertAll( + () -> assertThat(r1).isNotNull(), + () -> + assertThat(r2.get().getCustomTimeOffsetDateTime().truncatedTo(ChronoUnit.MILLIS)) + .isEqualTo(now.truncatedTo(ChronoUnit.MILLIS)), + () -> assertThat(r3.get()).isTrue(), + () -> { + IterableSubject subject = + 
requestAuditing.assertRequestHeader(X_GOOG_GCS_IDEMPOTENCY_TOKEN); + subject.hasSize(3); + }); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITJournalingBlobWriteSessionConfigTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITJournalingBlobWriteSessionConfigTest.java new file mode 100644 index 000000000000..84bfafe8fabe --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITJournalingBlobWriteSessionConfigTest.java @@ -0,0 +1,128 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BlobWriteSession; +import com.google.cloud.storage.BlobWriteSessionConfigs; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.JournalingBlobWriteSessionConfig; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.WritableByteChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +public final class ITJournalingBlobWriteSessionConfigTest { + private static final int _1MiB = 1024 * 1024; + private static final int _256MiB = 256 * _1MiB; + + @Inject + @StorageFixture(Transport.GRPC) + public Storage storage; + + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); + + private Storage 
journalingStorage; + private Path tempDir; + + @Before + public void setUp() throws Exception { + tempDir = temporaryFolder.newFolder(generator.randomObjectName()).toPath(); + JournalingBlobWriteSessionConfig journaling = + BlobWriteSessionConfigs.journaling(ImmutableList.of(tempDir)); + journalingStorage = + this.storage.getOptions().toBuilder() + .setBlobWriteSessionConfig(journaling) + .build() + .getService(); + } + + @After + public void tearDown() throws Exception { + if (journalingStorage != null) { + journalingStorage.close(); + } + } + + @Test + public void allBytesProperlyTransmitted() throws Exception { + + Random rand = new Random(bucket.getName().hashCode()); + int objectSize = _256MiB; + byte[] bytes = DataGenerator.rand(rand).genBytes(objectSize); + + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + + BlobWriteSession blobWriteSession = + journalingStorage.blobWriteSession(info, BlobWriteOption.doesNotExist()); + try (WritableByteChannel w = blobWriteSession.open()) { + w.write(ByteBuffer.wrap(bytes)); + } + + BlobInfo resource = blobWriteSession.getResult().get(1, TimeUnit.SECONDS); + + byte[] actual = storage.readAllBytes(info.getBlobId()); + assertAll( + () -> assertThat(resource.getSize()).isEqualTo(objectSize), + () -> assertThat(actual).isEqualTo(bytes)); + } + + @Test + public void journalFileMustNotBeLeftOnDiskAfterSuccess() throws IOException { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + byte[] bytes = DataGenerator.base64Characters().genBytes(17); + BlobWriteSession blobWriteSession = + journalingStorage.blobWriteSession(info, BlobWriteOption.doesNotExist()); + try (WritableByteChannel w = blobWriteSession.open()) { + w.write(ByteBuffer.wrap(bytes)); + } + + try (Stream stream = Files.list(tempDir)) { + ImmutableList leftOverFiles = stream.collect(ImmutableList.toImmutableList()); + assertThat(leftOverFiles).isEmpty(); + } + } +} diff --git 
a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITJsonPatchTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITJsonPatchTest.java new file mode 100644 index 000000000000..bb2bdfe69180 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITJsonPatchTest.java @@ -0,0 +1,147 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.LifecycleRule; +import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleAction; +import com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleCondition; +import com.google.cloud.storage.Cors; +import com.google.cloud.storage.Cors.Origin; +import com.google.cloud.storage.HttpMethod; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import 
com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.PROD}, + transports = {Transport.HTTP, Transport.GRPC}) +public final class ITJsonPatchTest { + + @Inject public Storage storage; + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + @Test + public void object() throws Exception { + String cacheControl = "max-age=60"; + String contentDisposition = "attachment"; + String contentEncoding = "identity"; + String contentLanguage = "en-US"; + String contentType = "text/plain"; + BlobInfo info = + BlobInfo.newBuilder(bucket, generator.randomObjectName()) + .setCacheControl(cacheControl) + .setContentDisposition(contentDisposition) + .setContentEncoding(contentEncoding) + .setContentLanguage(contentLanguage) + .setContentType(contentType) + .build(); + + Blob gen1 = storage.create(info); + assertAll( + () -> assertThat(gen1.getCacheControl()).isEqualTo(cacheControl), + () -> assertThat(gen1.getContentDisposition()).isEqualTo(contentDisposition), + () -> assertThat(gen1.getContentEncoding()).isEqualTo(contentEncoding), + () -> assertThat(gen1.getContentLanguage()).isEqualTo(contentLanguage), + () -> assertThat(gen1.getContentType()).isEqualTo(contentType)); + BlobInfo update = + gen1.toBuilder() + .setCacheControl(null) + .setContentDisposition(null) + .setContentEncoding(null) + .setContentLanguage(null) + .setContentType(null) + .build(); + Blob gen2 = + storage.update(update, BlobTargetOption.metagenerationMatch(gen1.getMetageneration())); + assertAll( + () -> assertThat(gen2.getCacheControl()).isAnyOf("", null), + () -> assertThat(gen2.getContentDisposition()).isAnyOf("", null), + 
() -> assertThat(gen2.getContentEncoding()).isAnyOf("", null), + () -> assertThat(gen2.getContentLanguage()).isAnyOf("", null), + () -> assertThat(gen2.getContentType()).isAnyOf("", null)); + } + + @Test + public void bucket() throws Exception { + ImmutableList lifecycleRules = + ImmutableList.of( + new LifecycleRule( + LifecycleAction.newDeleteAction(), + LifecycleCondition.newBuilder() + .setMatchesPrefix(ImmutableList.of("blahblahblah")) + .build())); + ImmutableList cors = + ImmutableList.of( + Cors.newBuilder() + .setMaxAgeSeconds(300) + .setMethods(ImmutableList.of(HttpMethod.GET)) + .setOrigins(ImmutableList.of(Origin.any())) + .setResponseHeaders(ImmutableList.of("blah2blah")) + .build()); + String indexPage = "index.html"; + String notFoundPage = "404.html"; + BucketInfo info = + BucketInfo.newBuilder(generator.randomBucketName()) + .setLifecycleRules(lifecycleRules) + .setCors(cors) + .setIndexPage(indexPage) + .setNotFoundPage(notFoundPage) + .build(); + + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder().setBucketInfo(info).setStorage(storage).build()) { + BucketInfo gen1 = tmpBucket.getBucket(); + + assertAll( + () -> assertThat(gen1.getLifecycleRules()).isEqualTo(lifecycleRules), + () -> assertThat(gen1.getCors()).isEqualTo(cors), + () -> assertThat(gen1.getIndexPage()).isEqualTo(indexPage), + () -> assertThat(gen1.getNotFoundPage()).isEqualTo(notFoundPage)); + BucketInfo update = + gen1.toBuilder() + .setLifecycleRules(ImmutableList.of()) + .setCors(ImmutableList.of()) + .setIndexPage(null) + .setNotFoundPage(null) + .build(); + Bucket gen2 = storage.update(update, BucketTargetOption.metagenerationMatch()); + assertAll( + () -> assertThat(gen2.getLifecycleRules()).isAnyOf(ImmutableList.of(), null), + () -> assertThat(gen2.getCors()).isAnyOf(ImmutableList.of(), null), + () -> assertThat(gen2.getIndexPage()).isAnyOf("", null), + () -> assertThat(gen2.getNotFoundPage()).isAnyOf("", null)); + } + } +} diff --git 
a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITKmsTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITKmsTest.java new file mode 100644 index 000000000000..8753ab6173f4 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITKmsTest.java @@ -0,0 +1,258 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.CopyWriter; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobField; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import 
com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.it.runner.registry.KmsFixture; +import com.google.common.collect.ImmutableMap; +import com.google.common.io.BaseEncoding; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.security.Key; +import javax.crypto.spec.SecretKeySpec; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = Backend.PROD) +public class ITKmsTest { + + private static final byte[] BLOB_BYTE_CONTENT = {0xD, 0xE, 0xA, 0xD}; + private static final String BLOB_STRING_CONTENT = "Hello Google Cloud Storage!"; + private static final String BASE64_KEY = "JVzfVl8NLD9FjedFuStegjRfES5ll5zc59CIXw572OA="; + private static final Key KEY = + new SecretKeySpec(BaseEncoding.base64().decode(BASE64_KEY), "AES256"); + private static final String CONTENT_TYPE = "text/plain"; + + @Inject public Storage storage; + @Inject public BucketInfo bucket; + @Inject public Generator generator; + @Inject public KmsFixture kms; + + @Test + public void testClearBucketDefaultKmsKeyName() { + String bucketName = generator.randomBucketName(); + Bucket remoteBucket = + storage.create( + BucketInfo.newBuilder(bucketName) + .setDefaultKmsKeyName(kms.getKey1().getName()) + .setLocation(kms.getKeyRingLocation()) + .build()); + try { + assertEquals(kms.getKey1().getName(), remoteBucket.getDefaultKmsKeyName()); + Bucket updatedBucket = remoteBucket.toBuilder().setDefaultKmsKeyName(null).build().update(); + assertNull(updatedBucket.getDefaultKmsKeyName()); + } finally { + BucketCleaner.doCleanup(bucketName, storage); + } + } + + @Test + public void testUpdateBucketDefaultKmsKeyName() { + String bucketName = generator.randomBucketName(); + Bucket remoteBucket = + storage.create( + 
BucketInfo.newBuilder(bucketName) + .setDefaultKmsKeyName(kms.getKey1().getName()) + .setLocation(kms.getKeyRingLocation()) + .build()); + try { + assertEquals(kms.getKey1().getName(), remoteBucket.getDefaultKmsKeyName()); + Bucket updatedBucket = + remoteBucket.toBuilder().setDefaultKmsKeyName(kms.getKey2().getName()).build().update(); + assertEquals(kms.getKey2().getName(), updatedBucket.getDefaultKmsKeyName()); + } finally { + BucketCleaner.doCleanup(bucketName, storage); + } + } + + @Test + public void testCreateBlobWithKmsKeyName() { + String blobName = generator.randomObjectName(); + String bucketName = bucket.getName(); + BlobInfo blob = BlobInfo.newBuilder(bucketName, blobName).build(); + Blob remoteBlob = + storage.create( + blob, BLOB_BYTE_CONTENT, Storage.BlobTargetOption.kmsKeyName(kms.getKey1().getName())); + assertNotNull(remoteBlob); + assertEquals(blob.getBucket(), remoteBlob.getBucket()); + assertEquals(blob.getName(), remoteBlob.getName()); + assertNotNull(remoteBlob.getKmsKeyName()); + assertTrue(remoteBlob.getKmsKeyName().startsWith(kms.getKey1().getName())); + byte[] readBytes = storage.readAllBytes(bucketName, blobName); + assertArrayEquals(BLOB_BYTE_CONTENT, readBytes); + } + + @Test(expected = StorageException.class) + public void testCreateBlobWithKmsKeyNameAndCustomerSuppliedKeyFails() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + storage.create( + blob, + BLOB_BYTE_CONTENT, + Storage.BlobTargetOption.encryptionKey(KEY), + Storage.BlobTargetOption.kmsKeyName(kms.getKey1().getName())); + } + + @Test + public void testCreateBlobWithDefaultKmsKeyName() { + String bucketName = generator.randomBucketName(); + Bucket bucket = + storage.create( + BucketInfo.newBuilder(bucketName) + .setDefaultKmsKeyName(kms.getKey1().getName()) + .setLocation(kms.getKeyRingLocation()) + .build()); + assertEquals(bucket.getDefaultKmsKeyName(), kms.getKey1().getName()); + + try { + String 
blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + Blob remoteBlob = storage.create(blob, BLOB_BYTE_CONTENT); + assertNotNull(remoteBlob); + assertEquals(blob.getBucket(), remoteBlob.getBucket()); + assertEquals(blob.getName(), remoteBlob.getName()); + assertNotNull(remoteBlob.getKmsKeyName()); + assertTrue(remoteBlob.getKmsKeyName().startsWith(kms.getKey1().getName())); + byte[] readBytes = storage.readAllBytes(bucketName, blobName); + assertArrayEquals(BLOB_BYTE_CONTENT, readBytes); + } finally { + BucketCleaner.doCleanup(bucketName, storage); + } + } + + @Test + public void testGetBlobKmsKeyNameField() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).setContentType(CONTENT_TYPE).build(); + assertNotNull( + storage.create(blob, Storage.BlobTargetOption.kmsKeyName(kms.getKey1().getName()))); + Blob remoteBlob = + storage.get(blob.getBlobId(), Storage.BlobGetOption.fields(BlobField.KMS_KEY_NAME)); + assertEquals(blob.getBlobId(), remoteBlob.getBlobId()); + assertTrue(remoteBlob.getKmsKeyName().startsWith(kms.getKey1().getName())); + assertNull(remoteBlob.getContentType()); + } + + @Test + public void testRotateFromCustomerEncryptionToKmsKey() { + String sourceBlobName = generator.randomObjectName(); + BlobId source = BlobId.of(bucket.getName(), sourceBlobName); + ImmutableMap metadata = ImmutableMap.of("k", "v"); + Blob remoteBlob = + storage.create( + BlobInfo.newBuilder(source).build(), + BLOB_BYTE_CONTENT, + Storage.BlobTargetOption.encryptionKey(KEY)); + assertNotNull(remoteBlob); + String targetBlobName = generator.randomObjectName(); + BlobInfo target = + BlobInfo.newBuilder(bucket, targetBlobName) + .setContentType(CONTENT_TYPE) + .setMetadata(metadata) + .build(); + Storage.CopyRequest req = + Storage.CopyRequest.newBuilder() + .setSource(source) + .setSourceOptions(Storage.BlobSourceOption.decryptionKey(BASE64_KEY)) + .setTarget(target, 
Storage.BlobTargetOption.kmsKeyName(kms.getKey1().getName())) + .build(); + CopyWriter copyWriter = storage.copy(req); + assertEquals(bucket.getName(), copyWriter.getResult().getBucket()); + assertEquals(targetBlobName, copyWriter.getResult().getName()); + assertEquals(CONTENT_TYPE, copyWriter.getResult().getContentType()); + assertNotNull(copyWriter.getResult().getKmsKeyName()); + assertTrue(copyWriter.getResult().getKmsKeyName().startsWith(kms.getKey1().getName())); + assertArrayEquals(BLOB_BYTE_CONTENT, copyWriter.getResult().getContent()); + assertEquals(metadata, copyWriter.getResult().getMetadata()); + assertTrue(copyWriter.isDone()); + assertTrue(storage.delete(bucket.getName(), targetBlobName)); + } + + @Test(expected = StorageException.class) + public void testRotateFromCustomerEncryptionToKmsKeyWithCustomerEncryption() { + String sourceBlobName = generator.randomObjectName(); + BlobId source = BlobId.of(bucket.getName(), sourceBlobName); + ImmutableMap metadata = ImmutableMap.of("k", "v"); + Blob remoteBlob = + storage.create( + BlobInfo.newBuilder(source).build(), + BLOB_BYTE_CONTENT, + Storage.BlobTargetOption.encryptionKey(KEY)); + assertNotNull(remoteBlob); + String targetBlobName = generator.randomObjectName(); + BlobInfo target = + BlobInfo.newBuilder(bucket, targetBlobName) + .setContentType(CONTENT_TYPE) + .setMetadata(metadata) + .build(); + Storage.CopyRequest req = + Storage.CopyRequest.newBuilder() + .setSource(source) + .setSourceOptions(Storage.BlobSourceOption.decryptionKey(BASE64_KEY)) + .setTarget( + target, + Storage.BlobTargetOption.encryptionKey(KEY), + Storage.BlobTargetOption.kmsKeyName(kms.getKey1().getName())) + .build(); + storage.copy(req); + } + + @Test + public void testWriterWithKmsKeyName() throws IOException { + // Write an empty object with a kmsKeyName. 
+ String blobName = generator.randomObjectName(); + BlobInfo blobInfo = BlobInfo.newBuilder(bucket, blobName).build(); + Blob blob = + storage.create(blobInfo, Storage.BlobTargetOption.kmsKeyName(kms.getKey1().getName())); + + // Create a writer using blob that already has metadata received from Storage API. + int numberOfBytes; + byte[] content = BLOB_STRING_CONTENT.getBytes(UTF_8); + try (WriteChannel writer = blob.writer()) { + numberOfBytes = writer.write(ByteBuffer.wrap(content, 0, content.length)); + } + assertThat(numberOfBytes).isEqualTo(content.length); + assertThat(blob.getKmsKeyName()).isNotNull(); + assertThat(storage.delete(bucket.getName(), blobName)).isTrue(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITListBucketTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITListBucketTest.java new file mode 100644 index 000000000000..f539e01cc21c --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITListBucketTest.java @@ -0,0 +1,102 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketListOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.BucketFixture; +import com.google.cloud.storage.it.runner.annotations.BucketType; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableMap; +import java.util.Map; +import java.util.stream.Collectors; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.TEST_BENCH}, + transports = {Transport.HTTP, Transport.GRPC}) +public class ITListBucketTest { + @Inject public Storage storage; + + @Inject public BucketInfo defaultBucket; + + @Inject public Generator generator; + + @Inject + @BucketFixture(BucketType.HNS) + public BucketInfo hnsBucket; + + private static final String UNREACHABLE_BUCKET_SUFFIX = ".unreachable"; + + @Test + public void testListBucketWithPartialSuccess() throws Exception { + doTest(Reachability.Unreachable, BucketListOption.returnPartialSuccess(true)); + } + + @Test + public void testListBucketWithoutPartialSuccess() throws Exception { + doTest(Reachability.Reachable); + } + + private void doTest( + Reachability expectedReachabilityOfUnreachableBucket, BucketListOption... 
bucketListOption) + throws Exception { + // TESTBENCH considers a bucket to be unreachable if the bucket name contains "unreachable" + String name = generator.randomBucketName() + UNREACHABLE_BUCKET_SUFFIX; + BucketInfo info = BucketInfo.of(name); + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder().setBucketInfo(info).setStorage(storage).build()) { + Map expected = + ImmutableMap.of( + defaultBucket.getName(), Reachability.Reachable, + hnsBucket.getName(), Reachability.Reachable, + tmpBucket.getBucket().getName(), expectedReachabilityOfUnreachableBucket); + + Page page = storage.list(bucketListOption); + + Map actual = + page.streamAll().collect(Collectors.toMap(BucketInfo::getName, Reachability::forBucket)); + + assertThat(actual).containsAtLeastEntriesIn(expected); + } + } + + private enum Reachability { + Reachable, + Unreachable; + + static Reachability forBucket(BucketInfo b) { + if (b.isUnreachable() != null && b.isUnreachable()) { + return Unreachable; + } else { + return Reachable; + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITMultipartUploadClientTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITMultipartUploadClientTest.java new file mode 100644 index 000000000000..1b60312750f9 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITMultipartUploadClientTest.java @@ -0,0 +1,472 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.fail; + +import com.google.cloud.ReadChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.HttpStorageOptions; +import com.google.cloud.storage.MultipartUploadClient; +import com.google.cloud.storage.MultipartUploadSettings; +import com.google.cloud.storage.RequestBody; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.multipartupload.model.AbortMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CompletedMultipartUpload; +import com.google.cloud.storage.multipartupload.model.CompletedPart; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.ListPartsRequest; +import com.google.cloud.storage.multipartupload.model.ListPartsResponse; +import com.google.cloud.storage.multipartupload.model.UploadPartRequest; +import com.google.cloud.storage.multipartupload.model.UploadPartResponse; 
+import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Random; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP}, + backends = {Backend.PROD}) +public final class ITMultipartUploadClientTest { + + private static final int _5MiB = 5 * 1024 * 1024; + + @Inject public BucketInfo bucket; + + @Inject public Storage injectedStorage; + + @Inject public Transport transport; + + @Inject public Generator generator; + + private MultipartUploadClient multipartUploadClient; + + @Before + public void setUp() { + multipartUploadClient = + MultipartUploadClient.create( + MultipartUploadSettings.of((HttpStorageOptions) injectedStorage.getOptions())); + } + + @Test + public void testMultipartUpload() throws IOException { + doTest(12 * _5MiB + 37); + } + + @Test + public void testMultipartUpload_parallel() throws Exception { + // This test is slow and resource-intensive. 
+ long objectSize = 513 * 1024 * 1024 + 29; // 513 MiB + 29 bytes + int partSize = 8 * 1024 * 1024; // 8 MiB + + Path tempFile = Files.createTempFile("multipart-upload-it", ".bin"); + try { + createRandomFile(tempFile, objectSize); + + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + CreateMultipartUploadResponse createResponse = createMultipartUpload(info); + String uploadId = createResponse.uploadId(); + + List completedParts = + parallelUpload(info, uploadId, tempFile, objectSize, partSize); + + completeMultipartUpload(info, uploadId, completedParts); + + Blob result = injectedStorage.get(info.getBlobId()); + assertThat(result).isNotNull(); + assertThat(result.getSize()).isEqualTo(objectSize); + + verifyContents(info, tempFile); + } finally { + Files.deleteIfExists(tempFile); + } + } + + @Test + public void testAbort() { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + + CreateMultipartUploadResponse createResponse = createMultipartUpload(info); + String uploadId = createResponse.uploadId(); + + byte[] bytes = DataGenerator.rand(new Random()).genBytes(_5MiB); + + uploadPart(info, uploadId, 1, bytes); + abortMultipartUpload(info, uploadId); + + Blob blob = injectedStorage.get(info.getBlobId()); + assertThat(blob).isNull(); + } + + @Test + public void testMultipartUpload_singlePart() throws IOException { + doTest(_5MiB - 1); + } + + @Test + public void testMultipartUpload_zeroByteFile() { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + + CreateMultipartUploadResponse createResponse = createMultipartUpload(info); + String uploadId = createResponse.uploadId(); + byte[] bytes = new byte[0]; + + UploadPartResponse uploadPartResponse = uploadPart(info, uploadId, 1, bytes); + + List completedParts = new ArrayList<>(); + completedParts.add( + CompletedPart.builder().partNumber(1).eTag(uploadPartResponse.eTag()).build()); + + completeMultipartUpload(info, 
uploadId, completedParts); + + Blob result = injectedStorage.get(info.getBlobId()); + byte[] actual = injectedStorage.readAllBytes(info.getBlobId()); + + assertThat(result).isNotNull(); + assertThat(result.getSize()).isEqualTo(0); + assertBytesEqual(actual, new byte[0]); + } + + @Test + public void testComplete_noParts() { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + CreateMultipartUploadResponse createResponse = createMultipartUpload(info); + String uploadId = createResponse.uploadId(); + + List completedParts = new ArrayList<>(); + try { + completeMultipartUpload(info, uploadId, completedParts); + fail("Expected StorageException"); + } catch (StorageException e) { + assertThat(e.getMessage()).contains("MalformedCompleteMultipartUploadRequest"); + } finally { + // cleanup + abortMultipartUpload(info, uploadId); + } + } + + @Test + public void testListParts_afterAbort() { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + + CreateMultipartUploadResponse createResponse = createMultipartUpload(info); + String uploadId = createResponse.uploadId(); + + byte[] bytes = DataGenerator.rand(new Random()).genBytes(_5MiB); + uploadPart(info, uploadId, 1, bytes); + + abortMultipartUpload(info, uploadId); + + try { + ListPartsRequest.Builder listPartsBuilder = + ListPartsRequest.builder() + .bucket(info.getBucket()) + .key(info.getName()) + .uploadId(uploadId); + multipartUploadClient.listParts(listPartsBuilder.build()); + fail("Expected StorageException"); + } catch (StorageException e) { + assertThat(e.getMessage()).contains("The requested upload was not found."); + } + } + + @Test + public void testComplete_wrongETag() { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + CreateMultipartUploadResponse createResponse = createMultipartUpload(info); + String uploadId = createResponse.uploadId(); + + Random rand = new Random(); + byte[] bytes1 = 
DataGenerator.rand(rand).genBytes(_5MiB); + UploadPartResponse uploadPartResponse1 = uploadPart(info, uploadId, 1, bytes1); + + byte[] bytes2 = DataGenerator.rand(rand).genBytes(_5MiB); + uploadPart(info, uploadId, 2, bytes2); + + List completedParts = new ArrayList<>(); + completedParts.add( + CompletedPart.builder().partNumber(1).eTag(uploadPartResponse1.eTag()).build()); + completedParts.add( + CompletedPart.builder().partNumber(2).eTag("\"dummytag\"").build()); // wrong etag + + try { + completeMultipartUpload(info, uploadId, completedParts); + fail("Expected StorageException"); + } catch (StorageException e) { + assertThat(e.getMessage()).contains("The requested upload part was not found."); + } finally { + abortMultipartUpload(info, uploadId); + } + } + + @Test + public void testUploadPart_withCustomChecksum() throws IOException { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + CreateMultipartUploadResponse createResponse = createMultipartUpload(info); + String uploadId = createResponse.uploadId(); + + ChecksummedTestContent content = + ChecksummedTestContent.of("hello world".getBytes(StandardCharsets.UTF_8)); + + UploadPartRequest request = + UploadPartRequest.builder() + .bucket(info.getBucket()) + .key(info.getName()) + .uploadId(uploadId) + .partNumber(1) + .crc32c(content.getCrc32cBase64()) + .build(); + UploadPartResponse response = + multipartUploadClient.uploadPart(request, RequestBody.of(content.asByteBuffer())); + assertThat(response).isNotNull(); + assertThat(response.eTag()).isNotNull(); + + abortMultipartUpload(info, uploadId); + } + + @Test + public void testUploadPart_withCustomChecksum_fail() throws IOException { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + CreateMultipartUploadResponse createResponse = createMultipartUpload(info); + String uploadId = createResponse.uploadId(); + + ChecksummedTestContent content = + ChecksummedTestContent.of("hello 
world".getBytes(StandardCharsets.UTF_8)); + + UploadPartRequest request = + UploadPartRequest.builder() + .bucket(info.getBucket()) + .key(info.getName()) + .uploadId(uploadId) + .partNumber(1) + .crc32c("1234") // Invalid checksum + .build(); + try { + multipartUploadClient.uploadPart(request, RequestBody.of(content.asByteBuffer())); + fail("Expected StorageException"); + } catch (StorageException e) { + assertThat(e.getMessage()) + .contains("The CRC32C you specified did not match what we computed."); + } finally { + abortMultipartUpload(info, uploadId); + } + } + + private void doTest(int objectSizeBytes) throws IOException { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + + CreateMultipartUploadResponse createResponse = createMultipartUpload(info); + String uploadId = createResponse.uploadId(); + byte[] bytes = DataGenerator.rand(new Random()).genBytes(objectSizeBytes); + + List completedParts = new ArrayList<>(); + int partNumber = 1; + for (int i = 0; i < objectSizeBytes; i += _5MiB) { + int len = Math.min(_5MiB, objectSizeBytes - i); + byte[] partBuffer = java.util.Arrays.copyOfRange(bytes, i, i + len); + UploadPartResponse uploadPartResponse = uploadPart(info, uploadId, partNumber, partBuffer); + completedParts.add( + CompletedPart.builder().partNumber(partNumber).eTag(uploadPartResponse.eTag()).build()); + partNumber++; + } + completedParts.sort(Comparator.comparingInt(CompletedPart::partNumber)); + + ListPartsRequest.Builder listPartsBuilder = + ListPartsRequest.builder().bucket(info.getBucket()).key(info.getName()).uploadId(uploadId); + ListPartsResponse listPartsResponse = multipartUploadClient.listParts(listPartsBuilder.build()); + assertThat(listPartsResponse.parts()).hasSize(completedParts.size()); + + completeMultipartUpload(info, uploadId, completedParts); + + Blob result = injectedStorage.get(info.getBlobId()); + byte[] actual = injectedStorage.readAllBytes(info.getBlobId()); + + 
assertThat(result).isNotNull(); + assertBytesEqual(actual, bytes); + } + + private void assertBytesEqual(byte[] actual, byte[] expected) { + assertThat(actual).isEqualTo(expected); + assertThat(xxd(actual)).isEqualTo(xxd(expected)); + } + + private CreateMultipartUploadResponse createMultipartUpload(BlobInfo info) { + CreateMultipartUploadRequest createRequest = + CreateMultipartUploadRequest.builder().bucket(info.getBucket()).key(info.getName()).build(); + return multipartUploadClient.createMultipartUpload(createRequest); + } + + private UploadPartResponse uploadPart( + BlobInfo info, String uploadId, int partNumber, byte[] bytes) { + RequestBody body = RequestBody.of(ByteBuffer.wrap(bytes)); + return uploadPart(info, uploadId, partNumber, body); + } + + private UploadPartResponse uploadPart( + BlobInfo info, String uploadId, int partNumber, RequestBody body) { + UploadPartRequest uploadPartRequest = + UploadPartRequest.builder() + .partNumber(partNumber) + .uploadId(uploadId) + .bucket(info.getBucket()) + .key(info.getName()) + .build(); + return multipartUploadClient.uploadPart(uploadPartRequest, body); + } + + private void completeMultipartUpload(BlobInfo info, String uploadId, List parts) { + CompletedMultipartUpload completedMultipartUpload = + CompletedMultipartUpload.builder().parts(parts).build(); + CompleteMultipartUploadRequest completeRequest = + CompleteMultipartUploadRequest.builder() + .bucket(info.getBucket()) + .key(info.getName()) + .uploadId(uploadId) + .multipartUpload(completedMultipartUpload) + .build(); + multipartUploadClient.completeMultipartUpload(completeRequest); + } + + private void abortMultipartUpload(BlobInfo info, String uploadId) { + AbortMultipartUploadRequest abortRequest = + AbortMultipartUploadRequest.builder() + .bucket(info.getBucket()) + .key(info.getName()) + .uploadId(uploadId) + .build(); + multipartUploadClient.abortMultipartUpload(abortRequest); + } + + private void createRandomFile(Path path, long size) throws 
IOException { + try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(path))) { + byte[] buffer = new byte[1024 * 1024]; // 1MB buffer + Random random = new Random(); + for (long i = 0; i < size; i += buffer.length) { + random.nextBytes(buffer); + int len = (int) Math.min(buffer.length, size - i); + os.write(buffer, 0, len); + } + } + } + + private List parallelUpload( + BlobInfo info, String uploadId, Path localFile, long objectSize, int partSize) + throws ExecutionException, InterruptedException { + int numThreads = Runtime.getRuntime().availableProcessors(); + ExecutorService executor = Executors.newFixedThreadPool(numThreads); + List> futures = new ArrayList<>(); + + long numParts = (objectSize + partSize - 1) / partSize; + + for (int i = 0; i < numParts; i++) { + final int partNumber = i + 1; + final long offset = (long) i * partSize; + final long len = Math.min(partSize, objectSize - offset); + + Callable uploadTask = + () -> { + ByteBuffer partBuffer = ByteBuffer.allocate((int) len); + try (FileChannel fileChannel = FileChannel.open(localFile, StandardOpenOption.READ)) { + fileChannel.read(partBuffer, offset); + } + partBuffer.flip(); + RequestBody partBody = RequestBody.of(partBuffer); + UploadPartResponse uploadPartResponse = + uploadPart(info, uploadId, partNumber, partBody); + return CompletedPart.builder() + .partNumber(partNumber) + .eTag(uploadPartResponse.eTag()) + .build(); + }; + futures.add(executor.submit(uploadTask)); + } + + List completedParts = new ArrayList<>(); + for (Future future : futures) { + completedParts.add(future.get()); + } + executor.shutdown(); + + completedParts.sort(Comparator.comparingInt(CompletedPart::partNumber)); + return completedParts; + } + + private void verifyContents(BlobInfo info, Path expectedFile) throws IOException { + try (ReadChannel reader = injectedStorage.reader(info.getBlobId()); + InputStream expectedStream = new BufferedInputStream(Files.newInputStream(expectedFile))) { + + ByteBuffer 
cloudBuffer = ByteBuffer.allocate(1024 * 1024); // 1MB buffer + + while (reader.read(cloudBuffer) > 0) { + cloudBuffer.flip(); + + byte[] actualBytes = new byte[cloudBuffer.remaining()]; + cloudBuffer.get(actualBytes); + + byte[] expectedBytes = new byte[actualBytes.length]; + int bytesRead = 0; + while (bytesRead < expectedBytes.length) { + int readResult = + expectedStream.read(expectedBytes, bytesRead, expectedBytes.length - bytesRead); + if (readResult == -1) { + break; + } + bytesRead += readResult; + } + + assertThat(bytesRead).isEqualTo(expectedBytes.length); + assertBytesEqual(actualBytes, expectedBytes); + cloudBuffer.clear(); + } + assertThat(expectedStream.read()).isEqualTo(-1); // Ensure we have read the whole local file + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITNestedUpdateMaskTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITNestedUpdateMaskTest.java new file mode 100644 index 000000000000..b40eb1d8e258 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITNestedUpdateMaskTest.java @@ -0,0 +1,253 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.hashMapOf; +import static com.google.common.truth.Truth.assertThat; +import static java.util.Objects.requireNonNull; + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BlobInfo.ObjectContexts; +import com.google.cloud.storage.BlobInfo.ObjectCustomContextPayload; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.ITNestedUpdateMaskTest.NestedUpdateMaskParametersProvider; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.Parameterized; +import com.google.cloud.storage.it.runner.annotations.Parameterized.Parameter; +import com.google.cloud.storage.it.runner.annotations.Parameterized.ParametersProvider; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; +import java.util.Map; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.Test; +import org.junit.runner.RunWith; + +/** + * A set of tests to specifically test scenarios related to update handling of {@link + * BlobInfo#getMetadata()} and {@link BucketInfo#getLabels()} and the various permutations which can + * be used to add and remove keys. 
+ */ +@RunWith(StorageITRunner.class) +@CrossRun( + backends = Backend.PROD, + transports = {Transport.HTTP, Transport.GRPC}) +@Parameterized(NestedUpdateMaskParametersProvider.class) +public final class ITNestedUpdateMaskTest { + + @Inject public Generator generator; + + @Inject public Storage storage; + + @Inject public BucketInfo bucket; + + @Parameter public Param param; + + public static final class NestedUpdateMaskParametersProvider implements ParametersProvider { + private static final Map empty = ImmutableMap.of(); + private static final Map k1a = ImmutableMap.of("k1", "a"); + private static final Map k2b = ImmutableMap.of("k2", "b"); + private static final Map k1z = ImmutableMap.of("k1", "z"); + private static final Map k1a_k2b = ImmutableMap.of("k1", "a", "k2", "b"); + private static final Map k1z_k2b = ImmutableMap.of("k1", "z", "k2", "b"); + private static final Map k1a_k2null = hashMapOf("k1", "a", "k2", null); + private static final Map k1null = hashMapOf("k1", null); + private static final Map k2null = hashMapOf("k2", null); + private static final Map k1null_k2null = hashMapOf("k1", null, "k2", null); + + /** + * + * + *

+     * | base                | update               | expected            |
+     * |---------------------|----------------------|---------------------|
+     * | null                | {"k1":"a"}           | {"k1":"a"}          |
+     * | {}                  | {"k1":"a"}           | {"k1":"a"}          |
+     * | {"k1":"a"}          | {"k1":"a","k2":"b"}  | {"k1":"a","k2":"b"} |
+     * | {"k1":"a"}          | {"k2":"b"}           | {"k1":"a","k2":"b"} |
+     * | {"k1":"a","k2":"b"} | {"k1":"z","k2":"b"}  | {"k1":"z","k2":"b"} |
+     * | {"k1":"a","k2":"b"} | {"k1":"z"}           | {"k1":"z","k2":"b"} |
+     * | {"k1":"a","k2":"b"} | {"k1":"a","k2":null} | {"k1":"a"}          |
+     * | {"k1":"a","k2":"b"} | {"k2":null}          | {"k1":"a"}          |
+     * | {"k1":"a"}          | {}                   | null                |
+     * | {"k1":"a"}          | {"k1":null}          | null                |
+     * | {"k1":"a","k2":"b"} | null                 | null                |
+     * | {"k1":"a","k2":"b"} | {"k1":null,"k2":null}| null                |
+     * 
+ */ + @Override + public ImmutableList parameters() { + return ImmutableList.of( + new Param("null to 1", null, k1a, k1a), + new Param("empty to 1", empty, k1a, k1a), + new Param("1 to 2 set", k1a, k1a_k2b, k1a_k2b), + new Param("1 to 2 add", k1a, k2b, k1a_k2b), + new Param("2 keys, modify 1 value (full)", k1a_k2b, k1z_k2b, k1z_k2b), + new Param("2 keys, modify 1 value (fine)", k1a_k2b, k1z, k1z_k2b), + new Param("2 keys, modify 1 null (full)", k1a_k2b, k1a_k2null, k1a), + new Param("2 keys, modify 1 null (fine)", k1a_k2b, k2null, k1a), + new Param("1 key, set empty", k1a, empty, null), + new Param("1 key, null key", k1a, k1null, null), + new Param("2 keys, set null (full)", k1a_k2b, null, null), + new Param("2 keys, set null (fine)", k1a_k2b, k1null_k2null, null)); + } + } + + @Test + public void testBucketLabels() throws Exception { + BucketInfo bucket = newBucketInfo(param.initial); + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder().setBucketInfo(bucket).setStorage(storage).build()) { + BucketInfo gen1 = tempB.getBucket(); + BucketInfo modified = gen1.toBuilder().setLabels(param.update).build(); + Bucket gen2 = storage.update(modified, BucketTargetOption.metagenerationMatch()); + assertThat(gen2.getLabels()).isEqualTo(param.expected); + } + } + + @Test + public void testBlobMetadata() { + BlobInfo blob = newBlobInfo(param.initial); + Blob gen1 = storage.create(blob, BlobTargetOption.doesNotExist()); + BlobInfo modified = gen1.toBuilder().setMetadata(param.update).build(); + Blob gen2 = storage.update(modified, BlobTargetOption.metagenerationMatch()); + assertThat(gen2.getMetadata()).isEqualTo(param.expected); + } + + @Test + public void testBlobContexts() { + ObjectContexts initial = contextsFromMap(param.initial); + ObjectContexts update = contextsFromMap(param.update); + ObjectContexts expected = contextsFromMap(param.expected); + + String blobName = generator.randomObjectName(); + BlobInfo.Builder builder = BlobInfo.newBuilder(bucket, blobName); + 
if (initial != null) { + builder.setContexts(initial); + } + BlobInfo info = builder.build(); + Blob gen1 = storage.create(info, BlobTargetOption.doesNotExist()); + + BlobInfo modified = gen1.toBuilder().setContexts(update).build(); + Blob gen2 = storage.update(modified, BlobTargetOption.metagenerationMatch()); + assertContextsWithEqualValues(gen2.getContexts(), expected); + } + + @Test + public void testBlob_metadataAndContext() { + ObjectContexts initial = contextsFromMap(param.initial); + ObjectContexts update = contextsFromMap(param.update); + ObjectContexts expected = contextsFromMap(param.expected); + + String blobName = generator.randomObjectName(); + BlobInfo.Builder builder = BlobInfo.newBuilder(bucket, blobName); + if (initial != null) { + builder.setContexts(initial); + } + if (param.initial != null) { + builder.setMetadata(param.initial); + } + + BlobInfo info = builder.build(); + Blob gen1 = storage.create(info, BlobTargetOption.doesNotExist()); + + BlobInfo modified = gen1.toBuilder().setContexts(update).setMetadata(param.update).build(); + Blob gen2 = storage.update(modified, BlobTargetOption.metagenerationMatch()); + assertContextsWithEqualValues(gen2.getContexts(), expected); + assertThat(gen2.getMetadata()).isEqualTo(param.expected); + } + + private static void assertContextsWithEqualValues( + @Nullable ObjectContexts actual, @Nullable ObjectContexts expected) { + if (expected != null && !expected.getCustom().isEmpty() && actual != null) { + Map actualCustom = actual.getCustom(); + Map expectedCustom = expected.getCustom(); + + Map actualValues = + Maps.transformValues(actualCustom, ObjectCustomContextPayload::getValue); + Map expectedValues = + Maps.transformValues(expectedCustom, ObjectCustomContextPayload::getValue); + + assertThat(actualValues).isEqualTo(expectedValues); + } else { + assertThat(actual).isEqualTo(expected); + } + } + + private BlobInfo newBlobInfo(Map metadata) { + String blobName = generator.randomObjectName(); + 
BlobInfo.Builder builder = BlobInfo.newBuilder(bucket, blobName); + if (metadata != null) { + builder.setMetadata(metadata); + } + return builder.build(); + } + + private BucketInfo newBucketInfo(Map metadata) { + BucketInfo.Builder builder = BucketInfo.newBuilder(generator.randomBucketName()); + if (metadata != null) { + builder.setLabels(metadata); + } + return builder.build(); + } + + private @Nullable ObjectContexts contextsFromMap( + @Nullable Map<@NonNull String, @Nullable String> m) { + if (m == null) { + return null; + } + Map<@NonNull String, ObjectCustomContextPayload> transformed = + Maps.transformValues( + m, v -> v == null ? null : ObjectCustomContextPayload.newBuilder().setValue(v).build()); + return ObjectContexts.newBuilder().setCustom(transformed).build(); + } + + private static final class Param { + private final String description; + @Nullable private final Map<@NonNull String, @Nullable String> initial; + @Nullable private final Map<@NonNull String, @Nullable String> update; + @Nullable private final Map<@NonNull String, @Nullable String> expected; + + private Param( + String description, + @Nullable Map<@NonNull String, @Nullable String> initial, + @Nullable Map<@NonNull String, @Nullable String> update, + @Nullable Map<@NonNull String, @Nullable String> expected) { + requireNonNull(description, "description must be non null"); + this.description = description; + this.initial = initial; + this.update = update; + this.expected = expected; + } + + @Override + public String toString() { + return description; + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITNotificationTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITNotificationTest.java new file mode 100644 index 000000000000..5e4e2497cf77 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITNotificationTest.java @@ -0,0 +1,189 @@ +/* + * Copyright 2022 Google 
LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.pubsub.v1.TopicAdminClient; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Notification; +import com.google.cloud.storage.NotificationInfo; +import com.google.cloud.storage.NotificationInfo.PayloadFormat; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableMap; +import com.google.iam.v1.Binding; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.SetIamPolicyRequest; +import java.io.IOException; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP}, + backends = {Backend.PROD}) +public class 
ITNotificationTest { + private static final Notification.PayloadFormat PAYLOAD_FORMAT = PayloadFormat.JSON_API_V1; + private static final Map CUSTOM_ATTRIBUTES = ImmutableMap.of("label1", "value1"); + private static final Logger LOGGER = LoggerFactory.getLogger(ITNotificationTest.class); + private static final String DOES_NOT_EXIST_ID = "something-that-does-not-exist-probably"; + + @Inject public Backend backend; + @Inject public Transport transport; + @Inject public Storage storage; + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + private TopicAdminClient topicAdminClient; + private String topic; + private NotificationInfo notificationInfo; + + @Before + public void setup() throws IOException { + String projectId = storage.getOptions().getProjectId(); + // square brackets are not acceptable characters for topic names, replace them with dash + // https://cloud.google.com/pubsub/docs/admin#resource_names + String name = generator.randomObjectName().replaceAll("[\\[\\]]", "-"); + topic = String.format(Locale.US, "projects/%s/topics/%s", projectId, name).trim(); + notificationInfo = + NotificationInfo.newBuilder(topic) + .setCustomAttributes(CUSTOM_ATTRIBUTES) + .setPayloadFormat(PAYLOAD_FORMAT) + .build(); + + if (backend == Backend.PROD && transport == Transport.HTTP) { + + // Configure topic admin client for notification. 
+ topicAdminClient = TopicAdminClient.create(); + topicAdminClient.createTopic(this.topic); + + GetIamPolicyRequest getIamPolicyRequest = + GetIamPolicyRequest.newBuilder().setResource(this.topic).build(); + + com.google.iam.v1.Policy policy = topicAdminClient.getIamPolicy(getIamPolicyRequest); + + Binding binding = + Binding.newBuilder().setRole("roles/owner").addMembers("allAuthenticatedUsers").build(); + + SetIamPolicyRequest setIamPolicyRequest = + SetIamPolicyRequest.newBuilder() + .setResource(this.topic) + .setPolicy(policy.toBuilder().addBindings(binding).build()) + .build(); + topicAdminClient.setIamPolicy(setIamPolicyRequest); + } + } + + @After + public void cleanup() { + /* Delete the Pub/Sub topic */ + if (topicAdminClient != null) { + try { + topicAdminClient.deleteTopic(topic); + topicAdminClient.close(); + } catch (Exception e) { + LOGGER.warn("Error while trying to delete topic and shutdown topic client", e); + } + topicAdminClient = null; + } + } + + @Test + public void listNotification_doesNotExist() throws Exception { + // create a temporary bucket to ensure we're immune from ordering on other tests + try (TemporaryBucket tempB = + TemporaryBucket.newBuilder() + .setBucketInfo(BucketInfo.of(generator.randomBucketName())) + .setStorage(storage) + .build()) { + List notifications = storage.listNotifications(tempB.getBucket().getName()); + assertThat(notifications).isEmpty(); + } + } + + @Test + public void listNotification_exists() { + Notification notification = storage.createNotification(bucket.getName(), notificationInfo); + List notifications = storage.listNotifications(bucket.getName()); + assertThat(notifications).isNotEmpty(); + assertThat(notifications).contains(notification); + } + + @Test + public void createNotification_doesNotExist() throws Exception { + Notification notification = storage.createNotification(bucket.getName(), notificationInfo); + assertAll( + () -> assertThat(notification.getNotificationId()).isNotNull(), + () -> 
assertThat(notification.getCustomAttributes()).isEqualTo(CUSTOM_ATTRIBUTES), + () -> assertThat(notification.getPayloadFormat()).isEqualTo(PAYLOAD_FORMAT), + () -> assertThat(notification.getTopic()).contains(topic)); + } + + @Test + public void getNotification_exists() throws Exception { + Notification notification = storage.createNotification(bucket.getName(), notificationInfo); + + Notification getResult = + storage.getNotification(bucket.getName(), notification.getNotificationId()); + + assertAll( + () -> assertThat(getResult.getNotificationId()).isEqualTo(notification.getNotificationId()), + () -> assertThat(getResult.getTopic()).isEqualTo(notification.getTopic()), + () -> assertThat(getResult.getEtag()).isEqualTo(notification.getEtag()), + () -> assertThat(getResult.getEventTypes()).isEqualTo(notification.getEventTypes()), + () -> assertThat(getResult.getPayloadFormat()).isEqualTo(notification.getPayloadFormat()), + () -> + assertThat(getResult.getCustomAttributes()) + .isEqualTo(notification.getCustomAttributes()), + () -> assertThat(getResult).isEqualTo(notification)); + } + + @Test + public void getNotification_doesNotExists() { + Notification getResult = storage.getNotification(bucket.getName(), DOES_NOT_EXIST_ID); + + assertThat(getResult).isNull(); + } + + @Test + public void deleteNotification_exists() { + Notification notification = storage.createNotification(bucket.getName(), notificationInfo); + boolean deleteResult = + storage.deleteNotification(bucket.getName(), notification.getNotificationId()); + assertThat(deleteResult).isTrue(); + } + + @Test + public void deleteNotification_doesNotExists() { + boolean deleteResult = storage.deleteNotification(bucket.getName(), DOES_NOT_EXIST_ID); + assertThat(deleteResult).isFalse(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITObjectAclTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITObjectAclTest.java new file mode 
100644 index 000000000000..bc395698757d --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITObjectAclTest.java @@ -0,0 +1,338 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.retry429s; +import static com.google.cloud.storage.it.ITAccessTest.dropEtag; +import static com.google.cloud.storage.it.ITAccessTest.dropEtags; +import static com.google.cloud.storage.it.ITAccessTest.hasProjectRole; +import static com.google.cloud.storage.it.ITAccessTest.hasRole; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.Acl.Project.ProjectRole; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobField; +import com.google.cloud.storage.Storage.BlobGetOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TransportCompatibility.Transport; +import 
com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.ParallelFriendly; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.it.runner.registry.ObjectsFixture; +import java.util.List; +import java.util.Optional; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = {Backend.PROD}) +@ParallelFriendly +public final class ITObjectAclTest { + + @Inject public Generator generator; + + @Inject public Storage storage; + + @Inject public BucketInfo bucketInfo; + + @Inject public ObjectsFixture objectsFixture; + + private BlobId idWith404Bucket; + + private static final Acl readAll = Acl.of(User.ofAllAuthenticatedUsers(), Role.READER); + + @Before + public void setUp() throws Exception { + String bucketName = bucketInfo.getName() + "x"; + idWith404Bucket = BlobId.of(bucketName, "does-not-matter"); + } + + /** When a bucket doesn't exist, return null for the acl value */ + @Test + public void object_acl_404_bucket_get() { + Acl acl = retry429s(() -> storage.getAcl(idWith404Bucket, User.ofAllUsers()), storage); + + assertThat(acl).isNull(); + } + + @Test + public void object_acl_404_bucket_list() { + StorageException storageException = + assertThrows( + StorageException.class, + () -> retry429s(() -> storage.listAcls(idWith404Bucket), storage)); + + assertThat(storageException.getCode()).isEqualTo(404); + } + + @Test + public void object_acl_404_bucket_create() { + StorageException storageException = + assertThrows( + StorageException.class, + () -> 
retry429s(() -> storage.createAcl(idWith404Bucket, readAll), storage)); + + assertThat(storageException.getCode()).isEqualTo(404); + } + + @Test + public void object_acl_404_bucket_update() { + StorageException storageException = + assertThrows( + StorageException.class, + () -> retry429s(() -> storage.updateAcl(idWith404Bucket, readAll), storage)); + + assertThat(storageException.getCode()).isEqualTo(404); + } + + @Test + public void object_acl_404_bucket_delete() { + boolean actual = + retry429s(() -> storage.deleteAcl(idWith404Bucket, User.ofAllUsers()), storage); + + assertThat(actual).isFalse(); + } + + /** When a blob doesn't exist, return null for the acl value */ + @Test + public void object_acl_404_object_get() { + BlobId id = tmpId(); + Acl acl = retry429s(() -> storage.getAcl(id, User.ofAllUsers()), storage); + + assertThat(acl).isNull(); + } + + @Test + public void object_acl_404_object_list() { + BlobId id = tmpId(); + StorageException storageException = + assertThrows(StorageException.class, () -> retry429s(() -> storage.listAcls(id), storage)); + + assertThat(storageException.getCode()).isEqualTo(404); + } + + @Test + public void object_acl_404_object_create() { + BlobId id = tmpId(); + StorageException storageException = + assertThrows( + StorageException.class, () -> retry429s(() -> storage.createAcl(id, readAll), storage)); + + assertThat(storageException.getCode()).isEqualTo(404); + } + + @Test + public void object_acl_404_object_update() { + BlobId id = tmpId(); + StorageException storageException = + assertThrows( + StorageException.class, () -> retry429s(() -> storage.updateAcl(id, readAll), storage)); + + assertThat(storageException.getCode()).isEqualTo(404); + } + + @Test + public void object_acl_404_object_delete() { + BlobId id = tmpId(); + // todo: json non-idempotent? 
+ boolean actual = retry429s(() -> storage.deleteAcl(id, User.ofAllUsers()), storage); + + assertThat(actual).isFalse(); + } + + /** When an object does exist, but an acl for the specified entity is not defined return null */ + @Test + public void object_acl_404_acl_get() { + BlobId id = tmpId(); + Acl acl = retry429s(() -> storage.getAcl(id, User.ofAllUsers()), storage); + + assertThat(acl).isNull(); + } + + /** Update of an acl that doesn't exist should create it */ + @Test + @Ignore("TODO: fix b/468377909 to enable test again") + public void object_acl_404_acl_update() { + Blob mgen1 = tmpObject(); + + Acl actual = + // todo: json non-idempotent? + retry429s(() -> storage.updateAcl(mgen1.getBlobId(), readAll), storage); + + assertThat(dropEtag(actual)).isEqualTo(readAll); + } + + @Test + public void object_acl_404_acl_delete() { + Blob mgen1 = tmpObject(); + + boolean actual = + // todo: json non-idempotent? + retry429s(() -> storage.deleteAcl(mgen1.getBlobId(), User.ofAllUsers()), storage); + + assertThat(actual).isFalse(); + } + + @Test + public void object_acl_200_get() { + // lookup an entity from the blob which is known to exist + BlobId id = objectsFixture.getInfo1().getBlobId(); + Blob withAcls = storage.get(id, BlobGetOption.fields(BlobField.ACL)); + + Acl expected = withAcls.getAcl().iterator().next(); + + Acl acl = retry429s(() -> storage.getAcl(id, expected.getEntity()), storage); + + assertThat(acl).isEqualTo(expected); + } + + @Test + public void object_acl_200_list() { + BlobId id = objectsFixture.getInfo1().getBlobId(); + // lookup an entity from the blob which is known to exist + Blob withAcls = storage.get(id, BlobGetOption.fields(BlobField.ACL)); + + Acl expected = withAcls.getAcl().iterator().next(); + + List acls = retry429s(() -> storage.listAcls(id), storage); + + assertThat(acls).contains(expected); + } + + @Test + @Ignore("TODO: fix b/468377909 to enable test again") + public void object_acl_200_create() { + Blob mgen1 = tmpObject(); + + 
Acl readAll = Acl.of(User.ofAllAuthenticatedUsers(), Role.READER); + Acl actual = retry429s(() -> storage.createAcl(mgen1.getBlobId(), readAll), storage); + + assertThat(actual.getEntity()).isEqualTo(readAll.getEntity()); + assertThat(actual.getRole()).isEqualTo(readAll.getRole()); + assertThat(actual.getEtag()).isNotEmpty(); + + Blob updated = storage.get(mgen1.getBlobId(), BlobGetOption.fields(BlobField.values())); + assertThat(updated.getMetageneration()).isNotEqualTo(mgen1.getMetageneration()); + + // etags change when updates happen, drop before our comparison + List expectedAcls = dropEtags(mgen1.getAcl()); + List actualAcls = dropEtags(updated.getAcl()); + assertThat(actualAcls).containsAtLeastElementsIn(expectedAcls); + assertThat(actualAcls).contains(readAll); + } + + @Test + public void object_acl_200_update() { + Blob mgen1 = tmpObject(); + + List acls = mgen1.getAcl(); + assertThat(acls).isNotEmpty(); + + Predicate isProjectEditor = hasProjectRole(ProjectRole.EDITORS); + + //noinspection OptionalGetWithoutIsPresent + Acl projectEditorAsOwner = + acls.stream().filter(hasRole(Role.OWNER).and(isProjectEditor)).findFirst().get(); + + // lower the privileges of project editors to writer from owner + Entity entity = projectEditorAsOwner.getEntity(); + Acl projectEditorAsReader = Acl.of(entity, Role.READER); + + Acl actual = + retry429s(() -> storage.updateAcl(mgen1.getBlobId(), projectEditorAsReader), storage); + + assertThat(actual.getEntity()).isEqualTo(projectEditorAsReader.getEntity()); + assertThat(actual.getRole()).isEqualTo(projectEditorAsReader.getRole()); + assertThat(actual.getEtag()).isNotEmpty(); + + Blob updated = storage.get(mgen1.getBlobId(), BlobGetOption.fields(BlobField.values())); + assertThat(updated.getMetageneration()).isNotEqualTo(mgen1.getMetageneration()); + + // etags change when updates happen, drop before our comparison + List expectedAcls = + dropEtags( + 
mgen1.getAcl().stream().filter(isProjectEditor.negate()).collect(Collectors.toList())); + List actualAcls = dropEtags(updated.getAcl()); + assertThat(actualAcls).containsAtLeastElementsIn(expectedAcls); + assertThat(actualAcls).doesNotContain(projectEditorAsOwner); + assertThat(actualAcls).contains(projectEditorAsReader); + } + + @Test + public void object_acl_200_delete() { + Blob mgen1 = tmpObject(); + + List acls = mgen1.getAcl(); + assertThat(acls).isNotEmpty(); + + Predicate isProjectEditor = hasProjectRole(ProjectRole.EDITORS); + + //noinspection OptionalGetWithoutIsPresent + Acl projectEditorAsOwner = + acls.stream().filter(hasRole(Role.OWNER).and(isProjectEditor)).findFirst().get(); + + // lower the privileges of project editors to writer from owner + Entity entity = projectEditorAsOwner.getEntity(); + + boolean actual = retry429s(() -> storage.deleteAcl(mgen1.getBlobId(), entity), storage); + + assertThat(actual).isTrue(); + + Blob updated = storage.get(mgen1.getBlobId(), BlobGetOption.fields(BlobField.values())); + assertThat(updated.getMetageneration()).isNotEqualTo(mgen1.getMetageneration()); + + // etags change when updates happen, drop before our comparison + List expectedAcls = + dropEtags( + mgen1.getAcl().stream().filter(isProjectEditor.negate()).collect(Collectors.toList())); + List actualAcls = dropEtags(updated.getAcl()); + assertThat(actualAcls).containsAtLeastElementsIn(expectedAcls); + assertThat(actualAcls).doesNotContain(projectEditorAsOwner); + Optional search = + actualAcls.stream().map(Acl::getEntity).filter(e -> e.equals(entity)).findAny(); + assertThat(search.isPresent()).isFalse(); + } + + private BlobId tmpId() { + return BlobId.of(bucketInfo.getName(), generator.randomObjectName()); + } + + private Blob tmpObject() { + BlobId id = tmpId(); + BlobInfo info = BlobInfo.newBuilder(id).build(); + return storage.create(info, BlobTargetOption.doesNotExist()); + } +} diff --git 
a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITObjectChecksumSupportTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITObjectChecksumSupportTest.java new file mode 100644 index 000000000000..5153ea127417 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITObjectChecksumSupportTest.java @@ -0,0 +1,424 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BlobWriteSession; +import com.google.cloud.storage.BlobWriteSessionConfigs; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TmpDir; +import com.google.cloud.storage.TmpFile; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.ITObjectChecksumSupportTest.ChecksummedTestContentProvider; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.Parameterized; +import com.google.cloud.storage.it.runner.annotations.Parameterized.Parameter; +import com.google.cloud.storage.it.runner.annotations.Parameterized.ParametersProvider; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import com.google.common.io.ByteStreams; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.SeekableByteChannel; +import java.nio.channels.WritableByteChannel; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.concurrent.TimeUnit; +import org.junit.Rule; 
+import org.junit.Test; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = Backend.PROD) +@Parameterized(ChecksummedTestContentProvider.class) +public final class ITObjectChecksumSupportTest { + + private static final Path tmpDir = Paths.get(System.getProperty("java.io.tmpdir")); + + @Inject public Generator generator; + + @Inject public Storage storage; + @Inject public BucketInfo bucket; + + @Inject public Transport transport; + + @Parameter public ChecksummedTestContent content; + + @Rule public final TestName testName = new TestName(); + + public static final class ChecksummedTestContentProvider implements ParametersProvider { + + @Override + public ImmutableList parameters() { + DataGenerator gen = DataGenerator.base64Characters(); + int _256KiB = 256 * 1024; + int _2MiB = 2 * 1024 * 1024; + int _24MiB = 24 * 1024 * 1024; + + return ImmutableList.of( + // empty object content + ChecksummedTestContent.of(new byte[0]), + // small, single message single stream when resumable + ChecksummedTestContent.of(gen.genBytes(15)), + // med, multiple messages single stream when resumable + ChecksummedTestContent.of(gen.genBytes(_2MiB + 3)), + // large, multiple messages and multiple streams when resumable + ChecksummedTestContent.of(gen.genBytes(_24MiB + 5)), + // quantum aligned number of bytes + ChecksummedTestContent.of(gen.genBytes(_2MiB * 8 + _256KiB))); + } + } + + @Test + public void testCrc32cValidated_createFrom_expectFailure() { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setCrc32c(content.getCrc32cBase64()).build(); + + byte[] bytes = content.concat('x'); + StorageException expected = + assertThrows( + StorageException.class, + () -> + storage.createFrom( + blobInfo, + new ByteArrayInputStream(bytes), + 
BlobWriteOption.doesNotExist(), + BlobWriteOption.crc32cMatch())); + assertThat(expected.getCode()).isEqualTo(400); + } + + @Test + public void testCrc32cValidated_createFrom_expectSuccess() throws IOException { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setCrc32c(content.getCrc32cBase64()).build(); + + byte[] bytes = content.getBytes(); + Blob blob = + storage.createFrom( + blobInfo, + new ByteArrayInputStream(bytes), + BlobWriteOption.doesNotExist(), + BlobWriteOption.crc32cMatch()); + assertThat(blob.getCrc32c()).isEqualTo(content.getCrc32cBase64()); + } + + @Test + public void testCrc32cValidated_createFrom_path_expectFailure() throws IOException { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setCrc32c(content.getCrc32cBase64()).build(); + + try (TmpFile tmpFile = TmpFile.of(tmpDir, "prefix", "bin")) { + try (SeekableByteChannel writer = tmpFile.writer()) { + writer.write(ByteBuffer.wrap(content.concat('x'))); + } + StorageException expected = + assertThrows( + StorageException.class, + () -> + storage.createFrom( + blobInfo, + tmpFile.getPath(), + BlobWriteOption.doesNotExist(), + BlobWriteOption.crc32cMatch())); + assertThat(expected.getCode()).isEqualTo(400); + } + } + + @Test + public void testCrc32cValidated_createFrom_path_expectSuccess() throws IOException { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setCrc32c(content.getCrc32cBase64()).build(); + + try (TmpFile tmpFile = TmpFile.of(tmpDir, "prefix", "bin")) { + try (SeekableByteChannel writer = tmpFile.writer()) { + writer.write(ByteBuffer.wrap(content.getBytes())); + } + + Blob blob = + storage.createFrom( + blobInfo, + tmpFile.getPath(), + 
BlobWriteOption.doesNotExist(), + BlobWriteOption.crc32cMatch()); + assertThat(blob.getCrc32c()).isEqualTo(content.getCrc32cBase64()); + } + } + + @Test + public void testCrc32cValidated_writer_expectFailure() { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setCrc32c(content.getCrc32cBase64()).build(); + + byte[] bytes = content.concat('x'); + StorageException expected = + assertThrows( + StorageException.class, + () -> { + try (ReadableByteChannel src = Channels.newChannel(new ByteArrayInputStream(bytes)); + WriteChannel dst = + storage.writer( + blobInfo, + BlobWriteOption.doesNotExist(), + BlobWriteOption.crc32cMatch())) { + ByteStreams.copy(src, dst); + } + }); + assertThat(expected.getCode()).isEqualTo(400); + } + + @Test + public void testCrc32cValidated_writer_expectSuccess() throws IOException { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setCrc32c(content.getCrc32cBase64()).build(); + + byte[] bytes = content.getBytes(); + + try (ReadableByteChannel src = Channels.newChannel(new ByteArrayInputStream(bytes)); + WriteChannel dst = + storage.writer( + blobInfo, BlobWriteOption.doesNotExist(), BlobWriteOption.crc32cMatch())) { + ByteStreams.copy(src, dst); + } + + Blob blob = storage.get(blobId); + assertThat(blob.getCrc32c()).isEqualTo(content.getCrc32cBase64()); + } + + @Test + public void testMd5Validated_createFrom_expectFailure() { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setMd5(content.getMd5Base64()).build(); + + byte[] bytes = content.concat('x'); + StorageException expected = + assertThrows( + StorageException.class, + () -> + storage.createFrom( + blobInfo, + new ByteArrayInputStream(bytes), + 
BlobWriteOption.doesNotExist(), + BlobWriteOption.md5Match())); + assertThat(expected.getCode()).isEqualTo(400); + } + + @Test + public void testMd5Validated_createFrom_expectSuccess() throws IOException { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setMd5(content.getMd5Base64()).build(); + + byte[] bytes = content.getBytes(); + Blob blob = + storage.createFrom( + blobInfo, + new ByteArrayInputStream(bytes), + BlobWriteOption.doesNotExist(), + BlobWriteOption.md5Match()); + assertThat(blob.getMd5()).isEqualTo(content.getMd5Base64()); + } + + @Test + public void testMd5Validated_writer_expectFailure() { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setMd5(content.getMd5Base64()).build(); + + byte[] bytes = content.concat('x'); + StorageException expected = + assertThrows( + StorageException.class, + () -> { + try (ReadableByteChannel src = Channels.newChannel(new ByteArrayInputStream(bytes)); + WriteChannel dst = + storage.writer( + blobInfo, BlobWriteOption.doesNotExist(), BlobWriteOption.md5Match())) { + ByteStreams.copy(src, dst); + } + }); + assertThat(expected.getCode()).isEqualTo(400); + } + + @Test + public void testMd5Validated_writer_expectSuccess() throws IOException { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setMd5(content.getMd5Base64()).build(); + + byte[] bytes = content.getBytes(); + + try (ReadableByteChannel src = Channels.newChannel(new ByteArrayInputStream(bytes)); + WriteChannel dst = + storage.writer(blobInfo, BlobWriteOption.doesNotExist(), BlobWriteOption.md5Match())) { + ByteStreams.copy(src, dst); + } + + Blob blob = storage.get(blobId); + assertThat(blob.getMd5()).isEqualTo(content.getMd5Base64()); + } + + 
@Test + @CrossRun.Exclude(transports = Transport.HTTP) + public void testCrc32cValidated_bidiWrite_expectSuccess() throws Exception { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setCrc32c(content.getCrc32cBase64()).build(); + + byte[] bytes = content.getBytes(); + + StorageOptions optionsWithBidi = + this.storage.getOptions().toBuilder() + .setBlobWriteSessionConfig(BlobWriteSessionConfigs.bidiWrite()) + .build(); + + try (Storage storage = optionsWithBidi.getService()) { + BlobWriteSession session = + storage.blobWriteSession( + blobInfo, BlobWriteOption.doesNotExist(), BlobWriteOption.crc32cMatch()); + + try (ReadableByteChannel src = Channels.newChannel(new ByteArrayInputStream(bytes)); + WritableByteChannel dst = session.open()) { + ByteStreams.copy(src, dst); + } + + BlobInfo gen1 = session.getResult().get(5, TimeUnit.SECONDS); + assertThat(gen1.getCrc32c()).isEqualTo(content.getCrc32cBase64()); + } + } + + @Test + @CrossRun.Exclude(transports = Transport.HTTP) + public void testCrc32cValidated_bidiWrite_expectFailure() throws Exception { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setCrc32c(content.getCrc32cBase64()).build(); + + byte[] bytes = content.concat('x'); + + StorageOptions optionsWithBidi = + this.storage.getOptions().toBuilder() + .setBlobWriteSessionConfig(BlobWriteSessionConfigs.bidiWrite()) + .build(); + + try (Storage storage = optionsWithBidi.getService()) { + BlobWriteSession session = + storage.blobWriteSession( + blobInfo, BlobWriteOption.doesNotExist(), BlobWriteOption.crc32cMatch()); + + WritableByteChannel dst = session.open(); + try (ReadableByteChannel src = Channels.newChannel(new ByteArrayInputStream(bytes))) { + ByteStreams.copy(src, dst); + } + + StorageException expected = 
assertThrows(StorageException.class, dst::close); + + assertThat(expected.getCode()).isEqualTo(400); + } + } + + @Test + @CrossRun.Exclude(transports = Transport.HTTP) + public void testCrc32cValidated_journaling_expectSuccess() throws Exception { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setCrc32c(content.getCrc32cBase64()).build(); + + byte[] bytes = content.getBytes(); + + try (TmpDir journalingDir = TmpDir.of(tmpDir, testName.getMethodName())) { + StorageOptions options = + this.storage.getOptions().toBuilder() + .setBlobWriteSessionConfig( + BlobWriteSessionConfigs.journaling(ImmutableList.of(journalingDir.getPath()))) + .build(); + + try (Storage storage = options.getService()) { + BlobWriteSession session = + storage.blobWriteSession( + blobInfo, BlobWriteOption.doesNotExist(), BlobWriteOption.crc32cMatch()); + + try (ReadableByteChannel src = Channels.newChannel(new ByteArrayInputStream(bytes)); + WritableByteChannel dst = session.open()) { + ByteStreams.copy(src, dst); + } + + BlobInfo gen1 = session.getResult().get(5, TimeUnit.SECONDS); + assertThat(gen1.getCrc32c()).isEqualTo(content.getCrc32cBase64()); + } + } + } + + @Test + @CrossRun.Exclude(transports = Transport.HTTP) + public void testCrc32cValidated_journaling_expectFailure() throws Exception { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setCrc32c(content.getCrc32cBase64()).build(); + + byte[] bytes = content.concat('x'); + + try (TmpDir journalingDir = TmpDir.of(tmpDir, generator.randomObjectName())) { + StorageOptions options = + this.storage.getOptions().toBuilder() + .setBlobWriteSessionConfig( + BlobWriteSessionConfigs.journaling(ImmutableList.of(journalingDir.getPath()))) + .build(); + + try (Storage storage = options.getService()) { + BlobWriteSession session = + 
storage.blobWriteSession( + blobInfo, BlobWriteOption.doesNotExist(), BlobWriteOption.crc32cMatch()); + + WritableByteChannel dst = session.open(); + try (ReadableByteChannel src = Channels.newChannel(new ByteArrayInputStream(bytes))) { + ByteStreams.copy(src, dst); + } + + StorageException expected = assertThrows(StorageException.class, dst::close); + + assertThat(expected.getCode()).isEqualTo(400); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITObjectTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITObjectTest.java new file mode 100644 index 000000000000..83a7aeb5a912 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITObjectTest.java @@ -0,0 +1,1595 @@ +/* + * Copyright 2015 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.api.gax.paging.Page; +import com.google.cloud.ReadChannel; +import com.google.cloud.RestorableState; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.CopyWriter; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.PackagePrivateMethodWorkarounds; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobField; +import com.google.cloud.storage.Storage.BlobGetOption; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.Storage.BucketField; +import com.google.cloud.storage.Storage.BucketGetOption; +import com.google.cloud.storage.Storage.ComposeRequest; +import com.google.cloud.storage.Storage.CopyRequest; +import com.google.cloud.storage.Storage.PredefinedAcl; +import com.google.cloud.storage.StorageClass; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TransportCompatibility.Transport; +import 
com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.BucketFixture; +import com.google.cloud.storage.it.runner.annotations.BucketType; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.CrossRun.Exclude; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.ParallelFriendly; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterables; +import com.google.common.collect.Iterators; +import com.google.common.hash.Hashing; +import com.google.common.io.BaseEncoding; +import com.google.common.io.ByteStreams; +import com.google.common.primitives.Ints; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.file.Paths; +import java.security.Key; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; +import javax.crypto.spec.SecretKeySpec; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = {Backend.PROD}) +@ParallelFriendly 
+public class ITObjectTest { + + private static final String CONTENT_TYPE = "text/plain"; + private static final byte[] BLOB_BYTE_CONTENT = {0xD, 0xE, 0xA, 0xD}; + private static final String BLOB_STRING_CONTENT = "Hello Google Cloud Storage!"; + private static final String BLOB_STRING_CONTENT_CRC32C = + BaseEncoding.base64() + .encode( + Ints.toByteArray( + Hashing.crc32c().hashBytes(BLOB_STRING_CONTENT.getBytes(UTF_8)).asInt())); + private static final String BASE64_KEY = "JVzfVl8NLD9FjedFuStegjRfES5ll5zc59CIXw572OA="; + private static final String OTHER_BASE64_KEY = "IcOIQGlliNr5pr3vJb63l+XMqc7NjXqjfw/deBoNxPA="; + private static final Key KEY = + new SecretKeySpec(BaseEncoding.base64().decode(BASE64_KEY), "AES256"); + + private static final Long RETENTION_PERIOD = 5L; + private static final Long RETENTION_PERIOD_IN_MILLISECONDS = RETENTION_PERIOD * 1000; + + @Inject public Generator generator; + + @Inject + @BucketFixture(BucketType.DEFAULT) + public BucketInfo bucket; + + @Inject + @BucketFixture(BucketType.REQUESTER_PAYS) + public BucketInfo requesterPaysBucket; + + @Inject + @BucketFixture(BucketType.VERSIONED) + public BucketInfo versionedBucket; + + @Inject public Storage storage; + + @Test + public void testCreateBlob() { + String blobName = generator.randomObjectName(); + BlobInfo blob = + BlobInfo.newBuilder(bucket, blobName).setCustomTime(System.currentTimeMillis()).build(); + Blob remoteBlob = storage.create(blob, BLOB_BYTE_CONTENT, BlobTargetOption.doesNotExist()); + assertNotNull(remoteBlob); + assertNotNull(remoteBlob.getCustomTime()); + assertEquals(blob.getBucket(), remoteBlob.getBucket()); + assertEquals(blob.getName(), remoteBlob.getName()); + byte[] readBytes = storage.readAllBytes(bucket.getName(), blobName); + assertArrayEquals(BLOB_BYTE_CONTENT, readBytes); + assertTrue(remoteBlob.delete()); + } + + @Test + public void testCreateBlobMd5Crc32cFromHexString() { + String blobName = generator.randomObjectName(); + BlobInfo blob = + 
BlobInfo.newBuilder(bucket, blobName) + .setContentType(CONTENT_TYPE) + .setMd5FromHexString("3b54781b51c94835084898e821899585") + .setCrc32cFromHexString("f4ddc43d") + .build(); + Blob remoteBlob = storage.create(blob, BLOB_BYTE_CONTENT, BlobTargetOption.doesNotExist()); + assertNotNull(remoteBlob); + assertEquals(blob.getBucket(), remoteBlob.getBucket()); + assertEquals(blob.getName(), remoteBlob.getName()); + assertEquals(blob.getMd5ToHexString(), remoteBlob.getMd5ToHexString()); + assertEquals(blob.getCrc32cToHexString(), remoteBlob.getCrc32cToHexString()); + byte[] readBytes = storage.readAllBytes(bucket.getName(), blobName); + assertArrayEquals(BLOB_BYTE_CONTENT, readBytes); + assertTrue(remoteBlob.delete()); + } + + @Test + public void testCreateGetBlobWithEncryptionKey() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + Blob remoteBlob = + storage.create( + blob, + BLOB_BYTE_CONTENT, + BlobTargetOption.encryptionKey(KEY), + BlobTargetOption.doesNotExist()); + assertNotNull(remoteBlob); + assertEquals(blob.getBucket(), remoteBlob.getBucket()); + assertEquals(blob.getName(), remoteBlob.getName()); + byte[] readBytes = + storage.readAllBytes( + bucket.getName(), blobName, BlobSourceOption.decryptionKey(BASE64_KEY)); + assertArrayEquals(BLOB_BYTE_CONTENT, readBytes); + remoteBlob = + storage.get( + blob.getBlobId(), + BlobGetOption.decryptionKey(BASE64_KEY), + BlobGetOption.fields(BlobField.CRC32C, BlobField.MD5HASH)); + assertNotNull(remoteBlob.getCrc32c()); + assertNotNull(remoteBlob.getMd5()); + } + + @Test + public void testCreateEmptyBlob() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + Blob remoteBlob = storage.create(blob, BlobTargetOption.doesNotExist()); + assertNotNull(remoteBlob); + assertEquals(blob.getBucket(), remoteBlob.getBucket()); + assertEquals(blob.getName(), remoteBlob.getName()); + byte[] readBytes = 
storage.readAllBytes(bucket.getName(), blobName); + assertArrayEquals(new byte[0], readBytes); + } + + @Test + public void testZeroByteFileUpload() throws Exception { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build(); + + File zeroByteFile = File.createTempFile("zerobyte", null); + zeroByteFile.deleteOnExit(); + + storage.createFrom( + blobInfo, Paths.get(zeroByteFile.getAbsolutePath()), BlobWriteOption.doesNotExist()); + + byte[] readBytes = storage.readAllBytes(bucket.getName(), blobName); + assertArrayEquals(new byte[0], readBytes); + } + + @Test + @SuppressWarnings({"unchecked", "deprecation"}) + public void testCreateBlobStream() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).setContentType(CONTENT_TYPE).build(); + ByteArrayInputStream stream = new ByteArrayInputStream(BLOB_STRING_CONTENT.getBytes(UTF_8)); + Blob remoteBlob = storage.create(blob, stream, BlobWriteOption.doesNotExist()); + assertNotNull(remoteBlob); + assertEquals(blob.getBucket(), remoteBlob.getBucket()); + assertEquals(blob.getName(), remoteBlob.getName()); + assertEquals(blob.getContentType(), remoteBlob.getContentType()); + byte[] readBytes = storage.readAllBytes(bucket.getName(), blobName); + assertEquals(BLOB_STRING_CONTENT, new String(readBytes, UTF_8)); + } + + @Test + @SuppressWarnings({"unchecked", "deprecation"}) + public void testCreateBlobStreamDisableGzipContent() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).setContentType(CONTENT_TYPE).build(); + ByteArrayInputStream stream = new ByteArrayInputStream(BLOB_STRING_CONTENT.getBytes(UTF_8)); + Blob remoteBlob = + storage.create( + blob, stream, BlobWriteOption.disableGzipContent(), BlobWriteOption.doesNotExist()); + assertNotNull(remoteBlob); + assertEquals(blob.getBucket(), remoteBlob.getBucket()); 
+ assertEquals(blob.getName(), remoteBlob.getName()); + assertEquals(blob.getContentType(), remoteBlob.getContentType()); + byte[] readBytes = storage.readAllBytes(bucket.getName(), blobName); + assertEquals(BLOB_STRING_CONTENT, new String(readBytes, UTF_8)); + } + + @Test + public void testCreateBlobFail() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + Blob remoteBlob = storage.create(blob, BlobTargetOption.doesNotExist()); + assertNotNull(remoteBlob); + BlobInfo wrongGenerationBlob = BlobInfo.newBuilder(bucket, blobName, -1L).build(); + try { + storage.create(wrongGenerationBlob, BLOB_BYTE_CONTENT, BlobTargetOption.generationMatch()); + fail("StorageException was expected"); + } catch (StorageException ex) { + // expected + } + } + + @Test + public void testGetBlobEmptySelectedFields() { + + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).setContentType(CONTENT_TYPE).build(); + assertNotNull(storage.create(blob)); + Blob remoteBlob = storage.get(blob.getBlobId(), BlobGetOption.fields()); + assertEquals(blob.getBlobId(), remoteBlob.getBlobId()); + assertNull(remoteBlob.getContentType()); + } + + @Test + public void testGetBlobSelectedFields() { + + String blobName = generator.randomObjectName(); + BlobInfo blob = + BlobInfo.newBuilder(bucket, blobName) + .setContentType(CONTENT_TYPE) + .setMetadata(ImmutableMap.of("k", "v")) + .build(); + assertNotNull(storage.create(blob)); + Blob remoteBlob = storage.get(blob.getBlobId(), BlobGetOption.fields(BlobField.METADATA)); + assertEquals(blob.getBlobId(), remoteBlob.getBlobId()); + assertEquals(ImmutableMap.of("k", "v"), remoteBlob.getMetadata()); + assertNull(remoteBlob.getContentType()); + } + + @Test + public void testGetBlobAllSelectedFields() { + + String blobName = generator.randomObjectName(); + BlobInfo blob = + BlobInfo.newBuilder(bucket, blobName) + .setContentType(CONTENT_TYPE) + 
.setMetadata(ImmutableMap.of("k", "v")) + .build(); + assertNotNull(storage.create(blob)); + Blob remoteBlob = storage.get(blob.getBlobId(), BlobGetOption.fields(BlobField.values())); + assertEquals(blob.getBucket(), remoteBlob.getBucket()); + assertEquals(blob.getName(), remoteBlob.getName()); + assertEquals(ImmutableMap.of("k", "v"), remoteBlob.getMetadata()); + } + + @Test + public void testGetBlobFail() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + Blob remoteBlob = storage.create(blob); + assertNotNull(remoteBlob); + BlobId wrongGenerationBlob = BlobId.of(bucket.getName(), blobName); + try { + storage.get(wrongGenerationBlob, BlobGetOption.generationMatch(-1)); + fail("StorageException was expected"); + } catch (StorageException ex) { + // expected + } + } + + @Test + public void testGetBlobFailNonExistingGeneration() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + Blob remoteBlob = storage.create(blob); + assertNotNull(remoteBlob); + BlobId wrongGenerationBlob = BlobId.of(bucket.getName(), blobName, -1L); + try { + assertNull(storage.get(wrongGenerationBlob)); + fail("Expected an 'Invalid argument' exception"); + } catch (StorageException e) { + assertThat(e.getMessage()).contains("Invalid argument"); + } + } + + @Test + public void testListBlobsSelectedFields() { + String baseName = generator.randomObjectName(); + + String name1 = baseName + "1"; + String name2 = baseName + "2"; + + ImmutableMap metadata = ImmutableMap.of("k", "v"); + BlobInfo blob1 = BlobInfo.newBuilder(bucket, name1).setMetadata(metadata).build(); + BlobInfo blob2 = BlobInfo.newBuilder(bucket, name2).setMetadata(metadata).build(); + Blob remoteBlob1 = storage.create(blob1); + Blob remoteBlob2 = storage.create(blob2); + + ImmutableSet> expected = + Stream.of(remoteBlob1, remoteBlob2) + .map(BlobInfo::getMetadata) + 
.collect(ImmutableSet.toImmutableSet()); + + Page page = + storage.list( + bucket.getName(), + Storage.BlobListOption.prefix(baseName), + Storage.BlobListOption.fields(BlobField.METADATA)); + + ImmutableSet blobs = ImmutableSet.copyOf(page.iterateAll()); + + ImmutableSet> actual = + blobs.stream().map(BlobInfo::getMetadata).collect(ImmutableSet.toImmutableSet()); + + assertThat(actual).isEqualTo(expected); + } + + @Test + public void getBlobReturnNullOn404() { + String bucketName = bucket.getName(); + String objectName = generator.randomObjectName() + "__d_o_e_s__n_o_t__e_x_i_s_t__"; + BlobId id = BlobId.of(bucketName, objectName); + Blob blob = storage.get(id); + assertThat(blob).isNull(); + } + + @Test + public void testListBlobRequesterPays() throws InterruptedException { + String projectId = storage.getOptions().getProjectId(); + + String prefix = generator.randomObjectName(); + BlobInfo blobInfo1 = + BlobInfo.newBuilder(requesterPaysBucket.getName(), prefix + "1") + .setContentType(CONTENT_TYPE) + .build(); + Blob blob1 = storage.create(blobInfo1, BlobTargetOption.userProject(projectId)); + assertNotNull(blob1); + + // Test listing a Requester Pays bucket. 
+ Bucket remoteBucket = + storage.get( + requesterPaysBucket.getName(), + BucketGetOption.fields(BucketField.ID, BucketField.BILLING), + BucketGetOption.userProject(projectId)); + + assertTrue(remoteBucket.requesterPays()); + // TODO: split to own test which modifies a temp bucket + // Bucket updatedBucket = storage.update(remoteBucket); + // assertTrue(updatedBucket.requesterPays()); + try { + storage.list( + requesterPaysBucket.getName(), + BlobListOption.prefix(prefix), + BlobListOption.fields(), + BlobListOption.userProject("fakeBillingProjectId")); + fail("Expected bad user project error."); + } catch (StorageException e) { + assertTrue(e.getMessage().contains("User project specified in the request is invalid")); + } + + Page page = + storage.list( + requesterPaysBucket.getName(), + BlobListOption.prefix(prefix), + BlobListOption.userProject(projectId)); + List blobs = + StreamSupport.stream(page.iterateAll().spliterator(), false) + .map(PackagePrivateMethodWorkarounds::noAcl) + .collect(ImmutableList.toImmutableList()); + // gRPC and json have differing defaults on projections b/258835631 + assertThat(blobs).contains(PackagePrivateMethodWorkarounds.noAcl(blob1)); + } + + @Test + public void testListBlobsVersioned() throws ExecutionException, InterruptedException { + String bucketName = versionedBucket.getName(); + String baseName = generator.randomObjectName(); + String[] blobNames = {baseName + "-blob1", baseName + "-blob2"}; + BlobInfo blob1 = + BlobInfo.newBuilder(versionedBucket, blobNames[0]).setContentType(CONTENT_TYPE).build(); + BlobInfo blob2 = + BlobInfo.newBuilder(versionedBucket, blobNames[1]).setContentType(CONTENT_TYPE).build(); + Blob remoteBlob1 = storage.create(blob1); + Blob remoteBlob2 = storage.create(blob2); + Blob remoteBlob3 = storage.create(blob2); + assertNotNull(remoteBlob1); + assertNotNull(remoteBlob2); + assertNotNull(remoteBlob3); + Page page = + storage.list( + bucketName, BlobListOption.prefix(baseName + "-blob"), 
BlobListOption.versions(true)); + // https://cloud.google.com/storage/docs/consistency#strongly_consistent_operations + // enabling versioning on an existing bucket seems to have some backpressure on when new + // versions can safely be made, but listing is not eventually consistent. + + // TODO: make hermetic + // Listing blobs is eventually consistent, we loop until the list is of the expected size. The + // test fails if timeout is reached. + while (Iterators.size(page.iterateAll().iterator()) != 3) { + Thread.sleep(500); + page = + storage.list( + bucketName, BlobListOption.prefix(baseName + "-blob"), BlobListOption.versions(true)); + } + Set blobSet = ImmutableSet.of(blobNames[0], blobNames[1]); + Iterator iterator = page.iterateAll().iterator(); + while (iterator.hasNext()) { + Blob remoteBlob = iterator.next(); + assertEquals(bucketName, remoteBlob.getBucket()); + assertTrue(blobSet.contains(remoteBlob.getName())); + assertNotNull(remoteBlob.getGeneration()); + } + } + + @Test + public void testListBlobsWithOffset() throws Exception { + String bucketName = bucket.getName(); + String baseName = generator.randomObjectName(); + + List blobs = + IntStream.rangeClosed(0, 2) + .mapToObj(i -> baseName + "-" + i) + .map(n -> BlobInfo.newBuilder(bucket, n).build()) + .map(info -> storage.create(info, BlobTargetOption.doesNotExist())) + .map(BlobInfo::getBlobId) + .collect(Collectors.toList()); + + assertAll( + () -> { + // Listing blobs without BlobListOptions. + Page page1 = storage.list(bucketName, BlobListOption.prefix(baseName)); + assertThat( + page1 + .streamAll() + .map(BlobInfo::getBlobId) + .map(BlobId::toGsUtilUriWithGeneration) + .collect(Collectors.toList())) + .isEqualTo( + blobs.stream() + .map(BlobId::toGsUtilUriWithGeneration) + .collect(Collectors.toList())); + }, + () -> { + // Listing blobs starting from 1. 
+ Page page2 = + storage.list( + bucketName, + BlobListOption.prefix(baseName), + BlobListOption.startOffset(blobs.get(1).getName())); + assertThat( + page2 + .streamAll() + .map(BlobInfo::getBlobId) + .map(BlobId::toGsUtilUriWithGeneration) + .collect(Collectors.toList())) + .isEqualTo( + blobs.stream() + .skip(1) + .map(BlobId::toGsUtilUriWithGeneration) + .collect(Collectors.toList())); + }, + () -> { + // Listing blobs until 2. + Page page3 = + storage.list( + bucketName, + BlobListOption.prefix(baseName), + BlobListOption.endOffset(blobs.get(2).getName())); + assertThat( + page3 + .streamAll() + .map(BlobInfo::getBlobId) + .map(BlobId::toGsUtilUriWithGeneration) + .collect(Collectors.toList())) + .isEqualTo( + blobs.stream() + .limit(2) + .map(BlobId::toGsUtilUriWithGeneration) + .collect(Collectors.toList())); + }, + () -> { + // Listing blobs with startOffset and endOffset. + Page page4 = + storage.list( + bucketName, + BlobListOption.prefix(baseName), + BlobListOption.startOffset(blobs.get(1).getName()), + BlobListOption.endOffset(blobs.get(2).getName())); + assertThat( + page4 + .streamAll() + .map(BlobInfo::getBlobId) + .map(BlobId::toGsUtilUriWithGeneration) + .collect(Collectors.toList())) + .isEqualTo( + blobs.stream() + .skip(1) + .limit(1) + .map(BlobId::toGsUtilUriWithGeneration) + .collect(Collectors.toList())); + }); + } + + @Test + public void testListBlobsCurrentDirectoryIncludesBothObjectsAndSyntheticDirectories() { + String bucketName = bucket.getName(); + String directoryName = generator.randomObjectName(); + String subdirectoryName = "subdirectory"; + + String uriSubDir = + String.format(Locale.US, "gs://%s/%s/%s/", bucketName, directoryName, subdirectoryName); + String uri1 = + String.format( + Locale.US, "gs://%s/%s/%s/blob1", bucketName, directoryName, subdirectoryName); + String uri2 = String.format(Locale.US, "gs://%s/%s/blob2", bucketName, directoryName); + + BlobId id1 = BlobId.fromGsUtilUri(uri1); + BlobId id2 = 
BlobId.fromGsUtilUri(uri2); + BlobId idSubDir = BlobId.fromGsUtilUri(uriSubDir); + + BlobInfo blob1 = BlobInfo.newBuilder(id1).build(); + BlobInfo blob2 = BlobInfo.newBuilder(id2).build(); + BlobInfo obj1Gen1 = storage.create(blob1, BLOB_BYTE_CONTENT).asBlobInfo(); + BlobInfo obj2Gen1 = storage.create(blob2, BLOB_BYTE_CONTENT).asBlobInfo(); + + Page page1 = + storage.list( + bucketName, + BlobListOption.prefix(directoryName + "/"), + BlobListOption.currentDirectory()); + + ImmutableList blobs = ImmutableList.copyOf(page1.iterateAll()); + + ImmutableSet actual = + blobs.stream() + .map(Blob::asBlobInfo) + .map(PackagePrivateMethodWorkarounds::noAcl) + .collect(ImmutableSet.toImmutableSet()); + + // obj1Gen1 is "in subdirectory" and we don't expect to receive it as a result when listing + // object in "the current directory" + assertThat(actual).doesNotContain(obj1Gen1); + + // make sure one of the results we received is the "subdirectory" blob1 is "in" + Optional first = actual.stream().filter(BlobInfo::isDirectory).findFirst(); + assertThat(first.isPresent()).isTrue(); + assertThat(first.get().getBlobId()).isEqualTo(idSubDir); + + assertThat(actual).contains(PackagePrivateMethodWorkarounds.noAcl(obj2Gen1)); + } + + @Test + public void testListBlobsWithMatchGlob() throws Exception { + assertNotNull(storage.create(BlobInfo.newBuilder(bucket, "foo/bar").build())); + assertNotNull(storage.create(BlobInfo.newBuilder(bucket, "foo/baz").build())); + assertNotNull(storage.create(BlobInfo.newBuilder(bucket, "foo/foobar").build())); + assertNotNull(storage.create(BlobInfo.newBuilder(bucket, "foobar").build())); + + Page page1 = storage.list(bucket.getName(), BlobListOption.matchGlob("foo*bar")); + Page page2 = storage.list(bucket.getName(), BlobListOption.matchGlob("foo**bar")); + Page page3 = storage.list(bucket.getName(), BlobListOption.matchGlob("**/foobar")); + Page page4 = storage.list(bucket.getName(), BlobListOption.matchGlob("*/ba[rz]")); + Page page5 = 
storage.list(bucket.getName(), BlobListOption.matchGlob("*/ba[!a-y]")); + Page page6 = storage.list(bucket.getName(), BlobListOption.matchGlob("**/{foobar,baz}")); + Page page7 = storage.list(bucket.getName(), BlobListOption.matchGlob("foo/{foo*,*baz}")); + assertAll( + () -> + assertThat(Iterables.transform(page1.iterateAll(), BlobInfo::getName)) + .containsExactly("foobar") + .inOrder(), + () -> + assertThat(Iterables.transform(page2.iterateAll(), BlobInfo::getName)) + .containsExactly("foo/bar", "foo/foobar", "foobar") + .inOrder(), + () -> + assertThat(Iterables.transform(page3.iterateAll(), BlobInfo::getName)) + .containsExactly("foo/foobar", "foobar") + .inOrder(), + () -> + assertThat(Iterables.transform(page4.iterateAll(), BlobInfo::getName)) + .containsExactly("foo/bar", "foo/baz") + .inOrder(), + () -> + assertThat(Iterables.transform(page5.iterateAll(), BlobInfo::getName)) + .containsExactly("foo/baz") + .inOrder(), + () -> + assertThat(Iterables.transform(page6.iterateAll(), BlobInfo::getName)) + .containsExactly("foo/baz", "foo/foobar", "foobar") + .inOrder(), + () -> + assertThat(Iterables.transform(page7.iterateAll(), BlobInfo::getName)) + .containsExactly("foo/baz", "foo/foobar") + .inOrder()); + } + + @Test + public void testListBlobsMultiplePages() { + String basePath = generator.randomObjectName(); + + ImmutableList expected = + IntStream.rangeClosed(1, 10) + .mapToObj(i -> String.format(Locale.US, "%s/%2d", basePath, i)) + .map(name -> BlobInfo.newBuilder(bucket, name).build()) + .map(info -> storage.create(info, BlobTargetOption.doesNotExist())) + .map(PackagePrivateMethodWorkarounds::noAcl) + .collect(ImmutableList.toImmutableList()); + + Page page = + storage.list(bucket.getName(), BlobListOption.prefix(basePath), BlobListOption.pageSize(3)); + + ImmutableList actual = + ImmutableList.copyOf(page.iterateAll()).stream() + .map(PackagePrivateMethodWorkarounds::noAcl) + .collect(ImmutableList.toImmutableList()); + + try { + 
assertThat(actual).isEqualTo(expected); + } finally { + // delete all the objects we created + expected.stream().map(BlobInfo::getBlobId).forEach(storage::delete); + } + } + + @Test + public void testUpdateBlob() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + Blob remoteBlob = storage.create(blob); + assertNotNull(remoteBlob); + Blob updatedBlob = + remoteBlob.toBuilder() + .setContentType(CONTENT_TYPE) + .build() + .update(BlobTargetOption.metagenerationMatch()); + assertNotNull(updatedBlob); + assertEquals(blob.getName(), updatedBlob.getName()); + assertEquals(blob.getBucket(), updatedBlob.getBucket()); + assertEquals(CONTENT_TYPE, updatedBlob.getContentType()); + } + + @Test + public void testUpdateBlobFail() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + Blob remoteBlob = storage.create(blob); + assertNotNull(remoteBlob); + BlobInfo wrongGenerationBlob = + BlobInfo.newBuilder(bucket, blobName, -1L).setContentType(CONTENT_TYPE).build(); + try { + storage.update( + wrongGenerationBlob, + BlobTargetOption.metagenerationMatch(remoteBlob.getMetageneration())); + fail("StorageException was expected"); + } catch (StorageException ex) { + // expected + } + } + + @Test + public void testDeleteNonExistingBlob() { + String blobName = generator.randomObjectName(); + assertFalse(storage.delete(bucket.getName(), blobName)); + } + + @Test + public void testDeleteBlobNonExistingGeneration() { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + assertNotNull(storage.create(blob)); + try { + assertFalse(storage.delete(BlobId.of(bucket.getName(), blobName, -1L))); + fail("Expected an 'Invalid argument' exception"); + } catch (StorageException e) { + assertThat(e.getMessage()).contains("Invalid argument"); + } + } + + @Test + public void testDeleteBlobFail() { + String 
blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + Blob remoteBlob = storage.create(blob); + assertNotNull(remoteBlob); + try { + storage.delete(bucket.getName(), blob.getName(), BlobSourceOption.generationMatch(-1L)); + fail("StorageException was expected"); + } catch (StorageException ex) { + // expected + } + assertTrue(remoteBlob.delete()); + } + + @Test + public void testComposeBlob() { + String baseName = generator.randomObjectName(); + String sourceBlobName1 = baseName + "-1"; + String sourceBlobName2 = baseName + "-2"; + BlobInfo sourceBlob1 = BlobInfo.newBuilder(bucket, sourceBlobName1).build(); + BlobInfo sourceBlob2 = BlobInfo.newBuilder(bucket, sourceBlobName2).build(); + Blob remoteSourceBlob1 = storage.create(sourceBlob1, BLOB_BYTE_CONTENT); + Blob remoteSourceBlob2 = storage.create(sourceBlob2, BLOB_BYTE_CONTENT); + assertNotNull(remoteSourceBlob1); + assertNotNull(remoteSourceBlob2); + String targetBlobName = baseName + "-target"; + BlobInfo targetBlob = BlobInfo.newBuilder(bucket, targetBlobName).build(); + ComposeRequest req = + ComposeRequest.of(ImmutableList.of(sourceBlobName1, sourceBlobName2), targetBlob); + Blob remoteTargetBlob = storage.compose(req); + assertNotNull(remoteTargetBlob); + assertEquals(targetBlob.getName(), remoteTargetBlob.getName()); + assertEquals(targetBlob.getBucket(), remoteTargetBlob.getBucket()); + byte[] readBytes = storage.readAllBytes(bucket.getName(), targetBlobName); + byte[] composedBytes = Arrays.copyOf(BLOB_BYTE_CONTENT, BLOB_BYTE_CONTENT.length * 2); + System.arraycopy( + BLOB_BYTE_CONTENT, 0, composedBytes, BLOB_BYTE_CONTENT.length, BLOB_BYTE_CONTENT.length); + assertArrayEquals(composedBytes, readBytes); + } + + @Test + public void testComposeBlobWithContentType() { + String baseName = generator.randomObjectName(); + String sourceBlobName1 = baseName + "-source-1"; + String sourceBlobName2 = baseName + "-source-2"; + BlobInfo sourceBlob1 = 
BlobInfo.newBuilder(bucket, sourceBlobName1).build(); + BlobInfo sourceBlob2 = BlobInfo.newBuilder(bucket, sourceBlobName2).build(); + Blob remoteSourceBlob1 = storage.create(sourceBlob1, BLOB_BYTE_CONTENT); + Blob remoteSourceBlob2 = storage.create(sourceBlob2, BLOB_BYTE_CONTENT); + assertNotNull(remoteSourceBlob1); + assertNotNull(remoteSourceBlob2); + String targetBlobName = baseName + "-target"; + BlobInfo targetBlob = + BlobInfo.newBuilder(bucket, targetBlobName).setContentType(CONTENT_TYPE).build(); + ComposeRequest req = + ComposeRequest.of(ImmutableList.of(sourceBlobName1, sourceBlobName2), targetBlob); + Blob remoteTargetBlob = storage.compose(req); + assertNotNull(remoteTargetBlob); + assertEquals(targetBlob.getName(), remoteTargetBlob.getName()); + assertEquals(targetBlob.getBucket(), remoteTargetBlob.getBucket()); + assertEquals(CONTENT_TYPE, remoteTargetBlob.getContentType()); + byte[] readBytes = storage.readAllBytes(bucket.getName(), targetBlobName); + byte[] composedBytes = Arrays.copyOf(BLOB_BYTE_CONTENT, BLOB_BYTE_CONTENT.length * 2); + System.arraycopy( + BLOB_BYTE_CONTENT, 0, composedBytes, BLOB_BYTE_CONTENT.length, BLOB_BYTE_CONTENT.length); + assertArrayEquals(composedBytes, readBytes); + } + + @Test + public void testComposeBlobFail() { + String baseName = generator.randomObjectName(); + String sourceBlobName1 = baseName + "-source-1"; + String sourceBlobName2 = baseName + "-source-2"; + BlobInfo sourceBlob1 = BlobInfo.newBuilder(bucket, sourceBlobName1).build(); + BlobInfo sourceBlob2 = BlobInfo.newBuilder(bucket, sourceBlobName2).build(); + Blob remoteSourceBlob1 = storage.create(sourceBlob1); + Blob remoteSourceBlob2 = storage.create(sourceBlob2); + assertNotNull(remoteSourceBlob1); + assertNotNull(remoteSourceBlob2); + String targetBlobName = baseName + "-target"; + BlobInfo targetBlob = BlobInfo.newBuilder(bucket, targetBlobName).build(); + ComposeRequest req = + ComposeRequest.newBuilder() + .addSource(sourceBlobName1, -1L) + 
.addSource(sourceBlobName2, -1L) + .setTarget(targetBlob) + .build(); + try { + storage.compose(req); + fail("StorageException was expected"); + } catch (StorageException ex) { + // expected + } + } + + @Test + public void testCopyBlob() { + + String sourceBlobName = generator.randomObjectName() + "-source"; + BlobId source = BlobId.of(bucket.getName(), sourceBlobName); + ImmutableMap metadata = ImmutableMap.of("k", "v"); + BlobInfo blob = + BlobInfo.newBuilder(source).setContentType(CONTENT_TYPE).setMetadata(metadata).build(); + Blob remoteBlob = storage.create(blob, BLOB_BYTE_CONTENT); + assertNotNull(remoteBlob); + String targetBlobName = generator.randomObjectName() + "-target"; + CopyRequest req = CopyRequest.of(source, BlobId.of(bucket.getName(), targetBlobName)); + CopyWriter copyWriter = storage.copy(req); + Blob gen1 = copyWriter.getResult(); + assertEquals(bucket.getName(), gen1.getBucket()); + assertEquals(targetBlobName, gen1.getName()); + assertEquals(CONTENT_TYPE, gen1.getContentType()); + assertEquals(metadata, gen1.getMetadata()); + assertTrue(copyWriter.isDone()); + assertTrue(remoteBlob.delete()); + assertTrue(storage.delete(gen1.getBlobId())); + } + + @Test + public void copyBlob_classChange_multipleChunks() { + + String sourceBlobName = generator.randomObjectName() + "-source"; + BlobId source = BlobId.of(bucket.getName(), sourceBlobName); + ImmutableMap metadata = ImmutableMap.of("k", "v"); + BlobInfo blob = BlobInfo.newBuilder(source).setMetadata(metadata).build(); + int _5MiB = 5 * 1024 * 1024; + byte[] bytes = DataGenerator.base64Characters().genBytes(_5MiB); + Blob remoteBlob = storage.create(blob, bytes); + assertThat(remoteBlob).isNotNull(); + String targetBlobName = generator.randomObjectName() + "-target"; + CopyRequest req = + CopyRequest.newBuilder() + .setSource(source) + .setTarget( + BlobInfo.newBuilder(bucket, targetBlobName) + // change the storage class to force GCS to copy bytes + .setStorageClass(StorageClass.NEARLINE) + 
.build(), + BlobTargetOption.doesNotExist()) + .setMegabytesCopiedPerChunk(2L) + .build(); + CopyWriter copyWriter = storage.copy(req); + BlobInfo remoteBlob2 = copyWriter.getResult(); + assertThat(copyWriter.isDone()).isTrue(); + assertThat(remoteBlob2).isNotNull(); + } + + @Test + @Ignore("TODO: fix b/468377909 to enable test again") + public void testCopyBlobWithPredefinedAcl() { + + String sourceBlobName = generator.randomObjectName() + "-source"; + BlobId source = BlobId.of(bucket.getName(), sourceBlobName); + ImmutableMap metadata = ImmutableMap.of("k", "v"); + BlobInfo blob = + BlobInfo.newBuilder(source).setContentType(CONTENT_TYPE).setMetadata(metadata).build(); + Blob remoteBlob = storage.create(blob, BLOB_BYTE_CONTENT); + assertNotNull(remoteBlob); + String targetBlobName = generator.randomObjectName() + "-target"; + CopyRequest req = + CopyRequest.newBuilder() + .setSource(source) + .setTarget( + BlobId.of(bucket.getName(), targetBlobName), + BlobTargetOption.predefinedAcl(PredefinedAcl.PUBLIC_READ), + BlobTargetOption.doesNotExist()) + .build(); + CopyWriter copyWriter = storage.copy(req); + Blob gen1 = copyWriter.getResult(); + assertEquals(bucket.getName(), gen1.getBucket()); + assertEquals(targetBlobName, gen1.getName()); + assertEquals(CONTENT_TYPE, gen1.getContentType()); + assertEquals(metadata, gen1.getMetadata()); + assertNotNull(gen1.getAcl(User.ofAllUsers())); + assertTrue(copyWriter.isDone()); + assertTrue(remoteBlob.delete()); + assertTrue(storage.delete(gen1.getBlobId())); + } + + @Test + public void testCopyBlobWithEncryptionKeys() { + + String sourceBlobName = generator.randomObjectName() + "-source"; + BlobId source = BlobId.of(bucket.getName(), sourceBlobName); + ImmutableMap metadata = ImmutableMap.of("k", "v"); + Blob remoteBlob = + storage.create( + BlobInfo.newBuilder(source).build(), + BLOB_BYTE_CONTENT, + BlobTargetOption.encryptionKey(KEY)); + assertNotNull(remoteBlob); + String targetBlobName = generator.randomObjectName() + 
"-target"; + BlobInfo target = + BlobInfo.newBuilder(bucket, targetBlobName) + .setContentType(CONTENT_TYPE) + .setMetadata(metadata) + .build(); + CopyRequest req1 = + CopyRequest.newBuilder() + .setSource(source) + .setTarget( + target, + BlobTargetOption.encryptionKey(OTHER_BASE64_KEY), + BlobTargetOption.doesNotExist()) + .setSourceOptions(BlobSourceOption.decryptionKey(BASE64_KEY)) + .build(); + CopyWriter copyWriter1 = storage.copy(req1); + Blob copy1Gen1 = copyWriter1.getResult(); + assertEquals(bucket.getName(), copy1Gen1.getBucket()); + assertEquals(targetBlobName, copy1Gen1.getName()); + assertEquals(CONTENT_TYPE, copy1Gen1.getContentType()); + assertArrayEquals( + BLOB_BYTE_CONTENT, + copy1Gen1.getContent(Blob.BlobSourceOption.decryptionKey(OTHER_BASE64_KEY))); + assertEquals(metadata, copy1Gen1.getMetadata()); + assertTrue(copyWriter1.isDone()); + CopyRequest req2 = + CopyRequest.newBuilder() + .setSource(source) + .setTarget(target) + .setSourceOptions(BlobSourceOption.decryptionKey(BASE64_KEY)) + .build(); + CopyWriter copyWriter2 = storage.copy(req2); + Blob copy2Gen1 = copyWriter2.getResult(); + assertEquals(bucket.getName(), copy2Gen1.getBucket()); + assertEquals(targetBlobName, copy2Gen1.getName()); + assertEquals(CONTENT_TYPE, copy2Gen1.getContentType()); + assertArrayEquals(BLOB_BYTE_CONTENT, copy2Gen1.getContent()); + assertEquals(metadata, copy2Gen1.getMetadata()); + assertTrue(copyWriter2.isDone()); + assertTrue(remoteBlob.delete()); + assertTrue(storage.delete(copy2Gen1.getBlobId())); + } + + @Test + public void testCopyBlobUpdateMetadata() { + + String sourceBlobName = generator.randomObjectName() + "-source"; + BlobId source = BlobId.of(bucket.getName(), sourceBlobName); + Blob remoteSourceBlob = storage.create(BlobInfo.newBuilder(source).build(), BLOB_BYTE_CONTENT); + assertNotNull(remoteSourceBlob); + String targetBlobName = generator.randomObjectName() + "-target"; + ImmutableMap metadata = ImmutableMap.of("k", "v"); + BlobInfo target = 
+ BlobInfo.newBuilder(bucket, targetBlobName) + .setContentType(CONTENT_TYPE) + .setMetadata(metadata) + .build(); + CopyRequest req = + CopyRequest.newBuilder() + .setSource(source) + .setTarget(target, BlobTargetOption.doesNotExist()) + .build(); + CopyWriter copyWriter = storage.copy(req); + Blob gen1 = copyWriter.getResult(); + assertEquals(bucket.getName(), gen1.getBucket()); + assertEquals(targetBlobName, gen1.getName()); + assertEquals(CONTENT_TYPE, gen1.getContentType()); + assertEquals(metadata, gen1.getMetadata()); + assertTrue(copyWriter.isDone()); + assertTrue(remoteSourceBlob.delete()); + assertTrue(storage.delete(gen1.getBlobId())); + } + + @Test + public void testCopyBlobFail() { + + String baseName = generator.randomObjectName(); + String sourceBlobName = baseName + "-source-fail"; + BlobId source = BlobId.of(bucket.getName(), sourceBlobName, -1L); + Blob remoteSourceBlob = storage.create(BlobInfo.newBuilder(source).build(), BLOB_BYTE_CONTENT); + assertNotNull(remoteSourceBlob); + String targetBlobName = baseName + "-target-fail"; + BlobInfo target = + BlobInfo.newBuilder(bucket, targetBlobName).setContentType(CONTENT_TYPE).build(); + CopyRequest req = + CopyRequest.newBuilder() + .setSource(bucket.getName(), sourceBlobName) + .setSourceOptions(BlobSourceOption.generationMatch(-1L)) + .setTarget(target, BlobTargetOption.doesNotExist()) + .build(); + try { + storage.copy(req); + fail("StorageException was expected"); + } catch (StorageException ex) { + // expected + } + CopyRequest req2 = + CopyRequest.newBuilder() + .setSource(source) + .setSourceOptions(BlobSourceOption.generationMatch()) + .setTarget(target) + .build(); + try { + storage.copy(req2); + fail("StorageException was expected"); + } catch (StorageException ex) { + // expected + } + } + + @Test + public void testReadAndWriteChannelWithEncryptionKey() throws IOException { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + 
byte[] stringBytes; + try (WriteChannel writer = + storage.writer( + blob, BlobWriteOption.encryptionKey(BASE64_KEY), BlobWriteOption.doesNotExist())) { + stringBytes = BLOB_STRING_CONTENT.getBytes(UTF_8); + writer.write(ByteBuffer.wrap(BLOB_BYTE_CONTENT)); + writer.write(ByteBuffer.wrap(stringBytes)); + } + ByteBuffer readBytes; + ByteBuffer readStringBytes; + try (ReadChannel reader = + storage.reader(blob.getBlobId(), BlobSourceOption.decryptionKey(KEY))) { + readBytes = ByteBuffer.allocate(BLOB_BYTE_CONTENT.length); + readStringBytes = ByteBuffer.allocate(stringBytes.length); + reader.read(readBytes); + reader.read(readStringBytes); + } + assertArrayEquals(BLOB_BYTE_CONTENT, readBytes.array()); + assertEquals(BLOB_STRING_CONTENT, new String(readStringBytes.array(), UTF_8)); + assertTrue(storage.delete(bucket.getName(), blobName)); + } + + @Test + public void testReadAndWriteChannelsWithDifferentFileSize_0B() throws IOException { + doTestReadAndWriteChannelsWithSize(0); + } + + @Test + public void testReadAndWriteChannelsWithDifferentFileSize_700B() throws IOException { + doTestReadAndWriteChannelsWithSize(700); + } + + @Test + public void testReadAndWriteChannelsWithDifferentFileSize_8193B() throws IOException { + doTestReadAndWriteChannelsWithSize((8 * 1024) + 1); + } + + @Test + public void testReadAndWriteChannelsWithDifferentFileSize_256KiB() throws IOException { + doTestReadAndWriteChannelsWithSize(256 * 1024); + } + + @Test + public void testReadAndWriteChannelsWithDifferentFileSize_2MiB() throws IOException { + doTestReadAndWriteChannelsWithSize(2 * 1024 * 1024); + } + + @Test + public void testReadAndWriteChannelsWithDifferentFileSize_4MiB() throws IOException { + doTestReadAndWriteChannelsWithSize(4 * 1024 * 1024); + } + + @Test + public void testReadAndWriteChannelsWithDifferentFileSize_4MiB_plus1() throws IOException { + doTestReadAndWriteChannelsWithSize((4 * 1024 * 1024) + 1); + } + + private void doTestReadAndWriteChannelsWithSize(int blobSize) throws 
IOException { + String blobName = String.format(Locale.US, "%s-%d", generator.randomObjectName(), blobSize); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + Random rnd = new Random(); + byte[] bytes = new byte[blobSize]; + rnd.nextBytes(bytes); + try (WriteChannel writer = storage.writer(blob, BlobWriteOption.doesNotExist())) { + writer.write(ByteBuffer.wrap(bytes)); + } + ByteArrayOutputStream output = new ByteArrayOutputStream(); + try (ReadChannel reader = storage.reader(blob.getBlobId())) { + ByteStreams.copy(reader, Channels.newChannel(output)); + } + byte[] actual = output.toByteArray(); + assertThat(actual).isEqualTo(bytes); + assertTrue(storage.delete(bucket.getName(), blobName)); + } + + @Test + // Capture not implemented yet + @Exclude(transports = Transport.GRPC) + public void testReadAndWriteCaptureChannels() throws IOException { + + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); + byte[] stringBytes; + WriteChannel writer = storage.writer(blob, BlobWriteOption.doesNotExist()); + stringBytes = BLOB_STRING_CONTENT.getBytes(UTF_8); + writer.write(ByteBuffer.wrap(BLOB_BYTE_CONTENT)); + RestorableState writerState = writer.capture(); + WriteChannel secondWriter = writerState.restore(); + secondWriter.write(ByteBuffer.wrap(stringBytes)); + secondWriter.close(); + ByteBuffer readBytes; + ByteBuffer readStringBytes; + ReadChannel reader = storage.reader(blob.getBlobId()); + reader.setChunkSize(BLOB_BYTE_CONTENT.length); + readBytes = ByteBuffer.allocate(BLOB_BYTE_CONTENT.length); + reader.read(readBytes); + RestorableState readerState = reader.capture(); + ReadChannel secondReader = readerState.restore(); + readStringBytes = ByteBuffer.allocate(stringBytes.length); + secondReader.read(readStringBytes); + reader.close(); + secondReader.close(); + assertArrayEquals(BLOB_BYTE_CONTENT, readBytes.array()); + assertEquals(BLOB_STRING_CONTENT, new String(readStringBytes.array(), 
UTF_8)); + assertTrue(storage.delete(bucket.getName(), blobName)); + } + + @Test + // Only supported in JSON right now + @Exclude(transports = Transport.GRPC) + public void testGetBlobs() { + String sourceBlobName1 = generator.randomObjectName(); + String sourceBlobName2 = generator.randomObjectName(); + BlobInfo sourceBlob1 = BlobInfo.newBuilder(bucket, sourceBlobName1).build(); + BlobInfo sourceBlob2 = BlobInfo.newBuilder(bucket, sourceBlobName2).build(); + assertNotNull(storage.create(sourceBlob1)); + assertNotNull(storage.create(sourceBlob2)); + List remoteBlobs = storage.get(sourceBlob1.getBlobId(), sourceBlob2.getBlobId()); + assertEquals(sourceBlob1.getBucket(), remoteBlobs.get(0).getBucket()); + assertEquals(sourceBlob1.getName(), remoteBlobs.get(0).getName()); + assertEquals(sourceBlob2.getBucket(), remoteBlobs.get(1).getBucket()); + assertEquals(sourceBlob2.getName(), remoteBlobs.get(1).getName()); + } + + @Test + // Only supported in JSON right now + @Exclude(transports = Transport.GRPC) + public void testGetBlobsFail() { + + String sourceBlobName1 = generator.randomObjectName(); + String sourceBlobName2 = generator.randomObjectName(); + BlobInfo sourceBlob1 = BlobInfo.newBuilder(bucket, sourceBlobName1).build(); + BlobInfo sourceBlob2 = BlobInfo.newBuilder(bucket, sourceBlobName2).build(); + assertNotNull(storage.create(sourceBlob1)); + List remoteBlobs = storage.get(sourceBlob1.getBlobId(), sourceBlob2.getBlobId()); + assertEquals(sourceBlob1.getBucket(), remoteBlobs.get(0).getBucket()); + assertEquals(sourceBlob1.getName(), remoteBlobs.get(0).getName()); + assertNull(remoteBlobs.get(1)); + } + + @Test + // Only supported in JSON right now + @Exclude(transports = Transport.GRPC) + public void testDeleteBlobs() { + + String sourceBlobName1 = generator.randomObjectName(); + String sourceBlobName2 = generator.randomObjectName(); + BlobInfo sourceBlob1 = BlobInfo.newBuilder(bucket, sourceBlobName1).build(); + BlobInfo sourceBlob2 = 
BlobInfo.newBuilder(bucket, sourceBlobName2).build(); + assertNotNull(storage.create(sourceBlob1)); + assertNotNull(storage.create(sourceBlob2)); + List deleteStatus = storage.delete(sourceBlob1.getBlobId(), sourceBlob2.getBlobId()); + assertTrue(deleteStatus.get(0)); + assertTrue(deleteStatus.get(1)); + } + + @Test + // Only supported in JSON right now + @Exclude(transports = Transport.GRPC) + public void testDeleteBlobsFail() { + String sourceBlobName1 = generator.randomObjectName(); + String sourceBlobName2 = generator.randomObjectName(); + BlobInfo sourceBlob1 = BlobInfo.newBuilder(bucket, sourceBlobName1).build(); + BlobInfo sourceBlob2 = BlobInfo.newBuilder(bucket, sourceBlobName2).build(); + assertNotNull(storage.create(sourceBlob1)); + List deleteStatus = storage.delete(sourceBlob1.getBlobId(), sourceBlob2.getBlobId()); + assertTrue(deleteStatus.get(0)); + assertFalse(deleteStatus.get(1)); + } + + @Test + public void testDeleteBlob() { + String sourceBlobName = generator.randomObjectName(); + BlobInfo sourceBlob = BlobInfo.newBuilder(bucket, sourceBlobName).build(); + assertNotNull(storage.create(sourceBlob)); + boolean result = storage.delete(sourceBlob.getBlobId()); + assertTrue(result); + } + + @Test + // Only supported in JSON right now + @Exclude(transports = Transport.GRPC) + public void testUpdateBlobs() { + + String sourceBlobName1 = generator.randomObjectName(); + String sourceBlobName2 = generator.randomObjectName(); + BlobInfo sourceBlob1 = BlobInfo.newBuilder(bucket, sourceBlobName1).build(); + BlobInfo sourceBlob2 = BlobInfo.newBuilder(bucket, sourceBlobName2).build(); + Blob remoteBlob1 = storage.create(sourceBlob1); + Blob remoteBlob2 = storage.create(sourceBlob2); + assertNotNull(remoteBlob1); + assertNotNull(remoteBlob2); + List updatedBlobs = + storage.update( + remoteBlob1.toBuilder().setContentType(CONTENT_TYPE).build(), + remoteBlob2.toBuilder().setContentType(CONTENT_TYPE).build()); + assertEquals(sourceBlob1.getBucket(), 
updatedBlobs.get(0).getBucket()); + assertEquals(sourceBlob1.getName(), updatedBlobs.get(0).getName()); + assertEquals(CONTENT_TYPE, updatedBlobs.get(0).getContentType()); + assertEquals(sourceBlob2.getBucket(), updatedBlobs.get(1).getBucket()); + assertEquals(sourceBlob2.getName(), updatedBlobs.get(1).getName()); + assertEquals(CONTENT_TYPE, updatedBlobs.get(1).getContentType()); + } + + @Test + // Only supported in JSON right now + @Exclude(transports = Transport.GRPC) + public void testUpdateBlobsFail() { + + String sourceBlobName1 = generator.randomObjectName(); + String sourceBlobName2 = generator.randomObjectName(); + BlobInfo sourceBlob1 = BlobInfo.newBuilder(bucket, sourceBlobName1).build(); + BlobInfo sourceBlob2 = BlobInfo.newBuilder(bucket, sourceBlobName2).build(); + BlobInfo remoteBlob1 = storage.create(sourceBlob1); + assertNotNull(remoteBlob1); + List updatedBlobs = + storage.update( + remoteBlob1.toBuilder().setContentType(CONTENT_TYPE).build(), + sourceBlob2.toBuilder().setContentType(CONTENT_TYPE).build()); + assertEquals(sourceBlob1.getBucket(), updatedBlobs.get(0).getBucket()); + assertEquals(sourceBlob1.getName(), updatedBlobs.get(0).getName()); + assertEquals(CONTENT_TYPE, updatedBlobs.get(0).getContentType()); + assertNull(updatedBlobs.get(1)); + } + + @Test + public void testAttemptObjectDeleteWithRetentionPolicy() + throws ExecutionException, InterruptedException { + String bucketName = generator.randomBucketName(); + Bucket remoteBucket = + storage.create( + BucketInfo.newBuilder(bucketName).setRetentionPeriod(RETENTION_PERIOD).build()); + assertEquals(RETENTION_PERIOD, remoteBucket.getRetentionPeriod()); + String blobName = generator.randomObjectName(); + BlobInfo blobInfo = BlobInfo.newBuilder(bucketName, blobName).build(); + Blob remoteBlob = storage.create(blobInfo); + assertNotNull(remoteBlob.getRetentionExpirationTime()); + try { + remoteBlob.delete(); + fail("Expected failure on delete from retentionPolicy"); + } catch 
(StorageException ex) { + // expected + } finally { + Thread.sleep(RETENTION_PERIOD_IN_MILLISECONDS); + BucketCleaner.doCleanup(bucketName, storage); + } + } + + @Test + public void testEnableDisableTemporaryHold() { + String blobName = generator.randomObjectName(); + BlobInfo blobInfo = BlobInfo.newBuilder(bucket, blobName).setTemporaryHold(true).build(); + Blob remoteBlob = storage.create(blobInfo, BlobTargetOption.doesNotExist()); + assertTrue(remoteBlob.getTemporaryHold()); + remoteBlob = + storage.get( + remoteBlob.getBlobId(), + BlobGetOption.fields(BlobField.TEMPORARY_HOLD, BlobField.METAGENERATION)); + assertTrue(remoteBlob.getTemporaryHold()); + remoteBlob = + remoteBlob.toBuilder() + .setTemporaryHold(false) + .build() + .update(BlobTargetOption.metagenerationMatch()); + assertFalse(remoteBlob.getTemporaryHold()); + } + + @Test + public void testAttemptObjectDeleteWithEventBasedHold() { + String blobName = generator.randomObjectName(); + BlobInfo blobInfo = BlobInfo.newBuilder(bucket, blobName).setEventBasedHold(true).build(); + Blob remoteBlob = storage.create(blobInfo, BlobTargetOption.doesNotExist()); + assertTrue(remoteBlob.getEventBasedHold()); + try { + remoteBlob.delete(); + fail("Expected failure on delete from eventBasedHold"); + } catch (StorageException ex) { + // expected + } finally { + remoteBlob.toBuilder().setEventBasedHold(false).build().update(); + } + } + + @Test + public void testAttemptDeletionObjectTemporaryHold() { + String blobName = generator.randomObjectName(); + BlobInfo blobInfo = BlobInfo.newBuilder(bucket, blobName).setTemporaryHold(true).build(); + Blob remoteBlob = storage.create(blobInfo, BlobTargetOption.doesNotExist()); + assertTrue(remoteBlob.getTemporaryHold()); + try { + remoteBlob.delete(); + fail("Expected failure on delete from temporaryHold"); + } catch (StorageException ex) { + // expected + } finally { + remoteBlob.toBuilder().setTemporaryHold(false).build().update(); + } + } + + @Test + public void 
testBlobReload() throws Exception { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build(); + Blob blob = storage.create(blobInfo, new byte[] {0, 1, 2}, BlobTargetOption.doesNotExist()); + + Blob blobUnchanged = blob.reload(); + // gRPC and json have differing defaults on projections b/258835631 + assertThat(blobUnchanged).isAnyOf(blob, PackagePrivateMethodWorkarounds.noAcl(blob)); + + blob.writer().close(); + try { + blob.reload(Blob.BlobSourceOption.generationMatch()); + fail("StorageException was expected"); + } catch (StorageException e) { + assertEquals(412, e.getCode()); + } + + Blob updated = blob.reload(); + assertEquals(blob.getBucket(), updated.getBucket()); + assertEquals(blob.getName(), updated.getName()); + assertNotEquals(blob.getGeneration(), updated.getGeneration()); + assertEquals(new Long(0), updated.getSize()); + + updated.delete(); + assertNull(updated.reload()); + } + + @Test + public void testUploadWithEncryption() throws Exception { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build(); + + ByteArrayInputStream content = new ByteArrayInputStream(BLOB_BYTE_CONTENT); + Blob blob = + storage.createFrom( + blobInfo, content, BlobWriteOption.encryptionKey(KEY), BlobWriteOption.doesNotExist()); + + try { + blob.getContent(); + fail("StorageException was expected"); + } catch (StorageException e) { + String expectedMessage = + "The target object is encrypted by a customer-supplied encryption key."; + assertTrue(e.getMessage().contains(expectedMessage)); + assertEquals(400, e.getCode()); + } + byte[] readBytes = blob.getContent(Blob.BlobSourceOption.decryptionKey(KEY)); + assertArrayEquals(BLOB_BYTE_CONTENT, readBytes); + } + + private Blob createBlob(String method, BlobInfo blobInfo, boolean detectType) throws IOException { + switch 
(method) { + case "create": + return detectType + ? storage.create( + blobInfo, BlobTargetOption.detectContentType(), BlobTargetOption.doesNotExist()) + : storage.create(blobInfo, BlobTargetOption.doesNotExist()); + case "createFrom": + InputStream inputStream = new ByteArrayInputStream(BLOB_BYTE_CONTENT); + return detectType + ? storage.createFrom( + blobInfo, + inputStream, + BlobWriteOption.detectContentType(), + BlobWriteOption.doesNotExist()) + : storage.createFrom(blobInfo, inputStream, BlobWriteOption.doesNotExist()); + case "writer": + if (detectType) { + storage + .writer(blobInfo, BlobWriteOption.detectContentType(), BlobWriteOption.doesNotExist()) + .close(); + } else { + storage.writer(blobInfo, BlobWriteOption.doesNotExist()).close(); + } + return storage.get(BlobId.of(blobInfo.getBucket(), blobInfo.getName())); + default: + throw new IllegalArgumentException("Unknown method " + method); + } + } + + @Test + public void testBlobTimeStorageClassUpdated() { + + String blobName = generator.randomObjectName(); + StorageClass storageClass = StorageClass.COLDLINE; + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).setStorageClass(storageClass).build(); + Blob remoteBlob = storage.create(blob, BlobTargetOption.doesNotExist()); + assertThat(remoteBlob).isNotNull(); + assertEquals(blob.getBucket(), remoteBlob.getBucket()); + assertThat(remoteBlob.getName()).isEqualTo(blob.getName()); + assertThat(remoteBlob.getCreateTime()).isNotNull(); + assertThat(remoteBlob.getUpdateTime()).isEqualTo(remoteBlob.getCreateTime()); + assertThat(remoteBlob.getTimeStorageClassUpdated()).isEqualTo(remoteBlob.getCreateTime()); + + // We can't change an object's storage class directly, the only way is to rewrite the object + // with the desired storage class. 
+ BlobId blobId = BlobId.of(bucket.getName(), blobName); + CopyRequest request = + CopyRequest.newBuilder() + .setSource(blobId) + .setTarget( + BlobInfo.newBuilder(blobId).setStorageClass(StorageClass.STANDARD).build(), + BlobTargetOption.generationMatch(remoteBlob.getGeneration())) + .build(); + Blob updatedBlob1 = storage.copy(request).getResult(); + assertThat(updatedBlob1.getTimeStorageClassUpdated()).isNotNull(); + assertThat(updatedBlob1.getCreateTime()).isGreaterThan(remoteBlob.getCreateTime()); + assertThat(updatedBlob1.getUpdateTime()).isGreaterThan(remoteBlob.getCreateTime()); + assertThat(updatedBlob1.getTimeStorageClassUpdated()) + .isGreaterThan(remoteBlob.getTimeStorageClassUpdated()); + + // Updates the other properties of the blob's to check the difference between blob updateTime + // and timeStorageClassUpdated. + Blob updatedBlob2 = + updatedBlob1.toBuilder() + .setContentType(CONTENT_TYPE) + .build() + .update(BlobTargetOption.metagenerationMatch()); + assertThat(updatedBlob2.getUpdateTime()) + .isGreaterThan(updatedBlob2.getTimeStorageClassUpdated()); + assertThat(updatedBlob2.getTimeStorageClassUpdated()) + .isEqualTo(updatedBlob1.getTimeStorageClassUpdated()); + assertThat(updatedBlob2.delete()).isTrue(); + } + + @Test + public void testUpdateBlob_noModification() { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + + // in grpc, create will return acls but update does not. 
re-get the metadata with default fields + Blob gen1 = storage.create(info, BlobTargetOption.doesNotExist()); + gen1 = storage.get(gen1.getBlobId()); + Blob gen2 = storage.update(gen1, BlobTargetOption.metagenerationMatch()); + assertThat(gen2).isEqualTo(gen1); + } + + @Test + public void blob_update() throws Exception { + ImmutableMap<@NonNull String, @NonNull String> meta1 = ImmutableMap.of("k1", "v1"); + ImmutableMap<@NonNull String, @NonNull String> meta2 = ImmutableMap.of("k1", "v2"); + ImmutableMap<@NonNull String, @NonNull String> meta3 = ImmutableMap.of("k1", "v1", "k2", "n1"); + + String randomObjectName = generator.randomObjectName(); + BlobInfo info1 = + BlobInfo.newBuilder(versionedBucket, randomObjectName).setMetadata(meta1).build(); + BlobInfo info2 = + BlobInfo.newBuilder(versionedBucket, randomObjectName).setMetadata(meta2).build(); + + BlobInfo gen1 = storage.create(info1, BlobTargetOption.doesNotExist()); + BlobInfo gen2 = storage.create(info2, BlobTargetOption.generationMatch(gen1.getGeneration())); + + BlobInfo update1 = gen1.toBuilder().setMetadata(meta3).build(); + + BlobInfo gen1_2 = storage.update(update1); + + assertAll( + () -> assertThat(gen1_2.getMetadata()).isEqualTo(meta3), + () -> assertThat(gen1_2.getGeneration()).isEqualTo(gen1.getGeneration())); + } + + @Test + public void listBlob_includeTrailingDelimiter() throws Exception { + final byte[] A = new byte[] {(byte) 'A'}; + + String basePath = generator.randomObjectName(); + // create a series of objects under a stable test specific path + BlobId a = BlobId.of(bucket.getName(), String.format("%s/a", basePath)); + BlobId b = BlobId.of(bucket.getName(), String.format("%s/b", basePath)); + BlobId c = BlobId.of(bucket.getName(), String.format("%s/c", basePath)); + BlobId a_ = BlobId.of(bucket.getName(), String.format("%s/a/", basePath)); + BlobId b_ = BlobId.of(bucket.getName(), String.format("%s/b/", basePath)); + BlobId c_ = BlobId.of(bucket.getName(), String.format("%s/c/", basePath)); 
+ BlobId d_ = BlobId.of(bucket.getName(), String.format("%s/d/", basePath)); + BlobId a_A1 = BlobId.of(bucket.getName(), String.format("%s/a/A1", basePath)); + BlobId a_A2 = BlobId.of(bucket.getName(), String.format("%s/a/A2", basePath)); + BlobId b_B1 = BlobId.of(bucket.getName(), String.format("%s/b/B1", basePath)); + BlobId c_C2 = BlobId.of(bucket.getName(), String.format("%s/c/C2", basePath)); + + storage.create(BlobInfo.newBuilder(a).build(), A, BlobTargetOption.doesNotExist()); + storage.create(BlobInfo.newBuilder(b).build(), A, BlobTargetOption.doesNotExist()); + storage.create(BlobInfo.newBuilder(c).build(), A, BlobTargetOption.doesNotExist()); + storage.create(BlobInfo.newBuilder(a_).build(), A, BlobTargetOption.doesNotExist()); + storage.create(BlobInfo.newBuilder(b_).build(), A, BlobTargetOption.doesNotExist()); + storage.create(BlobInfo.newBuilder(c_).build(), A, BlobTargetOption.doesNotExist()); + storage.create(BlobInfo.newBuilder(d_).build(), A, BlobTargetOption.doesNotExist()); + storage.create(BlobInfo.newBuilder(a_A1).build(), A, BlobTargetOption.doesNotExist()); + storage.create(BlobInfo.newBuilder(a_A2).build(), A, BlobTargetOption.doesNotExist()); + storage.create(BlobInfo.newBuilder(b_B1).build(), A, BlobTargetOption.doesNotExist()); + storage.create(BlobInfo.newBuilder(c_C2).build(), A, BlobTargetOption.doesNotExist()); + + // define all our options + BlobListOption[] blobListOptions = + new BlobListOption[] { + BlobListOption.currentDirectory(), + BlobListOption.includeTrailingDelimiter(), + BlobListOption.fields(BlobField.NAME, BlobField.GENERATION, BlobField.SIZE), + BlobListOption.prefix(basePath + "/") + }; + // list and collect all the object names + List blobs = + storage.list(bucket.getName(), blobListOptions).streamAll().collect(Collectors.toList()); + + // figure out what the base prefix of the objects is, so we can trim it down to make assertions + // more terse. 
+ int trimLen = String.format(Locale.US, "gs://%s/%s", bucket.getName(), basePath).length(); + List names = + blobs.stream() + .map( + bi -> { + String uri = bi.getBlobId().toGsUtilUriWithGeneration(); + int genIdx = uri.indexOf("#"); + String substring; + if (genIdx > -1) { + // trim the string representation of the generation to make assertions easier. + // We really only need to know that a generation is present, not it's exact + // value. + substring = uri.substring(trimLen, genIdx + 1); + } else { + substring = uri.substring(trimLen); + } + return "..." + substring; + }) + .collect(Collectors.toList()); + + assertThat(names) + .containsExactly( + // items + ".../a#", + ".../b#", + ".../c#", + // items included due to includeTrailingDelimiter + ".../a/#", + ".../b/#", + ".../c/#", + ".../d/#", + // prefixes + ".../a/", + ".../b/", + ".../c/", + ".../d/"); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITOptionRegressionTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITOptionRegressionTest.java new file mode 100644 index 000000000000..28e88582f711 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITOptionRegressionTest.java @@ -0,0 +1,1213 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Bucket.BlobTargetOption; +import com.google.cloud.storage.Bucket.BlobWriteOption; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.CopyWriter; +import com.google.cloud.storage.HmacKey.HmacKeyMetadata; +import com.google.cloud.storage.HmacKey.HmacKeyState; +import com.google.cloud.storage.ServiceAccount; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobField; +import com.google.cloud.storage.Storage.BlobGetOption; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BucketField; +import com.google.cloud.storage.Storage.BucketGetOption; +import com.google.cloud.storage.Storage.BucketListOption; +import com.google.cloud.storage.Storage.BucketSourceOption; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.Storage.ComposeRequest; +import com.google.cloud.storage.Storage.CopyRequest; +import com.google.cloud.storage.Storage.CreateHmacKeyOption; +import com.google.cloud.storage.Storage.DeleteHmacKeyOption; +import com.google.cloud.storage.Storage.GetHmacKeyOption; +import com.google.cloud.storage.Storage.ListHmacKeysOption; +import com.google.cloud.storage.Storage.PredefinedAcl; +import com.google.cloud.storage.Storage.UpdateHmacKeyOption; +import com.google.cloud.storage.StorageClass; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TestUtils; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import 
com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import java.util.Locale; +import java.util.Set; +import java.util.TreeSet; +import java.util.function.Function; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@SuppressWarnings("ConstantConditions") +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.TEST_BENCH) +public final class ITOptionRegressionTest { + + private static final ChecksummedTestContent CONTENT = ChecksummedTestContent.of("Hello, World!"); + private static final ChecksummedTestContent CONTENT2 = + ChecksummedTestContent.of("Goodbye, World!"); + private static final CSEKSupport csekSupport = CSEKSupport.create(); + private static final ServiceAccount SERVICE_ACCOUNT = ServiceAccount.of("x@y.z"); + + @Inject + @StorageFixture(Transport.HTTP) + public Storage storageFixture; + + @Inject public BucketInfo bucket; + private Storage s; + private RequestAuditing requestAuditing; + private Bucket b; + private Blob o; + private Blob e; + + private static int bucketCounter = 0; + private static int objectCounter = 0; + + @Before + public void setUp() throws Exception { + requestAuditing = new RequestAuditing(); + s = + storageFixture.getOptions().toBuilder() + .setTransportOptions(requestAuditing) + .setRetrySettings(RetrySettings.newBuilder().setMaxAttempts(1).build()) + .build() + .getService(); + b = s.get(bucket.getName()); + o = s.create(BlobInfo.newBuilder(b, "ddeeffaauulltt").build(), CONTENT.getBytes()); + e = + s.create( + BlobInfo.newBuilder(b, "encrypteddetpyrcne").build(), + CONTENT.getBytes(), + Storage.BlobTargetOption.encryptionKey(csekSupport.getTuple().getKey())); + requestAuditing.clear(); + } + 
+ @Test + public void storage_BucketTargetOption_predefinedAcl_PredefinedAcl() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.getBytes(), + Storage.BlobTargetOption.predefinedAcl(PredefinedAcl.PUBLIC_READ)); + requestAuditing.assertQueryParam("predefinedAcl", "publicRead"); + } + + @Test + public void storage_BucketTargetOption_predefinedDefaultObjectAcl_PredefinedAcl() { + s.create( + BucketInfo.of(bucketName()), + BucketTargetOption.predefinedDefaultObjectAcl(PredefinedAcl.PUBLIC_READ)); + requestAuditing.assertQueryParam("predefinedDefaultObjectAcl", "publicRead"); + } + + @Test + public void storage_BucketTargetOption_metagenerationMatch_() { + Bucket bucket = s.create(BucketInfo.of(bucketName())); + requestAuditing.clear(); + Bucket updated = bucket.toBuilder().setLabels(ImmutableMap.of("foo", "bar")).build(); + s.update(updated, BucketTargetOption.metagenerationMatch()); + requestAuditing.assertQueryParam( + "ifMetagenerationMatch", bucket.getMetageneration().toString()); + } + + @Test + public void storage_BucketTargetOption_metagenerationNotMatch_() { + Bucket bucket1 = s.create(BucketInfo.of(bucketName())); + Bucket updated = bucket1.toBuilder().setLabels(ImmutableMap.of("foo", "bar")).build(); + s.update(updated); + requestAuditing.clear(); + s.update( + bucket1.toBuilder().setStorageClass(StorageClass.COLDLINE).build(), + BucketTargetOption.metagenerationNotMatch()); + requestAuditing.assertQueryParam("ifMetagenerationNotMatch", "1"); + } + + @Test + public void storage_BucketTargetOption_userProject_String() { + s.create(BucketInfo.of(bucketName()), BucketTargetOption.userProject("proj")); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void storage_BucketTargetOption_projection_String() { + Bucket bucket = s.create(BucketInfo.of(bucketName())); + requestAuditing.clear(); + Bucket update = bucket.toBuilder().setLabels(ImmutableMap.of("a", "b")).build(); + s.update(update, 
BucketTargetOption.projection("noAcl")); + requestAuditing.assertQueryParam("projection", "noAcl"); + } + + @Test + public void storage_BucketSourceOption_metagenerationMatch_long() { + s.get(o.getBlobId(), BlobGetOption.metagenerationMatch(o.getMetageneration())); + requestAuditing.assertQueryParam("ifMetagenerationMatch", "1"); + } + + @Test + public void storage_BucketSourceOption_metagenerationNotMatch_long() { + s.get(o.getBlobId(), BlobGetOption.metagenerationNotMatch(0L)); + requestAuditing.assertQueryParam("ifMetagenerationNotMatch", "0"); + } + + @Test + public void storage_BucketSourceOption_userProject_String() { + s.getIamPolicy(b.getName(), BucketSourceOption.userProject("proj")); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void storage_BucketSourceOption_requestedPolicyVersion_long() { + s.getIamPolicy(b.getName(), BucketSourceOption.requestedPolicyVersion(3L)); + requestAuditing.assertQueryParam("optionsRequestedPolicyVersion", "3"); + } + + @Test + public void storage_ListHmacKeysOption_serviceAccount_ServiceAccount() { + s.listHmacKeys(ListHmacKeysOption.serviceAccount(SERVICE_ACCOUNT)); + requestAuditing.assertQueryParam("serviceAccountEmail", SERVICE_ACCOUNT.getEmail()); + } + + @Test + public void storage_ListHmacKeysOption_maxResults_long() { + s.listHmacKeys(ListHmacKeysOption.maxResults(1)); + requestAuditing.assertQueryParam("maxResults", "1"); + } + + @Test + public void storage_ListHmacKeysOption_pageToken_String() { + s.listHmacKeys(ListHmacKeysOption.pageToken("asdfghjkl")); + requestAuditing.assertQueryParam("pageToken", "asdfghjkl"); + } + + @Test + public void storage_ListHmacKeysOption_showDeletedKeys_boolean() { + s.listHmacKeys(ListHmacKeysOption.showDeletedKeys(true)); + requestAuditing.assertQueryParam("showDeletedKeys", "true"); + } + + @Test + public void storage_ListHmacKeysOption_userProject_String() { + s.listHmacKeys(ListHmacKeysOption.userProject("proj")); + 
requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void storage_ListHmacKeysOption_projectId_String() { + s.listHmacKeys(ListHmacKeysOption.projectId("proj")); + requestAuditing.assertPathParam("projects", "proj"); + } + + @Test + public void storage_CreateHmacKeyOption_userProject_String() { + s.createHmacKey(SERVICE_ACCOUNT, CreateHmacKeyOption.userProject("proj")); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void storage_CreateHmacKeyOption_projectId_String() { + s.createHmacKey(SERVICE_ACCOUNT, CreateHmacKeyOption.projectId("proj")); + requestAuditing.assertPathParam("projects", "proj"); + } + + @Test + public void storage_GetHmacKeyOption_userProject_String() { + try { + s.getHmacKey("x", GetHmacKeyOption.userProject("proj")); + } catch (StorageException ignore) { + } + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void storage_GetHmacKeyOption_projectId_String() { + try { + s.getHmacKey("x", GetHmacKeyOption.projectId("proj")); + } catch (StorageException ignore) { + } + requestAuditing.assertPathParam("projects", "proj"); + } + + @Test + public void storage_DeleteHmacKeyOption_userProject_String() { + HmacKeyMetadata hmacKeyMetadata = + HmacKeyMetadata.newBuilder(SERVICE_ACCOUNT).setAccessId("x").setProjectId("proj").build(); + try { + s.deleteHmacKey(hmacKeyMetadata, DeleteHmacKeyOption.userProject("proj")); + } catch (StorageException ignore) { + } + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void storage_UpdateHmacKeyOption_userProject_String() { + HmacKeyMetadata hmacKeyMetadata = + HmacKeyMetadata.newBuilder(SERVICE_ACCOUNT).setAccessId("x").setProjectId("proj").build(); + try { + s.updateHmacKeyState( + hmacKeyMetadata, HmacKeyState.INACTIVE, UpdateHmacKeyOption.userProject("proj")); + } catch (StorageException ignore) { + } + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void 
storage_BucketGetOption_metagenerationMatch_long() { + s.get(b.getName(), BucketGetOption.metagenerationMatch(b.getMetageneration())); + requestAuditing.assertQueryParam("ifMetagenerationMatch", "1"); + } + + @Test + public void storage_BucketGetOption_metagenerationNotMatch_long() { + s.get(b.getName(), BucketGetOption.metagenerationNotMatch(0L)); + requestAuditing.assertQueryParam("ifMetagenerationNotMatch", "0"); + } + + @Test + public void storage_BucketGetOption_userProject_String() { + s.get(b.getName(), BucketGetOption.userProject("proj")); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void storage_BucketGetOption_fields_BucketField() { + Set expected = + treeSet( + "acl", + "autoclass", + "billing", + "cors", + "customPlacementConfig", + "defaultEventBasedHold", + "defaultObjectAcl", + "encryption", + "etag", + "iamConfiguration", + "id", + "ipFilter", + "labels", + "lifecycle", + "location", + "locationType", + "logging", + "metageneration", + "name", + "owner", + "retentionPolicy", + "rpo", + "selfLink", + "storageClass", + "timeCreated", + "updated", + "versioning", + "website", + "softDeletePolicy", + "hierarchicalNamespace", + "projectNumber"); + s.get( + b.getName(), + BucketGetOption.fields(TestUtils.filterOutHttpOnlyBucketFields(BucketField.values()))); + requestAuditing.assertQueryParam("fields", expected, splitOnCommaToSet()); + } + + @Test + public void storage_BlobTargetOption_predefinedAcl_PredefinedAcl() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.getBytes(), + Storage.BlobTargetOption.predefinedAcl(PredefinedAcl.PUBLIC_READ)); + requestAuditing.assertQueryParam("predefinedAcl", "publicRead"); + } + + @Test + public void storage_BlobTargetOption_doesNotExist_() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.getBytes(), + Storage.BlobTargetOption.doesNotExist()); + requestAuditing.assertQueryParam("ifGenerationMatch", "0"); + } + + @Test + public void 
storage_BlobTargetOption_generationMatch_() { + Blob blob = s.create(BlobInfo.newBuilder(b, objectName()).build()); + requestAuditing.clear(); + Blob updated = blob.toBuilder().setMetadata(ImmutableMap.of("foo", "bar")).build(); + s.update(updated, Storage.BlobTargetOption.generationMatch()); + requestAuditing.assertQueryParam("ifGenerationMatch", blob.getGeneration().toString()); + } + + @Test + public void storage_BlobTargetOption_generationNotMatch_() { + Blob blob1 = s.create(BlobInfo.newBuilder(b, objectName()).build()); + Blob updated = blob1.toBuilder().setMetadata(ImmutableMap.of("foo", "bar")).build(); + s.create(updated, CONTENT2.getBytes()); + requestAuditing.clear(); + s.create(updated, CONTENT.getBytes(), Storage.BlobTargetOption.generationNotMatch()); + requestAuditing.assertQueryParam("ifGenerationNotMatch", blob1.getGeneration().toString()); + } + + @Test + public void storage_BlobTargetOption_metagenerationMatch_() { + Blob blob = s.create(BlobInfo.newBuilder(b, objectName()).build()); + requestAuditing.clear(); + Blob updated = blob.toBuilder().setMetadata(ImmutableMap.of("foo", "bar")).build(); + s.update(updated, Storage.BlobTargetOption.metagenerationMatch()); + requestAuditing.assertQueryParam("ifMetagenerationMatch", blob.getMetageneration().toString()); + } + + @Test + public void storage_BlobTargetOption_metagenerationNotMatch_() { + Blob blob1 = s.create(BlobInfo.newBuilder(b, objectName()).build()); + Blob updated = blob1.toBuilder().setMetadata(ImmutableMap.of("foo", "bar")).build(); + s.update(updated); + requestAuditing.clear(); + s.update( + blob1.toBuilder().setStorageClass(StorageClass.COLDLINE).build(), + Storage.BlobTargetOption.metagenerationNotMatch()); + requestAuditing.assertQueryParam("ifMetagenerationNotMatch", "1"); + } + + @Test + public void storage_BlobTargetOption_disableGzipContent_() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.getBytes(), + Storage.BlobTargetOption.disableGzipContent()); + 
requestAuditing.assertNoContentEncoding(); + } + + @Test + public void storage_BlobTargetOption_detectContentType_() { + s.create( + BlobInfo.newBuilder(b, objectName() + ".txt").build(), + CONTENT.getBytes(), + Storage.BlobTargetOption.detectContentType()); + requestAuditing.assertMultipartContentJsonAndText(); + } + + @Test + public void storage_BlobTargetOption_encryptionKey_Key() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.getBytes(), + Storage.BlobTargetOption.encryptionKey(csekSupport.getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void storage_BlobTargetOption_userProject_String() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.getBytes(), + Storage.BlobTargetOption.userProject("proj")); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void storage_BlobTargetOption_encryptionKey_String() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.getBytes(), + Storage.BlobTargetOption.encryptionKey(csekSupport.getTuple().getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void storage_BlobTargetOption_kmsKeyName_String() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.getBytes(), + Storage.BlobTargetOption.kmsKeyName("kms-key")); + requestAuditing.assertQueryParam("kmsKeyName", "kms-key"); + } + + @Test + public void storage_BlobWriteOption_predefinedAcl_PredefinedAcl() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.bytesAsInputStream(), + Storage.BlobWriteOption.predefinedAcl(PredefinedAcl.PUBLIC_READ)); + requestAuditing.assertQueryParam("predefinedAcl", "publicRead"); + } + + @Test + public void storage_BlobWriteOption_doesNotExist_() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.bytesAsInputStream(), + Storage.BlobWriteOption.doesNotExist()); + 
requestAuditing.assertQueryParam("ifGenerationMatch", "0"); + } + + @Test + public void storage_BlobWriteOption_generationMatch_() { + Blob blob = s.create(BlobInfo.newBuilder(b, objectName()).build()); + requestAuditing.clear(); + Blob updated = + blob.toBuilder() + .setMetadata(ImmutableMap.of("foo", "bar")) + .setMd5(null) + .setCrc32c(null) + .build(); + s.create(updated, CONTENT2.bytesAsInputStream(), Storage.BlobWriteOption.generationMatch()); + requestAuditing.assertQueryParam("ifGenerationMatch", blob.getGeneration().toString()); + } + + @Test + public void storage_BlobWriteOption_generationNotMatch_() { + Blob blob1 = s.create(BlobInfo.newBuilder(b, objectName()).build()); + Blob updated = + blob1.toBuilder() + .setMetadata(ImmutableMap.of("foo", "bar")) + .setMd5(null) + .setCrc32c(null) + .build(); + s.create(updated, CONTENT2.getBytes()); + requestAuditing.clear(); + s.create(updated, CONTENT.bytesAsInputStream(), Storage.BlobWriteOption.generationNotMatch()); + requestAuditing.assertQueryParam("ifGenerationNotMatch", blob1.getGeneration().toString()); + } + + @Test + public void storage_BlobWriteOption_metagenerationMatch_() { + Blob blob = s.create(BlobInfo.newBuilder(b, objectName()).build()); + requestAuditing.clear(); + Blob updated = + blob.toBuilder() + .setMetadata(ImmutableMap.of("foo", "bar")) + .setMd5(null) + .setCrc32c(null) + .build(); + s.create(updated, CONTENT2.bytesAsInputStream(), Storage.BlobWriteOption.metagenerationMatch()); + requestAuditing.assertQueryParam("ifMetagenerationMatch", blob.getMetageneration().toString()); + } + + @Test + public void storage_BlobWriteOption_metagenerationNotMatch_() { + Blob blob1 = s.create(BlobInfo.newBuilder(b, objectName()).build()); + Blob updated = + blob1.toBuilder() + .setMetadata(ImmutableMap.of("foo", "bar")) + .setMd5(null) + .setCrc32c(null) + .build(); + s.update(updated); + requestAuditing.clear(); + s.create( + updated.toBuilder().setStorageClass(StorageClass.COLDLINE).build(), + 
CONTENT2.bytesAsInputStream(), + Storage.BlobWriteOption.metagenerationNotMatch()); + requestAuditing.assertQueryParam("ifMetagenerationNotMatch", "1"); + } + + @Test + public void storage_BlobWriteOption_md5Match_() { + BlobInfo info = BlobInfo.newBuilder(b, objectName()).setMd5(CONTENT.getMd5Base64()).build(); + s.create(info, CONTENT.bytesAsInputStream(), Storage.BlobWriteOption.md5Match()); + requestAuditing.assertMultipartJsonField("md5Hash", CONTENT.getMd5Base64()); + } + + @Test + public void storage_BlobWriteOption_crc32cMatch_() { + BlobInfo info = + BlobInfo.newBuilder(b, objectName()).setCrc32c(CONTENT.getCrc32cBase64()).build(); + s.create(info, CONTENT.bytesAsInputStream(), Storage.BlobWriteOption.crc32cMatch()); + requestAuditing.assertMultipartJsonField("crc32c", CONTENT.getCrc32cBase64()); + } + + @Test + public void storage_BlobWriteOption_encryptionKey_Key() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.bytesAsInputStream(), + Storage.BlobWriteOption.encryptionKey(csekSupport.getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void storage_BlobWriteOption_encryptionKey_String() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.bytesAsInputStream(), + Storage.BlobWriteOption.encryptionKey(csekSupport.getTuple().getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void storage_BlobWriteOption_kmsKeyName_String() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.bytesAsInputStream(), + Storage.BlobWriteOption.kmsKeyName("kms-key")); + requestAuditing.assertQueryParam("kmsKeyName", "kms-key"); + } + + @Test + public void storage_BlobWriteOption_userProject_String() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.bytesAsInputStream(), + Storage.BlobWriteOption.userProject("proj")); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + 
public void storage_BlobWriteOption_disableGzipContent_() { + s.create( + BlobInfo.newBuilder(b, objectName()).build(), + CONTENT.bytesAsInputStream(), + Storage.BlobWriteOption.disableGzipContent()); + requestAuditing.assertNoContentEncoding(); + } + + @Test + public void storage_BlobWriteOption_detectContentType_() { + s.create( + BlobInfo.newBuilder(b, objectName() + ".txt").build(), + CONTENT.bytesAsInputStream(), + Storage.BlobWriteOption.detectContentType()); + requestAuditing.assertMultipartContentJsonAndText(); + } + + @Test + public void storage_BlobSourceOption_generationMatch_() { + s.readAllBytes(o.getBlobId(), BlobSourceOption.generationMatch()); + requestAuditing.assertQueryParam("ifGenerationMatch", o.getGeneration().toString()); + } + + @Test + public void storage_BlobSourceOption_generationMatch_long() { + s.readAllBytes(o.getBlobId(), BlobSourceOption.generationMatch(o.getGeneration())); + requestAuditing.assertQueryParam("ifGenerationMatch", o.getGeneration().toString()); + } + + @Test + public void storage_BlobSourceOption_generationNotMatch_() { + try { + s.readAllBytes( + BlobId.of(o.getBucket(), o.getName(), 1L), BlobSourceOption.generationNotMatch()); + } catch (StorageException ignore) { + // this option doesn't make much sense. + // The generation which is read from to construct the ifGenerationNotMatch condition comes + // from the BlobId. However, the same generation value is also included as the generation + // query param, thereby leading to a condition that can NEVER be met... 
+ // This test is only here to verify plumbing, but it should be deprecated and removed + } + requestAuditing.assertQueryParam("ifGenerationNotMatch", "1"); + } + + @Test + public void storage_BlobSourceOption_generationNotMatch_long() { + s.readAllBytes(o.getBlobId(), BlobSourceOption.generationNotMatch(0)); + requestAuditing.assertQueryParam("ifGenerationNotMatch", "0"); + } + + @Test + public void storage_BlobSourceOption_metagenerationMatch_long() { + s.readAllBytes(o.getBlobId(), BlobSourceOption.metagenerationMatch(o.getMetageneration())); + requestAuditing.assertQueryParam("ifMetagenerationMatch", "1"); + } + + @Test + public void storage_BlobSourceOption_metagenerationNotMatch_long() { + s.readAllBytes(o.getBlobId(), BlobSourceOption.metagenerationNotMatch(0)); + requestAuditing.assertQueryParam("ifMetagenerationNotMatch", "0"); + } + + @Test + public void storage_BlobSourceOption_decryptionKey_Key() { + s.readAllBytes(e.getBlobId(), BlobSourceOption.decryptionKey(csekSupport.getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void storage_BlobSourceOption_decryptionKey_String() { + s.readAllBytes(e.getBlobId(), BlobSourceOption.decryptionKey(csekSupport.getTuple().getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void storage_BlobSourceOption_userProject_String() { + s.readAllBytes(o.getBlobId(), BlobSourceOption.userProject("proj")); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void storage_BlobGetOption_generationMatch_() { + s.get(o.getBlobId(), BlobGetOption.generationMatch()); + requestAuditing.assertQueryParam("ifGenerationMatch", o.getGeneration().toString()); + } + + @Test + public void storage_BlobGetOption_generationMatch_long() { + s.get(o.getBlobId(), BlobGetOption.generationMatch(o.getGeneration())); + requestAuditing.assertQueryParam("ifGenerationMatch", o.getGeneration().toString()); + } + + @Test + 
public void storage_BlobGetOption_generationNotMatch_() { + try { + s.get(BlobId.of(o.getBucket(), o.getName(), 1L), BlobGetOption.generationNotMatch()); + } catch (StorageException ignore) { + // this option doesn't make much sense. + // The generation which is read from to construct the ifGenerationNotMatch condition comes + // from the BlobId. However, the same generation value is also included as the generation + // query param, thereby leading to a condition that can NEVER be met... + // This test is only here to verify plumbing, but it should be deprecated and removed + } + requestAuditing.assertQueryParam("ifGenerationNotMatch", "1"); + } + + @Test + public void storage_BlobGetOption_generationNotMatch_long() { + s.get(o.getBlobId(), BlobGetOption.generationNotMatch(0)); + requestAuditing.assertQueryParam("ifGenerationNotMatch", "0"); + } + + @Test + public void storage_BlobGetOption_metagenerationMatch_long() { + s.get(o.getBlobId(), BlobGetOption.metagenerationMatch(o.getMetageneration())); + requestAuditing.assertQueryParam("ifMetagenerationMatch", "1"); + } + + @Test + public void storage_BlobGetOption_metagenerationNotMatch_long() { + s.get(o.getBlobId(), BlobGetOption.metagenerationNotMatch(0)); + requestAuditing.assertQueryParam("ifMetagenerationNotMatch", "0"); + } + + @Test + public void storage_BlobGetOption_fields_BlobField() { + Set expected = + treeSet( + "acl", + "bucket", + "cacheControl", + "componentCount", + "contentDisposition", + "contentEncoding", + "contentLanguage", + "contentType", + "crc32c", + "customTime", + "customerEncryption", + "etag", + "eventBasedHold", + "generation", + "id", + "kind", + "kmsKeyName", + "md5Hash", + "mediaLink", + "metadata", + "metageneration", + "name", + "owner", + "retentionExpirationTime", + "selfLink", + "size", + "storageClass", + "temporaryHold", + "timeCreated", + "timeDeleted", + "timeStorageClassUpdated", + "updated", + "retention", + "softDeleteTime", + "hardDeleteTime", + "contexts"); + 
s.get(o.getBlobId(), BlobGetOption.fields(BlobField.values())); + requestAuditing.assertQueryParam("fields", expected, splitOnCommaToSet()); + } + + @Test + public void storage_BlobGetOption_userProject_String() { + s.get(o.getBlobId(), BlobGetOption.userProject("proj")); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void storage_BlobGetOption_decryptionKey_Key() { + s.get(e.getBlobId(), BlobGetOption.decryptionKey(csekSupport.getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void storage_BlobGetOption_decryptionKey_String() { + s.get(e.getBlobId(), BlobGetOption.decryptionKey(csekSupport.getTuple().getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void storage_BucketListOption_pageSize_long() { + s.list(BucketListOption.pageSize(1)); + requestAuditing.assertQueryParam("maxResults", "1"); + } + + @Test + public void storage_BucketListOption_pageToken_String() { + s.list(BucketListOption.pageToken("asdfghjkl")); + requestAuditing.assertQueryParam("pageToken", "asdfghjkl"); + } + + @Test + public void storage_BucketListOption_prefix_String() { + s.list(BucketListOption.prefix("opt")); + requestAuditing.assertQueryParam("prefix", "opt"); + } + + @Test + public void storage_BucketListOption_userProject_String() { + s.list(BucketListOption.userProject("proj")); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void storage_BucketListOption_fields_BucketField() { + Set expected = + treeSet( + "nextPageToken", + "items/acl", + "items/autoclass", + "items/billing", + "items/cors", + "items/customPlacementConfig", + "items/defaultEventBasedHold", + "items/defaultObjectAcl", + "items/encryption", + "items/etag", + "items/iamConfiguration", + "items/id", + "items/ipFilter", + "items/labels", + "items/lifecycle", + "items/location", + "items/locationType", + "items/logging", + "items/metageneration", + 
"items/name", + "items/owner", + "items/retentionPolicy", + "items/rpo", + "items/selfLink", + "items/storageClass", + "items/timeCreated", + "items/updated", + "items/versioning", + "items/website", + "items/softDeletePolicy", + "items/hierarchicalNamespace", + "items/projectNumber"); + s.list(BucketListOption.fields(TestUtils.filterOutHttpOnlyBucketFields(BucketField.values()))); + requestAuditing.assertQueryParam("fields", expected, splitOnCommaToSet()); + } + + @Test + public void storage_BlobListOption_pageSize_long() { + s.list(b.getName(), BlobListOption.pageSize(1)); + requestAuditing.assertQueryParam("maxResults", "1"); + } + + @Test + public void storage_BlobListOption_pageToken_String() { + s.list(b.getName(), BlobListOption.pageToken("asdfghjkl")); + requestAuditing.assertQueryParam("pageToken", "asdfghjkl"); + } + + @Test + public void storage_BlobListOption_prefix_String() { + s.list(b.getName(), BlobListOption.prefix("obj")); + requestAuditing.assertQueryParam("prefix", "obj"); + } + + @Test + public void storage_BlobListOption_currentDirectory_() { + s.list(b.getName(), BlobListOption.currentDirectory()); + requestAuditing.assertQueryParam("delimiter", "/"); + } + + @Test + public void storage_BlobListOption_delimiter_String() { + s.list(b.getName(), BlobListOption.delimiter(":")); + requestAuditing.assertQueryParam("delimiter", ":"); + } + + @Test + public void storage_BlobListOption_startOffset_String() { + s.list(b.getName(), BlobListOption.startOffset("x")); + requestAuditing.assertQueryParam("startOffset", "x"); + } + + @Test + public void storage_BlobListOption_endOffset_String() { + s.list(b.getName(), BlobListOption.endOffset("x")); + requestAuditing.assertQueryParam("endOffset", "x"); + } + + @Test + public void storage_BlobListOption_userProject_String() { + s.list(b.getName(), BlobListOption.userProject("proj")); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void 
storage_BlobListOption_versions_boolean() { + s.list(b.getName(), BlobListOption.versions(true)); + requestAuditing.assertQueryParam("versions", "true"); + } + + @Test + public void storage_BlobListOption_fields_BlobField() { + Set expected = + treeSet( + "nextPageToken", + "prefixes", + "items/acl", + "items/bucket", + "items/cacheControl", + "items/componentCount", + "items/contentDisposition", + "items/contentEncoding", + "items/contentLanguage", + "items/contentType", + "items/crc32c", + "items/customTime", + "items/customerEncryption", + "items/etag", + "items/eventBasedHold", + "items/generation", + "items/id", + "items/kind", + "items/kmsKeyName", + "items/md5Hash", + "items/mediaLink", + "items/metadata", + "items/metageneration", + "items/name", + "items/owner", + "items/retentionExpirationTime", + "items/selfLink", + "items/size", + "items/storageClass", + "items/temporaryHold", + "items/timeCreated", + "items/timeDeleted", + "items/timeStorageClassUpdated", + "items/updated", + "items/retention", + "items/softDeleteTime", + "items/hardDeleteTime", + "items/contexts"); + s.list(b.getName(), BlobListOption.fields(BlobField.values())); + requestAuditing.assertQueryParam("fields", expected, splitOnCommaToSet()); + } + + @Test + public void bucket_BucketSourceOption_metagenerationMatch_() { + b.exists(Bucket.BucketSourceOption.metagenerationMatch()); + requestAuditing.assertQueryParam("ifMetagenerationMatch", b.getMetageneration().toString()); + } + + @Test + public void bucket_BucketSourceOption_metagenerationNotMatch_() { + Bucket bucket1 = s.create(BucketInfo.of(bucketName())); + s.update(bucket1.toBuilder().setStorageClass(StorageClass.COLDLINE).build()); + requestAuditing.clear(); + bucket1.exists(Bucket.BucketSourceOption.metagenerationNotMatch()); + requestAuditing.assertQueryParam("ifMetagenerationNotMatch", b.getMetageneration().toString()); + } + + @Test + public void bucket_BucketSourceOption_userProject_String() { + // attempt to delete a bucket 
that doesn't exist + boolean delete = s.delete(bucketName(), BucketSourceOption.userProject("proj")); + assertThat(delete).isFalse(); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void bucket_BlobTargetOption_predefinedAcl_PredefinedAcl() { + b.create( + objectName(), + CONTENT.getBytes(), + BlobTargetOption.predefinedAcl(PredefinedAcl.PUBLIC_READ)); + requestAuditing.assertQueryParam("predefinedAcl", "publicRead"); + } + + @Test + public void bucket_BlobTargetOption_doesNotExist_() { + b.create(objectName(), CONTENT.getBytes(), BlobTargetOption.doesNotExist()); + requestAuditing.assertQueryParam("ifGenerationMatch", "0"); + } + + @Test + public void bucket_BlobTargetOption_generationMatch_long() { + b.create(objectName(), CONTENT.getBytes(), BlobTargetOption.generationMatch(0)); + requestAuditing.assertQueryParam("ifGenerationMatch", "0"); + } + + @Test + public void bucket_BlobTargetOption_generationNotMatch_long() { + b.create(objectName(), CONTENT.getBytes(), BlobTargetOption.generationNotMatch(1L)); + requestAuditing.assertQueryParam("ifGenerationNotMatch", "1"); + } + + @Test + public void bucket_BlobTargetOption_metagenerationMatch_long() { + b.create(objectName(), CONTENT.getBytes(), BlobTargetOption.metagenerationMatch(0)); + requestAuditing.assertQueryParam("ifMetagenerationMatch", "0"); + } + + @Test + public void bucket_BlobTargetOption_metagenerationNotMatch_long() { + b.create(objectName(), CONTENT.getBytes(), BlobTargetOption.metagenerationNotMatch(1L)); + requestAuditing.assertQueryParam("ifMetagenerationNotMatch", "1"); + } + + @Test + public void bucket_BlobTargetOption_encryptionKey_Key() { + b.create( + objectName(), CONTENT.getBytes(), BlobTargetOption.encryptionKey(csekSupport.getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void bucket_BlobTargetOption_encryptionKey_String() { + b.create( + objectName(), + CONTENT.getBytes(), + 
BlobTargetOption.encryptionKey(csekSupport.getTuple().getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void bucket_BlobTargetOption_kmsKeyName_String() { + b.create(objectName(), CONTENT.getBytes(), BlobTargetOption.kmsKeyName("kms-key")); + requestAuditing.assertQueryParam("kmsKeyName", "kms-key"); + } + + @Test + public void bucket_BlobTargetOption_userProject_String() { + b.create(objectName(), CONTENT.getBytes(), BlobTargetOption.userProject("proj")); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void bucket_BlobWriteOption_predefinedAcl_PredefinedAcl() { + b.create( + objectName(), + CONTENT.bytesAsInputStream(), + BlobWriteOption.predefinedAcl(PredefinedAcl.PUBLIC_READ)); + requestAuditing.assertQueryParam("predefinedAcl", "publicRead"); + } + + @Test + public void bucket_BlobWriteOption_doesNotExist_() { + b.create(objectName(), CONTENT.bytesAsInputStream(), BlobWriteOption.doesNotExist()); + requestAuditing.assertQueryParam("ifGenerationMatch", "0"); + } + + @Test + public void bucket_BlobWriteOption_generationMatch_long() { + b.create(objectName(), CONTENT.bytesAsInputStream(), BlobWriteOption.generationMatch(0)); + requestAuditing.assertQueryParam("ifGenerationMatch", "0"); + } + + @Test + public void bucket_BlobWriteOption_generationNotMatch_long() { + b.create(objectName(), CONTENT.bytesAsInputStream(), BlobWriteOption.generationNotMatch(1L)); + requestAuditing.assertQueryParam("ifGenerationNotMatch", "1"); + } + + @Test + public void bucket_BlobWriteOption_metagenerationMatch_long() { + b.create(objectName(), CONTENT.bytesAsInputStream(), BlobWriteOption.metagenerationMatch(0)); + requestAuditing.assertQueryParam("ifMetagenerationMatch", "0"); + } + + @Test + public void bucket_BlobWriteOption_metagenerationNotMatch_long() { + b.create( + objectName(), CONTENT.bytesAsInputStream(), BlobWriteOption.metagenerationNotMatch(1L)); + 
requestAuditing.assertQueryParam("ifMetagenerationNotMatch", "1"); + } + + @Test + public void bucket_BlobWriteOption_md5Match_String() { + b.create( + objectName(), + CONTENT.bytesAsInputStream(), + BlobWriteOption.md5Match(CONTENT.getMd5Base64())); + requestAuditing.assertMultipartJsonField("md5Hash", CONTENT.getMd5Base64()); + } + + @Test + public void bucket_BlobWriteOption_crc32cMatch_String() { + b.create( + objectName(), + CONTENT.bytesAsInputStream(), + BlobWriteOption.crc32cMatch(CONTENT.getCrc32cBase64())); + requestAuditing.assertMultipartJsonField("crc32c", CONTENT.getCrc32cBase64()); + } + + @Test + public void bucket_BlobWriteOption_encryptionKey_Key() { + b.create( + objectName(), + CONTENT.bytesAsInputStream(), + BlobWriteOption.encryptionKey(csekSupport.getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void bucket_BlobWriteOption_encryptionKey_String() { + b.create( + objectName(), + CONTENT.bytesAsInputStream(), + BlobWriteOption.encryptionKey(csekSupport.getTuple().getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void bucket_BlobWriteOption_userProject_String() { + b.create(objectName(), CONTENT.bytesAsInputStream(), BlobWriteOption.userProject("proj")); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void blob_BlobSourceOption_generationMatch_() { + o.getContent(Blob.BlobSourceOption.generationMatch()); + requestAuditing.assertQueryParam("ifGenerationMatch", o.getGeneration().toString()); + } + + @Test + public void blob_BlobSourceOption_generationNotMatch_() { + try { + o.getContent(Blob.BlobSourceOption.generationNotMatch()); + } catch (StorageException ignore) { + // this option doesn't make much sense. + // The generation which is read from to construct the ifGenerationNotMatch condition comes + // from the BlobId. 
However, the same generation value is also included as the generation + // query param, thereby leading to a condition that can NEVER be met... + // This test is only here to verify plumbing, but it should be deprecated and removed + } + requestAuditing.assertQueryParam("ifGenerationNotMatch", o.getGeneration().toString()); + } + + @Test + public void blob_BlobSourceOption_metagenerationMatch_() { + o.getContent(Blob.BlobSourceOption.metagenerationMatch()); + requestAuditing.assertQueryParam("ifMetagenerationMatch", o.getMetageneration().toString()); + } + + @Test + public void blob_BlobSourceOption_metagenerationNotMatch_() { + Blob blob1 = s.create(BlobInfo.newBuilder(b, objectName()).build()); + Blob updated = blob1.toBuilder().setMetadata(ImmutableMap.of("foo", "bar")).build(); + s.update(updated); + requestAuditing.clear(); + blob1.getContent(Blob.BlobSourceOption.metagenerationNotMatch()); + requestAuditing.assertQueryParam( + "ifMetagenerationNotMatch", blob1.getMetageneration().toString()); + } + + @Test + public void blob_BlobSourceOption_decryptionKey_Key() { + e.getContent(Blob.BlobSourceOption.decryptionKey(csekSupport.getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void blob_BlobSourceOption_decryptionKey_String() { + e.getContent(Blob.BlobSourceOption.decryptionKey(csekSupport.getTuple().getKey())); + requestAuditing.assertEncryptionKeyHeaders(csekSupport.getTuple()); + } + + @Test + public void blob_BlobSourceOption_userProject_String() { + o.getContent(Blob.BlobSourceOption.userProject("proj")); + requestAuditing.assertQueryParam("userProject", "proj"); + } + + @Test + public void storage_CopyWriter() { + CopyRequest request = + CopyRequest.newBuilder() + .setSource(o.getBlobId()) + .setSourceOptions(Storage.BlobSourceOption.generationMatch()) + .setTarget( + BlobId.of(b.getName(), objectName(), 57L), + Storage.BlobTargetOption.generationNotMatch()) + .build(); + CopyWriter copy = 
s.copy(request); + requestAuditing.assertQueryParam("ifGenerationNotMatch", "57"); + requestAuditing.assertQueryParam("ifSourceGenerationMatch", o.getGeneration().toString()); + copy.getResult(); + } + + @Test + public void storage_ComposeRequest() { + Blob obj = b.create(objectName(), CONTENT.getBytes(), BlobTargetOption.doesNotExist()); + requestAuditing.clear(); + Blob updated = obj.toBuilder().setMd5(null).setCrc32c(null).build(); + ComposeRequest request = + ComposeRequest.newBuilder() + .addSource(o.getName()) + .addSource(o.getName()) + .setTarget(updated) + .setTargetOptions(Storage.BlobTargetOption.metagenerationMatch()) + .build(); + + s.compose(request); + requestAuditing.assertQueryParam("ifMetagenerationMatch", obj.getMetageneration().toString()); + } + + private static String bucketName() { + return String.format(Locale.US, "bucket-%03d", bucketCounter++); + } + + private static String objectName() { + return String.format(Locale.US, "object-%03d", objectCounter++); + } + + private static Function> splitOnCommaToSet() { + return s -> treeSet(s.split(",")); + } + + /** + * Util method to make a TreeSet easily. This makes failed assertions easier to read by sorting + * the values. + */ + @SafeVarargs + private static > Set treeSet(T... 
ts) { + return new TreeSet<>(ImmutableSet.copyOf(ts)); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITParallelCompositeUploadBlobWriteSessionConfigTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITParallelCompositeUploadBlobWriteSessionConfigTest.java new file mode 100644 index 000000000000..9b7d630f6b15 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITParallelCompositeUploadBlobWriteSessionConfigTest.java @@ -0,0 +1,263 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.gax.paging.Page; +import com.google.api.gax.rpc.ApiExceptions; +import com.google.cloud.kms.v1.CryptoKey; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BlobWriteSession; +import com.google.cloud.storage.BlobWriteSessionConfigs; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.BufferAllocationStrategy; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.ExecutorSupplier; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartCleanupStrategy; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartMetadataFieldDecorator; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartNamingStrategy; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.BucketFixture; +import com.google.cloud.storage.it.runner.annotations.BucketType; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import 
com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.it.runner.registry.KmsFixture; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.AsynchronousCloseException; +import java.nio.channels.WritableByteChannel; +import java.security.Key; +import java.time.Duration; +import java.util.Random; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP, Transport.GRPC}, + backends = {Backend.PROD}) +public final class ITParallelCompositeUploadBlobWriteSessionConfigTest { + + private static final int _1MiB = 1024 * 1024; + private static ExecutorService exec; + + @Inject public BucketInfo bucket; + + @Inject + @BucketFixture(BucketType.REQUESTER_PAYS) + public BucketInfo rpBucket; + + @Inject public Storage injectedStorage; + + @Inject public Transport transport; + + @Inject public Generator generator; + @Inject public KmsFixture kmsFixture; + + // configured Storage with the PCU config + private Storage storage; + private Random rand; + + @BeforeClass + public static void beforeClass() { + ThreadFactory threadFactory = + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("test-pcuwbct-%d").build(); + exec = Executors.newCachedThreadPool(threadFactory); + } + + @Before + public void setUp() throws Exception { + ParallelCompositeUploadBlobWriteSessionConfig pcu = + BlobWriteSessionConfigs.parallelCompositeUpload() + .withExecutorSupplier(ExecutorSupplier.useExecutor(exec)) + // define a max part size that is fairly small to aid in test speed + 
.withBufferAllocationStrategy(BufferAllocationStrategy.simple(_1MiB)) + .withPartNamingStrategy(PartNamingStrategy.prefix("prefix-a")) + // Write customTime 30 seconds in the future + .withPartMetadataFieldDecorator( + PartMetadataFieldDecorator.setCustomTimeInFuture(Duration.ofSeconds(30))) + // let our fixtures take care of cleaning things + .withPartCleanupStrategy(PartCleanupStrategy.never()); + + StorageOptions storageOptions = + injectedStorage.getOptions().toBuilder().setBlobWriteSessionConfig(pcu).build(); + storage = storageOptions.getService(); + rand = new Random(); + } + + @After + public void tearDown() throws Exception { + if (storage != null) { + storage.close(); + } + } + + @AfterClass + public static void afterClass() { + if (exec != null) { + exec.shutdownNow(); + } + } + + @Test + public void partFilesCreatedWithCustomTimeWritten() throws IOException { + doTest(bucket, 10 * _1MiB + 37, ImmutableList.of(), ImmutableList.of(), ImmutableList.of()); + Page blobs = storage.list(bucket.getName(), Storage.BlobListOption.prefix("prefix-a")); + for (Blob blob : blobs.iterateAll()) { + assertThat(blob.getCustomTimeOffsetDateTime()).isNotNull(); + } + } + + @Test + public void errorRaisedByMethodAndFutureResult() throws IOException { + + BlobInfo info = + BlobInfo.newBuilder(bucket.getName() + "x", generator.randomObjectName()).build(); + byte[] bytes = DataGenerator.rand(rand).genBytes(1); + + BlobWriteSession session = storage.blobWriteSession(info, BlobWriteOption.doesNotExist()); + try { + try (WritableByteChannel channel = session.open()) { + channel.write(ByteBuffer.wrap(bytes)); + } + // it is okay if the exception is raised during write itself or close, if it happens during + // close we should get an AsynchronousCloseException + } catch (AsynchronousCloseException ace) { + assertThat(((StorageException) ace.getCause()).getCode()).isEqualTo(404); + } catch (StorageException se) { + assertThat(se.getCode()).isEqualTo(404); + } + + // the result 
future should resolve to a failure specifying the failure kind + StorageException se = + assertThrows( + StorageException.class, + () -> ApiExceptions.callAndTranslateApiException(session.getResult())); + assertThat(se.getCode()).isEqualTo(404); + } + + @Test + public void uploadingAnObjectWorks() throws Exception { + doTest(bucket, 32 * _1MiB + 37, ImmutableList.of(), ImmutableList.of(), ImmutableList.of()); + } + + @Test + public void uploadingAnObjectWorks_requesterPays() throws Exception { + String projectId = storage.getOptions().getProjectId(); + int _1MiB = 1024 * 1024; + doTest( + rpBucket, + 32 * _1MiB + 37, + ImmutableList.of(BlobTargetOption.userProject(projectId)), + ImmutableList.of(BlobWriteOption.userProject(projectId)), + ImmutableList.of(BlobSourceOption.userProject(projectId))); + } + + @Test + public void uploadingAnObjectWorks_customerSuppliedEncryptionKey() throws IOException { + CSEKSupport csek = CSEKSupport.create(); + Key key = csek.getKey(); + + doTest( + bucket, + 16 * _1MiB - 13, + ImmutableList.of(BlobTargetOption.encryptionKey(key)), + ImmutableList.of(BlobWriteOption.encryptionKey(key)), + ImmutableList.of(BlobSourceOption.decryptionKey(key))); + } + + @Test + public void uploadingAnObjectWorks_kms() throws IOException { + CryptoKey key1 = kmsFixture.getKey1(); + doTest( + bucket, + 16 * _1MiB - 13, + ImmutableList.of(BlobTargetOption.kmsKeyName(key1.getName())), + ImmutableList.of(BlobWriteOption.kmsKeyName(key1.getName())), + ImmutableList.of()); + } + + /** + * Create an empty object, then overwrite it using a Parallel Composite Upload, then read the full + * object and verify its contents match exactly with what was written. 
+ */ + private void doTest( + BucketInfo bucket, + int objectSizeBytes, + ImmutableList overriddenCreateOptions, + ImmutableList overriddenOverwriteOptions, + ImmutableList overriddenReadOptions) + throws IOException { + + BlobTargetOption[] createOptions = + BlobTargetOption.dedupe( + ImmutableList.of(BlobTargetOption.doesNotExist()), + overriddenCreateOptions.toArray(new BlobTargetOption[0])); + BlobWriteOption[] overwriteOptions = + BlobWriteOption.dedupe( + ImmutableList.of(BlobWriteOption.generationMatch()), + overriddenOverwriteOptions.toArray(new BlobWriteOption[0])); + BlobSourceOption[] readOptions = + BlobSourceOption.dedupe( + ImmutableList.of(), overriddenReadOptions.toArray(new BlobSourceOption[0])); + + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + Blob gen1 = storage.create(info, createOptions); + + byte[] bytes = DataGenerator.rand(rand).genBytes(objectSizeBytes); + + BlobWriteSession session = storage.blobWriteSession(gen1, overwriteOptions); + + try (WritableByteChannel channel = session.open()) { + long written = channel.write(ByteBuffer.wrap(bytes)); + assertThat(written).isEqualTo(objectSizeBytes); + } + + BlobInfo result = ApiExceptions.callAndTranslateApiException(session.getResult()); + + assertThat(result.getCrc32c()).isNotNull(); + assertThat(result.getGeneration()).isNotNull(); + + byte[] actual = storage.readAllBytes(result.getBlobId(), readOptions); + + assertThat(actual).isEqualTo(bytes); + assertThat(xxd(actual)).isEqualTo(xxd(bytes)); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITQuotaProjectIdTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITQuotaProjectIdTest.java new file mode 100644 index 000000000000..69e79285881b --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITQuotaProjectIdTest.java @@ -0,0 +1,156 @@ +/* + * Copyright 2023 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assume.assumeTrue; + +import com.google.api.gax.paging.Page; +import com.google.auth.oauth2.GoogleCredentials; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.BucketFixture; +import com.google.cloud.storage.it.runner.annotations.BucketType; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.ObjectsFixture; +import java.util.stream.StreamSupport; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = Backend.PROD, + transports = {Transport.HTTP, Transport.GRPC}) +public final class ITQuotaProjectIdTest { + // a project number to a dev project which is not used for CI + // this value is the negative case, and points to a project which doesn't have access to 
the + // bucket used here. + private static final String BAD_PROJECT_ID = "954355001984"; + + @Inject public Storage storage; + + @Inject + @BucketFixture(BucketType.REQUESTER_PAYS) + public BucketInfo bucket; + + // make sure there is an object in the bucket to be listed + @Inject + @BucketFixture(BucketType.REQUESTER_PAYS) + public ObjectsFixture objectsFixture; + + private StorageOptions baseOptions; + private String projectId; + private GoogleCredentials credentials; + + @Before + public void setUp() throws Exception { + baseOptions = storage.getOptions(); + assumeTrue( + "These tests require GoogleCredentials", + baseOptions.getCredentials() instanceof GoogleCredentials); + credentials = (GoogleCredentials) baseOptions.getCredentials(); + projectId = baseOptions.getProjectId(); + } + + /* + * UserProject precedence + * 1. Method userProject Option + * 2. ServiceOptions.getQuotaProjectId() + * 3. Credentials.quota_project_id + */ + + @Test + public void fromCredentials() throws Exception { + StorageOptions build = + baseOptions.toBuilder() + .setCredentials(credentialsWithQuotaProjectId(credentials, projectId)) + .build(); + + try (Storage s = build.getService()) { + Page page = s.list(bucket.getName()); + assertPage(page); + } + } + + @Test + public void methodOptionOverCredentials() throws Exception { + StorageOptions build = + baseOptions.toBuilder() + .setCredentials(credentialsWithQuotaProjectId(credentials, BAD_PROJECT_ID)) + .build(); + + try (Storage s = build.getService()) { + Page page = s.list(bucket.getName(), BlobListOption.userProject(projectId)); + assertPage(page); + } + } + + @Test + public void fromServiceOptionParameter() throws Exception { + StorageOptions build = baseOptions.toBuilder().setQuotaProjectId(projectId).build(); + + try (Storage s = build.getService()) { + Page page = s.list(bucket.getName()); + assertPage(page); + } + } + + @Test + public void serviceOptionParameterOverCredentials() throws Exception { + StorageOptions build = + 
baseOptions.toBuilder() + .setCredentials(credentialsWithQuotaProjectId(credentials, BAD_PROJECT_ID)) + .setQuotaProjectId(projectId) + .build(); + + try (Storage s = build.getService()) { + Page page = s.list(bucket.getName()); + assertPage(page); + } + } + + @Test + public void methodOptionOverServiceOptionParameter() throws Exception { + StorageOptions build = baseOptions.toBuilder().setQuotaProjectId(BAD_PROJECT_ID).build(); + + try (Storage s = build.getService()) { + Page page = s.list(bucket.getName(), BlobListOption.userProject(projectId)); + assertPage(page); + } + } + + private void assertPage(Page page) { + boolean info1InResults = + StreamSupport.stream(page.iterateAll().spliterator(), false) + .map(Blob::getName) + .anyMatch(objectsFixture.getInfo1().getName()::equals); + assertThat(info1InResults).isTrue(); + } + + private GoogleCredentials credentialsWithQuotaProjectId( + GoogleCredentials creds, String quotaProjectId) { + return creds.toBuilder().setQuotaProjectId(quotaProjectId).build(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITReadableByteChannelBehaviorTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITReadableByteChannelBehaviorTest.java new file mode 100644 index 000000000000..b4082ed4fcbb --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITReadableByteChannelBehaviorTest.java @@ -0,0 +1,104 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.cloud.ReadChannel; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobReadSession; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.ReadProjectionConfig; +import com.google.cloud.storage.ReadProjectionConfigs; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.it.runner.registry.ObjectsFixture; +import com.google.common.io.ByteStreams; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.PROD}, + transports = {Transport.HTTP, Transport.GRPC}) +public final class ITReadableByteChannelBehaviorTest { + + @Inject public Storage storage; + @Inject public BucketInfo bucket; + @Inject public Generator generator; + @Inject public ObjectsFixture objectsFixture; + + @Test + public void eofReturnedMultipleTimes_reader() throws IOException { + BlobId id = objectsFixture.getObj512KiB().getInfo().getBlobId(); + + try (ReadChannel reader = 
storage.reader(id)) { + eofReturnedMultipleTimes_doTest(reader); + } + } + + @Test + @CrossRun.Exclude(transports = Transport.HTTP) + public void eofReturnedMultipleTimes_blobReadSession_channel() + throws ExecutionException, InterruptedException, TimeoutException, IOException { + eofReturnedMultipleTimes_doTestBlobReadSession(ReadProjectionConfigs.asChannel()); + } + + @Test + @CrossRun.Exclude(transports = Transport.HTTP) + public void eofReturnedMultipleTimes_blobReadSession_seekableChannel() + throws ExecutionException, InterruptedException, TimeoutException, IOException { + eofReturnedMultipleTimes_doTestBlobReadSession(ReadProjectionConfigs.asSeekableChannel()); + } + + private void eofReturnedMultipleTimes_doTestBlobReadSession( + ReadProjectionConfig config) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + BlobId id = objectsFixture.getObj512KiB().getInfo().getBlobId(); + + try (BlobReadSession session = storage.blobReadSession(id).get(3, TimeUnit.SECONDS)) { + try (ReadableByteChannel c = session.readAs(config)) { + eofReturnedMultipleTimes_doTest(c); + } + } + } + + private void eofReturnedMultipleTimes_doTest(ReadableByteChannel c) throws IOException { + long copy = ByteStreams.copy(c, Channels.newChannel(ByteStreams.nullOutputStream())); + assertThat(copy).isEqualTo(objectsFixture.getObj512KiB().getInfo().getSize()); + + ByteBuffer buf = ByteBuffer.allocate(8); + int i = ThreadLocalRandom.current().nextInt(3, 10); + for (int j = 0; j < i; j++) { + assertWithMessage("expected EOF " + j).that(c.read(buf)).isEqualTo(-1); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITResumableUploadTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITResumableUploadTest.java new file mode 100644 index 000000000000..f755e2e5e99e --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITResumableUploadTest.java @@ -0,0 +1,219 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BlobWriteSession; +import com.google.cloud.storage.BlobWriteSessionConfigs; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TmpFile; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.WritableByteChannel; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import 
java.util.concurrent.TimeoutException; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.PROD}, + transports = {Transport.HTTP, Transport.GRPC}) +public final class ITResumableUploadTest { + @Rule public final TemporaryFolder temp = new TemporaryFolder(); + + @Inject public Storage storage; + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + @Test + public void expectedUploadSize_chunked_doesMatch() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + doTestDoesMatch(storage); + } + + @Test + public void expectedUploadSize_chunked_doesNotMatch() throws IOException { + doTestDoesNotMatch(storage); + } + + @Test + @CrossRun.Exclude(transports = Transport.HTTP) + public void expectedUploadSize_bidi_doesMatch() throws Exception { + StorageOptions options = + storage.getOptions().toBuilder() + .setBlobWriteSessionConfig(BlobWriteSessionConfigs.bidiWrite()) + .build(); + try (Storage storage = options.getService()) { + doTestDoesMatch(storage); + } + } + + @Test + @CrossRun.Exclude(transports = Transport.HTTP) + public void expectedUploadSize_bidi_doesNotMatch() throws Exception { + StorageOptions options = + storage.getOptions().toBuilder() + .setBlobWriteSessionConfig(BlobWriteSessionConfigs.bidiWrite()) + .build(); + try (Storage storage = options.getService()) { + doTestDoesNotMatch(storage); + } + } + + @Test + public void expectedUploadSize_ignored_pcu() throws Exception { + StorageOptions options = + storage.getOptions().toBuilder() + .setBlobWriteSessionConfig(BlobWriteSessionConfigs.parallelCompositeUpload()) + .build(); + try (Storage storage = options.getService()) { + int objectContentSize = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(objectContentSize); + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + 
BlobWriteSession session = + storage.blobWriteSession(info, BlobWriteOption.expectedObjectSize(objectContentSize + 1)); + try (WritableByteChannel open = session.open()) { + open.write(ByteBuffer.wrap(bytes)); + } + + BlobInfo gen1 = session.getResult().get(5, TimeUnit.SECONDS); + assertThat(gen1.getSize()).isEqualTo(objectContentSize); + } + } + + @Test + public void expectedUploadSize_createFrom_inputStream_doesMatch() throws Exception { + StorageOptions options = + storage.getOptions().toBuilder() + .setBlobWriteSessionConfig(BlobWriteSessionConfigs.parallelCompositeUpload()) + .build(); + try (Storage storage = options.getService()) { + int objectContentSize = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(objectContentSize); + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobInfo gen1 = + storage.createFrom( + info, + new ByteArrayInputStream(bytes), + BlobWriteOption.expectedObjectSize(objectContentSize)); + assertThat(gen1.getSize()).isEqualTo(objectContentSize); + } + } + + @Test + public void expectedUploadSize_createFrom_inputStream_doesNotMatch() throws Exception { + StorageOptions options = + storage.getOptions().toBuilder() + .setBlobWriteSessionConfig(BlobWriteSessionConfigs.parallelCompositeUpload()) + .build(); + try (Storage storage = options.getService()) { + int objectContentSize = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(objectContentSize); + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + StorageException se = + assertThrows( + StorageException.class, + () -> + storage.createFrom( + info, + new ByteArrayInputStream(bytes), + BlobWriteOption.expectedObjectSize(objectContentSize + 1))); + + assertThat(se.getCode()).isEqualTo(400); + } + } + + @Test + public void expectedUploadSize_createFrom_path_doesMatch() throws IOException { + int objectContentSize = 10; + BlobInfo info = BlobInfo.newBuilder(bucket, 
generator.randomObjectName()).build(); + try (TmpFile tmpFile = + DataGenerator.base64Characters().tempFile(temp.getRoot().toPath(), objectContentSize)) { + BlobInfo gen1 = + storage.createFrom( + info, tmpFile.getPath(), BlobWriteOption.expectedObjectSize(objectContentSize)); + assertThat(gen1.getSize()).isEqualTo(objectContentSize); + } + } + + @Test + public void expectedUploadSize_createFrom_path_doesNotMatch() throws IOException { + int objectContentSize = 10; + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + try (TmpFile tmpFile = + DataGenerator.base64Characters().tempFile(temp.getRoot().toPath(), objectContentSize)) { + StorageException se = + assertThrows( + StorageException.class, + () -> + storage.createFrom( + info, + tmpFile.getPath(), + BlobWriteOption.expectedObjectSize(objectContentSize + 1))); + + assertThat(se.getCode()).isEqualTo(400); + } + } + + private void doTestDoesMatch(Storage storage) + throws IOException, InterruptedException, ExecutionException, TimeoutException { + int objectContentSize = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(objectContentSize); + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobWriteSession session = + storage.blobWriteSession(info, BlobWriteOption.expectedObjectSize(objectContentSize)); + try (WritableByteChannel open = session.open()) { + open.write(ByteBuffer.wrap(bytes)); + } + + BlobInfo gen1 = session.getResult().get(5, TimeUnit.SECONDS); + assertThat(gen1.getSize()).isEqualTo(objectContentSize); + } + + private void doTestDoesNotMatch(Storage storage) throws IOException { + int objectContentSize = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(objectContentSize); + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobWriteSession session = + storage.blobWriteSession(info, BlobWriteOption.expectedObjectSize(objectContentSize + 1)); + + WritableByteChannel open 
= session.open(); + open.write(ByteBuffer.wrap(bytes)); + StorageException se = assertThrows(StorageException.class, open::close); + + assertThat(se.getCode()).isEqualTo(400); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITServiceAccountTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITServiceAccountTest.java new file mode 100644 index 000000000000..2141334c9ad1 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITServiceAccountTest.java @@ -0,0 +1,50 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.storage.ServiceAccount; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.PROD}, + transports = {Transport.HTTP}) +public class ITServiceAccountTest { + + private static final String SERVICE_ACCOUNT_EMAIL_SUFFIX = + "@gs-project-accounts.iam.gserviceaccount.com"; + + @Inject public Storage storage; + + @Test + public void testGetServiceAccount() { + String projectId = storage.getOptions().getProjectId(); + ServiceAccount serviceAccount = storage.getServiceAccount(projectId); + assertNotNull(serviceAccount); + assertTrue(serviceAccount.getEmail().endsWith(SERVICE_ACCOUNT_EMAIL_SUFFIX)); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITSignedUrlTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITSignedUrlTest.java new file mode 100644 index 000000000000..b3acc25c700f --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITSignedUrlTest.java @@ -0,0 +1,290 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import com.google.auth.ServiceAccountSigner; +import com.google.cloud.ReadChannel; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.HttpMethod; +import com.google.cloud.storage.PostPolicyV4; +import com.google.cloud.storage.PostPolicyV4.PostFieldsV4; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.SignUrlOption; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableMap; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import 
java.io.InputStream; +import java.net.URI; +import java.net.URL; +import java.net.URLConnection; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.mime.MultipartEntityBuilder; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +public class ITSignedUrlTest { + + private static final byte[] BLOB_BYTE_CONTENT = {0xD, 0xE, 0xA, 0xD}; + private static final String BLOB_STRING_CONTENT = "Hello Google Cloud Storage!"; + + @Inject + @StorageFixture(Transport.HTTP) + public Storage storage; + + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + private String bucketName; + + @Before + public void setUp() throws Exception { + bucketName = bucket.getName(); + } + + @Test + public void testGetSignedUrl() throws IOException { + if (storage.getOptions().getCredentials() != null) { + assumeTrue(storage.getOptions().getCredentials() instanceof ServiceAccountSigner); + } + String blobName = generator.randomObjectName() + "/with/slashes/and?special=!#$&'()*+,:;=?@[]"; + BlobInfo blob = BlobInfo.newBuilder(bucketName, blobName).build(); + Blob remoteBlob = storage.create(blob, BLOB_BYTE_CONTENT); + assertNotNull(remoteBlob); + for (Storage.SignUrlOption urlStyle : + Arrays.asList( + Storage.SignUrlOption.withPathStyle(), + Storage.SignUrlOption.withVirtualHostedStyle())) { + URL url = storage.signUrl(blob, 1, TimeUnit.HOURS, urlStyle); + URLConnection connection = url.openConnection(); + byte[] readBytes = new byte[BLOB_BYTE_CONTENT.length]; + try (InputStream 
responseStream = connection.getInputStream()) { + assertEquals(BLOB_BYTE_CONTENT.length, responseStream.read(readBytes)); + assertArrayEquals(BLOB_BYTE_CONTENT, readBytes); + } + } + } + + @Test + public void testGetV2SignedUrlWithAddlQueryParam() throws IOException { + if (storage.getOptions().getCredentials() != null) { + assumeTrue(storage.getOptions().getCredentials() instanceof ServiceAccountSigner); + } + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucketName, blobName).build(); + Blob remoteBlob = storage.create(blob, BLOB_BYTE_CONTENT); + assertNotNull(remoteBlob); + for (Storage.SignUrlOption urlStyle : + Arrays.asList( + Storage.SignUrlOption.withPathStyle(), + Storage.SignUrlOption.withVirtualHostedStyle())) { + String generationStr = remoteBlob.getGeneration().toString(); + URL url = + storage.signUrl( + blob, + 1, + TimeUnit.HOURS, + urlStyle, + Storage.SignUrlOption.withV2Signature(), + Storage.SignUrlOption.withQueryParams(ImmutableMap.of("generation", generationStr))); + // Finally, verify that the URL works and we can get the object as expected: + URLConnection connection = url.openConnection(); + byte[] readBytes = new byte[BLOB_BYTE_CONTENT.length]; + try (InputStream responseStream = connection.getInputStream()) { + assertEquals(BLOB_BYTE_CONTENT.length, responseStream.read(readBytes)); + assertArrayEquals(BLOB_BYTE_CONTENT, readBytes); + } + } + } + + @Test + public void testPostSignedUrl() throws IOException { + if (storage.getOptions().getCredentials() != null) { + assumeTrue(storage.getOptions().getCredentials() instanceof ServiceAccountSigner); + } + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucketName, blobName).build(); + assertNotNull(storage.create(blob)); + for (Storage.SignUrlOption urlStyle : + Arrays.asList( + Storage.SignUrlOption.withPathStyle(), + Storage.SignUrlOption.withVirtualHostedStyle())) { + + URL url = + storage.signUrl( + blob, 1, 
TimeUnit.HOURS, Storage.SignUrlOption.httpMethod(HttpMethod.POST), urlStyle); + URLConnection connection = url.openConnection(); + connection.setDoOutput(true); + connection.connect(); + Blob remoteBlob = storage.get(bucketName, blobName); + assertNotNull(remoteBlob); + assertEquals(blob.getBucket(), remoteBlob.getBucket()); + assertEquals(blob.getName(), remoteBlob.getName()); + } + } + + @Test + public void testV4SignedUrl() throws IOException { + if (storage.getOptions().getCredentials() != null) { + assumeTrue(storage.getOptions().getCredentials() instanceof ServiceAccountSigner); + } + + String blobName = generator.randomObjectName() + "/with/slashes/and?special=!#$&'()*+,:;=?@[]"; + BlobInfo blob = BlobInfo.newBuilder(bucketName, blobName).build(); + Blob remoteBlob = storage.create(blob, BLOB_BYTE_CONTENT); + assertNotNull(remoteBlob); + for (Storage.SignUrlOption urlStyle : + Arrays.asList( + Storage.SignUrlOption.withPathStyle(), + Storage.SignUrlOption.withVirtualHostedStyle())) { + + URL url = + storage.signUrl( + blob, 1, TimeUnit.HOURS, Storage.SignUrlOption.withV4Signature(), urlStyle); + URLConnection connection = url.openConnection(); + byte[] readBytes = new byte[BLOB_BYTE_CONTENT.length]; + try (InputStream responseStream = connection.getInputStream()) { + assertEquals(BLOB_BYTE_CONTENT.length, responseStream.read(readBytes)); + assertArrayEquals(BLOB_BYTE_CONTENT, readBytes); + } + } + } + + @Test + @Ignore("TODO: fix b/468377909 to enable test again") + public void testSignedPostPolicyV4() throws Exception { + PostFieldsV4 fields = PostFieldsV4.newBuilder().setAcl("public-read").build(); + + BlobId id = BlobId.of(bucketName, generator.randomObjectName()); + + PostPolicyV4 policy = + storage.generateSignedPostPolicyV4( + BlobInfo.newBuilder(id).build(), 7, TimeUnit.DAYS, fields); + + String content = "Hello, World!"; + try (CloseableHttpClient client = HttpClientBuilder.create().build()) { + HttpPost request = new HttpPost(policy.getUrl()); + 
MultipartEntityBuilder builder = MultipartEntityBuilder.create(); + + for (Map.Entry entry : policy.getFields().entrySet()) { + builder.addTextBody(entry.getKey(), entry.getValue()); + } + builder.addBinaryBody( + "file", + new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)), + ContentType.APPLICATION_OCTET_STREAM, + id.getName()); + request.setEntity(builder.build()); + client.execute(request); + } + + Blob blob = storage.get(id); + byte[] actualContent = blob.getContent(); + String actual = new String(actualContent, StandardCharsets.UTF_8); + assertEquals(content, actual); + } + + @Test + public void testUploadUsingSignedURL() throws Exception { + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucketName, blobName).build(); + assertNotNull(storage.create(blob)); + Map extensionHeaders = new HashMap<>(); + extensionHeaders.put("x-goog-resumable", "start"); + for (Storage.SignUrlOption urlStyle : + Arrays.asList( + Storage.SignUrlOption.withPathStyle(), + Storage.SignUrlOption.withVirtualHostedStyle())) { + URL signUrl = + storage.signUrl( + blob, + 1, + TimeUnit.HOURS, + Storage.SignUrlOption.httpMethod(HttpMethod.POST), + Storage.SignUrlOption.withExtHeaders(extensionHeaders), + urlStyle); + byte[] bytesArrayToUpload = BLOB_STRING_CONTENT.getBytes(); + Storage unauthenticatedStorage = StorageOptions.getUnauthenticatedInstance().getService(); + try (WriteChannel writer = unauthenticatedStorage.writer(signUrl)) { + writer.write(ByteBuffer.wrap(bytesArrayToUpload, 0, bytesArrayToUpload.length)); + } + + int lengthOfDownLoadBytes = -1; + BlobId blobId = BlobId.of(bucketName, blobName); + Blob blobToRead = storage.get(blobId); + try (ReadChannel reader = blobToRead.reader()) { + ByteBuffer bytes = ByteBuffer.allocate(64 * 1024); + lengthOfDownLoadBytes = reader.read(bytes); + } + + assertEquals(bytesArrayToUpload.length, lengthOfDownLoadBytes); + assertTrue(storage.delete(bucketName, blobName)); + } + } + + @Test + 
public void generatingSignedURLForHttpProducesTheCorrectScheme() throws Exception { + StorageOptions options = + storage.getOptions().toBuilder().setHost("http://[::1]").setProjectId("no-project").build(); + try (Storage s = options.getService()) { + BlobInfo info = BlobInfo.newBuilder("no-bucket", "no-object").build(); + URL urlV2 = s.signUrl(info, 5, TimeUnit.MINUTES, SignUrlOption.withV2Signature()); + URL urlV4 = s.signUrl(info, 5, TimeUnit.MINUTES, SignUrlOption.withV4Signature()); + URI uriV2 = urlV2.toURI(); + URI uriV4 = urlV4.toURI(); + assertAll( + () -> assertThat(uriV2.getScheme()).isEqualTo("http"), + () -> assertThat(uriV2.getHost()).isEqualTo("[::1]"), + () -> assertThat(uriV2.getPath()).contains("no-bucket/no-object"), + () -> assertThat(uriV4.getScheme()).isEqualTo("http"), + () -> assertThat(uriV4.getHost()).isEqualTo("[::1]"), + () -> assertThat(uriV4.getPath()).contains("no-bucket/no-object")); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITStorageOptionsTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITStorageOptionsTest.java new file mode 100644 index 000000000000..0d8114a7fb2d --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITStorageOptionsTest.java @@ -0,0 +1,108 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static org.junit.Assume.assumeTrue; + +import com.google.auth.Credentials; +import com.google.auth.oauth2.GoogleCredentials; +import com.google.auth.oauth2.OAuth2Credentials; +import com.google.cloud.NoCredentials; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TestUtils; +import com.google.cloud.storage.it.ITStorageOptionsTest.CredentialsParameters; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Parameterized; +import com.google.cloud.storage.it.runner.annotations.Parameterized.Parameter; +import com.google.cloud.storage.it.runner.annotations.Parameterized.ParametersProvider; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.common.collect.ImmutableList; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +@Parameterized(CredentialsParameters.class) +public final class ITStorageOptionsTest { + + public static final class CredentialsParameters implements ParametersProvider { + + @Override + public ImmutableList parameters() { + return ImmutableList.of( + NoCredentials.getInstance(), + GoogleCredentials.create(/* accessToken= */ null), + OAuth2Credentials.create(null)); + } + } + + @Parameter public Credentials credentials; + + @Test + public void clientShouldConstructCleanly_http() throws Exception { + StorageOptions options = StorageOptions.http().setCredentials(credentials).build(); + doTest(options); + } + + @Test + public void clientShouldConstructCleanly_grpc() throws Exception { + StorageOptions options = + StorageOptions.grpc() + .setCredentials(credentials) + .setAttemptDirectPath(false) + .setEnableGrpcClientMetrics(false) + .build(); + doTest(options); + } + + @Test + public void 
clientShouldConstructCleanly_directPath() throws Exception { + assumeTrue( + "Unable to determine environment can access directPath", TestUtils.isOnComputeEngine()); + StorageOptions options = + StorageOptions.grpc() + .setCredentials(credentials) + .setAttemptDirectPath(true) + .setEnableGrpcClientMetrics(false) + .build(); + doTest(options); + } + + @Test + public void lackOfProjectIdDoesNotPreventConstruction_http() throws Exception { + StorageOptions options = StorageOptions.http().setCredentials(credentials).build(); + doTest(options); + } + + @Test + public void lackOfProjectIdDoesNotPreventConstruction_grpc() throws Exception { + StorageOptions options = + StorageOptions.grpc() + .setCredentials(credentials) + .setAttemptDirectPath(false) + .setEnableGrpcClientMetrics(false) + .build(); + doTest(options); + } + + private static void doTest(StorageOptions options) throws Exception { + //noinspection EmptyTryBlock + try (Storage ignore = options.getService()) {} + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITStorageReadChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITStorageReadChannelTest.java new file mode 100644 index 000000000000..ced70f71276a --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITStorageReadChannelTest.java @@ -0,0 +1,199 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.PackagePrivateMethodWorkarounds.getBlobInfoFromReadChannelFunction; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.gzipBytes; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFuture; +import com.google.cloud.ReadChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.it.runner.registry.ObjectsFixture; +import com.google.common.io.ByteStreams; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.WritableByteChannel; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + 
backends = Backend.PROD, + transports = {Transport.HTTP, Transport.GRPC}) +public final class ITStorageReadChannelTest { + + @Inject public Storage storage; + + @Inject public BucketInfo bucket; + + @Inject public Generator generator; + + @Inject public ObjectsFixture objectsFixture; + + @Test + public void storageReadChannel_getObject_returns() throws Exception { + int _512KiB = 512 * 1024; + int _1MiB = 1024 * 1024; + + final BlobInfo info; + ChecksummedTestContent content; + { + byte[] uncompressedBytes = DataGenerator.base64Characters().genBytes(_512KiB); + byte[] gzipBytes = gzipBytes(uncompressedBytes); + content = ChecksummedTestContent.of(gzipBytes); + BlobInfo tmp = + BlobInfo.newBuilder(bucket, generator.randomObjectName()) + // define an object with explicit content type and encoding. + // JSON and gRPC have differing default behavior returning these values if they are + // either undefined, or match HTTP defaults. + .setContentType("text/plain") + .setContentEncoding("gzip") + .build(); + + Blob gen1 = storage.create(tmp, content.getBytes(), BlobTargetOption.doesNotExist()); + info = gen1.asBlobInfo(); + } + + try (ReadChannel c = + storage.reader(info.getBlobId(), BlobSourceOption.shouldReturnRawInputStream(true))) { + + ApiFuture infoFuture = getBlobInfoFromReadChannelFunction(c); + + ByteBuffer buf = ByteBuffer.allocate(_1MiB); + c.read(buf); + String actual = xxd(buf); + String expected = xxd(content.getBytes()); + assertThat(actual).isEqualTo(expected); + + BlobInfo blobInfo = infoFuture.get(3, TimeUnit.SECONDS); + assertAll( + () -> equalForField(blobInfo, info, BlobInfo::getName), + () -> equalForField(blobInfo, info, BlobInfo::getBucket), + () -> equalForField(blobInfo, info, BlobInfo::getGeneration), + () -> equalForField(blobInfo, info, BlobInfo::getMetageneration), + () -> equalForField(blobInfo, info, BlobInfo::getSize), + () -> equalForField(blobInfo, info, BlobInfo::getContentType), + () -> equalForField(blobInfo, info, 
BlobInfo::getContentEncoding)); + } + } + + @Test + public void storageReadChannel_shouldAllowDisablingBufferingBySettingChunkSize_lteq0() + throws Exception { + int _512KiB = 512 * 1024; + int _1MiB = 1024 * 1024; + + final BlobInfo info; + byte[] uncompressedBytes = DataGenerator.base64Characters().genBytes(_512KiB); + { + BlobInfo tmp = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + Blob gen1 = storage.create(tmp, uncompressedBytes, BlobTargetOption.doesNotExist()); + info = gen1.asBlobInfo(); + } + + try (ReadChannel c = storage.reader(info.getBlobId())) { + ApiFuture infoFuture = getBlobInfoFromReadChannelFunction(c); + c.setChunkSize(0); + + ByteBuffer buf = ByteBuffer.allocate(_1MiB); + // Because this is unbuffered, the underlying channel will not necessarily fill up the buf + // in a single read call. Repeatedly read until full or EOF. + int read = fillFrom(buf, c); + assertThat(read).isEqualTo(_512KiB); + String actual = xxd(buf); + String expected = xxd(uncompressedBytes); + assertThat(actual).isEqualTo(expected); + BlobInfo blobInfo = infoFuture.get(3, TimeUnit.SECONDS); + assertThat(blobInfo.getBlobId()).isEqualTo(info.getBlobId()); + } + } + + @Test + public void storageReadChannel_attemptToReadZeroBytes() throws IOException { + BlobInfo info1 = objectsFixture.getInfo1(); + try (ReadChannel r = storage.reader(info1.getBlobId()); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + WritableByteChannel w = Channels.newChannel(baos)) { + r.setChunkSize(10); + r.seek(10); + r.limit(10); + + ByteStreams.copy(r, w); + assertThat(baos.toByteArray()).isEmpty(); + } + } + + @Test + public void storageReadChannel_getObject_404() { + BlobId id = BlobId.of(bucket.getName(), generator.randomObjectName()); + + try (ReadChannel c = storage.reader(id)) { + ApiFuture infoFuture = getBlobInfoFromReadChannelFunction(c); + IOException ioException = + assertThrows(IOException.class, () -> c.read(ByteBuffer.allocate(10))); + 
assertThat(ioException).hasCauseThat().isInstanceOf(StorageException.class); + ExecutionException ee = + assertThrows(ExecutionException.class, () -> infoFuture.get(3, TimeUnit.SECONDS)); + assertThat(ee).hasCauseThat().isInstanceOf(StorageException.class); + StorageException cause = (StorageException) ee.getCause(); + assertThat(cause.getCode()).isEqualTo(404); + } + } + + private static void equalForField(T actual, T expected, Function f) { + F aF = f.apply(actual); + F eF = f.apply(expected); + assertThat(aF).isEqualTo(eF); + } + + static int fillFrom(ByteBuffer buf, ReadableByteChannel c) throws IOException { + int total = 0; + while (buf.hasRemaining()) { + int read = c.read(buf); + if (read != -1) { + total += read; + } else { + break; + } + } + return total; + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITTransferManagerTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITTransferManagerTest.java new file mode 100644 index 000000000000..e5794fa81992 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITTransferManagerTest.java @@ -0,0 +1,739 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.ParallelCompositeUploadBlobWriteSessionConfig.PartNamingStrategy; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TestUtils; +import com.google.cloud.storage.TmpFile; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.transfermanager.BucketNameMismatchException; +import com.google.cloud.storage.transfermanager.DownloadJob; +import com.google.cloud.storage.transfermanager.DownloadResult; +import com.google.cloud.storage.transfermanager.ParallelDownloadConfig; +import com.google.cloud.storage.transfermanager.ParallelUploadConfig; +import com.google.cloud.storage.transfermanager.PathTraversalBlockedException; +import com.google.cloud.storage.transfermanager.TransferManager; +import com.google.cloud.storage.transfermanager.TransferManagerConfig; +import com.google.cloud.storage.transfermanager.TransferManagerConfigTestingInstances; +import com.google.cloud.storage.transfermanager.TransferStatus; +import com.google.cloud.storage.transfermanager.UploadJob; +import com.google.cloud.storage.transfermanager.UploadResult; +import com.google.common.collect.ImmutableList; +import 
com.google.common.collect.ImmutableMap; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + transports = {Transport.HTTP}, + backends = {Backend.PROD}) +public class ITTransferManagerTest { + + private static final Comparator comp = + Comparator.comparing(info -> info.getBlobId().getName()); + private static final Comparator comp2 = + Comparator.comparing(DownloadResult::getInput, comp); + + private static final long CHUNK_THRESHOLD = 2L * 1024 * 1024; + + @Inject public Storage storage; + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + @Rule public final TemporaryFolder tmpDir = new TemporaryFolder(); + + private Path baseDir; + private static final int objectContentSize = 64; + private List blobs = new ArrayList<>(); + + @Before + public void setUp() throws Exception { + baseDir = tmpDir.getRoot().toPath(); + BlobInfo blobInfo1 = + BlobInfo.newBuilder( + BlobId.of( + bucket.getName(), + String.format(Locale.US, "%s/src", generator.randomObjectName()))) + .build(); + BlobInfo blobInfo2 = + BlobInfo.newBuilder( + BlobId.of( + bucket.getName(), + String.format(Locale.US, "%s/src", generator.randomObjectName()))) + .build(); + BlobInfo blobInfoChunking = + BlobInfo.newBuilder( + BlobId.of( + bucket.getName(), + String.format(Locale.US, "%s/src", 
generator.randomObjectName()))) + .build(); + Collections.addAll(blobs, blobInfo1, blobInfo2); + ByteBuffer content = DataGenerator.base64Characters().genByteBuffer(108); + for (BlobInfo blob : blobs) { + try (WriteChannel writeChannel = storage.writer(blob)) { + writeChannel.write(content); + } + } + // We make this size just a bit bigger than the threshold. + long size = CHUNK_THRESHOLD + 100L; + ByteBuffer chunkedContent = DataGenerator.base64Characters().genByteBuffer(size); + try (WriteChannel writeChannel = storage.writer(blobInfoChunking)) { + writeChannel.write(chunkedContent); + } + blobs.add(blobInfoChunking); + } + + @Test + public void uploadFiles() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()); + try (TransferManager transferManager = config.getService(); + TmpFile tmpFile = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize); + TmpFile tmpFile1 = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize); + TmpFile tmpFile2 = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize)) { + List files = + ImmutableList.of(tmpFile.getPath(), tmpFile1.getPath(), tmpFile2.getPath()); + String bucketName = bucket.getName(); + ParallelUploadConfig parallelUploadConfig = + ParallelUploadConfig.newBuilder().setBucketName(bucketName).build(); + UploadJob job = transferManager.uploadFiles(files, parallelUploadConfig); + List uploadResults = job.getUploadResults(); + assertThat(uploadResults).hasSize(3); + assertThat( + uploadResults.stream() + .filter(result -> result.getStatus() == TransferStatus.SUCCESS) + .collect(Collectors.toList())) + .hasSize(3); + } + } + + @Test + public void uploadFilesPartNaming() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()).toBuilder() + .setAllowParallelCompositeUpload(true) + .setPerWorkerBufferSize(128 * 1024) + 
.setParallelCompositeUploadPartNamingStrategy(PartNamingStrategy.prefix("not-root")) + .build(); + long size = CHUNK_THRESHOLD + 100L; + try (TransferManager transferManager = config.getService(); + TmpFile tmpFile = DataGenerator.base64Characters().tempFile(baseDir, size)) { + ParallelUploadConfig parallelUploadConfig = + ParallelUploadConfig.newBuilder().setBucketName(bucket.getName()).build(); + UploadJob job = + transferManager.uploadFiles( + Collections.singletonList(tmpFile.getPath()), parallelUploadConfig); + List uploadResults = job.getUploadResults(); + assertThat(uploadResults.get(0).getStatus()).isEqualTo(TransferStatus.SUCCESS); + } + } + + @Test + public void uploadFilesWithOpts() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()); + try (TransferManager transferManager = config.getService(); + TmpFile tmpFile = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize); + TmpFile tmpFile1 = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize); + TmpFile tmpFile2 = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize)) { + List files = + ImmutableList.of(tmpFile.getPath(), tmpFile1.getPath(), tmpFile2.getPath()); + String bucketName = bucket.getName(); + ParallelUploadConfig parallelUploadConfig = + ParallelUploadConfig.newBuilder() + .setBucketName(bucketName) + .setWriteOptsPerRequest(Collections.singletonList(BlobWriteOption.doesNotExist())) + .build(); + UploadJob job = transferManager.uploadFiles(files, parallelUploadConfig); + List uploadResults = job.getUploadResults(); + assertThat(uploadResults).hasSize(3); + assertThat( + uploadResults.stream() + .filter(result -> result.getStatus() == TransferStatus.SUCCESS) + .collect(Collectors.toList())) + .hasSize(3); + } + } + + @Test + public void uploadFilesOneFailure() throws Exception { + TransferManagerConfig config = + 
TransferManagerConfigTestingInstances.defaults(storage.getOptions()); + try (TransferManager transferManager = config.getService(); + TmpFile tmpFile = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize); + TmpFile tmpFile1 = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize); + TmpFile tmpFile2 = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize)) { + List files = + ImmutableList.of( + tmpFile.getPath(), + tmpFile1.getPath(), + tmpFile2.getPath(), + Paths.get("this-file-does-not-exist.txt")); + String bucketName = bucket.getName(); + ParallelUploadConfig parallelUploadConfig = + ParallelUploadConfig.newBuilder().setBucketName(bucketName).build(); + UploadJob job = transferManager.uploadFiles(files, parallelUploadConfig); + List uploadResults = job.getUploadResults(); + assertThat(uploadResults).hasSize(4); + assertThat( + uploadResults.stream() + .filter(x -> x.getStatus() == TransferStatus.FAILED_TO_FINISH) + .collect(Collectors.toList())) + .hasSize(1); + assertThat( + uploadResults.stream() + .filter(result -> result.getStatus() == TransferStatus.SUCCESS) + .collect(Collectors.toList())) + .hasSize(3); + } + } + + @Test + public void uploadNonexistentBucket() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()).toBuilder().build(); + String bucketName = bucket.getName() + "-does-not-exist"; + try (TransferManager transferManager = config.getService(); + TmpFile tmpFile = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize)) { + List files = ImmutableList.of(tmpFile.getPath()); + ParallelUploadConfig parallelUploadConfig = + ParallelUploadConfig.newBuilder().setBucketName(bucketName).build(); + UploadJob job = transferManager.uploadFiles(files, parallelUploadConfig); + List uploadResults = job.getUploadResults(); + assertThat(uploadResults.get(0).getStatus()).isEqualTo(TransferStatus.FAILED_TO_FINISH); + 
assertThat(uploadResults.get(0).getException()).isInstanceOf(StorageException.class); + } + } + + @Test + public void uploadNonexistentFile() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()).toBuilder().build(); + String bucketName = bucket.getName(); + try (TransferManager transferManager = config.getService()) { + List files = ImmutableList.of(Paths.get("this-file-does-not-exist.txt")); + ParallelUploadConfig parallelUploadConfig = + ParallelUploadConfig.newBuilder().setBucketName(bucketName).build(); + UploadJob job = transferManager.uploadFiles(files, parallelUploadConfig); + List uploadResults = job.getUploadResults(); + assertThat(uploadResults.get(0).getStatus()).isEqualTo(TransferStatus.FAILED_TO_FINISH); + assertThat(uploadResults.get(0).getException()).isInstanceOf(NoSuchFileException.class); + } + } + + @Test + public void uploadFailsSkipIfExists() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()).toBuilder().build(); + String bucketName = bucket.getName(); + try (TransferManager transferManager = config.getService(); + TmpFile tmpFile = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize)) { + ParallelUploadConfig parallelUploadConfig = + ParallelUploadConfig.newBuilder().setBucketName(bucketName).setSkipIfExists(true).build(); + UploadJob jobInitUpload = + transferManager.uploadFiles(ImmutableList.of(tmpFile.getPath()), parallelUploadConfig); + List uploadResults = jobInitUpload.getUploadResults(); + assertThat(uploadResults.get(0).getStatus()).isEqualTo(TransferStatus.SUCCESS); + UploadJob failedSecondUpload = + transferManager.uploadFiles(ImmutableList.of(tmpFile.getPath()), parallelUploadConfig); + List failedResult = failedSecondUpload.getUploadResults(); + assertThat(failedResult.get(0).getStatus()).isEqualTo(TransferStatus.SKIPPED); + } + } + + @Test + public void 
uploadSkipIfExistsGenerationOverride() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()).toBuilder().build(); + String bucketName = bucket.getName(); + try (TransferManager transferManager = config.getService(); + TmpFile tmpFile = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize)) { + ParallelUploadConfig parallelUploadConfig = + ParallelUploadConfig.newBuilder() + .setBucketName(bucketName) + .setSkipIfExists(true) + .setWriteOptsPerRequest(ImmutableList.of(BlobWriteOption.generationMatch(5L))) + .build(); + assertThat(parallelUploadConfig.getWriteOptsPerRequest()).hasSize(1); + UploadJob jobInitUpload = + transferManager.uploadFiles(ImmutableList.of(tmpFile.getPath()), parallelUploadConfig); + List uploadResults = jobInitUpload.getUploadResults(); + assertThat(uploadResults.get(0).getStatus()).isEqualTo(TransferStatus.SUCCESS); + UploadJob failedSecondUpload = + transferManager.uploadFiles(ImmutableList.of(tmpFile.getPath()), parallelUploadConfig); + List failedResult = failedSecondUpload.getUploadResults(); + assertThat(failedResult.get(0).getStatus()).isEqualTo(TransferStatus.SKIPPED); + } + } + + @Test + public void downloadBlobs() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()); + try (TransferManager transferManager = config.getService()) { + String bucketName = bucket.getName(); + ParallelDownloadConfig parallelDownloadConfig = + ParallelDownloadConfig.newBuilder() + .setBucketName(bucketName) + .setDownloadDirectory(baseDir) + .build(); + DownloadJob job = transferManager.downloadBlobs(blobs, parallelDownloadConfig); + List downloadResults = job.getDownloadResults(); + try { + assertThat(downloadResults).hasSize(3); + } finally { + cleanUpFiles(downloadResults); + } + } + } + + @Test + public void downloadBlobsAllowChunked() throws Exception { + TransferManagerConfig config = + 
TransferManagerConfigTestingInstances.defaults(storage.getOptions()).toBuilder() + .setAllowDivideAndConquerDownload(true) + .setPerWorkerBufferSize(128 * 1024) + .build(); + try (TransferManager transferManager = config.getService()) { + String bucketName = bucket.getName(); + ParallelDownloadConfig parallelDownloadConfig = + ParallelDownloadConfig.newBuilder() + .setBucketName(bucketName) + .setDownloadDirectory(baseDir) + .build(); + DownloadJob job = transferManager.downloadBlobs(blobs, parallelDownloadConfig); + List downloadResults = job.getDownloadResults(); + assertThat(downloadResults).hasSize(3); + + List expectedContents = + blobs.stream() + .sorted(comp) + .map(BlobInfo::getBlobId) + .map(storage::readAllBytes) + .map(TestUtils::xxd) + .collect(Collectors.toList()); + + List actualContents = + downloadResults.stream() + .sorted(comp2) + .map(DownloadResult::getOutputDestination) + .map(ITTransferManagerTest::readAllPathBytes) + .map(TestUtils::xxd) + .collect(Collectors.toList()); + + try { + assertThat(actualContents).isEqualTo(expectedContents); + } finally { + cleanUpFiles(downloadResults); + } + } + } + + @Test + public void uploadFilesAllowPCU() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()).toBuilder() + .setAllowParallelCompositeUpload(true) + .setPerWorkerBufferSize(128 * 1024) + .build(); + long size = CHUNK_THRESHOLD + 100L; + try (TransferManager transferManager = config.getService(); + TmpFile tmpFile = DataGenerator.base64Characters().tempFile(baseDir, size)) { + ParallelUploadConfig parallelUploadConfig = + ParallelUploadConfig.newBuilder().setBucketName(bucket.getName()).build(); + UploadJob job = + transferManager.uploadFiles( + Collections.singletonList(tmpFile.getPath()), parallelUploadConfig); + List uploadResults = job.getUploadResults(); + assertThat(uploadResults.get(0).getStatus()).isEqualTo(TransferStatus.SUCCESS); + } + } + + @Test + public void 
uploadFilesAllowMultiplePCUAndSmallerFiles() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()).toBuilder() + .setAllowParallelCompositeUpload(true) + .setPerWorkerBufferSize(128 * 1024) + .build(); + long largeFileSize = CHUNK_THRESHOLD + 100L; + long smallFileSize = CHUNK_THRESHOLD - 100L; + try (TransferManager transferManager = config.getService(); + TmpFile tmpFile = DataGenerator.base64Characters().tempFile(baseDir, largeFileSize); + TmpFile tmpfile2 = DataGenerator.base64Characters().tempFile(baseDir, largeFileSize); + TmpFile tmpFile3 = DataGenerator.base64Characters().tempFile(baseDir, smallFileSize)) { + ParallelUploadConfig parallelUploadConfig = + ParallelUploadConfig.newBuilder().setBucketName(bucket.getName()).build(); + List files = + ImmutableList.of(tmpFile.getPath(), tmpfile2.getPath(), tmpFile3.getPath()); + UploadJob job = transferManager.uploadFiles(files, parallelUploadConfig); + List uploadResults = job.getUploadResults(); + assertThat(uploadResults).hasSize(3); + assertThat( + uploadResults.stream() + .filter(result -> result.getStatus() == TransferStatus.SUCCESS) + .collect(Collectors.toList())) + .hasSize(3); + } + } + + @Test + public void downloadNonexistentBucket() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()); + try (TransferManager transferManager = config.getService()) { + String bucketName = bucket.getName() + "-does-not-exist"; + ParallelDownloadConfig parallelDownloadConfig = + ParallelDownloadConfig.newBuilder() + .setBucketName(bucketName) + .setDownloadDirectory(baseDir) + .build(); + DownloadJob job = transferManager.downloadBlobs(blobs, parallelDownloadConfig); + List downloadResults = job.getDownloadResults(); + List failedToStart = + downloadResults.stream() + .filter(x -> x.getStatus() == TransferStatus.FAILED_TO_START) + .collect(Collectors.toList()); + 
assertThat(failedToStart).hasSize(3); + } + } + + @Test + public void downloadBlobsChunkedFail() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()).toBuilder() + .setAllowDivideAndConquerDownload(true) + .setPerWorkerBufferSize(128 * 1024) + .build(); + try (TransferManager transferManager = config.getService()) { + String bucketName = bucket.getName() + "-does-not-exist"; + ParallelDownloadConfig parallelDownloadConfig = + ParallelDownloadConfig.newBuilder() + .setBucketName(bucketName) + .setDownloadDirectory(baseDir) + .build(); + DownloadJob job = transferManager.downloadBlobs(blobs, parallelDownloadConfig); + List downloadResults = job.getDownloadResults(); + assertThat(downloadResults).hasSize(3); + List failedToStart = + downloadResults.stream() + .filter(x -> x.getStatus() == TransferStatus.FAILED_TO_START) + .collect(Collectors.toList()); + assertThat(failedToStart).hasSize(3); + } + } + + @Test + public void downloadBlobsPreconditionFailure() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()).toBuilder() + .setAllowDivideAndConquerDownload(true) + .setPerWorkerBufferSize(128 * 1024) + .build(); + try (TransferManager transferManager = config.getService()) { + ParallelDownloadConfig parallelDownloadConfig = + ParallelDownloadConfig.newBuilder() + .setBucketName(bucket.getName()) + .setDownloadDirectory(baseDir) + .setOptionsPerRequest(ImmutableList.of(BlobSourceOption.generationMatch(-1))) + .build(); + DownloadJob job = transferManager.downloadBlobs(blobs, parallelDownloadConfig); + List downloadResults = job.getDownloadResults(); + assertThat(downloadResults).hasSize(3); + List failedToStart = + downloadResults.stream() + .filter(x -> x.getStatus() == TransferStatus.FAILED_TO_START) + .collect(Collectors.toList()); + assertThat(failedToStart).hasSize(3); + } + } + + @Test + public void downloadBlobsOneFailure() 
throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()); + try (TransferManager transferManager = config.getService()) { + String bucketName = bucket.getName(); + ParallelDownloadConfig parallelDownloadConfig = + ParallelDownloadConfig.newBuilder() + .setBucketName(bucketName) + .setDownloadDirectory(baseDir) + .build(); + List downloadBlobs = blobs; + BlobInfo nonexistentBlob = + BlobInfo.newBuilder( + BlobId.of( + bucket.getName(), + String.format(Locale.US, "%s/src", generator.randomObjectName()))) + .build(); + downloadBlobs.add(nonexistentBlob); + DownloadJob job = transferManager.downloadBlobs(blobs, parallelDownloadConfig); + List downloadResults = job.getDownloadResults(); + try { + assertThat(downloadResults).hasSize(4); + assertThat( + downloadResults.stream() + .filter(res -> res.getStatus() == TransferStatus.FAILED_TO_START) + .collect(Collectors.toList())) + .hasSize(1); + } finally { + cleanUpFiles( + downloadResults.stream() + .filter(res -> res.getStatus() == TransferStatus.SUCCESS) + .collect(Collectors.toList())); + } + } + } + + @Test + public void uploadChangePrefix() throws Exception { + try (TmpFile tmpFile1 = DataGenerator.base64Characters().tempFile(baseDir, 373); + TmpFile tmpFile2 = + DataGenerator.base64Characters().tempFile(baseDir, 2 * 1024 * 1024 + 13); + TransferManager tm = + TransferManagerConfig.newBuilder() + .setMaxWorkers(1) + .setPerWorkerBufferSize(4 * 1024 * 1024) + .setAllowDivideAndConquerDownload(false) + .setAllowParallelCompositeUpload(false) + .setStorageOptions(storage.getOptions()) + .build() + .getService()) { + + String prefix = "asdfasdf"; + ImmutableMap<@NonNull String, @Nullable String> metadata = ImmutableMap.of("k", "v"); + String contentType = "text/plain;charset=utf-8"; + ParallelUploadConfig uploadConfig = + ParallelUploadConfig.newBuilder() + .setBucketName(bucket.getName()) + .setSkipIfExists(false) + .setUploadBlobInfoFactory( + (b, f) -> 
+ BlobInfo.newBuilder( + b, prefix + f.replace(baseDir.toAbsolutePath().toString(), "")) + .setContentType(contentType) + .setMetadata(metadata) + .build()) + .setWriteOptsPerRequest(ImmutableList.of(BlobWriteOption.doesNotExist())) + .build(); + + ImmutableList files = ImmutableList.of(tmpFile1.getPath(), tmpFile2.getPath()); + UploadJob uploadJob = tm.uploadFiles(files, uploadConfig); + List uploadResults = uploadJob.getUploadResults(); + + List expected = + files.stream() + .map(p -> p.getFileName().toString()) + .map(s -> prefix + "/" + s) + .collect(Collectors.toList()); + + List actualGsUtilUris = + uploadResults.stream() + .map(UploadResult::getUploadedBlob) + .map(BlobInfo::getName) + .collect(Collectors.toList()); + assertThat(actualGsUtilUris).containsExactlyElementsIn(expected); + + List> actualMetadatas = + uploadResults.stream() + .map(UploadResult::getUploadedBlob) + .map(BlobInfo::getMetadata) + .collect(Collectors.toList()); + + assertThat(actualMetadatas).isEqualTo(ImmutableList.of(metadata, metadata)); + + List actualContentTypes = + uploadResults.stream() + .map(UploadResult::getUploadedBlob) + .map(BlobInfo::getContentType) + .collect(Collectors.toList()); + + assertThat(actualContentTypes).isEqualTo(ImmutableList.of(contentType, contentType)); + } + } + + @Test + public void bucketNameFromUploadBlobInfoFactoryMustMatchConfig() throws Exception { + try (TmpFile tmpFile1 = DataGenerator.base64Characters().tempFile(baseDir, 373); + TransferManager tm = + TransferManagerConfig.newBuilder() + .setMaxWorkers(1) + .setPerWorkerBufferSize(4 * 1024 * 1024) + .setAllowDivideAndConquerDownload(false) + .setAllowParallelCompositeUpload(false) + .setStorageOptions(storage.getOptions()) + .build() + .getService()) { + + ParallelUploadConfig uploadConfig = + ParallelUploadConfig.newBuilder() + .setBucketName(bucket.getName()) + .setSkipIfExists(false) + .setUploadBlobInfoFactory((b, f) -> BlobInfo.newBuilder(b + "x", f).build()) + 
.setWriteOptsPerRequest(ImmutableList.of(BlobWriteOption.doesNotExist())) + .build(); + + ImmutableList files = ImmutableList.of(tmpFile1.getPath()); + UploadJob uploadJob = tm.uploadFiles(files, uploadConfig); + List uploadResults = uploadJob.getUploadResults(); + + Optional failedToStart = + uploadResults.stream() + .filter(r -> r.getStatus() == TransferStatus.FAILED_TO_START) + .findAny(); + assertThat(failedToStart).isPresent(); + UploadResult result = failedToStart.get(); + assertThat(result.getException()).isInstanceOf(BucketNameMismatchException.class); + } + } + + @Test + public void downloadBlobsPathTraversalBlocked() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()); + try (TransferManager transferManager = config.getService()) { + String bucketName = bucket.getName(); + // Create an object with a name that attempts to "escape" the target directory + String maliciousName = "../malicious.txt"; + BlobInfo maliciousBlob = BlobInfo.newBuilder(BlobId.of(bucketName, maliciousName)).build(); + storage.create( + maliciousBlob, "malicious content".getBytes(java.nio.charset.StandardCharsets.UTF_8)); + + ParallelDownloadConfig parallelDownloadConfig = + ParallelDownloadConfig.newBuilder() + .setBucketName(bucketName) + .setDownloadDirectory(baseDir) // baseDir is the target + .build(); + + List blobsToDownload = new ArrayList<>(blobs); + blobsToDownload.add(maliciousBlob); + + DownloadJob job = transferManager.downloadBlobs(blobsToDownload, parallelDownloadConfig); + List results = job.getDownloadResults(); + + try { + long successCount = + results.stream().filter(res -> res.getStatus() == TransferStatus.SUCCESS).count(); + assertThat(successCount).isEqualTo(blobs.size()); + + // Verify that the malicious blob was blocked/skipped + Optional blockedResult = + results.stream() + .filter(res -> res.getInput().getName().equals(maliciousName)) + .findFirst(); + + 
assertThat(blockedResult).isPresent(); + assertThat(blockedResult.get().getStatus()).isEqualTo(TransferStatus.FAILED_TO_START); + assertThat(blockedResult.get().getException()) + .isInstanceOf(PathTraversalBlockedException.class); + assertThat(blockedResult.get().getException().getMessage()).contains("blocked"); + } finally { + storage.delete(maliciousBlob.getBlobId()); + cleanUpFiles( + results.stream() + .filter(res -> res.getStatus() == TransferStatus.SUCCESS) + .collect(Collectors.toList())); + } + } + } + + @Test + public void downloadBlobsPathTraversalAllowedWithinTarget() throws Exception { + TransferManagerConfig config = + TransferManagerConfigTestingInstances.defaults(storage.getOptions()); + try (TransferManager transferManager = config.getService()) { + String bucketName = bucket.getName(); + // This name resolves to 'safe.txt' inside the target directory + String safeNameWithDots = "subdir/../safe.txt"; + BlobInfo safeBlob = BlobInfo.newBuilder(BlobId.of(bucketName, safeNameWithDots)).build(); + storage.create(safeBlob, "safe content".getBytes(java.nio.charset.StandardCharsets.UTF_8)); + + ParallelDownloadConfig parallelDownloadConfig = + ParallelDownloadConfig.newBuilder() + .setBucketName(bucketName) + .setDownloadDirectory(baseDir) + .build(); + + DownloadJob job = + transferManager.downloadBlobs( + Collections.singletonList(safeBlob), parallelDownloadConfig); + List results = job.getDownloadResults(); + + try { + assertThat(results.get(0).getStatus()).isEqualTo(TransferStatus.SUCCESS); + // Verify it was saved to the correct normalized location + Path expectedPath = baseDir.resolve("safe.txt").toAbsolutePath().normalize(); + assertThat(results.get(0).getOutputDestination().toAbsolutePath().normalize()) + .isEqualTo(expectedPath); + } finally { + cleanUpFiles(results); + storage.delete(safeBlob.getBlobId()); + } + } + } + + private void cleanUpFiles(List results) throws IOException { + // Cleanup downloaded blobs and the parent directory + for 
(DownloadResult res : results) { + Files.delete(res.getOutputDestination()); + Files.delete(res.getOutputDestination().getParent()); + } + } + + private static byte[] readAllPathBytes(Path path) { + try { + return Files.readAllBytes(path); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITUniverseDomainTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITUniverseDomainTest.java new file mode 100644 index 000000000000..a0fc1a1fabc7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITUniverseDomainTest.java @@ -0,0 +1,88 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assume.assumeNotNull; + +import com.google.auth.oauth2.GoogleCredentials; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.UUID; +import org.junit.BeforeClass; +import org.junit.Test; + +public class ITUniverseDomainTest { + + private static final String TEST_UNIVERSE_DOMAIN = System.getenv("TEST_UNIVERSE_DOMAIN"); + private static final String TEST_PROJECT_ID = System.getenv("TEST_UNIVERSE_PROJECT_ID"); + private static final String TEST_UNIVERSE_LOCATION = System.getenv("TEST_UNIVERSE_LOCATION"); + private static final String CREDENTIAL_PATH = System.getenv("TEST_UNIVERSE_DOMAIN_CREDENTIAL"); + private static Storage storage; + + @BeforeClass + public static void setUp() throws Exception { + assumeNotNull(TEST_UNIVERSE_DOMAIN); + assumeNotNull(TEST_PROJECT_ID); + assumeNotNull(TEST_UNIVERSE_LOCATION); + assumeNotNull(CREDENTIAL_PATH); + GoogleCredentials creds = + ServiceAccountCredentials.fromStream(Files.newInputStream(Paths.get(CREDENTIAL_PATH))) + .toBuilder() + .build() + .createWithUseJwtAccessWithScope(true) + .createScoped("https://www.googleapis.com/auth/cloud-platform"); + + storage = + StorageOptions.newBuilder() + .setUniverseDomain(TEST_UNIVERSE_DOMAIN) + .setProjectId(TEST_PROJECT_ID) + .setCredentials(creds) + .build() + .getService(); + } + + @Test + public void universeDomainTests() throws Exception { + String bucketName = "java-storage-ud-" + UUID.randomUUID(); + BucketInfo bucketInfo = + 
BucketInfo.newBuilder(bucketName).setLocation(TEST_UNIVERSE_LOCATION).build(); + try (TemporaryBucket tempBucket = + TemporaryBucket.newBuilder().setBucketInfo(bucketInfo).setStorage(storage).build()) { + String content = "hello"; + String objectName = "ud-test-object"; + + storage.create( + BlobInfo.newBuilder(bucketName, objectName).build(), + content.getBytes(StandardCharsets.UTF_8)); + + Blob blob = storage.get(bucketName, objectName); + assertEquals(content, new String(blob.getContent(), StandardCharsets.UTF_8)); + + storage.delete(bucketName, objectName); + assertNull(storage.get(bucketName, objectName)); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITUserAgentTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITUserAgentTest.java new file mode 100644 index 000000000000..e408f7808126 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITUserAgentTest.java @@ -0,0 +1,78 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpRequest; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.HttpStorageOptions; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.common.collect.ImmutableList; +import java.util.Objects; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +public final class ITUserAgentTest { + + @Inject + @StorageFixture(Transport.HTTP) + public Storage storage; + + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + @Test + public void userAgentIncludesGcloudJava_writer_http() throws Exception { + RequestAuditing requestAuditing = new RequestAuditing(); + HttpStorageOptions options2 = + StorageOptions.http().setTransportOptions(requestAuditing).build(); + try (Storage storage = options2.getService()) { + try (WriteChannel writer = + storage.writer(BlobInfo.newBuilder(bucket, generator.randomObjectName()).build())) { + writer.write(DataGenerator.base64Characters().genByteBuffer(13)); + } + } + + ImmutableList userAgents = + requestAuditing.getRequests().stream() + .map(HttpRequest::getHeaders) + .map(HttpHeaders::getUserAgent) + 
.filter(Objects::nonNull) + .collect(ImmutableList.toImmutableList()); + + ImmutableList found = + userAgents.stream() + .filter(ua -> ua.contains("gcloud-java/")) + .collect(ImmutableList.toImmutableList()); + assertThat(found).hasSize(2); // one for the create session, and one for the PUT and finalize + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITWriteChannelConnectionPoolTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITWriteChannelConnectionPoolTest.java new file mode 100644 index 000000000000..1b2e3b97bc1a --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITWriteChannelConnectionPoolTest.java @@ -0,0 +1,84 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static java.nio.charset.StandardCharsets.UTF_8; + +import com.google.api.client.http.HttpTransport; +import com.google.api.client.http.apache.ApacheHttpTransport; +import com.google.auth.http.HttpTransportFactory; +import com.google.cloud.TransportOptions; +import com.google.cloud.WriteChannel; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.registry.Generator; +import java.io.IOException; +import java.nio.ByteBuffer; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +public class ITWriteChannelConnectionPoolTest { + private static final byte[] BLOB_BYTE_CONTENT = {0xD, 0xE, 0xA, 0xD}; + private static final String BLOB_STRING_CONTENT = "Hello Google Cloud Storage!"; + + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + private static class CustomHttpTransportFactory implements HttpTransportFactory { + @Override + @SuppressWarnings({"unchecked", "deprecation"}) + public HttpTransport create() { + PoolingHttpClientConnectionManager manager = new PoolingHttpClientConnectionManager(); + manager.setMaxTotal(1); + return new ApacheHttpTransport(HttpClients.createMinimal(manager)); + } + } + + @Test + public void testWriteChannelWithConnectionPool() throws IOException { + TransportOptions transportOptions = + 
HttpTransportOptions.newBuilder() + .setHttpTransportFactory(new CustomHttpTransportFactory()) + .build(); + Storage storageWithPool = + StorageOptions.http().setTransportOptions(transportOptions).build().getService(); + String blobName = generator.randomObjectName(); + BlobInfo blob = BlobInfo.newBuilder(bucket.getName(), blobName).build(); + byte[] stringBytes; + try (WriteChannel writer = storageWithPool.writer(blob)) { + stringBytes = BLOB_STRING_CONTENT.getBytes(UTF_8); + writer.write(ByteBuffer.wrap(BLOB_BYTE_CONTENT)); + writer.write(ByteBuffer.wrap(stringBytes)); + } + try (WriteChannel writer = storageWithPool.writer(blob)) { + stringBytes = BLOB_STRING_CONTENT.getBytes(UTF_8); + writer.write(ByteBuffer.wrap(BLOB_BYTE_CONTENT)); + writer.write(ByteBuffer.wrap(stringBytes)); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITWriteChannelTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITWriteChannelTest.java new file mode 100644 index 000000000000..a0fb839fcbdc --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITWriteChannelTest.java @@ -0,0 +1,91 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.PackagePrivateMethodWorkarounds; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import java.io.IOException; +import java.util.Optional; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.PROD}, + transports = {Transport.HTTP, Transport.GRPC}) +public final class ITWriteChannelTest { + + @Inject public Storage storage; + @Inject public BucketInfo bucket; + @Inject public Generator generator; + + @Test + public void writeChannel_isOpen_onConstruction() throws IOException { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + try (WriteChannel writer = storage.writer(info)) { + assertThat(writer.isOpen()).isTrue(); + } + } + + @Test + public void writeChannel_createsObjectEvenIfWriteNeverCalled() throws IOException { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + WriteChannel w; + try (WriteChannel writer = storage.writer(info)) { + w = writer; + assertThat(writer.isOpen()).isTrue(); + } + + Optional internalInfo = + PackagePrivateMethodWorkarounds.maybeGetBlobInfoFunction().apply(w); + + assertThat(internalInfo.isPresent()).isTrue(); + + Blob blob = storage.get(info.getBlobId()); + 
assertThat(blob).isNotNull(); + } + + @Test + public void writeChannel_openAfterWriteSmallerThanBlockSize() throws IOException { + BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + WriteChannel w; + try (WriteChannel writer = storage.writer(info)) { + w = writer; + assertThat(writer.isOpen()).isTrue(); + + int write = writer.write(DataGenerator.base64Characters().genByteBuffer(10)); + assertThat(write).isEqualTo(10); + + assertThat(writer.isOpen()).isTrue(); + } + + assertThat(w.isOpen()).isFalse(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ReadMaskTestUtils.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ReadMaskTestUtils.java new file mode 100644 index 000000000000..4fea041f9b4f --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ReadMaskTestUtils.java @@ -0,0 +1,88 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assume.assumeTrue; + +import java.util.List; +import java.util.function.Function; + +final class ReadMaskTestUtils { + + static final class Args { + private final F field; + private final LazyAssertion assertion; + + Args(F field, LazyAssertion assertion) { + this.field = field; + this.assertion = assertion; + } + + F getField() { + return field; + } + + LazyAssertion getAssertion() { + return assertion; + } + + @Override + public String toString() { + return field.toString(); + } + } + + @FunctionalInterface + interface LazyAssertion { + void validate(T jsonT, T grpcT) throws AssertionError; + + default LazyAssertion> pairwiseList() { + LazyAssertion self = this; + return (jsonTs, grpcTs) -> { + final int length = Math.min(jsonTs.size(), grpcTs.size()); + int idx = 0; + for (; idx < length; idx++) { + T jT = jsonTs.get(idx); + T gT = grpcTs.get(idx); + self.validate(jT, gT); + } + + assertThat(idx).isEqualTo(jsonTs.size()); + assertThat(idx).isEqualTo(grpcTs.size()); + + assertThat(jsonTs.size()).isEqualTo(length); + assertThat(grpcTs.size()).isEqualTo(length); + }; + } + + static LazyAssertion equal() { + return (a, g) -> assertThat(g).isEqualTo(a); + } + + static LazyAssertion skip(String message) { + return (a, g) -> assumeTrue(message, false); + } + + static LazyAssertion apiaryNullGrpcDefault(F def, Function extractor) { + return (jsonT, grpcT) -> { + assertThat(extractor.apply(jsonT)).isNull(); + assertThat(extractor.apply(grpcT)).isEqualTo(def); + }; + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/RequestAuditing.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/RequestAuditing.java new file mode 100644 index 000000000000..6740d3afd6f8 --- /dev/null +++ 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/RequestAuditing.java @@ -0,0 +1,226 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.api.client.http.GenericUrl; +import com.google.api.client.http.HttpContent; +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpRequest; +import com.google.api.client.http.HttpRequestInitializer; +import com.google.api.client.http.MultipartContent; +import com.google.api.client.http.MultipartContent.Part; +import com.google.api.client.http.json.JsonHttpContent; +import com.google.api.client.json.GenericJson; +import com.google.cloud.ServiceOptions; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.storage.it.CSEKSupport.EncryptionKeyTuple; +import com.google.common.collect.ImmutableList; +import com.google.common.truth.IterableSubject; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +public final class RequestAuditing extends HttpTransportOptions implements AssertRequestHeaders { + + private final List requests; + + public RequestAuditing() { + 
super(HttpTransportOptions.newBuilder()); + requests = Collections.synchronizedList(new ArrayList<>()); + } + + @Override + public HttpRequestInitializer getHttpRequestInitializer(ServiceOptions serviceOptions) { + HttpRequestInitializer delegate = super.getHttpRequestInitializer(serviceOptions); + return request -> { + requests.add(request); + delegate.initialize(request); + }; + } + + public ImmutableList getRequests() { + return ImmutableList.copyOf(requests); + } + + public void clear() { + requests.clear(); + } + + void assertQueryParam(String paramName, String expectedValue) { + assertQueryParam(paramName, ImmutableList.of(expectedValue), Function.identity()); + } + + void assertQueryParam(String paramName, ImmutableList expectedValue) { + assertQueryParam(paramName, expectedValue, Function.identity()); + } + + void assertQueryParam(String paramName, T expected, Function transform) { + assertQueryParam(paramName, ImmutableList.of(expected), transform); + } + + private void assertQueryParam( + String paramName, ImmutableList expected, Function transform) { + ImmutableList requests = getRequests(); + + List actual = + requests.stream() + .map(HttpRequest::getUrl) + // When a multipart (http, not MPU) request is sent it will show up as multiple requests + // de-dupe before processing + .distinct() + .map(u -> (String) u.getFirst(paramName)) + .filter(Objects::nonNull) + .map(transform) + .collect(Collectors.toList()); + + assertWithMessage("Query Param " + paramName).that(actual).isEqualTo(expected); + } + + void assertPathParam(String resourceName, String expectedValue) { + ImmutableList requests = getRequests(); + + List actual = + requests.stream() + .map(HttpRequest::getUrl) + // When a multipart (http, not MPU) request is sent it will show up as multiple requests + // de-dupe before processing + .distinct() + .map(GenericUrl::getRawPath) + .map( + s -> { + int resourceNameIndex = s.indexOf(resourceName); + if (resourceNameIndex >= 0) { + int valueBegin = 
resourceNameIndex + resourceName.length() + 1; + int nextSlashIdx = s.indexOf("/", valueBegin); + + if (nextSlashIdx > valueBegin) { + return s.substring(valueBegin, nextSlashIdx); + } else { + return s.substring(nextSlashIdx + 1); + } + } else { + return null; + } + }) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + + assertWithMessage("Path Param " + resourceName) + .that(actual) + .isEqualTo(ImmutableList.of(expectedValue)); + } + + void assertNoContentEncoding() { + ImmutableList requests = getRequests(); + + List actual = + requests.stream() + .map(HttpRequest::getHeaders) + .map(HttpHeaders::getContentType) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + + assertWithMessage("Header Content-Encoding").that(actual).isEmpty(); + } + + void assertEncryptionKeyHeaders(EncryptionKeyTuple tuple) { + ImmutableList requests = getRequests(); + + List actual = + requests.stream() + .map(HttpRequest::getHeaders) + .map( + h -> + new EncryptionKeyTuple( + (String) h.get("x-goog-encryption-algorithm"), + (String) h.get("x-goog-encryption-key"), + (String) h.get("x-goog-encryption-key-sha256"))) + .collect(Collectors.toList()); + + // When a multipart (http, not MPU) request is sent it will show up as multiple requests, + // constrain our assertion to contains rather than exact matching + assertWithMessage("Headers x-goog-encryption-*") + .that(actual) + .containsAtLeastElementsIn(ImmutableList.of(tuple)); + } + + void assertMultipartContentJsonAndText() { + List actual = + getRequests().stream() + .filter(r -> r.getContent() instanceof MultipartContent) + .map(r -> (MultipartContent) r.getContent()) + .flatMap(c -> c.getParts().stream()) + .map(Part::getContent) + .map(HttpContent::getType) + .collect(Collectors.toList()); + + assertWithMessage("Multipart Content-Type") + .that(actual) + .isEqualTo(ImmutableList.of("application/json; charset=UTF-8", "text/plain")); + } + + void assertMultipartJsonField(String jsonField, Object expectedValue) { 
+ List collect = + getRequests().stream() + .filter(r -> r.getContent() instanceof MultipartContent) + .map(r -> (MultipartContent) r.getContent()) + .flatMap(c -> c.getParts().stream()) + .map(Part::getContent) + .filter(content -> "application/json; charset=UTF-8".equals(content.getType())) + .filter(c -> c instanceof JsonHttpContent) + .map(c -> (JsonHttpContent) c) + .map(c -> (GenericJson) c.getData()) + .map(json -> json.get(jsonField)) + .collect(Collectors.toList()); + assertWithMessage("Multipart json field " + jsonField) + .that(collect) + .isEqualTo(ImmutableList.of(expectedValue)); + } + + @Override + public IterableSubject assertRequestHeader(String headerName, FilteringPolicy filteringPolicy) { + Function, Stream> filter; + switch (filteringPolicy) { + case DISTINCT: + filter = Stream::distinct; + break; + case NO_FILTER: + filter = Function.identity(); + break; + default: + throw new IllegalStateException("Unhandled enum value: " + filteringPolicy); + } + + ImmutableList requests = getRequests(); + + Stream stream = + requests.stream() + .map(HttpRequest::getHeaders) + .map(headers -> headers.get(headerName)) + .filter(Objects::nonNull); + List actual = filter.apply(stream).collect(Collectors.toList()); + + return assertWithMessage(String.format(Locale.US, "Headers %s", headerName)).that(actual); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/StorageNativeCanary.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/StorageNativeCanary.java new file mode 100644 index 000000000000..3f585a03d127 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/StorageNativeCanary.java @@ -0,0 +1,197 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.paging.Page; +import com.google.cloud.ReadChannel; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TestUtils; +import com.google.common.collect.ImmutableList; +import com.google.common.io.ByteStreams; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.channels.Channels; +import java.nio.channels.WritableByteChannel; +import java.util.List; +import java.util.UUID; +import org.junit.Test; + +// Intentionally avoid StorageITRunner here. It touches lots of code at a semi-static level making +// native-test have a hard time. 
+public final class StorageNativeCanary { + + private static final int _256KiB = 256 * 1024; + private static final byte[] bytes = DataGenerator.base64Characters().genBytes(512 * 1024); + + @Test + public void canary_happyPath_http() throws Throwable { + assertBehaviorOfPrimaryStorageActions(StorageOptions.http().build().getService()); + } + + @Test + public void canary_happyPath_grpc() throws Throwable { + assertBehaviorOfPrimaryStorageActions(StorageOptions.grpc().build().getService()); + } + + /** + * When testing on Native Image, we're primarily wanting to verify the primary code paths are + * properly detected by the native image compiler. + * + *

For Storage, we have a few "primary code paths" we want to ensure are validated: + * + *

    + *
  • Can a (Unary) Request Succeed? + *
  • Can a (ServerStream) Object Read Request Succeed? + *
  • Can a (ClientStream) Object Write Request Succeed? + *
  • Can a (Page over Unary) Paginated Request Succeed? + *
+ * + * To validate this, our happy path test is as follows: + * + *
    + *
  • Create a temporary bucket (Unary) + *
  • Insert two (2) objects (Unary, ServerStream) + *
  • List all objects, using a pageSize of 1 (Page over Unary) + *
  • Read all bytes of each object (ServerStream) + *
  • Delete each object (Unary) + *
  • Delete temporary bucket (Unary) + *
+ */ + private static void assertBehaviorOfPrimaryStorageActions(Storage storage) throws Throwable { + // create a temporary bucket + try (TemporaryBucket temporaryBucket = + TemporaryBucket.newBuilder() + .setStorage(storage) + .setBucketInfo(BucketInfo.of("java-storage-grpc-" + UUID.randomUUID())) + .build()) { + String bucketName = temporaryBucket.getBucket().getName(); + String obj1Name = UUID.randomUUID().toString(); + String obj2Name = UUID.randomUUID().toString(); + + // insert 2 objects + BlobInfo info1 = BlobInfo.newBuilder(bucketName, obj1Name).build(); + BlobInfo info2 = BlobInfo.newBuilder(bucketName, obj2Name).build(); + uploadUsingWriter(storage, info1); + uploadUsingWriter(storage, info2); + + // list objects + Page page = storage.list(bucketName, BlobListOption.pageSize(1)); + List blobs = ImmutableList.copyOf(page.iterateAll()); + + // read all bytes of each object + List actual = + blobs.stream() + .map(info -> readAll(storage, info)) + .collect(ImmutableList.toImmutableList()); + + List deletes = + blobs.stream() + .map(b -> storage.delete(b.getBlobId(), BlobSourceOption.generationMatch())) + .collect(ImmutableList.toImmutableList()); + + assertAll( + () -> { + List actualNames = + actual.stream() + .map(BlobWithContent::getInfo) + .map(BlobInfo::getBlobId) + .map(BlobId::getName) + .collect(ImmutableList.toImmutableList()); + + assertThat(actualNames).containsExactly(info1.getName(), info2.getName()); + }, + () -> assertThat(actual.get(0).getContent()).isEqualTo(bytes), + () -> assertThat(actual.get(1).getContent()).isEqualTo(bytes), + () -> assertThat(deletes.get(0)).isTrue(), + () -> assertThat(deletes.get(1)).isTrue()); + } catch (Throwable e) { + String hintMessage = + "Possible missing reflect-config configuration. 
Run the following to regenerate grpc" + + " reflect-config: mvn -Dmaven.test.skip.exec=true clean install && cd" + + " google-cloud-storage && mvn -Pregen-grpc-graalvm-reflect-config exec:exec"; + Throwable linkageError = TestUtils.findThrowable(LinkageError.class, e); + Throwable roe = TestUtils.findThrowable(ReflectiveOperationException.class, e); + if (linkageError != null) { + throw new RuntimeException(hintMessage, linkageError); + } else if (roe != null) { + throw new RuntimeException(hintMessage, roe); + } else { + throw e; + } + } + } + + private static void uploadUsingWriter(Storage storage, BlobInfo info) throws IOException { + try (WriteChannel writeChannel = storage.writer(info, BlobWriteOption.doesNotExist())) { + // set our size to the smallest resumable size, so we can send multiple requests + writeChannel.setChunkSize(_256KiB); + ByteStreams.copy(Channels.newChannel(new ByteArrayInputStream(bytes)), writeChannel); + } + } + + private static BlobWithContent readAll(Storage storage, BlobInfo info) { + try (ReadChannel readChannel = + storage.reader(info.getBlobId(), BlobSourceOption.generationMatch()); + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + WritableByteChannel writeChannel = Channels.newChannel(outputStream)) { + // only buffer up to half the object + readChannel.setChunkSize(_256KiB); + ByteStreams.copy(readChannel, writeChannel); + return new BlobWithContent(info, outputStream.toByteArray()); + } catch (IOException e) { + throw new RuntimeIOException(e); + } + } + + private static final class BlobWithContent { + private final BlobInfo info; + private final byte[] content; + + private BlobWithContent(BlobInfo info, byte[] content) { + this.info = info; + this.content = content; + } + + public BlobInfo getInfo() { + return info; + } + + public byte[] getContent() { + return content; + } + } + + private static final class RuntimeIOException extends RuntimeException { + private RuntimeIOException(IOException cause) { + 
super(cause); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/TemporaryBucket.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/TemporaryBucket.java new file mode 100644 index 000000000000..0109c3d2b1ef --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/TemporaryBucket.java @@ -0,0 +1,120 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static java.util.Objects.requireNonNull; + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.conformance.retry.CleanupStrategy; +import com.google.common.base.Preconditions; +import com.google.storage.control.v2.StorageControlClient; +import java.time.Duration; +import org.checkerframework.checker.nullness.qual.Nullable; + +public final class TemporaryBucket implements AutoCloseable { + + private final BucketInfo bucket; + private final Storage storage; + @Nullable private final StorageControlClient ctrl; + private final Duration cleanupTimeout; + private final CleanupStrategy cleanupStrategy; + + private TemporaryBucket( + BucketInfo bucket, + Storage storage, + @Nullable StorageControlClient ctrl, + Duration cleanupTimeout, + CleanupStrategy cleanupStrategy) { + this.bucket = bucket; + this.storage = storage; + this.ctrl = ctrl; + this.cleanupTimeout = cleanupTimeout; + this.cleanupStrategy = cleanupStrategy; + } + + /** Return the BucketInfo from the created temporary bucket. 
*/ + public BucketInfo getBucket() { + return bucket; + } + + @Override + public void close() throws Exception { + if (cleanupStrategy == CleanupStrategy.ALWAYS) { + if (ctrl != null) { + BucketCleaner.doCleanup(bucket.getName(), storage, ctrl); + } else { + BucketCleaner.doCleanup(bucket.getName(), storage); + } + } + } + + public static Builder newBuilder() { + return new Builder(); + } + + public static final class Builder { + + private CleanupStrategy cleanupStrategy; + private Duration cleanupTimeoutDuration; + private BucketInfo bucketInfo; + private Storage storage; + private StorageControlClient ctrl; + + private Builder() { + this.cleanupStrategy = CleanupStrategy.ALWAYS; + this.cleanupTimeoutDuration = Duration.ofMinutes(1); + } + + public Builder setCleanupStrategy(CleanupStrategy cleanupStrategy) { + this.cleanupStrategy = cleanupStrategy; + return this; + } + + public Builder setCleanupTimeoutDuration(Duration cleanupTimeoutDuration) { + this.cleanupTimeoutDuration = cleanupTimeoutDuration; + return this; + } + + public Builder setBucketInfo(BucketInfo bucketInfo) { + this.bucketInfo = bucketInfo; + return this; + } + + public Builder setStorage(Storage storage) { + this.storage = storage; + return this; + } + + public Builder setStorageControlClient(StorageControlClient ctrl) { + this.ctrl = ctrl; + return this; + } + + public TemporaryBucket build() { + Preconditions.checkArgument( + cleanupStrategy != CleanupStrategy.ONLY_ON_SUCCESS, "Unable to detect success."); + Storage s = requireNonNull(storage, "storage must be non null"); + Bucket b = s.create(requireNonNull(bucketInfo, "bucketInfo must be non null")); + + // intentionally drop from Bucket to BucketInfo to ensure not leaking the Storage instance + return new TemporaryBucket( + b.asBucketInfo(), s, ctrl, cleanupTimeoutDuration, cleanupStrategy); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/CrossRunFrameworkMethod.java 
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/CrossRunFrameworkMethod.java new file mode 100644 index 000000000000..cde361d2ded4 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/CrossRunFrameworkMethod.java @@ -0,0 +1,77 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner; + +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; +import java.lang.annotation.Annotation; +import java.util.Optional; +import org.junit.runners.model.FrameworkMethod; + +/** + * Utility wrapper for {@link FrameworkMethod} providing "syntax" extension for exclude and ignore + * logic + */ +final class CrossRunFrameworkMethod { + + private final FrameworkMethod fm; + + private CrossRunFrameworkMethod(FrameworkMethod fm) { + this.fm = fm; + } + + boolean isIgnored(CrossRunIntersection crossRunIntersection) { + ImmutableSet ignores = ignores(); + return ignores.stream().anyMatch(crossRunIntersection::anyMatch); + } + + boolean isExcluded(CrossRunIntersection crossRunIntersection) { + ImmutableSet crossRunIntersections = excludedAndNotIgnored(); + return crossRunIntersections.stream().anyMatch(crossRunIntersection::anyMatch); + } + + private ImmutableSet excludedAndNotIgnored() { + ImmutableSet excludes = + 
findMethodOrClassAnnotation(fm, CrossRun.Exclude.class) + .map(CrossRunIntersection::expand) + .orElse(ImmutableSet.of()); + ImmutableSet ignores = ignores(); + return ImmutableSet.copyOf(Sets.difference(excludes, ignores)); + } + + private ImmutableSet ignores() { + return findMethodOrClassAnnotation(fm, CrossRun.Ignore.class) + .map(CrossRunIntersection::expand) + .orElse(ImmutableSet.of()); + } + + static CrossRunFrameworkMethod of(FrameworkMethod fm) { + return new CrossRunFrameworkMethod(fm); + } + + private static Optional findMethodOrClassAnnotation( + FrameworkMethod child, Class annotation) { + A methodA = child.getAnnotation(annotation); + A classA = child.getDeclaringClass().getAnnotation(annotation); + if (methodA != null) { + return Optional.of(methodA); + } else { + return Optional.ofNullable(classA); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/CrossRunIntersection.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/CrossRunIntersection.java new file mode 100644 index 000000000000..a48ba9d3ce5f --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/CrossRunIntersection.java @@ -0,0 +1,184 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.storage.it.runner;

import static java.util.Objects.requireNonNull;

import com.google.cloud.storage.TransportCompatibility.Transport;
import com.google.cloud.storage.it.runner.annotations.Backend;
import com.google.cloud.storage.it.runner.annotations.CrossRun;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableSet;
import java.util.Locale;
import java.util.Objects;
import javax.annotation.concurrent.Immutable;
import javax.annotation.concurrent.ThreadSafe;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Represents a single cell in the computed cross product specified by a {@link CrossRun}
 * declaration.
 *
 * <p>Instances are immutable value objects. A {@code null} backend or transport component acts as
 * a wildcard when matching via {@link #anyMatch(CrossRunIntersection)}.
 */
@ThreadSafe
@Immutable
public final class CrossRunIntersection {

  private final @Nullable Backend backend;
  private final @Nullable Transport transport;

  private CrossRunIntersection(@Nullable Backend backend, @Nullable Transport transport) {
    this.backend = backend;
    this.transport = transport;
  }

  @Nullable
  public Backend getBackend() {
    return backend;
  }

  @Nullable
  public Transport getTransport() {
    return transport;
  }

  /** Returns an instance with the backend cleared; {@code this} if it is already {@code null}. */
  public CrossRunIntersection clearBackend() {
    if (backend == null) {
      return this;
    } else {
      return new CrossRunIntersection(null, transport);
    }
  }

  /** Returns an instance with the transport cleared; {@code this} if it is already {@code null}. */
  public CrossRunIntersection clearTransport() {
    if (transport == null) {
      return this;
    } else {
      return new CrossRunIntersection(backend, null);
    }
  }

  /** Returns an instance with the provided (non-null) backend; {@code this} if unchanged. */
  public CrossRunIntersection withBackend(Backend backend) {
    requireNonNull(backend, "backend must be non null");
    if (this.backend == backend) {
      return this;
    } else {
      return new CrossRunIntersection(backend, transport);
    }
  }

  /** Returns an instance with the provided (non-null) transport; {@code this} if unchanged. */
  public CrossRunIntersection withTransport(Transport transport) {
    requireNonNull(transport, "transport must be non null");
    if (this.transport == transport) {
      return this;
    } else {
      return new CrossRunIntersection(backend, transport);
    }
  }

  /**
   * Wildcard-aware equality: if either side's backend (or transport) is {@code null}, that
   * component is cleared on both sides before comparing, i.e. {@code null} matches anything.
   */
  public boolean anyMatch(CrossRunIntersection other) {
    CrossRunIntersection l = this;
    CrossRunIntersection r = other;

    if (l.backend == null) {
      r = r.clearBackend();
    }
    if (r.backend == null) {
      l = l.clearBackend();
    }

    if (l.transport == null) {
      r = r.clearTransport();
    }
    if (r.transport == null) {
      l = l.clearTransport();
    }

    return l.equals(r);
  }

  /**
   * use Square brackets to bound our changing of the test name it appears intellij has custom
   * handling to drop the square bracketed text which results in it being able to resolve and link
   * the test method. The use of square brackets follows the pattern set forth by @Parameterized
   * from JUnit, and resembles index based access of an array.
   */
  public String fmtSuiteName() {
    String t = transport != null ? transport.toString() : "NULL_TRANSPORT";
    String b = backend != null ? backend.toString() : "NULL_BACKEND";
    return String.format(Locale.US, "[%s][%s]", t, b);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof CrossRunIntersection)) {
      return false;
    }
    CrossRunIntersection crossRunIntersection = (CrossRunIntersection) o;
    return backend == crossRunIntersection.backend && transport == crossRunIntersection.transport;
  }

  @Override
  public int hashCode() {
    return Objects.hash(backend, transport);
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this)
        .add("backend", backend)
        .add("transport", transport)
        .toString();
  }

  public static CrossRunIntersection of(@Nullable Backend t, @Nullable Transport s) {
    return new CrossRunIntersection(t, s);
  }

  /** Expands a {@link CrossRun.Ignore} annotation into the set of cells it covers. */
  public static ImmutableSet<CrossRunIntersection> expand(CrossRun.Ignore i) {
    ImmutableSet<Backend> backends = ImmutableSet.copyOf(i.backends());
    ImmutableSet<Transport> transports = ImmutableSet.copyOf(i.transports());
    return expand(backends, transports);
  }

  /** Expands a {@link CrossRun.Exclude} annotation into the set of cells it covers. */
  public static ImmutableSet<CrossRunIntersection> expand(CrossRun.Exclude i) {
    ImmutableSet<Backend> backends = ImmutableSet.copyOf(i.backends());
    ImmutableSet<Transport> transports = ImmutableSet.copyOf(i.transports());
    return expand(backends, transports);
  }

  /**
   * Computes the cross product {@code backends x transports}. If exactly one side is empty it is
   * treated as a wildcard ({@code null} component) rather than producing the empty product; if
   * both sides are empty the result is empty.
   */
  public static ImmutableSet<CrossRunIntersection> expand(
      ImmutableSet<@Nullable Backend> backends, ImmutableSet<@Nullable Transport> transports) {
    if (backends.isEmpty() && transports.isEmpty()) {
      return ImmutableSet.of();
    } else if (!backends.isEmpty() && !transports.isEmpty()) {
      return backends.stream()
          .flatMap(t -> transports.stream().map(s -> new CrossRunIntersection(t, s)))
          .collect(ImmutableSet.toImmutableSet());
    } else if (!backends.isEmpty()) {
      return backends.stream()
          .map(t -> new CrossRunIntersection(t, null))
          .collect(ImmutableSet.toImmutableSet());
    } else {
      return transports.stream()
          .map(s -> new CrossRunIntersection(null, s))
          .collect(ImmutableSet.toImmutableSet());
    }
  }
}
+ */ + +package com.google.cloud.storage.it.runner; + +import com.google.cloud.storage.it.runner.registry.Registry; +import org.junit.function.ThrowingRunnable; +import org.junit.runner.Description; +import org.junit.runner.Result; +import org.junit.runner.notification.Failure; +import org.junit.runner.notification.RunListener; +import org.junit.runner.notification.RunNotifier; +import org.junit.runner.notification.StoppedByUserException; + +/** + * Run Notifier doesn't provide an API to query if our custom listener is registered or not, so, + * instead we decorate the one provided to us and forward the events to the delegate and our + * registry. + */ +final class RunNotifierUnion extends RunNotifier { + + private final RunNotifier delegate; + private final Registry registry; + + RunNotifierUnion(RunNotifier delegate, Registry registry) { + this.delegate = delegate; + this.registry = registry; + } + + @Override + public void fireTestRunStarted(Description description) { + safely(() -> registry.testRunStarted(description)); + delegate.fireTestRunStarted(description); + } + + @Override + public void fireTestRunFinished(Result result) { + safely(() -> registry.testRunFinished(result)); + delegate.fireTestRunFinished(result); + } + + @Override + public void fireTestSuiteStarted(Description description) { + safely(() -> registry.testSuiteStarted(description)); + delegate.fireTestSuiteStarted(description); + } + + @Override + public void fireTestSuiteFinished(Description description) { + safely(() -> registry.testSuiteFinished(description)); + delegate.fireTestSuiteFinished(description); + } + + @Override + public void fireTestStarted(Description description) throws StoppedByUserException { + safely(() -> registry.testStarted(description)); + delegate.fireTestStarted(description); + } + + @Override + public void fireTestFailure(Failure failure) { + safely(() -> registry.testFailure(failure)); + delegate.fireTestFailure(failure); + } + + @Override + public void 
fireTestAssumptionFailed(Failure failure) { + safely(() -> registry.testAssumptionFailure(failure)); + delegate.fireTestAssumptionFailed(failure); + } + + @Override + public void fireTestIgnored(Description description) { + safely(() -> registry.testIgnored(description)); + delegate.fireTestIgnored(description); + } + + @Override + public void fireTestFinished(Description description) { + safely(() -> registry.testFinished(description)); + delegate.fireTestFinished(description); + } + + @Override + public void addFirstListener(RunListener listener) { + delegate.addFirstListener(listener); + } + + @Override + public void addListener(RunListener listener) { + delegate.addListener(listener); + } + + @Override + public void removeListener(RunListener listener) { + delegate.removeListener(listener); + } + + @Override + public void pleaseStop() { + delegate.pleaseStop(); + } + + /** + * Invoking a listener can throw an exception. If that happens catch it and forward it as an event + */ + private void safely(ThrowingRunnable r) { + try { + r.run(); + } catch (Throwable t) { + delegate.fireTestFailure(new Failure(null, t)); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/SneakyException.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/SneakyException.java new file mode 100644 index 000000000000..aa6d005995bf --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/SneakyException.java @@ -0,0 +1,61 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/**
 * Sometimes, you need to make it so that a checked exception is hidden from an action. Sometimes,
 * those checked exceptions are important for other integration purposes.
 *
 * <p>This class provides some utility methods to sneakily (not method declared) throw exceptions
 * and later unwrap any sneakily wrapped exception if it's needed.
 */
public final class SneakyException extends RuntimeException {

  public SneakyException(Throwable cause) {
    super(cause);
  }

  /**
   * Runs {@code t}, returning its value. Any checked exception it throws is rethrown wrapped in a
   * {@link SneakyException} (unchecked), hiding the checked signature from the caller.
   */
  public static <T> T sneaky(SneakySupplier<T> t) {
    try {
      return t.get();
    } catch (Exception e) {
      throw new SneakyException(e);
    }
  }

  /**
   * Runs {@code t}, unwrapping a {@link SneakyException} back to its cause. The cause is rethrown
   * directly if it is a {@link RuntimeException}, otherwise wrapped in one; other checked
   * exceptions thrown by {@code t} are likewise wrapped.
   */
  public static <T> T unwrap(SneakySupplier<T> t) {
    try {
      return t.get();
    } catch (SneakyException e) {
      Throwable cause = e.getCause();
      if (cause instanceof RuntimeException) {
        throw (RuntimeException) cause;
      } else {
        throw new RuntimeException(cause);
      }
    } catch (RuntimeException e) {
      throw e;
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }

  /** A {@link java.util.function.Supplier} analogue whose {@code get} may throw any exception. */
  @FunctionalInterface
  public interface SneakySupplier<T> {
    T get() throws Exception;
  }
}
package com.google.cloud.storage.it.runner;

import static com.google.cloud.storage.it.runner.SneakyException.sneaky;

import com.google.cloud.storage.it.runner.annotations.Inject;
import com.google.cloud.storage.it.runner.registry.Registry;
import com.google.common.collect.ImmutableList;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.junit.runners.BlockJUnit4ClassRunner;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.TestClass;
import org.slf4j.LoggerFactory;

/**
 * Leaf runner for a single {@code (backend, transport)} cell computed by {@link StorageITRunner}.
 *
 * <p>Each instance decorates the standard {@link BlockJUnit4ClassRunner} behavior with: a suffixed
 * suite/test name identifying the cell, cell-aware exclusion/ignoring of methods, and per-test
 * initialization via the provided {@link TestInitializer}.
 */
final class StorageITLeafRunner extends BlockJUnit4ClassRunner {
  private final CrossRunIntersection crossRunIntersection;
  @Nullable private final String name;
  private final TestInitializer f;

  private StorageITLeafRunner(
      TestClass testClass,
      CrossRunIntersection crossRunIntersection,
      @Nullable String name,
      TestInitializer f)
      throws InitializationError {
    super(testClass);
    this.f = f;
    this.crossRunIntersection = crossRunIntersection;
    this.name = name;
  }

  @Override
  protected void validateFields(List<Throwable> errors) {
    super.validateFields(errors);
    TestClass testClass = getTestClass();
    if (testClass != null) {
      testClass.getAnnotatedFields(Inject.class).stream()
          .map(
              ff -> {
                // Renamed from 'f' to avoid shadowing the TestInitializer field of this runner.
                Field field = ff.getField();
                Class<?> type = field.getType();
                int modifiers = field.getModifiers();
                if (Modifier.isPublic(modifiers)) {
                  // manually lookup the registry here since this method is called as part of the
                  // super constructor and any passed in value to our constructor hasn't been set
                  // yet.
                  Registry registry = Registry.getInstance();
                  if (registry.injectableTypes().stream().anyMatch(type::isAssignableFrom)) {
                    return null;
                  } else {
                    return new Exception(
                        String.format(
                            Locale.US,
                            "@Inject field '%s' must have a type compatible with one of [%s]",
                            field,
                            registry.injectableTypesString()));
                  }
                } else {
                  return new Exception(
                      String.format(Locale.US, "The @Inject field '%s' must be public", field));
                }
              })
          .filter(Objects::nonNull)
          .forEach(errors::add);
    }
  }

  @Override
  protected void validateTestMethods(List<Throwable> errors) {
    TestClass testClass = getTestClass();
    if (testClass != null) {
      // Timeouts spawn an extra thread per test, which breaks the assumption that fixtures are
      // accessed from the scheduling thread; warn (not fail) so existing suites keep running.
      boolean anyTestWithTimeout =
          testClass.getAnnotatedMethods(Test.class).stream()
              .anyMatch(fm -> fm.getAnnotation(Test.class).timeout() > 0);

      boolean timeoutRule =
          testClass.getAnnotatedFields(Rule.class).stream()
              .anyMatch(ff -> ff.getType().isAssignableFrom(Timeout.class));

      boolean timeoutClassRule =
          testClass.getAnnotatedFields(ClassRule.class).stream()
              .anyMatch(ff -> ff.getType().isAssignableFrom(Timeout.class));

      if (anyTestWithTimeout || timeoutRule || timeoutClassRule) {
        String msg =
            "Using @Test(timeout = 1), @Rule Timeout or @ClassRule Timeout can break multi-thread"
                + " and Fixture support of StorageITRunner. Please refactor your test to detect a"
                + " timeout in the test itself.";
        LoggerFactory.getLogger(StorageITRunner.class).warn(msg);
      }
    }
    super.validateTestMethods(errors);
  }

  @Override
  protected String getName() {
    if (name == null) {
      return super.getName();
    } else {
      return name;
    }
  }

  @Override
  protected String testName(FrameworkMethod method) {
    if (name == null) {
      return method.getName();
    } else {
      // Suffix the cell name so each cross-run cell's test is uniquely identified.
      return method.getName() + getName();
    }
  }

  @Override
  protected List<FrameworkMethod> computeTestMethods() {
    List<FrameworkMethod> baseMethods = super.computeTestMethods();
    if (crossRunIntersection != null) {
      return baseMethods.stream()
          .filter(Objects::nonNull)
          .filter(fm -> !CrossRunFrameworkMethod.of(fm).isExcluded(crossRunIntersection))
          .collect(ImmutableList.toImmutableList());
    } else {
      return baseMethods;
    }
  }

  @Override
  protected Object createTest(FrameworkMethod method) throws Exception {
    // Run the externally-provided initializer (registry injection, parameter binding) on the
    // freshly constructed test instance.
    return f.apply(super.createTest(method));
  }

  @Override
  protected boolean isIgnored(FrameworkMethod child) {
    return super.isIgnored(child)
        || CrossRunFrameworkMethod.of(child).isIgnored(crossRunIntersection);
  }

  static StorageITLeafRunner of(
      TestClass testClass,
      CrossRunIntersection crossRunIntersection,
      @Nullable String name,
      TestInitializer f)
      throws InitializationError {
    return new StorageITLeafRunner(testClass, crossRunIntersection, name, f);
  }

  /** Like {@link #of}, but hides the checked {@link InitializationError} for use in streams. */
  static StorageITLeafRunner unsafeOf(
      TestClass testClass,
      CrossRunIntersection crossRunIntersection,
      @Nullable String name,
      TestInitializer f) {
    return sneaky(() -> of(testClass, crossRunIntersection, name, f));
  }
}
package com.google.cloud.storage.it.runner;

import com.google.cloud.storage.it.runner.annotations.Backend;
import com.google.cloud.storage.it.runner.annotations.CrossRun;
import com.google.cloud.storage.it.runner.annotations.CrossRun.AllowClassRule;
import com.google.cloud.storage.it.runner.annotations.ParallelFriendly;
import com.google.cloud.storage.it.runner.annotations.Parameterized;
import com.google.cloud.storage.it.runner.annotations.Parameterized.Parameter;
import com.google.cloud.storage.it.runner.annotations.Parameterized.ParametersProvider;
import com.google.cloud.storage.it.runner.annotations.SingleBackend;
import com.google.cloud.storage.it.runner.registry.Registry;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import java.lang.annotation.Annotation;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import java.util.stream.Stream;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.junit.ClassRule;
import org.junit.runner.Description;
import org.junit.runner.Runner;
import org.junit.runner.manipulation.Filter;
import org.junit.runner.manipulation.NoTestsRemainException;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.Suite;
import org.junit.runners.model.FrameworkField;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.TestClass;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Storage custom runner which will handle {@link CrossRun}, {@link SingleBackend}, {@link
 * com.google.cloud.storage.it.runner.annotations.ParallelFriendly} and {@link
 * com.google.cloud.storage.it.runner.annotations.Inject} suite computation.
 *
 * <p>Use in place of the usual default JUnit test runner.
 *
 * @see org.junit.runners.BlockJUnit4ClassRunner
 */
public final class StorageITRunner extends Suite {
  static {
    // Route java.util.logging through SLF4J so test logging is uniform.
    org.slf4j.bridge.SLF4JBridgeHandler.removeHandlersForRootLogger();
    org.slf4j.bridge.SLF4JBridgeHandler.install();
  }

  private static final Logger LOGGER = LoggerFactory.getLogger(StorageITRunner.class);

  private final Lock childrenLock = new ReentrantLock();
  // Lazily computed snapshot of child runners; writes are guarded by childrenLock,
  // volatile so double-checked reads in getFilteredChildren() are safe.
  private volatile ImmutableList<Runner> filteredChildren = null;

  public StorageITRunner(Class<?> klass) throws InitializationError {
    super(klass, computeRunners(klass, Registry.getInstance()));
    boolean runInParallel = getTestClass().getAnnotation(ParallelFriendly.class) != null;
    if (runInParallel) {
      this.setScheduler(Registry.getInstance().parallelScheduler());
    }
  }

  @Override
  public void run(RunNotifier notifier) {
    LOGGER.debug("run(notifier : {})", notifier);
    // Union the provided notifier with the registry so it always observes run lifecycle events.
    super.run(new RunNotifierUnion(notifier, Registry.getInstance()));
  }

  /*
  Filter is how intellij picks an individual method to run
  */
  @Override
  public void filter(Filter filter) throws NoTestsRemainException {
    childrenLock.lock();
    try {
      // TODO: Figure out how/why the test name is being mangled when @CrossRun is present
      //   test_results
      //   | test1[http][prod]()
      //   \ StorageParamTest
      //   instead of
      //   StorageParamTest / [http][prod] / test1
      filteredChildren =
          getFilteredChildren().stream()
              .filter(c -> shouldRun(filter, c, this::describeChild))
              .collect(ImmutableList.toImmutableList());
      if (filteredChildren.isEmpty()) {
        throw new NoTestsRemainException();
      }
    } finally {
      childrenLock.unlock();
    }
  }

  private List<Runner> getFilteredChildren() {
    // Double-checked locking over the volatile field.
    if (filteredChildren == null) {
      childrenLock.lock();
      try {
        if (filteredChildren == null) {
          filteredChildren = ImmutableList.copyOf(getChildren());
        }
      } finally {
        childrenLock.unlock();
      }
    }
    return filteredChildren;
  }

  /**
   * Computes one leaf runner per cross-run cell (and per parameter when {@link Parameterized}),
   * validating that exactly one of {@link CrossRun}/{@link SingleBackend} is present.
   */
  private static List<Runner> computeRunners(Class<?> klass, Registry registry)
      throws InitializationError {
    TestClass testClass = new TestClass(klass);

    Parameterized parameterized = testClass.getAnnotation(Parameterized.class);

    CrossRun crossRun = getClassAnnotation(testClass, CrossRun.class);
    SingleBackend singleBackend = getClassAnnotation(testClass, SingleBackend.class);
    StorageITRunner.validateBackendAnnotations(crossRun, singleBackend);

    final ImmutableList<?> parameters;
    if (parameterized != null) {
      try {
        Class<? extends ParametersProvider> ppC = parameterized.value();
        ParametersProvider pp = ppC.newInstance();
        registry.injectFields(pp, null);
        parameters = pp.parameters();
        if (parameters == null || parameters.isEmpty()) {
          throw new InitializationError(
              "Null or empty parameters from ParameterProvider: " + ppC.getName());
        }
      } catch (InstantiationException | IllegalAccessException e) {
        throw new InitializationError(e);
      }
    } else {
      parameters = null;
    }

    if (crossRun != null) {
      List<FrameworkField> classRules = testClass.getAnnotatedFields(ClassRule.class);
      AllowClassRule allowClassRule = testClass.getAnnotation(AllowClassRule.class);
      if (allowClassRule == null && !classRules.isEmpty()) {
        String msg =
            "@CrossRun used along with @ClassRule. This can be dangerous, multiple class scopes"
                + " will be created for cross running, possibly breaking expectations on rule"
                + " scope. If the use of a @ClassRule is still desirable, please annotate your"
                + " class with @CrossRun.AllowClassRule";
        throw new InitializationError(msg);
      }
      return SneakyException.unwrap(
          () ->
              ImmutableSet.copyOf(crossRun.backends()).stream()
                  .flatMap(
                      b ->
                          ImmutableSet.copyOf(crossRun.transports()).stream()
                              .map(t -> CrossRunIntersection.of(b, t)))
                  .flatMap(
                      c -> {
                        TestInitializer ti = registry.newTestInitializerForCell(c);
                        if (parameters != null) {
                          return parameters.stream()
                              .map(
                                  param ->
                                      StorageITLeafRunner.unsafeOf(
                                          testClass,
                                          c,
                                          fmtParam(c, param),
                                          ti.andThen(setFieldTo(testClass, param))));
                        } else {
                          return Stream.of(
                              StorageITLeafRunner.unsafeOf(testClass, c, c.fmtSuiteName(), ti));
                        }
                      })
                  .collect(ImmutableList.toImmutableList()));
    } else {
      Backend backend = singleBackend.value();
      CrossRunIntersection crossRunIntersection = CrossRunIntersection.of(backend, null);
      TestInitializer ti = registry.newTestInitializerForCell(crossRunIntersection);
      if (parameters != null) {
        return SneakyException.unwrap(
            () ->
                parameters.stream()
                    .map(
                        param ->
                            StorageITLeafRunner.unsafeOf(
                                testClass,
                                crossRunIntersection,
                                fmtParam(param),
                                ti.andThen(setFieldTo(testClass, param))))
                    .collect(ImmutableList.toImmutableList()));
      } else {
        return ImmutableList.of(StorageITLeafRunner.of(testClass, crossRunIntersection, null, ti));
      }
    }
  }

  /** Resolves {@code annotation} on the test class, walking up the superclass chain. */
  private static <A extends Annotation> @Nullable A getClassAnnotation(
      TestClass testClass, Class<A> annotation) {
    A a = testClass.getAnnotation(annotation);
    if (a != null) {
      return a;
    }
    Class<?> parent = testClass.getJavaClass().getSuperclass();
    if (parent == null) {
      return null;
    }
    return getClassAnnotation(new TestClass(parent), annotation);
  }

  private static String fmtParam(Object param) {
    return String.format(Locale.US, "[%s]", param.toString());
  }

  private static String fmtParam(CrossRunIntersection c, Object param) {
    return c.fmtSuiteName() + fmtParam(param);
  }

  /** Returns an initializer that binds {@code param} into every {@code @Parameter} field. */
  private static TestInitializer setFieldTo(TestClass testClass, Object param) {
    return o -> {
      List<FrameworkField> ffs = testClass.getAnnotatedFields(Parameter.class);
      for (FrameworkField ff : ffs) {
        ff.getField().set(o, param);
      }
      return o;
    };
  }

  private static void validateBackendAnnotations(CrossRun crossRun, SingleBackend singleBackend)
      throws InitializationError {
    if (crossRun != null && singleBackend != null) {
      throw new InitializationError(
          String.format(
              Locale.US,
              "Class annotated with both @%s and @%s. Pick only one.",
              CrossRun.class.getSimpleName(),
              SingleBackend.class.getSimpleName()));
    } else if (crossRun == null && singleBackend == null) {
      throw new InitializationError(
          String.format(
              Locale.US,
              "Missing either of @%s and @%s.",
              CrossRun.class.getSimpleName(),
              SingleBackend.class.getSimpleName()));
    }
  }

  private static <T> boolean shouldRun(Filter filter, T each, Function<T, Description> describe) {
    if (each instanceof StorageITLeafRunner) {
      StorageITLeafRunner leaf = (StorageITLeafRunner) each;
      return testsRemaining(filter, leaf);
    }

    boolean b = filter.shouldRun(describe.apply(each));
    return b && testsRemaining(filter, each);
  }

  /** Applying a filter throws if nothing survives; translate that into a boolean. */
  private static boolean testsRemaining(Filter f, Object o) {
    try {
      f.apply(o);
      return true;
    } catch (NoTestsRemainException e) {
      return false;
    }
  }
}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import org.junit.Test; +import org.junit.experimental.runners.Enclosed; +import org.junit.runner.RunWith; + +@RunWith(Enclosed.class) +public final class StorageITRunnerTest { + + @RunWith(StorageITRunner.class) + @CrossRun( + backends = {Backend.PROD}, + transports = {Transport.HTTP, Transport.GRPC}) + public abstract static class Parent { + @Inject public Transport transport; + } + + public static final class Child extends Parent { + @Test + public void transport() { + assertThat(transport).isNotNull(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/TestInitializer.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/TestInitializer.java new file mode 100644 index 000000000000..79cd37ff8550 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/TestInitializer.java @@ -0,0 +1,37 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/**
 * JUnit test initialization is kinda racey due to its need for constructor super calls and calling
 * into instance methods from that constructor hierarchy.
 *
 * <p>This interface allows us to define an external composable means of initializing a single
 * instance of a test.
 *
 * <p>This class shouldn't be used outside of {@code com.google.cloud.storage.it.runner}. When we
 * have access to java modules this will be enforced.
 */
@FunctionalInterface
public interface TestInitializer {

  /** Initializes (possibly replaces) the provided test instance, returning the result. */
  Object apply(Object o) throws Exception;

  /** Composes two initializers: {@code this} runs first, then {@code other} on its result. */
  default TestInitializer andThen(TestInitializer other) {
    TestInitializer first = this;
    return instance -> other.apply(first.apply(instance));
  }
}
*/ +public enum Backend { + /** Use the "Production" GCS endpoints */ + PROD, + /** Use the test bench as a backend */ + TEST_BENCH +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/BucketFixture.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/BucketFixture.java new file mode 100644 index 000000000000..ebd0d43e903d --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/BucketFixture.java @@ -0,0 +1,33 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Optional discriminator annotation which can be applied to a field of type {@link BucketType} in + * order to explicitly select a specific type of bucket. 
+ */ +@Target(ElementType.FIELD) +@Retention(RetentionPolicy.RUNTIME) +public @interface BucketFixture { + + BucketType value() default BucketType.DEFAULT; +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/BucketType.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/BucketType.java new file mode 100644 index 000000000000..356288b957f3 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/BucketType.java @@ -0,0 +1,31 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.annotations; + +/** Enum of types of buckets we support in our suite */ +public enum BucketType { + /** A default bucket created using all GCS defaults */ + DEFAULT, + /** A bucket created using all GCS defaults except that it has requester_pays enabled. 
*/ + REQUESTER_PAYS, + /** A bucket created with Hierarchical Namespace enabled */ + HNS, + /** A bucket created using all GCS default except that object versioning is enabled */ + VERSIONED, + /** A Rapid bucket */ + RAPID +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/CrossRun.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/CrossRun.java new file mode 100644 index 000000000000..960e56e94a72 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/CrossRun.java @@ -0,0 +1,87 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.annotations; + +import com.google.cloud.storage.TransportCompatibility; +import com.google.cloud.storage.TransportCompatibility.Transport; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotate a class to specify it should be cross run for multiple backend and transport + * combinations. + * + *

If you only need a single backend consider using {@link SingleBackend} + * + * @see SingleBackend + */ +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface CrossRun { + + Backend[] backends(); + + Transport[] transports(); + + /** + * Exclude a method from being included in the generated test suite if it's backend and transport + * match with those defined. When matching, if the empty set is defined as a value it will be + * treated as ANY rather than NONE. + * + *

{@link Ignore} Will take precedence if present on a method along with {@link Exclude}. + */ + @Target({ElementType.METHOD, ElementType.TYPE}) + @Retention(RetentionPolicy.RUNTIME) + @interface Exclude { + + Backend[] backends() default {}; + + TransportCompatibility.Transport[] transports() default {}; + } + + /** + * Ignore a method from being ran in the generated test suite if it's backend and transport match + * with those defined. When matching, if the empty set is defined as a value it will be treated as + * ANY rather than NONE. + * + *

{@link Ignore} Will take precedence if present on a method along with {@link Exclude}. + */ + @Target({ElementType.METHOD, ElementType.TYPE}) + @Retention(RetentionPolicy.RUNTIME) + @interface Ignore { + + Backend[] backends() default {}; + + TransportCompatibility.Transport[] transports() default {}; + } + + /** + * When using {@link CrossRun} a class scope will be created for each permutation, this can break + * expectations of scope/lifecycle for {@link org.junit.ClassRule}s. In an abundance of caution, + * we consider the use of a {@link org.junit.ClassRule} along with {@link CrossRun} an invalid + * class definition. + * + *

In order to allow the use of a {@link org.junit.ClassRule} along with the caveats mentioned + * above, a class can be annotated with {@link AllowClassRule} to suppress the error and proceed + * running the test class with the rule. + */ + @Target({ElementType.TYPE}) + @Retention(RetentionPolicy.RUNTIME) + @interface AllowClassRule {} +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/Inject.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/Inject.java new file mode 100644 index 000000000000..89586e47a7ce --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/Inject.java @@ -0,0 +1,37 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Denote a field which should have it's value injected by either {@link + * com.google.cloud.storage.it.runner.StorageITRunner} when running each test in the suite. + * + *

If the receiver type of the field is not currently supported an error will be raised with a + * list supported types. + * + * @see com.google.cloud.storage.it.runner.StorageITRunner + * @see BucketFixture + * @see StorageFixture + */ +@Target(ElementType.FIELD) +@Retention(RetentionPolicy.RUNTIME) +public @interface Inject {} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/ParallelFriendly.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/ParallelFriendly.java new file mode 100644 index 000000000000..5ade06efea4a --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/ParallelFriendly.java @@ -0,0 +1,30 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Signal to the test runner that the test in this class can run in parallel. Annotating a class + * with this annotation does not guarantee the test will be run in parallel. 
+ */ +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface ParallelFriendly {} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/Parameterized.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/Parameterized.java new file mode 100644 index 000000000000..b7003e3db166 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/Parameterized.java @@ -0,0 +1,46 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.annotations; + +import com.google.common.collect.ImmutableList; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotation to signify a test should be ran in a parameterized fashion. The specified {@link + * ParametersProvider} is able to define {@code @}{@link Inject}able fields which can be used when + * {@link ParametersProvider#parameters()} is invoked. + * + *

The defined class of {@link ParametersProvider} must have a public no-args constructor

Should be used with {@link StorageFixture} to select the specific transport for field + * injection. + * + * @see CrossRun + */ +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface SingleBackend { + Backend value(); +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/StorageFixture.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/StorageFixture.java new file mode 100644 index 000000000000..44a106402f5b --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/annotations/StorageFixture.java @@ -0,0 +1,33 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.annotations; + +import com.google.cloud.storage.TransportCompatibility.Transport; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Optional discriminator annotation which can be applied to a field of type {@link Transport} in + * order to explicitly select a specific type of storage instance. 
+ */ +@Target(ElementType.FIELD) +@Retention(RetentionPolicy.RUNTIME) +public @interface StorageFixture { + Transport value() default Transport.HTTP; +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/AbstractStorageProxy.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/AbstractStorageProxy.java new file mode 100644 index 000000000000..eeae23382551 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/AbstractStorageProxy.java @@ -0,0 +1,521 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it.runner.registry; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.paging.Page; +import com.google.cloud.Policy; +import com.google.cloud.ReadChannel; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobAppendableUpload; +import com.google.cloud.storage.BlobAppendableUploadConfig; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BlobReadSession; +import com.google.cloud.storage.BlobWriteSession; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.CopyWriter; +import com.google.cloud.storage.HmacKey; +import com.google.cloud.storage.HmacKey.HmacKeyMetadata; +import com.google.cloud.storage.HmacKey.HmacKeyState; +import com.google.cloud.storage.Notification; +import com.google.cloud.storage.NotificationInfo; +import com.google.cloud.storage.PostPolicyV4; +import com.google.cloud.storage.PostPolicyV4.PostConditionsV4; +import com.google.cloud.storage.PostPolicyV4.PostFieldsV4; +import com.google.cloud.storage.ServiceAccount; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageBatch; +import com.google.cloud.storage.StorageOptions; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URL; +import java.nio.file.Path; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Define a simplistic class which implements {@link Storage} while also delegating all calls to an + * underlying instance of {@link Storage}. When this class is extended it can then override + * individual methods rather than the entire Storage interface. 
+ */ +abstract class AbstractStorageProxy implements Storage { + + protected final Storage delegate; + + protected AbstractStorageProxy(Storage delegate) { + this.delegate = delegate; + } + + @Override + public Bucket create(BucketInfo bucketInfo, BucketTargetOption... options) { + return delegate.create(bucketInfo, options); + } + + @Override + public Blob create(BlobInfo blobInfo, BlobTargetOption... options) { + return delegate.create(blobInfo, options); + } + + @Override + public Blob create(BlobInfo blobInfo, byte[] content, BlobTargetOption... options) { + return delegate.create(blobInfo, content, options); + } + + @Override + public Blob create( + BlobInfo blobInfo, byte[] content, int offset, int length, BlobTargetOption... options) { + return delegate.create(blobInfo, content, offset, length, options); + } + + @Override + @Deprecated + public Blob create(BlobInfo blobInfo, InputStream content, BlobWriteOption... options) { + return delegate.create(blobInfo, content, options); + } + + @Override + public Blob createFrom(BlobInfo blobInfo, Path path, BlobWriteOption... options) + throws IOException { + return delegate.createFrom(blobInfo, path, options); + } + + @Override + public Blob createFrom(BlobInfo blobInfo, Path path, int bufferSize, BlobWriteOption... options) + throws IOException { + return delegate.createFrom(blobInfo, path, bufferSize, options); + } + + @Override + public Blob createFrom(BlobInfo blobInfo, InputStream content, BlobWriteOption... options) + throws IOException { + return delegate.createFrom(blobInfo, content, options); + } + + @Override + public Blob createFrom( + BlobInfo blobInfo, InputStream content, int bufferSize, BlobWriteOption... options) + throws IOException { + return delegate.createFrom(blobInfo, content, bufferSize, options); + } + + @Override + public Bucket get(String bucket, BucketGetOption... 
options) { + return delegate.get(bucket, options); + } + + @Override + public Bucket lockRetentionPolicy(BucketInfo bucket, BucketTargetOption... options) { + return delegate.lockRetentionPolicy(bucket, options); + } + + @Override + public Blob get(String bucket, String blob, BlobGetOption... options) { + return delegate.get(bucket, blob, options); + } + + @Override + public Blob get(BlobId blob, BlobGetOption... options) { + return delegate.get(blob, options); + } + + @Override + public Blob get(BlobId blob) { + return delegate.get(blob); + } + + @Override + public Blob restore(BlobId blob, BlobRestoreOption... options) { + return delegate.restore(blob, options); + } + + @Override + public Page list(BucketListOption... options) { + return delegate.list(options); + } + + @Override + public Page list(String bucket, BlobListOption... options) { + return delegate.list(bucket, options); + } + + @Override + public Bucket update(BucketInfo bucketInfo, BucketTargetOption... options) { + return delegate.update(bucketInfo, options); + } + + @Override + public Blob update(BlobInfo blobInfo, BlobTargetOption... options) { + return delegate.update(blobInfo, options); + } + + @Override + public Blob update(BlobInfo blobInfo) { + return delegate.update(blobInfo); + } + + @Override + public boolean delete(String bucket, BucketSourceOption... options) { + return delegate.delete(bucket, options); + } + + @Override + public boolean delete(String bucket, String blob, BlobSourceOption... options) { + return delegate.delete(bucket, blob, options); + } + + @Override + public boolean delete(BlobId blob, BlobSourceOption... 
options) { + return delegate.delete(blob, options); + } + + @Override + public boolean delete(BlobId blob) { + return delegate.delete(blob); + } + + @Override + public Blob compose(ComposeRequest composeRequest) { + return delegate.compose(composeRequest); + } + + @Override + public CopyWriter copy(CopyRequest copyRequest) { + return delegate.copy(copyRequest); + } + + @Override + public byte[] readAllBytes(String bucket, String blob, BlobSourceOption... options) { + return delegate.readAllBytes(bucket, blob, options); + } + + @Override + public byte[] readAllBytes(BlobId blob, BlobSourceOption... options) { + return delegate.readAllBytes(blob, options); + } + + @Override + public StorageBatch batch() { + return delegate.batch(); + } + + @Override + public ReadChannel reader(String bucket, String blob, BlobSourceOption... options) { + return delegate.reader(bucket, blob, options); + } + + @Override + public ReadChannel reader(BlobId blob, BlobSourceOption... options) { + return delegate.reader(blob, options); + } + + @Override + public void downloadTo(BlobId blob, Path path, BlobSourceOption... options) { + delegate.downloadTo(blob, path, options); + } + + @Override + public void downloadTo(BlobId blob, OutputStream outputStream, BlobSourceOption... options) { + delegate.downloadTo(blob, outputStream, options); + } + + @Override + public WriteChannel writer(BlobInfo blobInfo, BlobWriteOption... options) { + return delegate.writer(blobInfo, options); + } + + @Override + public WriteChannel writer(URL signedURL) { + return delegate.writer(signedURL); + } + + @Override + public URL signUrl(BlobInfo blobInfo, long duration, TimeUnit unit, SignUrlOption... options) { + return delegate.signUrl(blobInfo, duration, unit, options); + } + + @Override + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostFieldsV4 fields, + PostConditionsV4 conditions, + PostPolicyV4Option... 
options) { + return delegate.generateSignedPostPolicyV4( + blobInfo, duration, unit, fields, conditions, options); + } + + @Override + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostFieldsV4 fields, + PostPolicyV4Option... options) { + return delegate.generateSignedPostPolicyV4(blobInfo, duration, unit, fields, options); + } + + @Override + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, + long duration, + TimeUnit unit, + PostConditionsV4 conditions, + PostPolicyV4Option... options) { + return delegate.generateSignedPostPolicyV4(blobInfo, duration, unit, conditions, options); + } + + @Override + public PostPolicyV4 generateSignedPostPolicyV4( + BlobInfo blobInfo, long duration, TimeUnit unit, PostPolicyV4Option... options) { + return delegate.generateSignedPostPolicyV4(blobInfo, duration, unit, options); + } + + @Override + public List get(BlobId... blobIds) { + return delegate.get(blobIds); + } + + @Override + public List get(Iterable blobIds) { + return delegate.get(blobIds); + } + + @Override + public List update(BlobInfo... blobInfos) { + return delegate.update(blobInfos); + } + + @Override + public List update(Iterable blobInfos) { + return delegate.update(blobInfos); + } + + @Override + public List delete(BlobId... blobIds) { + return delegate.delete(blobIds); + } + + @Override + public List delete(Iterable blobIds) { + return delegate.delete(blobIds); + } + + @Override + public Acl getAcl(String bucket, Entity entity, BucketSourceOption... options) { + return delegate.getAcl(bucket, entity, options); + } + + @Override + public Acl getAcl(String bucket, Entity entity) { + return delegate.getAcl(bucket, entity); + } + + @Override + public boolean deleteAcl(String bucket, Entity entity, BucketSourceOption... 
options) { + return delegate.deleteAcl(bucket, entity, options); + } + + @Override + public boolean deleteAcl(String bucket, Entity entity) { + return delegate.deleteAcl(bucket, entity); + } + + @Override + public Acl createAcl(String bucket, Acl acl, BucketSourceOption... options) { + return delegate.createAcl(bucket, acl, options); + } + + @Override + public Acl createAcl(String bucket, Acl acl) { + return delegate.createAcl(bucket, acl); + } + + @Override + public Acl updateAcl(String bucket, Acl acl, BucketSourceOption... options) { + return delegate.updateAcl(bucket, acl, options); + } + + @Override + public Acl updateAcl(String bucket, Acl acl) { + return delegate.updateAcl(bucket, acl); + } + + @Override + public List listAcls(String bucket, BucketSourceOption... options) { + return delegate.listAcls(bucket, options); + } + + @Override + public List listAcls(String bucket) { + return delegate.listAcls(bucket); + } + + @Override + public Acl getDefaultAcl(String bucket, Entity entity) { + return delegate.getDefaultAcl(bucket, entity); + } + + @Override + public boolean deleteDefaultAcl(String bucket, Entity entity) { + return delegate.deleteDefaultAcl(bucket, entity); + } + + @Override + public Acl createDefaultAcl(String bucket, Acl acl) { + return delegate.createDefaultAcl(bucket, acl); + } + + @Override + public Acl updateDefaultAcl(String bucket, Acl acl) { + return delegate.updateDefaultAcl(bucket, acl); + } + + @Override + public List listDefaultAcls(String bucket) { + return delegate.listDefaultAcls(bucket); + } + + @Override + public Acl getAcl(BlobId blob, Entity entity) { + return delegate.getAcl(blob, entity); + } + + @Override + public boolean deleteAcl(BlobId blob, Entity entity) { + return delegate.deleteAcl(blob, entity); + } + + @Override + public Acl createAcl(BlobId blob, Acl acl) { + return delegate.createAcl(blob, acl); + } + + @Override + public Acl updateAcl(BlobId blob, Acl acl) { + return delegate.updateAcl(blob, acl); + } + + 
@Override + public List listAcls(BlobId blob) { + return delegate.listAcls(blob); + } + + @Override + public HmacKey createHmacKey(ServiceAccount serviceAccount, CreateHmacKeyOption... options) { + return delegate.createHmacKey(serviceAccount, options); + } + + @Override + public Page listHmacKeys(ListHmacKeysOption... options) { + return delegate.listHmacKeys(options); + } + + @Override + public HmacKeyMetadata getHmacKey(String accessId, GetHmacKeyOption... options) { + return delegate.getHmacKey(accessId, options); + } + + @Override + public void deleteHmacKey(HmacKeyMetadata hmacKeyMetadata, DeleteHmacKeyOption... options) { + delegate.deleteHmacKey(hmacKeyMetadata, options); + } + + @Override + public HmacKeyMetadata updateHmacKeyState( + HmacKeyMetadata hmacKeyMetadata, HmacKeyState state, UpdateHmacKeyOption... options) { + return delegate.updateHmacKeyState(hmacKeyMetadata, state, options); + } + + @Override + public Policy getIamPolicy(String bucket, BucketSourceOption... options) { + return delegate.getIamPolicy(bucket, options); + } + + @Override + public Policy setIamPolicy(String bucket, Policy policy, BucketSourceOption... options) { + return delegate.setIamPolicy(bucket, policy, options); + } + + @Override + public List testIamPermissions( + String bucket, List permissions, BucketSourceOption... 
options) { + return delegate.testIamPermissions(bucket, permissions, options); + } + + @Override + public ServiceAccount getServiceAccount(String projectId) { + return delegate.getServiceAccount(projectId); + } + + @Override + public Notification createNotification(String bucket, NotificationInfo notificationInfo) { + return delegate.createNotification(bucket, notificationInfo); + } + + @Override + public Notification getNotification(String bucket, String notificationId) { + return delegate.getNotification(bucket, notificationId); + } + + @Override + public List listNotifications(String bucket) { + return delegate.listNotifications(bucket); + } + + @Override + public boolean deleteNotification(String bucket, String notificationId) { + return delegate.deleteNotification(bucket, notificationId); + } + + @Override + public BlobWriteSession blobWriteSession(BlobInfo blobInfo, BlobWriteOption... options) { + return delegate.blobWriteSession(blobInfo, options); + } + + @Override + public ApiFuture blobReadSession(BlobId id, BlobSourceOption... options) { + return delegate.blobReadSession(id, options); + } + + @Override + public BlobAppendableUpload blobAppendableUpload( + BlobInfo blobInfo, BlobAppendableUploadConfig uploadConfig, BlobWriteOption... 
options) { + return delegate.blobAppendableUpload(blobInfo, uploadConfig, options); + } + + @Override + public void close() throws Exception { + delegate.close(); + } + + @Override + public Blob moveBlob(MoveBlobRequest request) { + return delegate.moveBlob(request); + } + + @Override + public StorageOptions getOptions() { + return delegate.getOptions(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/BackendResources.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/BackendResources.java new file mode 100644 index 000000000000..40c0ee2a895c --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/BackendResources.java @@ -0,0 +1,346 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it.runner.registry; + +import static com.google.cloud.storage.it.runner.registry.RegistryApplicabilityPredicate.backendIs; +import static com.google.cloud.storage.it.runner.registry.RegistryApplicabilityPredicate.bucketTypeIs; +import static com.google.cloud.storage.it.runner.registry.RegistryApplicabilityPredicate.isDefaultBucket; +import static com.google.cloud.storage.it.runner.registry.RegistryApplicabilityPredicate.transportAndBackendAre; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.cloud.NoCredentials; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.CustomPlacementConfig; +import com.google.cloud.storage.BucketInfo.HierarchicalNamespace; +import com.google.cloud.storage.BucketInfo.IamConfiguration; +import com.google.cloud.storage.GrpcStorageOptions; +import com.google.cloud.storage.HttpStorageOptions; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageClass; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.GrpcPlainRequestLoggingInterceptor; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.BucketType; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.storage.control.v2.StorageControlClient; +import com.google.storage.control.v2.StorageControlSettings; +import com.google.storage.control.v2.stub.StorageControlStubSettings; +import io.grpc.ManagedChannelBuilder; +import java.io.IOException; +import java.net.URI; +import java.util.Locale; +import java.util.UUID; + +/** The set of resources which are defined for a single backend. 
 */
final class BackendResources implements ManagedLifecycle {

  // Which backend (PROD or TEST_BENCH) this whole resource set targets.
  private final Backend backend;
  // Bucket names created by these fixtures; registered so generic cleanup never deletes them.
  private final ProtectedBucketNames protectedBucketNames;

  // NOTE(review): generic parameters were garbled in extraction; ImmutableList<RegistryEntry<?>>
  // is reconstructed from the getRegistryEntries() usage -- confirm against the original file.
  private final ImmutableList<RegistryEntry<?>> registryEntries;

  private BackendResources(
      Backend backend,
      ProtectedBucketNames protectedBucketNames,
      ImmutableList<RegistryEntry<?>> registryEntries) {
    this.backend = backend;
    this.protectedBucketNames = protectedBucketNames;
    this.registryEntries = registryEntries;
  }

  /** Ordered entries the test Registry consults when injecting fixtures into tests. */
  public ImmutableList<RegistryEntry<?>> getRegistryEntries() {
    return registryEntries;
  }

  @Override
  public Object get() {
    return this;
  }

  // Nothing to start eagerly: every fixture below is created lazily via TestRunScopedInstance.
  @Override
  public void start() {}

  @Override
  public void stop() {
    protectedBucketNames.stop();
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this).add("backend", backend).toString();
  }

  /**
   * Build the full set of lazily-instantiated fixtures (JSON/gRPC Storage clients, a storage
   * control client, several bucket flavors, object fixtures and KMS) for {@code backend}.
   *
   * <p>All instances are lazy: nothing talks to the backend until a test first requests the
   * fixture through the registry.
   */
  @SuppressWarnings("SwitchStatementWithTooFewBranches")
  static BackendResources of(
      Backend backend,
      TestRunScopedInstance otelSdk,
      TestRunScopedInstance zone) {
    ProtectedBucketNames protectedBucketNames = new ProtectedBucketNames();
    // HTTP/JSON Storage client. TEST_BENCH points at the local testbench emulator with
    // no credentials; PROD uses default options plus the shared OpenTelemetry SDK.
    TestRunScopedInstance storageJson =
        TestRunScopedInstance.of(
            "fixture/STORAGE/[JSON][" + backend.name() + "]",
            () -> {
              HttpStorageOptions.Builder optionsBuilder;
              switch (backend) {
                case TEST_BENCH:
                  optionsBuilder =
                      StorageOptions.http()
                          .setCredentials(NoCredentials.getInstance())
                          .setHost(Registry.getInstance().testBench().getBaseUri())
                          .setProjectId("test-project-id");
                  break;
                default: // PROD, java8 doesn't have exhaustive checking for enum switch
                  // Register the exporters with OpenTelemetry
                  optionsBuilder = StorageOptions.http().setOpenTelemetry(otelSdk.get().get());
                  break;
              }
              HttpStorageOptions built = optionsBuilder.build();
              return new StorageInstance(built, protectedBucketNames);
            });
    // gRPC Storage client. Plain-request logging interceptor is applied unconditionally
    // below; client metrics are disabled for tests.
    TestRunScopedInstance storageGrpc =
        TestRunScopedInstance.of(
            "fixture/STORAGE/[GRPC][" + backend.name() + "]",
            () -> {
              GrpcStorageOptions.Builder optionsBuilder;
              switch (backend) {
                case TEST_BENCH:
                  optionsBuilder =
                      StorageOptions.grpc()
                          .setGrpcInterceptorProvider(
                              GrpcPlainRequestLoggingInterceptor.getInterceptorProvider())
                          .setCredentials(NoCredentials.getInstance())
                          .setHost(Registry.getInstance().testBench().getGRPCBaseUri())
                          .setAttemptDirectPath(false)
                          .setProjectId("test-project-id");
                  break;
                default: // PROD, java8 doesn't have exhaustive checking for enum switch
                  // Register the exporters with OpenTelemetry
                  optionsBuilder = StorageOptions.grpc().setOpenTelemetry(otelSdk.get().get());
                  break;
              }
              GrpcStorageOptions built =
                  optionsBuilder
                      .setGrpcInterceptorProvider(
                          GrpcPlainRequestLoggingInterceptor.getInterceptorProvider())
                      .setEnableGrpcClientMetrics(false)
                      .build();
              return new StorageInstance(built, protectedBucketNames);
            });
    // StorageControl client (folders/managed folders API). For TEST_BENCH the gRPC endpoint
    // is derived from the testbench base URI, downgrading to plaintext for http:// schemes.
    TestRunScopedInstance ctrl =
        TestRunScopedInstance.of(
            "fixture/STORAGE_CONTROL/[" + backend.name() + "]",
            () -> {
              StorageControlSettings.Builder builder;
              switch (backend) {
                case TEST_BENCH:
                  String baseUri = Registry.getInstance().testBench().getGRPCBaseUri();
                  URI uri = URI.create(baseUri);
                  String endpoint = String.format(Locale.US, "%s:%d", uri.getHost(), uri.getPort());
                  InstantiatingGrpcChannelProvider.Builder b =
                      StorageControlStubSettings.defaultGrpcTransportProviderBuilder()
                          .setInterceptorProvider(
                              GrpcPlainRequestLoggingInterceptor.getInterceptorProvider())
                          .setEndpoint(endpoint);
                  if (uri.getScheme().equals("http")) {
                    b.setChannelConfigurator(ManagedChannelBuilder::usePlaintext);
                  }
                  InstantiatingGrpcChannelProvider instantiatingGrpcChannelProvider = b.build();
                  builder =
                      StorageControlSettings.newBuilder()
                          .setCredentialsProvider(NoCredentialsProvider.create())
                          .setEndpoint(endpoint)
                          .setTransportChannelProvider(instantiatingGrpcChannelProvider);
                  break;
                default: // PROD, java8 doesn't have exhaustive checking for enum switch
                  builder =
                      StorageControlSettings.newBuilder()
                          .setTransportChannelProvider(
                              StorageControlStubSettings.defaultGrpcTransportProviderBuilder()
                                  .setInterceptorProvider(
                                      GrpcPlainRequestLoggingInterceptor.getInterceptorProvider())
                                  .build());
                  break;
              }

              try {
                StorageControlSettings settings = builder.build();
                return new StorageControlInstance(settings);
              } catch (IOException e) {
                throw new RuntimeException(e);
              }
            });
    // Default bucket for the backend; name is randomized per test run and protected.
    TestRunScopedInstance bucket =
        TestRunScopedInstance.of(
            "fixture/BUCKET/[" + backend.name() + "]",
            () -> {
              String bucketName =
                  String.format(Locale.US, "java-storage-grpc-%s", UUID.randomUUID());
              protectedBucketNames.add(bucketName);
              return new BucketInfoShim(
                  BucketInfo.newBuilder(bucketName)
                      .setLocation(zone.get().get().getRegion())
                      .build(),
                  storageJson.get().getStorage(),
                  ctrl.get().getCtrl());
            });
    // Requester-pays bucket variant.
    TestRunScopedInstance bucketRp =
        TestRunScopedInstance.of(
            "fixture/BUCKET/[" + backend.name() + "]/REQUESTER_PAYS",
            () -> {
              String bucketName =
                  String.format(Locale.US, "java-storage-grpc-rp-%s", UUID.randomUUID());
              protectedBucketNames.add(bucketName);
              return new BucketInfoShim(
                  BucketInfo.newBuilder(bucketName)
                      .setLocation(zone.get().get().getRegion())
                      .setRequesterPays(true)
                      .build(),
                  storageJson.get().getStorage(),
                  ctrl.get().getCtrl());
            });
    // Object-versioning-enabled bucket variant.
    TestRunScopedInstance bucketVersioned =
        TestRunScopedInstance.of(
            "fixture/BUCKET/[" + backend.name() + "]/VERSIONED",
            () -> {
              String bucketName =
                  String.format(Locale.US, "java-storage-grpc-v-%s", UUID.randomUUID());
              protectedBucketNames.add(bucketName);
              return new BucketInfoShim(
                  BucketInfo.newBuilder(bucketName)
                      .setLocation(zone.get().get().getRegion())
                      .setVersioningEnabled(true)
                      .build(),
                  storageJson.get().getStorage(),
                  ctrl.get().getCtrl());
            });
    // Hierarchical-namespace bucket; HNS requires uniform bucket-level access.
    TestRunScopedInstance bucketHns =
        TestRunScopedInstance.of(
            "fixture/BUCKET/[" + backend.name() + "]/HNS",
            () -> {
              String bucketName =
                  String.format(Locale.US, "java-storage-grpc-hns-%s", UUID.randomUUID());
              protectedBucketNames.add(bucketName);
              return new BucketInfoShim(
                  BucketInfo.newBuilder(bucketName)
                      .setLocation(zone.get().get().getRegion())
                      .setHierarchicalNamespace(
                          HierarchicalNamespace.newBuilder().setEnabled(true).build())
                      .setIamConfiguration(
                          IamConfiguration.newBuilder()
                              .setIsUniformBucketLevelAccessEnabled(true)
                              .build())
                      .build(),
                  storageJson.get().getStorage(),
                  ctrl.get().getCtrl());
            });
    // RAPID storage-class zonal bucket; pins data placement to the fixture zone.
    TestRunScopedInstance bucketRapid =
        TestRunScopedInstance.of(
            "fixture/BUCKET/[" + backend.name() + "]/RAPID",
            () -> {
              String bucketName =
                  String.format(Locale.US, "java-storage-grpc-rapid-%s", UUID.randomUUID());
              protectedBucketNames.add(bucketName);
              return new BucketInfoShim(
                  BucketInfo.newBuilder(bucketName)
                      .setLocation(zone.get().get().getRegion())
                      .setCustomPlacementConfig(
                          CustomPlacementConfig.newBuilder()
                              .setDataLocations(ImmutableList.of(zone.get().get().getZone()))
                              .build())
                      .setStorageClass(StorageClass.valueOf("RAPID"))
                      .setHierarchicalNamespace(
                          HierarchicalNamespace.newBuilder().setEnabled(true).build())
                      .setIamConfiguration(
                          IamConfiguration.newBuilder()
                              .setIsUniformBucketLevelAccessEnabled(true)
                              .build())
                      .build(),
                  storageJson.get().getStorage(),
                  ctrl.get().getCtrl());
            });
    // Pre-created objects in the default, requester-pays and HNS buckets.
    TestRunScopedInstance objectsFixture =
        TestRunScopedInstance.of(
            "fixture/OBJECTS/[" + backend.name() + "]",
            () -> new ObjectsFixture(storageJson.get().getStorage(), bucket.get().getBucketInfo()));
    TestRunScopedInstance objectsFixtureRp =
        TestRunScopedInstance.of(
            "fixture/OBJECTS/[" + backend.name() + "]/REQUESTER_PAYS",
            () ->
                new ObjectsFixture(storageJson.get().getStorage(), bucketRp.get().getBucketInfo()));
    TestRunScopedInstance objectsFixtureHns =
        TestRunScopedInstance.of(
            "fixture/OBJECTS/[" + backend.name() + "]/HNS",
            () ->
                new ObjectsFixture(
                    storageJson.get().getStorage(), bucketHns.get().getBucketInfo()));
    TestRunScopedInstance kmsFixture =
        TestRunScopedInstance.of(
            "fixture/KMS/[" + backend.name() + "]",
            () -> KmsFixture.of(storageJson.get().getStorage(), zone.get().get()));

    // Registry entries: the integer is the resolution priority; the predicate restricts which
    // test configurations (transport/backend/bucket type) each fixture applies to.
    return new BackendResources(
        backend,
        protectedBucketNames,
        ImmutableList.of(
            RegistryEntry.of(
                40, Storage.class, storageJson, transportAndBackendAre(Transport.HTTP, backend)),
            RegistryEntry.of(
                50, Storage.class, storageGrpc, transportAndBackendAre(Transport.GRPC, backend)),
            RegistryEntry.of(55, StorageControlClient.class, ctrl, backendIs(backend)),
            RegistryEntry.of(
                60,
                BucketInfo.class,
                bucketRp,
                backendIs(backend).and(bucketTypeIs(BucketType.REQUESTER_PAYS))),
            RegistryEntry.of(
                61,
                BucketInfo.class,
                bucketHns,
                backendIs(backend).and(bucketTypeIs(BucketType.HNS))),
            RegistryEntry.of(
                62,
                BucketInfo.class,
                bucketVersioned,
                backendIs(backend).and(bucketTypeIs(BucketType.VERSIONED))),
            RegistryEntry.of(
                63,
                BucketInfo.class,
                bucketRapid,
                backendIs(backend).and(bucketTypeIs(BucketType.RAPID))),
            RegistryEntry.of(
                70, BucketInfo.class, bucket, backendIs(backend).and(isDefaultBucket())),
            RegistryEntry.of(
                80,
                ObjectsFixture.class,
                objectsFixture,
                backendIs(backend).and(isDefaultBucket())),
            RegistryEntry.of(
                90,
                ObjectsFixture.class,
                objectsFixtureRp,
                backendIs(backend).and(bucketTypeIs(BucketType.REQUESTER_PAYS))),
            RegistryEntry.of(
                91,
                ObjectsFixture.class,
                objectsFixtureHns,
                backendIs(backend).and(bucketTypeIs(BucketType.HNS))),
            RegistryEntry.of(100, KmsFixture.class, kmsFixture, backendIs(backend)))));
  }
}
diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/BucketInfoShim.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/BucketInfoShim.java
new file mode 100644
index 000000000000..0c981895038b
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/BucketInfoShim.java
@@ -0,0
+1,72 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.registry; + +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.it.BucketCleaner; +import com.google.storage.control.v2.StorageControlClient; +import java.util.Locale; + +/** Shim to lift a BucketInfo to be a managed bucket instance */ +final class BucketInfoShim implements ManagedLifecycle { + + private final BucketInfo bucketInfo; + private final Storage s; + private final StorageControlClient ctrl; + + private BucketInfo createdBucket; + + BucketInfoShim(BucketInfo bucketInfo, Storage s, StorageControlClient ctrl) { + this.bucketInfo = bucketInfo; + this.s = s; + this.ctrl = ctrl; + } + + public BucketInfo getBucketInfo() { + return createdBucket; + } + + @Override + public Object get() { + return bucketInfo; + } + + @Override + public void start() { + try { + createdBucket = s.create(bucketInfo).asBucketInfo(); + } catch (StorageException se) { + String msg = se.getMessage().toLowerCase(Locale.US); + if (se.getCode() == 400 && (msg.contains("not a valid zone in location")) + || msg.contains("custom placement config") + || msg.contains("zonal")) { + assumeTrue( + "Skipping test due to bucket setup unavailable in current zone. 
(" + msg + ")", false); + } + throw se; + } + } + + @Override + public void stop() { + BucketCleaner.doCleanup(bucketInfo.getName(), s /*, ctrl*/); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/Generator.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/Generator.java new file mode 100644 index 000000000000..2235433f1bf9 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/Generator.java @@ -0,0 +1,67 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.registry; + +import java.util.Locale; +import java.util.UUID; +import java.util.WeakHashMap; +import java.util.concurrent.atomic.AtomicInteger; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.junit.runner.Description; + +/** Scoped instance of a class which generates common things for tests. 
*/ +public final class Generator implements ManagedLifecycle { + + private final WeakHashMap counters; + + Generator() { + counters = new WeakHashMap<>(); + } + + /** Generate a new random bucket name */ + @NonNull + public String randomBucketName() { + // TODO: track their creation and detect if the bucket is "leaked" and fail the test + return "java-storage-grpc-rand-" + UUID.randomUUID(); + } + + @NonNull + public String randomObjectName() { + Description currentTest = Registry.getInstance().getCurrentTest(); + if (currentTest == null) { + throw new IllegalStateException("No actively running test in registry."); + } + AtomicInteger counter = counters.computeIfAbsent(currentTest, (d) -> new AtomicInteger(1)); + return String.format( + Locale.US, + "%s.%s-%04d", + currentTest.getClassName(), + currentTest.getMethodName(), + counter.getAndIncrement()); + } + + @Override + public Object get() { + return this; + } + + @Override + public void start() {} + + @Override + public void stop() {} +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/KmsFixture.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/KmsFixture.java new file mode 100644 index 000000000000..bd6f2fc49ed0 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/KmsFixture.java @@ -0,0 +1,168 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.registry; + +import com.google.api.gax.rpc.NotFoundException; +import com.google.cloud.kms.v1.CreateCryptoKeyRequest; +import com.google.cloud.kms.v1.CreateKeyRingRequest; +import com.google.cloud.kms.v1.CryptoKey; +import com.google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose; +import com.google.cloud.kms.v1.CryptoKeyName; +import com.google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm; +import com.google.cloud.kms.v1.CryptoKeyVersionTemplate; +import com.google.cloud.kms.v1.KeyManagementServiceClient; +import com.google.cloud.kms.v1.KeyRing; +import com.google.cloud.kms.v1.KeyRingName; +import com.google.cloud.kms.v1.LocationName; +import com.google.cloud.storage.ServiceAccount; +import com.google.cloud.storage.Storage; +import com.google.iam.v1.Binding; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import io.grpc.StatusRuntimeException; +import java.io.IOException; + +public class KmsFixture implements ManagedLifecycle { + + private final Storage storage; + private final String keyRingLocation; + private final String keyRingName; + private final String key1Name; + private final String key2Name; + + private KeyRing keyRing; + private CryptoKey key1; + private CryptoKey key2; + + private KmsFixture( + Storage storage, + String keyRingLocation, + String keyRingName, + String key1Name, + String key2Name) { + this.storage = storage; + this.keyRingLocation = keyRingLocation; + this.keyRingName = keyRingName; + this.key1Name = key1Name; + this.key2Name = key2Name; + } + + public String getKeyRingLocation() { + return keyRingLocation; + } + + public KeyRing getKeyRing() { + return keyRing; + } + + public CryptoKey getKey1() { + return key1; + } + + public CryptoKey getKey2() { + return key2; + } + + @Override + public Object get() { + return this; + } + + @Override + 
public void start() {
+    try (KeyManagementServiceClient kms = KeyManagementServiceClient.create()) {
+      keyRing = resolveKeyRing(kms);
+      // Grant is applied for its side effect; the returned Policy is intentionally unused.
+      Policy ignore = grantStorageServiceAccountRolesToKeyRing(kms);
+      key1 = resolveKey(kms, key1Name);
+      key2 = resolveKey(kms, key2Name);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public void stop() {
+    // KMS prevents key and ring deletion, https://cloud.google.com/kms/docs/faq#cannot_delete
+  }
+
+  static KmsFixture of(Storage s, Zone zone) {
+    // KMS prevents key and ring deletion, https://cloud.google.com/kms/docs/faq#cannot_delete
+    // therefore we instead prefer stable names to not blow out the number of keys and rings
+    // in a project.
+    return new KmsFixture(
+        s, zone.getRegion(), "gcs_test_kms_key_ring", "gcs_kms_key_one", "gcs_kms_key_two");
+  }
+
+  /** Look up the key ring; lazily create it when it does not exist yet. */
+  private KeyRing resolveKeyRing(KeyManagementServiceClient kms) throws StatusRuntimeException {
+    String projectId = storage.getOptions().getProjectId();
+    KeyRingName ringName = KeyRingName.of(projectId, keyRingLocation, keyRingName);
+    try {
+      return kms.getKeyRing(ringName.toString());
+    } catch (NotFoundException ex) {
+      CreateKeyRingRequest req =
+          CreateKeyRingRequest.newBuilder()
+              .setParent(LocationName.of(projectId, keyRingLocation).toString())
+              .setKeyRingId(keyRingName)
+              .setKeyRing(KeyRing.newBuilder().build())
+              .build();
+      // Leftover debug println removed: requests should not be dumped to stdout in test runs.
+      return kms.createKeyRing(req);
+    }
+  }
+
+  /** Allow the GCS service account to encrypt/decrypt with keys on this ring. */
+  private Policy grantStorageServiceAccountRolesToKeyRing(KeyManagementServiceClient kms) {
+    String projectId = storage.getOptions().getProjectId();
+    ServiceAccount serviceAccount = storage.getServiceAccount(projectId);
+    Binding binding =
+        Binding.newBuilder()
+            .setRole("roles/cloudkms.cryptoKeyEncrypterDecrypter")
+            .addMembers("serviceAccount:" + serviceAccount.getEmail())
+            .build();
+    SetIamPolicyRequest setIamPolicyRequest =
+        SetIamPolicyRequest.newBuilder()
+            .setResource(keyRing.getName())
+
.setPolicy(Policy.newBuilder().addBindings(binding).build()) + .build(); + + return kms.setIamPolicy(setIamPolicyRequest); + } + + private CryptoKey resolveKey(KeyManagementServiceClient kms, String keyName) { + CryptoKeyName cryptoKeyName = cryptoKeyNameOnRing(keyRing, keyName); + try { + return kms.getCryptoKey(cryptoKeyName); + } catch (NotFoundException ex) { + CreateCryptoKeyRequest req = + CreateCryptoKeyRequest.newBuilder() + .setParent(keyRing.getName()) + .setCryptoKeyId(keyName) + .setCryptoKey( + CryptoKey.newBuilder() + .setPurpose(CryptoKeyPurpose.ENCRYPT_DECRYPT) + .setVersionTemplate( + CryptoKeyVersionTemplate.newBuilder() + .setAlgorithm(CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION))) + .build(); + return kms.createCryptoKey(req); + } + } + + private static CryptoKeyName cryptoKeyNameOnRing(KeyRing keyRing, String keyName) { + KeyRingName krn = KeyRingName.parse(keyRing.getName()); + return CryptoKeyName.of(krn.getProject(), krn.getLocation(), krn.getKeyRing(), keyName); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ManagedLifecycle.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ManagedLifecycle.java new file mode 100644 index 000000000000..f34ba29c96c4 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ManagedLifecycle.java @@ -0,0 +1,32 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.registry; + +/** + * Marker interface to signify an object which has start and stop lifecycle points. + * + *

This interface shouldn't be used outside of {@code com.google.cloud.storage.it.runner}. When + * we have access to java modules this will be enforced. + */ +public interface ManagedLifecycle { + + Object get(); + + void start(); + + void stop(); +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/MetadataService.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/MetadataService.java new file mode 100644 index 000000000000..b668a4936c26 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/MetadataService.java @@ -0,0 +1,70 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.google.cloud.storage.it.runner.registry;
+
+import com.google.api.client.http.GenericUrl;
+import com.google.api.client.http.HttpRequest;
+import com.google.api.client.http.HttpRequestFactory;
+import com.google.api.client.http.HttpResponse;
+import com.google.api.client.http.HttpResponseException;
+import com.google.api.client.http.javanet.NetHttpTransport;
+import com.google.common.base.Suppliers;
+import com.google.common.io.CharStreams;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.net.UnknownHostException;
+import java.nio.charset.StandardCharsets;
+import java.util.Optional;
+import java.util.function.Supplier;
+
+/** Minimal client for the GCE metadata server, used to discover where tests are running. */
+final class MetadataService {
+
+  // Lazily build a single request factory that always sends the mandatory
+  // "Metadata-Flavor: Google" header required by the metadata server.
+  private static final Supplier<HttpRequestFactory> requestFactory =
+      Suppliers.memoize(
+          () ->
+              new NetHttpTransport.Builder()
+                  .build()
+                  .createRequestFactory(
+                      request -> {
+                        request.setCurlLoggingEnabled(false);
+                        request.getHeaders().set("Metadata-Flavor", "Google");
+                      }));
+  private static final String baseUri = "http://metadata.google.internal";
+
+  /** Manual smoke-test entry point; prints the detected zone. */
+  public static void main(String[] args) throws IOException {
+    System.out.println("zone() = " + zone());
+  }
+
+  /** The zone of the VM this JVM is running on, or empty when not on GCE. */
+  public static Optional<Zone> zone() throws IOException {
+    return get("/computeMetadata/v1/instance/zone").map(Zone::parse);
+  }
+
+  /**
+   * First line of the metadata entry at {@code path}, or empty when the metadata server is
+   * unreachable (i.e. not running on GCE) or the entry does not exist.
+   */
+  public static Optional<String> get(String path) throws IOException {
+    GenericUrl url = new GenericUrl(baseUri + path);
+    try {
+      HttpRequest req = requestFactory.get().buildGetRequest(url);
+      HttpResponse resp = req.execute();
+      try (InputStream content = resp.getContent();
+          // Explicit charset: the platform-default-charset InputStreamReader ctor is
+          // environment-dependent; metadata responses are ASCII/UTF-8.
+          Reader r = new InputStreamReader(content, StandardCharsets.UTF_8)) {
+        return CharStreams.readLines(r).stream().findFirst();
+      }
+    } catch (HttpResponseException | UnknownHostException e) {
+      // Unknown host => not on GCE; HTTP error => entry missing. Both mean "absent".
+      return Optional.empty();
+    }
+  }
+}
diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ObjectsFixture.java
b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ObjectsFixture.java new file mode 100644 index 000000000000..2fc8b3582d7c --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ObjectsFixture.java @@ -0,0 +1,161 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.registry; + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobGetOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.ComposeRequest; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.collect.ImmutableMap; +import java.nio.charset.StandardCharsets; +import java.util.Locale; + +/** Globally scoped objects correlated with a specific backend and bucket */ +public final class ObjectsFixture implements ManagedLifecycle { + + private final Storage s; + private final BucketInfo bucket; + + private final BlobTargetOption[] blobTargetOptions; + private final BlobGetOption[] blobGetOptions; + + private BlobInfo info1; + private BlobInfo info2; + private BlobInfo info3; + private BlobInfo 
info4; + private ObjectAndContent obj512KiB; + + ObjectsFixture(Storage s, BucketInfo bucket) { + this.s = s; + this.bucket = bucket; + boolean isRequesterPays = Boolean.TRUE.equals(bucket.requesterPays()); + String projectId = s.getOptions().getProjectId(); + if (isRequesterPays) { + blobTargetOptions = + new BlobTargetOption[] { + BlobTargetOption.doesNotExist(), BlobTargetOption.userProject(projectId) + }; + } else { + blobTargetOptions = new BlobTargetOption[] {BlobTargetOption.doesNotExist()}; + } + if (isRequesterPays) { + blobGetOptions = new BlobGetOption[] {BlobGetOption.userProject(projectId)}; + } else { + blobGetOptions = new BlobGetOption[] {}; + } + } + + @Override + public Object get() { + return this; + } + + public BlobInfo getInfo1() { + return info1; + } + + public BlobInfo getInfo2() { + return info2; + } + + public BlobInfo getInfo3() { + return info3; + } + + public BlobInfo getInfo4() { + return info4; + } + + public ObjectAndContent getObj512KiB() { + return obj512KiB; + } + + @Override + public void start() { + String bucketName = bucket.getName(); + + BlobId blobId1 = BlobId.of(bucketName, objName("001")); + BlobId blobId2 = BlobId.of(bucketName, objName("002")); + BlobId blobId3 = BlobId.of(bucketName, objName("003")); + BlobId blobId4 = BlobId.of(bucketName, objName("004")); + + BlobInfo info1 = BlobInfo.newBuilder(blobId1).setMetadata(ImmutableMap.of("pow", "1")).build(); + BlobInfo info2 = BlobInfo.newBuilder(blobId2).setMetadata(ImmutableMap.of("pow", "2")).build(); + BlobInfo info3 = BlobInfo.newBuilder(blobId3).setMetadata(ImmutableMap.of("pow", "3")).build(); + BlobInfo info4 = BlobInfo.newBuilder(blobId4).setMetadata(ImmutableMap.of("pow", "4")).build(); + this.info1 = + s.create(info1, "A".getBytes(StandardCharsets.UTF_8), blobTargetOptions).asBlobInfo(); + + ComposeRequest c2 = + ComposeRequest.newBuilder() + .addSource(blobId1.getName(), blobId1.getName()) + .setTarget(info2) + .setTargetOptions(blobTargetOptions) + .build(); + 
ComposeRequest c3 = + ComposeRequest.newBuilder() + .addSource(blobId2.getName(), blobId2.getName()) + .setTarget(info3) + .setTargetOptions(blobTargetOptions) + .build(); + ComposeRequest c4 = + ComposeRequest.newBuilder() + .addSource(blobId3.getName(), blobId3.getName()) + .setTarget(info4) + .setTargetOptions(blobTargetOptions) + .build(); + this.info2 = s.compose(c2).asBlobInfo(); + this.info3 = s.compose(c3).asBlobInfo(); + this.info4 = s.compose(c4).asBlobInfo(); + + byte[] bytes = DataGenerator.base64Characters().genBytes(512 * 1024); + Blob obj512KiB = + s.create(BlobInfo.newBuilder(bucket, "obj512KiB").build(), bytes, blobTargetOptions); + this.obj512KiB = new ObjectAndContent(obj512KiB.asBlobInfo(), ChecksummedTestContent.of(bytes)); + } + + @Override + public void stop() {} + + private static String objName(String name) { + return String.format(Locale.US, "%s/%s", ObjectsFixture.class.getSimpleName(), name); + } + + public static final class ObjectAndContent { + private final BlobInfo info; + private final ChecksummedTestContent content; + + private ObjectAndContent(BlobInfo info, ChecksummedTestContent content) { + this.info = info; + this.content = content; + } + + public BlobInfo getInfo() { + return info; + } + + public ChecksummedTestContent getContent() { + return content; + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/OtelSdkShim.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/OtelSdkShim.java new file mode 100644 index 000000000000..f2ea10bce5d0 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/OtelSdkShim.java @@ -0,0 +1,102 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.registry; + +import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; +import com.google.cloud.opentelemetry.metric.MetricConfiguration; +import com.google.cloud.opentelemetry.trace.TraceConfiguration; +import com.google.cloud.opentelemetry.trace.TraceExporter; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; +import io.opentelemetry.sdk.trace.export.SpanExporter; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import java.time.Duration; +import java.util.Arrays; + +public final class OtelSdkShim implements ManagedLifecycle { + private static final boolean STORAGE_IT_OTEL_ENABLE = + Arrays.asList( + System.getProperty("STORAGE_IT_OTEL_ENABLE"), System.getenv("STORAGE_IT_OTEL_ENABLE")) + .contains("true"); + private final String projectId; + + private OpenTelemetrySdk otelSdk; + + OtelSdkShim(String projectId) { + this.projectId = projectId; + } + + @Override + public OpenTelemetry get() { + if (otelSdk == null) { + return OpenTelemetry.noop(); + } + return otelSdk; + } + + @Override + public void start() { + if (!STORAGE_IT_OTEL_ENABLE) { + otelSdk = null; + return; + } + MetricConfiguration metricConfiguration = + MetricConfiguration.builder() 
+ .setProjectId(projectId) + .setDeadline(Duration.ofSeconds(30)) + .build(); + TraceConfiguration traceConfiguration = + TraceConfiguration.builder() + .setProjectId(projectId) + .setDeadline(Duration.ofSeconds(30)) + .build(); + MetricExporter metricExporter = + GoogleCloudMetricExporter.createWithConfiguration(metricConfiguration); + SpanExporter spanExporter = TraceExporter.createWithConfiguration(traceConfiguration); + + SdkMeterProvider meterProvider = + SdkMeterProvider.builder() + .registerMetricReader( + PeriodicMetricReader.builder(metricExporter) + .setInterval(Duration.ofSeconds(60)) + .build()) + .build(); + SdkTracerProvider tracerProvider = + SdkTracerProvider.builder() + .setSampler(Sampler.traceIdRatioBased(1.0)) + .addSpanProcessor( + BatchSpanProcessor.builder(spanExporter).setMeterProvider(meterProvider).build()) + .build(); + otelSdk = + OpenTelemetrySdk.builder() + .setTracerProvider(tracerProvider) + .setMeterProvider(meterProvider) + .build(); + } + + @Override + public void stop() { + if (otelSdk != null) { + otelSdk.close(); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ParallelScheduler.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ParallelScheduler.java new file mode 100644 index 000000000000..ed8769fae764 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ParallelScheduler.java @@ -0,0 +1,50 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.registry; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Phaser; +import org.junit.runners.model.RunnerScheduler; + +final class ParallelScheduler implements RunnerScheduler { + + private final ExecutorService executorService; + private final Phaser childCounter; + + ParallelScheduler(ExecutorService executorService) { + this.executorService = executorService; + childCounter = new Phaser(); + } + + @Override + public void schedule(Runnable childStatement) { + childCounter.register(); + executorService.submit( + () -> { + try { + childStatement.run(); + } finally { + childCounter.arrive(); + } + }); + } + + @Override + public void finished() { + childCounter.awaitAdvance(0); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ProtectedBucketNames.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ProtectedBucketNames.java new file mode 100644 index 000000000000..cb1b5d51141a --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ProtectedBucketNames.java @@ -0,0 +1,67 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.registry; + +import com.google.common.collect.ImmutableSet; +import javax.annotation.concurrent.ThreadSafe; + +/** + * Threadsafe class to track protected bucket names. + * + *

We are tracking that tests do not mutate the global buckets which are shared across tests,
+ * this class tracks the names of the buckets.
+ */
+@ThreadSafe
+final class ProtectedBucketNames implements ManagedLifecycle {
+
+  // volatile so the unsynchronized reader in isProtected() always observes the latest
+  // published set; the set itself is immutable and writers replace it under `synchronized`.
+  // (Without volatile there is no happens-before edge between writers and readers.)
+  private volatile ImmutableSet<String> protectedNames;
+
+  ProtectedBucketNames() {
+    this.protectedNames = ImmutableSet.of();
+  }
+
+  @Override
+  public Object get() {
+    return this;
+  }
+
+  @Override
+  public synchronized void start() {
+    protectedNames = ImmutableSet.of();
+  }
+
+  @Override
+  public synchronized void stop() {
+    protectedNames = ImmutableSet.of();
+  }
+
+  /** Mark {@code name} as protected (copy-on-write replacement of the set). */
+  public synchronized void add(String name) {
+    ImmutableSet<String> tmp = protectedNames;
+    protectedNames = ImmutableSet.<String>builder().addAll(tmp).add(name).build();
+  }
+
+  /** Remove {@code name} from the protected set (copy-on-write replacement). */
+  public synchronized void remove(String name) {
+    ImmutableSet<String> tmp = protectedNames;
+    protectedNames =
+        tmp.stream().filter(n -> !n.equals(name)).collect(ImmutableSet.toImmutableSet());
+  }
+
+  public boolean isProtected(String name) {
+    // Lock-free read of an immutable snapshot; safe because the field is volatile.
+    ImmutableSet<String> tmp = protectedNames;
+    return tmp.contains(name);
+  }
+}
diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/Registry.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/Registry.java
new file mode 100644
index 000000000000..c232cd32de6c
--- /dev/null
+++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/Registry.java
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.registry; + +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.CrossRunIntersection; +import com.google.cloud.storage.it.runner.TestInitializer; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.common.base.Joiner; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.ListeningScheduledExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.StatusCode; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Scope; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Optional; +import java.util.concurrent.Executors; +import java.util.function.Function; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.runner.Description; +import org.junit.runner.notification.RunListener; +import org.junit.runners.model.FrameworkField; +import org.junit.runners.model.RunnerScheduler; +import org.junit.runners.model.TestClass; + +/** + * Global tests resource registry which encapsulates the lifecycle of all integration tests global + * resources including but not limited to {@link com.google.cloud.storage.Storage} instances, {@link + * com.google.cloud.storage.BucketInfo} buckets, Test Bench, Thread Pool, etc. + * + *

All resources are lazily initialized and will live until the registry is shutdown. + * + *

All resources registered here are available for field {@code @}{@link Inject}ion when run via + * {@link com.google.cloud.storage.it.runner.StorageITRunner}. + * + *

This class shouldn't be used outside of {@code com.google.cloud.storage.it.runner}. When we + * have access to java modules this will be enforced. + */ +public final class Registry extends RunListener { + + private static volatile boolean shutdownHookRegistered = false; + private static final Object shutdownHookRegistrationLock = new Object(); + + private static final Registry INSTANCE = new Registry(); + private final ListeningScheduledExecutorService exec = + MoreExecutors.listeningDecorator( + Executors.newScheduledThreadPool( + 2 * Runtime.getRuntime().availableProcessors(), + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("test-run-%d").build())); + + private final TestRunScopedInstance testBench = + TestRunScopedInstance.of("fixture/TEST_BENCH", () -> TestBench.newBuilder().build()); + + private final TestRunScopedInstance generator = + TestRunScopedInstance.of("fixture/GENERATOR", Generator::new); + + private final TestRunScopedInstance zone = + TestRunScopedInstance.of("fixture/ZONE", Zone.ZoneShim::new); + public final TestRunScopedInstance otelSdk = + TestRunScopedInstance.of( + "fixture/OTEL_SDK", + () -> { + String projectId = StorageOptions.getDefaultInstance().getProjectId(); + return new OtelSdkShim(projectId); + }); + + private final BackendResources prodBackendResources = + BackendResources.of(Backend.PROD, otelSdk, zone); + private final BackendResources testBenchBackendResource = + BackendResources.of(Backend.TEST_BENCH, otelSdk, zone); + + private final ImmutableList> entries = + new ImmutableList.Builder>() + .add( + RegistryEntry.of(0, OpenTelemetry.class, otelSdk), + RegistryEntry.of(1, Zone.class, zone), + RegistryEntry.of(1, TestBench.class, testBench), + RegistryEntry.of(2, Generator.class, generator), + registryEntry(3, Backend.class, CrossRunIntersection::getBackend), + registryEntry(4, Transport.class, CrossRunIntersection::getTransport)) + .addAll(prodBackendResources.getRegistryEntries()) + 
.addAll(testBenchBackendResource.getRegistryEntries())
+          .build();
+
+  private final ImmutableSet<Class<?>> injectableTypes =
+      entries.stream().map(RegistryEntry::getType).collect(ImmutableSet.toImmutableSet());
+  private final String injectableTypesString = Joiner.on("|").join(injectableTypes);
+  // The test (plus its span/scope) currently running on this thread; null when idle.
+  private final ThreadLocal<CurrentTest> currentTest = new ThreadLocal<>();
+
+  private static final class CurrentTest {
+    private final Description desc;
+    private final Span span;
+    private final Scope scope;
+
+    private CurrentTest(Description desc, Span span, Scope scope) {
+      this.desc = desc;
+      this.span = span;
+      this.scope = scope;
+    }
+
+    public static CurrentTest of(Description desc, Span span, Scope scope) {
+      return new CurrentTest(desc, span, scope);
+    }
+  }
+
+  private Registry() {}
+
+  public static Registry getInstance() {
+    // Double-checked registration of the shutdown hook; the hook itself drains the registry.
+    if (!shutdownHookRegistered) {
+      synchronized (shutdownHookRegistrationLock) {
+        if (!shutdownHookRegistered) {
+          Runtime.getRuntime().addShutdownHook(new Thread(() -> Registry.getInstance().shutdown()));
+          shutdownHookRegistered = true;
+        }
+      }
+    }
+    return INSTANCE;
+  }
+
+  public ImmutableSet<Class<?>> injectableTypes() {
+    return injectableTypes;
+  }
+
+  public String injectableTypesString() {
+    return injectableTypesString;
+  }
+
+  TestBench testBench() {
+    return testBench.get();
+  }
+
+  /** The Description of the test currently running on this thread, or null when none. */
+  @Nullable
+  public Description getCurrentTest() {
+    // Guard against NPE: callers (e.g. Generator.randomObjectName) rely on a null return
+    // when no test is active on this thread; the previous `currentTest.get().desc` threw.
+    CurrentTest ct = currentTest.get();
+    return ct == null ? null : ct.desc;
+  }
+
+  @Override
+  public void testStarted(Description description) {
+    OpenTelemetry sdk;
+    // Test-bench-backed tests are excluded from telemetry export.
+    if (description.getDisplayName().contains("[TEST_BENCH]")
+        || isClassAnnotatedSingleBackendTestBench(description)
+        || Arrays.stream(description.getTestClass().getDeclaredFields())
+            .anyMatch(field -> field.getType() == TestBench.class)) {
+      sdk = OpenTelemetry.noop();
+    } else {
+      sdk = otelSdk.get().get();
+    }
+
+    Tracer tracer = sdk.getTracer("test");
+    Span span =
+        tracer
+            .spanBuilder(
+                String.format(
+                    Locale.US, "%s/%s", description.getClassName(), description.getMethodName()))
+
.setAttribute("service.name", "test") + .startSpan(); + Scope scope = span.makeCurrent(); + currentTest.set(CurrentTest.of(description, span, scope)); + } + + @Override + public void testFinished(Description description) { + CurrentTest currentTest = this.currentTest.get(); + currentTest.scope.close(); + currentTest.span.end(); + this.currentTest.remove(); + } + + public RunnerScheduler parallelScheduler() { + // Schedulers can't be shared, create a new one with the shared thread pool + return new ParallelScheduler(exec); + } + + public TestInitializer newTestInitializerForCell(CrossRunIntersection c) { + return o -> { + injectFields(o, c); + return o; + }; + } + + public void injectFields(Object test, CrossRunIntersection crossRunIntersection) + throws IllegalAccessException { + TestClass testClass = new TestClass(test.getClass()); + List annotatedFields = testClass.getAnnotatedFields(Inject.class); + for (FrameworkField ff : annotatedFields) { + Object resolve = resolve(ff, crossRunIntersection); + ff.getField().set(test, resolve); + } + } + + public Object resolve(FrameworkField ff, CrossRunIntersection crossRunIntersection) { + StorageFixture sf = ff.getAnnotation(StorageFixture.class); + final CrossRunIntersection finalCrossRunIntersection; + if (sf != null) { + finalCrossRunIntersection = crossRunIntersection.withTransport(sf.value()); + } else { + finalCrossRunIntersection = crossRunIntersection; + } + Optional> first = + entries.stream() + .filter(re -> re.getPredicate().test(ff, finalCrossRunIntersection)) + .findFirst(); + if (first.isPresent()) { + TestRunScopedInstance instance = first.get().getInstance(); + ManagedLifecycle o = instance.get(); + if (o instanceof StatelessManagedLifecycle) { + StatelessManagedLifecycle sml = (StatelessManagedLifecycle) o; + return sml.resolve(ff, crossRunIntersection); + } else { + return o.get(); + } + } else { + throw new IllegalArgumentException( + String.format( + Locale.US, "Invalid: ff: %s, crossRunIntersection: 
%s", ff, crossRunIntersection)); + } + } + + // private volatile ListenableFuture shutdownF; + + private void shutdown() { + Span span = + otelSdk + .get() + .get() + .getTracer("registry") + .spanBuilder("registry/shutdown") + .setAttribute("service.name", "registry") + .startSpan(); + try (Scope ignore = span.makeCurrent()) { + entries.stream() + .sorted() + .filter(e -> e.getShutdownPriority() > 0) + .forEach( + re -> { + try { + re.getInstance().shutdown(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } catch (Throwable t) { + span.recordException(t); + span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); + throw t; + } finally { + span.end(); + try { + // manually shutdown otelSdk so that the previous span can be recorded as ending + otelSdk.shutdown(); + } catch (Exception e) { + //noinspection ThrowFromFinallyBlock + throw new RuntimeException(e); + } + } + } + + @FunctionalInterface + private interface StatelessManagedLifecycle extends ManagedLifecycle { + T resolve(FrameworkField ff, CrossRunIntersection crossRunIntersection); + + @Override + default Object get() { + return this; + } + + @Override + default void start() {} + + @Override + default void stop() {} + } + + private static RegistryEntry registryEntry( + int shutdownPriority, Class c, Function f) { + TestRunScopedInstance of = TestRunScopedInstance.of(c.getSimpleName(), () -> lift(f)); + RegistryApplicabilityPredicate pred = RegistryApplicabilityPredicate.cellWith(f); + return RegistryEntry.of( + shutdownPriority, + c, + of, + RegistryApplicabilityPredicate.annotatedWith(Inject.class) + .and(RegistryApplicabilityPredicate.assignableFrom(c)) + .and(pred)); + } + + private static StatelessManagedLifecycle lift(Function f) { + return (ff, cell) -> f.apply(cell); + } + + private static boolean isClassAnnotatedSingleBackendTestBench(Description description) { + return Arrays.stream(description.getTestClass().getAnnotations()) + .anyMatch( + a -> { + if (a 
instanceof SingleBackend) { + SingleBackend sb = (SingleBackend) a; + return sb.value() == Backend.TEST_BENCH; + } else { + return false; + } + }); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/RegistryApplicabilityPredicate.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/RegistryApplicabilityPredicate.java new file mode 100644 index 000000000000..43bcd8096a40 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/RegistryApplicabilityPredicate.java @@ -0,0 +1,95 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it.runner.registry; + +import static java.util.Objects.requireNonNull; + +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.CrossRunIntersection; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.BucketFixture; +import com.google.cloud.storage.it.runner.annotations.BucketType; +import java.lang.annotation.Annotation; +import java.util.function.Function; +import org.junit.runners.model.FrameworkField; + +/** + * In order to register an instance with the {@link Registry} a Predicate must be provided which + * will determine if it is applicable to a specific {@link FrameworkField} and {@link + * CrossRunIntersection}. + * + *

Includes various utility method to create predicates + */ +@FunctionalInterface +interface RegistryApplicabilityPredicate { + + boolean test(FrameworkField ff, CrossRunIntersection crossRunIntersection); + + default RegistryApplicabilityPredicate and(RegistryApplicabilityPredicate next) { + requireNonNull(next, "next must be non null"); + return (ff, intersection) -> this.test(ff, intersection) && next.test(ff, intersection); + } + + default RegistryApplicabilityPredicate or(RegistryApplicabilityPredicate next) { + requireNonNull(next, "next must be non null"); + return (ff, intersection) -> this.test(ff, intersection) || next.test(ff, intersection); + } + + static RegistryApplicabilityPredicate alwaysTrue() { + return (ff, intersection) -> true; + } + + static RegistryApplicabilityPredicate annotatedWith(Class ac) { + return (ff, intersection) -> ff.getAnnotation(ac) != null; + } + + static RegistryApplicabilityPredicate assignableFrom(Class c) { + return (ff, intersection) -> ff.getType().isAssignableFrom(c); + } + + static RegistryApplicabilityPredicate backendIs(Backend b) { + return (ff, intersection) -> intersection != null && intersection.getBackend() == b; + } + + static RegistryApplicabilityPredicate transportIs(Transport t) { + return (ff, intersection) -> intersection != null && intersection.getTransport() == t; + } + + static RegistryApplicabilityPredicate transportAndBackendAre(Transport t, Backend b) { + return transportIs(t).and(backendIs(b)); + } + + static RegistryApplicabilityPredicate cellWith(Function f) { + return (ff, intersection) -> intersection != null && f.apply(intersection) != null; + } + + static RegistryApplicabilityPredicate isDefaultBucket() { + return (ff, intersection) -> { + BucketFixture annotation = ff.getAnnotation(BucketFixture.class); + if (annotation == null) { + return true; + } else { + return annotation.value() == BucketType.DEFAULT; + } + }; + } + + static RegistryApplicabilityPredicate bucketTypeIs(BucketType 
bucketType) { + return annotatedWith(BucketFixture.class) + .and((ff, intersection) -> ff.getAnnotation(BucketFixture.class).value() == bucketType); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/RegistryEntry.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/RegistryEntry.java new file mode 100644 index 000000000000..9c7d395d6f20 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/RegistryEntry.java @@ -0,0 +1,102 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.registry; + +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.common.base.MoreObjects; +import java.util.Comparator; + +/** + * Entry tuple for an instance registered with the {@link Registry}. + * + *

Binds together the target injection type, shutdown priority, extended applicability predicate + * and the instance. + */ +final class RegistryEntry implements Comparable> { + + private static final Comparator> comparator = + Comparator.>comparingInt(RegistryEntry::getShutdownPriority).reversed(); + + private final Class type; + + /** Higher will be shutdown earlier */ + private final int shutdownPriority; + + private final RegistryApplicabilityPredicate predicate; + private final TestRunScopedInstance instance; + + private RegistryEntry( + int shutdownPriority, + Class type, + TestRunScopedInstance instance, + RegistryApplicabilityPredicate predicate) { + this.shutdownPriority = shutdownPriority; + this.type = type; + this.predicate = predicate; + this.instance = instance; + } + + public Class getType() { + return type; + } + + public int getShutdownPriority() { + return shutdownPriority; + } + + public RegistryApplicabilityPredicate getPredicate() { + return predicate; + } + + public TestRunScopedInstance getInstance() { + return instance; + } + + @Override + public int compareTo(RegistryEntry o) { + return comparator.compare(this, o); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("type", type) + .add("shutdownPriority", shutdownPriority) + .add("instance", instance) + .add("predicate", predicate) + .toString(); + } + + static RegistryEntry of( + int shutdownPriority, Class c, TestRunScopedInstance instance) { + return of(shutdownPriority, c, instance, RegistryApplicabilityPredicate.alwaysTrue()); + } + + static RegistryEntry of( + int shutdownPriority, + Class c, + TestRunScopedInstance instance, + RegistryApplicabilityPredicate predicate) { + return new RegistryEntry<>( + shutdownPriority, + c, + instance, + RegistryApplicabilityPredicate.annotatedWith(Inject.class) + .and(RegistryApplicabilityPredicate.assignableFrom(c)) + .and(predicate)); + } +} diff --git 
a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/StorageControlInstance.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/StorageControlInstance.java new file mode 100644 index 000000000000..aaa1abf8b226 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/StorageControlInstance.java @@ -0,0 +1,62 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it.runner.registry; + +import com.google.storage.control.v2.StorageControlClient; +import com.google.storage.control.v2.StorageControlSettings; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +final class StorageControlInstance implements ManagedLifecycle { + + private final StorageControlSettings settings; + + private StorageControlClient ctrl; + + StorageControlInstance(StorageControlSettings settings) { + this.settings = settings; + } + + StorageControlClient getCtrl() { + return ctrl; + } + + @Override + public Object get() { + return ctrl; + } + + @Override + public void start() { + try { + ctrl = StorageControlClient.create(settings); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public void stop() { + try { + ctrl.shutdownNow(); + ctrl.awaitTermination(30, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/StorageInstance.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/StorageInstance.java new file mode 100644 index 000000000000..b653dea05676 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/StorageInstance.java @@ -0,0 +1,185 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.it.runner.registry; + +import com.google.cloud.Policy; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +final class StorageInstance implements ManagedLifecycle { + + private final StorageOptions options; + private Storage storage; + + private Storage proxy; + + private final ProtectedBucketNames protectedBucketNames; + + StorageInstance(StorageOptions options, ProtectedBucketNames protectedBucketNames) { + this.options = options; + this.protectedBucketNames = protectedBucketNames; + } + + Storage getStorage() { + return storage; + } + + @Override + public Object get() { + return proxy; + } + + @Override + public void start() { + storage = options.getService(); + proxy = new VetoingStorageProxy(); + } + + @Override + public void stop() { + try (Storage ignore = storage) { + storage = null; + proxy = null; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private static VetoedBucketUpdateException err(String bucketName) { + return new VetoedBucketUpdateException("Attempted to modify global bucket: " + bucketName); + } + + private static class VetoException extends RuntimeException { + private VetoException(String message) { + super(message); + } + } + + private static final class VetoedBucketUpdateException extends VetoException { + private VetoedBucketUpdateException(String message) { + super(message); + } + } + + /** + * Define a proxy which can veto calls attempting to mutate protected buckets. this helps guard + * against a test trying to mutate the global bucket rather than creating its own bucket. 
+ */ + private final class VetoingStorageProxy extends AbstractStorageProxy { + + private VetoingStorageProxy() { + super(storage); + } + + @Override + public Bucket update(BucketInfo bucketInfo, BucketTargetOption... options) { + checkBucketProtected(bucketInfo); + return super.update(bucketInfo, options); + } + + @Override + public boolean delete(String bucket, BucketSourceOption... options) { + checkBucketProtected(bucket); + return super.delete(bucket, options); + } + + @Override + public Policy setIamPolicy(String bucket, Policy policy, BucketSourceOption... options) { + checkBucketProtected(bucket); + return super.setIamPolicy(bucket, policy, options); + } + + @Override + public boolean deleteDefaultAcl(String bucket, Entity entity) { + checkBucketProtected(bucket); + return super.deleteDefaultAcl(bucket, entity); + } + + @Override + public Acl createDefaultAcl(String bucket, Acl acl) { + checkBucketProtected(bucket); + return super.createDefaultAcl(bucket, acl); + } + + @Override + public Acl updateDefaultAcl(String bucket, Acl acl) { + checkBucketProtected(bucket); + return super.updateDefaultAcl(bucket, acl); + } + + @Override + public boolean deleteAcl(String bucket, Entity entity, BucketSourceOption... options) { + checkBucketProtected(bucket); + return super.deleteAcl(bucket, entity, options); + } + + @Override + public boolean deleteAcl(String bucket, Entity entity) { + checkBucketProtected(bucket); + return super.deleteAcl(bucket, entity); + } + + @Override + public Acl createAcl(String bucket, Acl acl, BucketSourceOption... options) { + checkBucketProtected(bucket); + return super.createAcl(bucket, acl, options); + } + + @Override + public Acl createAcl(String bucket, Acl acl) { + checkBucketProtected(bucket); + return super.createAcl(bucket, acl); + } + + @Override + public Acl updateAcl(String bucket, Acl acl, BucketSourceOption... 
options) { + checkBucketProtected(bucket); + return super.updateAcl(bucket, acl, options); + } + + @Override + public Acl updateAcl(String bucket, Acl acl) { + checkBucketProtected(bucket); + return super.updateAcl(bucket, acl); + } + + @Override + public Bucket lockRetentionPolicy(BucketInfo bucket, BucketTargetOption... options) { + checkBucketProtected(bucket); + return super.lockRetentionPolicy(bucket, options); + } + + @Override + public void close() throws Exception { + throw new VetoException("Called #close() on global Storage instance"); + } + + private void checkBucketProtected(BucketInfo bucket) { + checkBucketProtected(bucket.getName()); + } + + private void checkBucketProtected(String bucketName) { + if (protectedBucketNames.isProtected(bucketName)) { + throw err(bucketName); + } + } + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/TestBench.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/TestBench.java new file mode 100644 index 000000000000..4d9340762093 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/TestBench.java @@ -0,0 +1,564 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it.runner.registry; + +import static com.google.cloud.RetryHelper.runWithRetries; +import static java.util.Objects.requireNonNull; + +import com.google.api.client.http.ByteArrayContent; +import com.google.api.client.http.GenericUrl; +import com.google.api.client.http.HttpContent; +import com.google.api.client.http.HttpRequest; +import com.google.api.client.http.HttpRequestFactory; +import com.google.api.client.http.HttpResponse; +import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.api.core.NanoClock; +import com.google.api.gax.retrying.BasicResultRetryAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.RetryHelper.RetryHelperException; +import com.google.cloud.Tuple; +import com.google.cloud.conformance.storage.v1.InstructionList; +import com.google.cloud.conformance.storage.v1.Method; +import com.google.cloud.storage.it.runner.SneakyException; +import com.google.common.base.Charsets; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.io.CharStreams; +import com.google.gson.Gson; +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.SocketException; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.Duration; +import java.util.List; +import java.util.Locale; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A {@link ManagedLifecycle} which integrates with the storage-testbench by pulling the + * docker image, starting 
the container, providing methods for interacting with the {@code + * /retry_test} rest api, stopping the container. + * + *

A single instance of the testbench is expected to be managed by the {@link + * com.google.cloud.storage.it.runner.registry.Registry} which is used by {@link + * com.google.cloud.storage.it.runner.StorageITRunner}. Accessing the testbench can be accomplished + * by doing the following: + * + *

    + *
  1. Annotating your test class with {@code @RunWith(StorageITRunner.class)} + *
  2. Configuring the backend for your integration tests to be {@link + * com.google.cloud.storage.it.runner.annotations.Backend#TEST_BENCH} by doing either + *
      + *
    1. Annotating your test class with {@code @SingleBackend(Backend.TEST_BENCH)} + *
    2. Annotating your test class with {@code @CrossRun} and ensuring {@code + * Backend.TEST_BENCH} is included in the {@code backends} parameter + *
    + *
  3. Specifying {@code @Inject public TestBench testBench;} as a field for the instance of + * testbench to be injected to your test + *
+ */ +public final class TestBench implements ManagedLifecycle { + + private static final Logger LOGGER = LoggerFactory.getLogger(TestBench.class); + + private final boolean ignorePullError; + private final String baseUri; + private final String gRPCBaseUri; + private final String dockerImageName; + private final String dockerImageTag; + private final String containerName; + + private final Gson gson; + private final HttpRequestFactory requestFactory; + + private Process process; + private Path tempDirectory; + private Path outPath; + private Path errPath; + + private boolean runningOutsideAlready; + + private TestBench( + boolean ignorePullError, + String baseUri, + String gRPCBaseUri, + String dockerImageName, + String dockerImageTag, + String containerName) { + this.ignorePullError = true; + this.baseUri = baseUri; + this.gRPCBaseUri = gRPCBaseUri; + this.dockerImageName = dockerImageName; + this.dockerImageTag = dockerImageTag; + this.containerName = containerName; + this.gson = new Gson(); + this.requestFactory = + new NetHttpTransport.Builder() + .build() + .createRequestFactory( + request -> { + request.setCurlLoggingEnabled(false); + request.getHeaders().setAccept("application/json"); + request + .getHeaders() + .setUserAgent( + String.format(Locale.US, "%s/ test-bench/", this.containerName)); + }); + } + + public String getBaseUri() { + return baseUri; + } + + public String getGRPCBaseUri() { + return gRPCBaseUri; + } + + public RetryTestResource createRetryTest(RetryTestResource retryTestResource) throws IOException { + GenericUrl url = new GenericUrl(baseUri + "/retry_test"); + String jsonString = gson.toJson(retryTestResource); + HttpContent content = + new ByteArrayContent("application/json", jsonString.getBytes(StandardCharsets.UTF_8)); + HttpRequest req = requestFactory.buildPostRequest(url, content); + HttpResponse resp = req.execute(); + RetryTestResource result = gson.fromJson(resp.parseAsString(), RetryTestResource.class); + resp.disconnect(); + 
return result; + } + + public void deleteRetryTest(RetryTestResource retryTestResource) throws IOException { + GenericUrl url = new GenericUrl(baseUri + "/retry_test/" + retryTestResource.id); + HttpRequest req = requestFactory.buildDeleteRequest(url); + HttpResponse resp = req.execute(); + resp.disconnect(); + } + + public RetryTestResource getRetryTest(RetryTestResource retryTestResource) throws IOException { + GenericUrl url = new GenericUrl(baseUri + "/retry_test/" + retryTestResource.id); + HttpRequest req = requestFactory.buildGetRequest(url); + HttpResponse resp = req.execute(); + RetryTestResource result = gson.fromJson(resp.parseAsString(), RetryTestResource.class); + resp.disconnect(); + return result; + } + + public List listRetryTests() throws IOException { + GenericUrl url = new GenericUrl(baseUri + "/retry_tests"); + HttpRequest req = requestFactory.buildGetRequest(url); + HttpResponse resp = req.execute(); + JsonObject result = gson.fromJson(resp.parseAsString(), JsonObject.class); + JsonArray retryTest = (JsonArray) result.get("retry_test"); + ImmutableList.Builder b = ImmutableList.builder(); + for (JsonElement e : retryTest) { + b.add(gson.fromJson(e, RetryTestResource.class)); + } + resp.disconnect(); + return b.build(); + } + + private boolean startGRPCServer(int gRPCPort) throws IOException { + GenericUrl url = new GenericUrl(baseUri + "/start_grpc?port=9090"); + HttpRequest req = requestFactory.buildGetRequest(url); + HttpResponse resp = req.execute(); + resp.disconnect(); + return resp.getStatusCode() == 200; + } + + @Override + public Object get() { + return this; + } + + @Override + public void start() { + try { + listRetryTests(); + LOGGER.info("Using testbench running outside test suite."); + runningOutsideAlready = true; + return; + } catch (IOException ignore) { + // expected when the server isn't running already + } + try { + tempDirectory = Files.createTempDirectory(containerName); + outPath = tempDirectory.resolve("stdout"); + 
errPath = tempDirectory.resolve("stderr"); + + File outFile = outPath.toFile(); + File errFile = errPath.toFile(); + LOGGER.info("Redirecting server stdout to: {}", outFile.getAbsolutePath()); + LOGGER.info("Redirecting server stderr to: {}", errFile.getAbsolutePath()); + String dockerImage = String.format(Locale.US, "%s:%s", dockerImageName, dockerImageTag); + // First try and pull the docker image, this validates docker is available and running + // on the host, as well as gives time for the image to be downloaded independently of + // trying to start the container. (Below, when we first start the container we then attempt + // to issue a call against the api before we yield to run our tests.) + try { + Process p = + new ProcessBuilder() + .command("docker", "pull", dockerImage) + .redirectOutput(outFile) + .redirectError(errFile) + .start(); + p.waitFor(5, TimeUnit.MINUTES); + if (!ignorePullError && p.exitValue() != 0) { + dumpServerLogs(outPath, errPath); + throw new IllegalStateException( + String.format( + Locale.US, + "Non-zero status while attempting to pull docker image '%s'", + dockerImage)); + } + } catch (InterruptedException | IllegalThreadStateException e) { + dumpServerLogs(outPath, errPath); + throw new IllegalStateException( + String.format( + Locale.US, "Timeout while attempting to pull docker image '%s'", dockerImage)); + } + + int port = URI.create(baseUri).getPort(); + int gRPCPort = URI.create(gRPCBaseUri).getPort(); + final List command = + ImmutableList.of( + "docker", + "run", + "-i", + "--rm", + "--publish", + port + ":9000", + "--publish", + gRPCPort + ":9090", + String.format(Locale.US, "--name=%s", containerName), + dockerImage, + "gunicorn", + "--bind=0.0.0.0:9000", + "--worker-class=sync", + "--threads=10", + "--access-logfile=-", + "--keep-alive=0", + "testbench:run()"); + process = + new ProcessBuilder() + .command(command) + .redirectOutput(outFile) + .redirectError(errFile) + .start(); + LOGGER.info(command.toString()); + try { + 
// wait a small amount of time for the server to come up before probing + Thread.sleep(500); + // wait for the server to come up + List existingResources = + runWithRetries( + TestBench.this::listRetryTests, + RetrySettings.newBuilder() + .setTotalTimeoutDuration(Duration.ofSeconds(30)) + .setInitialRetryDelayDuration(Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofSeconds(5)) + .build(), + new BasicResultRetryAlgorithm>() { + @Override + public boolean shouldRetry( + Throwable previousThrowable, List previousResponse) { + return previousThrowable instanceof SocketException; + } + }, + NanoClock.getDefaultClock()); + if (!existingResources.isEmpty()) { + LOGGER.info( + "Test Server already has retry tests in it, is it running outside the tests?"); + } + // Start gRPC Service + if (!startGRPCServer(gRPCPort)) { + throw new IllegalStateException( + "Failed to start server within a reasonable amount of time. Host url(gRPC): " + + gRPCBaseUri); + } + } catch (RetryHelperException e) { + dumpServerLogs(outPath, errPath); + throw new IllegalStateException( + "Failed to connect to server within a reasonable amount of time. 
Host url: " + baseUri, + e.getCause()); + } + } catch (IOException | InterruptedException e) { + throw new RuntimeException(e); + } + } + + @Override + public void stop() { + if (runningOutsideAlready) { + // if the server was running outside the tests already simply return + return; + } + try { + process.destroy(); + process.waitFor(2, TimeUnit.SECONDS); + boolean attemptForceStopContainer = false; + try { + int processExitValue = process.exitValue(); + if (processExitValue != 0) { + attemptForceStopContainer = true; + } + LOGGER.warn("Container exit value = {}", processExitValue); + } catch (IllegalThreadStateException e) { + attemptForceStopContainer = true; + } + + if (attemptForceStopContainer) { + LOGGER.warn("Container did not gracefully exit, attempting to explicitly stop it."); + ImmutableList command = ImmutableList.of("docker", "kill", containerName); + LOGGER.warn(command.toString()); + Process shutdownProcess = new ProcessBuilder(command).start(); + shutdownProcess.waitFor(5, TimeUnit.SECONDS); + int shutdownProcessExitValue = shutdownProcess.exitValue(); + LOGGER.warn("Container exit value = {}", shutdownProcessExitValue); + } + + // wait for the server to shutdown + runWithRetries( + () -> { + try { + listRetryTests(); + } catch (SocketException e) { + // desired result + return null; + } + throw new NotShutdownException(); + }, + RetrySettings.newBuilder() + .setTotalTimeoutDuration(Duration.ofSeconds(30)) + .setInitialRetryDelayDuration(Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofSeconds(5)) + .build(), + new BasicResultRetryAlgorithm>() { + @Override + public boolean shouldRetry(Throwable previousThrowable, List previousResponse) { + return previousThrowable instanceof NotShutdownException; + } + }, + NanoClock.getDefaultClock()); + try { + Files.delete(errPath); + Files.delete(outPath); + Files.delete(tempDirectory); + } catch (IOException e) { + throw new RuntimeException(e); + } + } catch 
(InterruptedException | IOException e) { + throw new RuntimeException(e); + } + } + + private void dumpServerLogs(Path outFile, Path errFile) throws IOException { + try { + LOGGER.warn("Dumping contents of stdout"); + dumpServerLog("stdout", outFile.toFile()); + } finally { + LOGGER.warn("Dumping contents of stderr"); + dumpServerLog("stderr", errFile.toFile()); + } + } + + private void dumpServerLog(String prefix, File out) throws IOException { + try (BufferedReader reader = new BufferedReader(new FileReader(out))) { + String line; + while ((line = reader.readLine()) != null) { + LOGGER.warn("<{}> {}", prefix, line); + } + } + } + + static Builder newBuilder() { + return new Builder(); + } + + public static final class RetryTestResource { + public String id; + public Boolean completed; + public String transport; + public JsonObject instructions; + + public RetryTestResource() {} + + public RetryTestResource(JsonObject instructions) { + this.instructions = instructions; + } + + public static RetryTestResource newRetryTestResource( + Method m, InstructionList l, String transport) { + RetryTestResource resource = new RetryTestResource(); + resource.instructions = new JsonObject(); + JsonArray instructions = new JsonArray(); + for (String s : l.getInstructionsList()) { + instructions.add(s); + } + resource.instructions.add(m.getName(), instructions); + resource.transport = transport; + return resource; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("id", id) + .add("completed", completed) + .add("transport", transport) + .add("instructions", instructions) + .toString(); + } + } + + static final class Builder { + private static final String DEFAULT_BASE_URI = "http://localhost:9000"; + private static final String DEFAULT_GRPC_BASE_URI = "http://localhost:9005"; + private static final String DEFAULT_IMAGE_NAME; + private static final String DEFAULT_IMAGE_TAG; + + static { + ClassLoader cl = 
Thread.currentThread().getContextClassLoader(); + Tuple nameAndTag = + SneakyException.unwrap( + () -> { + InputStream dockerfileText = + cl.getResourceAsStream( + "com/google/cloud/storage/it/runner/registry/Dockerfile"); + //noinspection UnstableApiUsage + return Optional.ofNullable(dockerfileText) + .map(is -> new InputStreamReader(is, Charsets.UTF_8)) + .flatMap( + reader -> + SneakyException.sneaky( + () -> + CharStreams.readLines(reader).stream() + .filter(line -> !line.startsWith("#")) + .filter(line -> line.startsWith("FROM")) + .findFirst() + .flatMap( + from -> { + Pattern pattern = + Pattern.compile("FROM (.*?):(.*)$"); + Matcher matcher = pattern.matcher(from); + if (matcher.matches()) { + return Optional.of( + Tuple.of( + matcher.group(1), matcher.group(2))); + } else { + return Optional.empty(); + } + }))); + }) + .orElse(Tuple.of(null, null)); + DEFAULT_IMAGE_NAME = nameAndTag.x(); + DEFAULT_IMAGE_TAG = nameAndTag.y(); + } + + private static final String DEFAULT_CONTAINER_NAME = "default"; + + private boolean ignorePullError; + private String baseUri; + private String gRPCBaseUri; + private String dockerImageName; + private String dockerImageTag; + private String containerName; + + private Builder() { + this( + false, + DEFAULT_BASE_URI, + DEFAULT_GRPC_BASE_URI, + DEFAULT_IMAGE_NAME, + DEFAULT_IMAGE_TAG, + DEFAULT_CONTAINER_NAME); + } + + private Builder( + boolean ignorePullError, + String baseUri, + String gRPCBaseUri, + String dockerImageName, + String dockerImageTag, + String containerName) { + this.ignorePullError = ignorePullError; + this.baseUri = baseUri; + this.gRPCBaseUri = gRPCBaseUri; + this.dockerImageName = dockerImageName; + this.dockerImageTag = dockerImageTag; + this.containerName = containerName; + } + + public Builder setIgnorePullError(boolean ignorePullError) { + this.ignorePullError = ignorePullError; + return this; + } + + public Builder setBaseUri(String baseUri) { + this.baseUri = requireNonNull(baseUri, "host must be non 
null"); + return this; + } + + public Builder setGRPCBaseUri(String gRPCBaseUri) { + this.gRPCBaseUri = requireNonNull(gRPCBaseUri, "gRPC host must be non null"); + return this; + } + + public Builder setDockerImageName(String dockerImageName) { + this.dockerImageName = requireNonNull(dockerImageName, "dockerImageName must be non null"); + return this; + } + + public Builder setDockerImageTag(String dockerImageTag) { + this.dockerImageTag = requireNonNull(dockerImageTag, "dockerImageTag must be non null"); + return this; + } + + public Builder setContainerName(String containerName) { + this.containerName = requireNonNull(containerName, "containerName must be non null"); + return this; + } + + public TestBench build() { + return new TestBench( + ignorePullError, + baseUri, + gRPCBaseUri, + requireNonNull(dockerImageName, "dockerImageName must be non null"), + requireNonNull(dockerImageTag, "dockerImageTag must be non null"), + String.format(Locale.US, "storage-testbench_%s", containerName)); + } + } + + private static final class NotShutdownException extends RuntimeException {} +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/TestRunScopedInstance.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/TestRunScopedInstance.java new file mode 100644 index 000000000000..4973e5fe9525 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/TestRunScopedInstance.java @@ -0,0 +1,127 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage.it.runner.registry;

import com.google.common.base.MoreObjects;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.StatusCode;
import io.opentelemetry.context.Scope;
import java.util.function.Supplier;
import org.junit.runner.notification.RunListener.ThreadSafe;

/**
 * Thread safe shim to manage the initialization and lifecycle of a specific instance of a {@link
 * com.google.cloud.storage.it.runner.registry.ManagedLifecycle} instance.
 */
@ThreadSafe
public final class TestRunScopedInstance<T extends ManagedLifecycle<?>> implements Supplier<T> {

  // Name of the OpenTelemetry SDK fixture. Starting/stopping that fixture must not itself be
  // traced, otherwise we would recurse into the registry while bootstrapping/tearing it down.
  // NOTE(review): get() previously short-circuited on "fixture/OTEL_SDK" while shutdown()
  // checked "OTEL_SDK", so the stop path never took the non-tracing branch; both now share
  // this constant. Confirm "fixture/OTEL_SDK" matches the name used at registration.
  private static final String OTEL_SDK_NAME = "fixture/OTEL_SDK";

  private final String name;
  private final Supplier<T> ctor;
  // volatile so the double-checked locking in get() publishes safely
  private volatile T instance;

  private TestRunScopedInstance(String name, Supplier<T> ctor) {
    this.name = name;
    this.ctor = ctor;
  }

  static <T extends ManagedLifecycle<?>> TestRunScopedInstance<T> of(
      String name, Supplier<T> ctor) {
    return new TestRunScopedInstance<>(name, ctor);
  }

  public String getName() {
    return name;
  }

  /**
   * Lazily construct and start the underlying instance; subsequent calls return the same one.
   * Start-up is recorded as a span unless this shim manages the OTel SDK itself.
   */
  public T get() {
    if (instance == null) {
      synchronized (this) {
        if (instance == null) {
          // if we don't short-circuit for OTEL_SDK we will cause a stack overflow, because we
          // would be trying to get our instance to record that we're starting our instance.
          if (name.equals(OTEL_SDK_NAME)) {
            T tmp = ctor.get();
            tmp.start();
            instance = tmp;
          } else {
            Span span =
                Registry.getInstance()
                    .otelSdk
                    .get()
                    .get()
                    .getTracer("test")
                    .spanBuilder("registry/" + name + "/start")
                    .setAttribute("service.name", "registry")
                    .startSpan();
            try (Scope ignore = span.makeCurrent()) {
              T tmp = ctor.get();
              tmp.start();
              instance = tmp;
            } catch (Throwable t) {
              span.recordException(t);
              span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName());
              throw t;
            } finally {
              span.end();
            }
          }
        }
      }
    }
    return instance;
  }

  /**
   * Stop the underlying instance if it was started. Shutdown is recorded as a span unless this
   * shim manages the OTel SDK itself (same short-circuit rationale as {@link #get()}).
   */
  public void shutdown() throws Exception {
    T tmp = instance;
    if (tmp != null) {
      synchronized (this) {
        instance = null;
      }
      if (name.equals(OTEL_SDK_NAME)) {
        tmp.stop();
      } else {
        Span span =
            Registry.getInstance()
                .otelSdk
                .get()
                .get()
                .getTracer("test")
                .spanBuilder("registry/" + name + "/stop")
                .setAttribute("service.name", "registry")
                .startSpan();
        try (Scope ignore = span.makeCurrent()) {
          tmp.stop();
        } catch (Throwable t) {
          span.recordException(t);
          span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName());
          throw t;
        } finally {
          span.end();
        }
      }
    }
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this)
        .add("name", name)
        .add("ctor", ctor)
        .add("instance", instance)
        .toString();
  }
}
/*
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage.it.runner.registry;

import com.google.common.base.MoreObjects;
import java.io.IOException;
import java.util.Objects;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Value type pairing a GCE zone name with the region it belongs to. */
public final class Zone {
  private static final Logger LOGGER = LoggerFactory.getLogger(Zone.class);

  private final String region;
  private final String zone;

  private Zone(String region, String zone) {
    this.zone = zone;
    this.region = region;
  }

  public String getRegion() {
    return region;
  }

  public String getZone() {
    return zone;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof Zone)) {
      return false;
    }
    Zone other = (Zone) o;
    return Objects.equals(zone, other.zone) && Objects.equals(region, other.region);
  }

  @Override
  public int hashCode() {
    return Objects.hash(zone, region);
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this).add("region", region).add("zone", zone).toString();
  }

  /**
   * Parse a zone from either a bare name ("us-east1-c") or a fully qualified resource path
   * ("projects/123/zones/us-east1-c") by keeping only the final path segment. The region is
   * everything before the final '-' of the zone name, or "" when no '-' is present.
   */
  public static Zone parse(String s) {
    int slash = s.lastIndexOf('/');
    String zoneName = slash > -1 ? s.substring(slash + 1) : s;
    int dash = zoneName.lastIndexOf('-');
    String regionName = dash > -1 ? zoneName.substring(0, dash) : "";
    return new Zone(regionName, zoneName);
  }

  /** Lifecycle shim that resolves the zone from the metadata service, defaulting to us-east1-c. */
  static final class ZoneShim implements ManagedLifecycle<Zone> {

    private Zone zone;

    @Override
    public Zone get() {
      return zone;
    }

    @Override
    public void start() {
      try {
        zone = MetadataService.zone().orElseGet(() -> parse("us-east1-c"));
        LOGGER.info("Resolved zone = {}", zone);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }

    @Override
    public void stop() {}
  }
}
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage.jqwik;

import static com.google.cloud.storage.PackagePrivateMethodWorkarounds.ifNonNull;

import com.google.cloud.storage.jqwik.StorageArbitraries.ProjectNumber;
import com.google.storage.v2.Bucket;
import com.google.storage.v2.BucketName;
import com.google.storage.v2.ProjectName;
import java.util.Collections;
import java.util.Set;
import javax.annotation.ParametersAreNonnullByDefault;
import net.jqwik.api.Arbitrary;
import net.jqwik.api.Combinators;
import net.jqwik.api.Tuple;
import net.jqwik.api.providers.ArbitraryProvider;
import net.jqwik.api.providers.TypeUsage;
import org.checkerframework.checker.nullness.qual.NonNull;

/** jqwik {@link ArbitraryProvider} that generates random-but-plausible {@link Bucket} protos. */
@ParametersAreNonnullByDefault
public final class BucketArbitraryProvider implements ArbitraryProvider {

  @Override
  public boolean canProvideFor(TypeUsage targetType) {
    return targetType.isOfType(Bucket.class);
  }

  @NonNull
  @Override
  public Set<Arbitrary<?>> provideFor(TypeUsage targetType, SubtypeProvider subtypeProvider) {
    // Combinators.combine accepts at most 8 arbitraries, so the bucket fields are grouped into
    // four 8-wide tuples which are then combined into a single builder invocation.
    Arbitrary<Bucket> bucketArbitrary =
        Combinators.combine(
                Combinators.combine(
                        StorageArbitraries.buckets().name(),
                        StorageArbitraries.buckets().name(),
                        StorageArbitraries.storageClass(),
                        StorageArbitraries.buckets().location(),
                        StorageArbitraries.buckets().locationType(),
                        StorageArbitraries.metageneration(),
                        StorageArbitraries.buckets().versioning().injectNull(0.25),
                        StorageArbitraries.timestamp().injectNull(0.25)) // ctime
                    .as(Tuple::of),
                Combinators.combine(
                        StorageArbitraries.timestamp().injectNull(0.25), // utime
                        StorageArbitraries.buckets().website().injectNull(0.75),
                        StorageArbitraries.bool(),
                        StorageArbitraries.buckets().rpo(),
                        StorageArbitraries.buckets().billing().injectNull(0.01),
                        StorageArbitraries.buckets().encryption(),
                        StorageArbitraries.buckets().retentionPolicy().injectNull(0.5),
                        StorageArbitraries.buckets().lifecycle().injectNull(0.5))
                    .as(Tuple::of),
                Combinators.combine(
                        StorageArbitraries.buckets().logging().injectNull(0.5),
                        StorageArbitraries.buckets().cors(),
                        StorageArbitraries.buckets().objectAccessControl().injectNull(0.5),
                        StorageArbitraries.buckets().bucketAccessControl().injectNull(0.5),
                        StorageArbitraries.owner().injectNull(0.01),
                        StorageArbitraries.buckets().iamConfig().injectNull(0.5),
                        StorageArbitraries.buckets().labels(),
                        StorageArbitraries.etag())
                    .as(Tuple::of),
                Combinators.combine(
                        StorageArbitraries.projectNumber().map(ProjectNumber::toProjectName),
                        StorageArbitraries.buckets().ipFilter().injectNull(0.75))
                    .as(Tuple::of))
            .as(
                (identity, policies, acls, project) -> {
                  Bucket.Builder b = Bucket.newBuilder();
                  // identity & basic metadata
                  ifNonNull(identity.get1(), BucketName::getBucket, b::setBucketId);
                  ifNonNull(identity.get2(), BucketName::toString, b::setName);
                  ifNonNull(identity.get3(), b::setStorageClass);
                  ifNonNull(identity.get4(), b::setLocation);
                  ifNonNull(identity.get5(), b::setLocationType);
                  ifNonNull(identity.get6(), b::setMetageneration);
                  ifNonNull(identity.get7(), b::setVersioning);
                  ifNonNull(identity.get8(), b::setCreateTime);
                  // policy-style configuration
                  ifNonNull(policies.get1(), b::setUpdateTime);
                  ifNonNull(policies.get2(), b::setWebsite);
                  ifNonNull(policies.get3(), b::setDefaultEventBasedHold);
                  ifNonNull(policies.get4(), b::setRpo);
                  ifNonNull(policies.get5(), b::setBilling);
                  ifNonNull(policies.get6(), b::setEncryption);
                  ifNonNull(policies.get7(), b::setRetentionPolicy);
                  ifNonNull(policies.get8(), b::setLifecycle);
                  // ACLs, ownership, labels
                  ifNonNull(acls.get1(), b::setLogging);
                  ifNonNull(acls.get2(), b::addAllCors);
                  ifNonNull(acls.get3(), b::addAllDefaultObjectAcl);
                  ifNonNull(acls.get4(), b::addAllAcl);
                  ifNonNull(acls.get5(), b::setOwner);
                  ifNonNull(acls.get6(), b::setIamConfig);
                  ifNonNull(acls.get7(), b::putAllLabels);
                  ifNonNull(acls.get8(), b::setEtag);
                  // project & network filtering
                  ifNonNull(project.get1(), ProjectName::toString, b::setProject);
                  ifNonNull(project.get2(), b::setIpFilter);
                  // TODO: add CustomPlacementConfig
                  return b.build();
                });
    return Collections.singleton(bucketArbitrary);
  }
}
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage.jqwik;

import static com.google.cloud.storage.PackagePrivateMethodWorkarounds.ifNonNull;

import com.google.iam.v1.Binding;
import com.google.iam.v1.Policy;
import com.google.protobuf.ByteString;
import com.google.type.Expr;
import java.util.Collections;
import java.util.Locale;
import java.util.Set;
import javax.annotation.ParametersAreNonnullByDefault;
import net.jqwik.api.Arbitraries;
import net.jqwik.api.Arbitrary;
import net.jqwik.api.Combinators;
import net.jqwik.api.providers.ArbitraryProvider;
import net.jqwik.api.providers.TypeUsage;
import net.jqwik.web.api.Web;
import org.checkerframework.checker.nullness.qual.NonNull;

/** jqwik {@link ArbitraryProvider} that generates random IAM {@link Policy} protos. */
@ParametersAreNonnullByDefault
public final class IamPolicyArbitraryProvider implements ArbitraryProvider {

  @Override
  public boolean canProvideFor(TypeUsage targetType) {
    return targetType.isOfType(Policy.class);
  }

  @NonNull
  @Override
  public Set<Arbitrary<?>> provideFor(TypeUsage targetType, SubtypeProvider subtypeProvider) {
    Arbitrary<Policy> policyArbitrary =
        Combinators.combine(
                StorageArbitraries.etag().injectNull(0.10),
                Arbitraries.integers().between(0, 3).injectNull(0.05),
                bindings().list().ofMinSize(0).ofMaxSize(10).injectNull(0.5))
            .as(
                (tag, ver, bindingList) -> {
                  Policy.Builder b = Policy.newBuilder();
                  ifNonNull(tag, ByteString::copyFromUtf8, b::setEtag);
                  ifNonNull(ver, b::setVersion);
                  ifNonNull(bindingList, b::addAllBindings);
                  return b.build();
                });
    return Collections.singleton(policyArbitrary);
  }

  /** A binding of a role to zero or more members, optionally guarded by a condition. */
  static Arbitrary<Binding> bindings() {
    return Combinators.combine(
            role(),
            member().list().ofMinSize(0).ofMaxSize(10).injectNull(0.2),
            condition().injectNull(0.25))
        .as(
            (r, ms, cond) -> {
              Binding.Builder b = Binding.newBuilder();
              ifNonNull(r, b::setRole);
              ifNonNull(ms, b::addAllMembers);
              ifNonNull(cond, b::setCondition);
              return b.build();
            });
  }

  static Arbitrary<String> role() {
    return Arbitraries.of("roles/viewer", "roles/editor", "roles/owner");
  }

  /** Any of the member formats IAM accepts: public, user, serviceAccount, group, or domain. */
  static Arbitrary<String> member() {
    return Arbitraries.oneOf(
        Arbitraries.of("allUsers"),
        Arbitraries.of("allAuthenticatedUsers"),
        Web.emails().map(e -> String.format(Locale.US, "user:%s", e)),
        Web.emails().map(e -> String.format(Locale.US, "serviceAccount:%s", e)),
        Web.emails().map(e -> String.format(Locale.US, "group:%s", e)),
        Web.webDomains().map(d -> String.format(Locale.US, "domain:%s", d)));
  }

  static Arbitrary<Expr> condition() {
    return Combinators.combine(nonEmptyAlphaString(), nonEmptyAlphaString(), nonEmptyAlphaString())
        .as(
            (title, description, expression) ->
                // location intentionally omitted as the json representation of an Expr does not
                // specify location
                Expr.newBuilder()
                    .setTitle(title)
                    .setDescription(description)
                    .setExpression(expression)
                    .build());
  }

  private static Arbitrary<String> nonEmptyAlphaString() {
    return StorageArbitraries.alphaString().filter(s -> !s.isEmpty());
  }
}
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.storage.jqwik;

import static com.google.cloud.storage.PackagePrivateMethodWorkarounds.ifNonNull;

import com.google.storage.v2.BucketName;
import com.google.storage.v2.Object;
import java.util.Collections;
import java.util.Set;
import javax.annotation.ParametersAreNonnullByDefault;
import net.jqwik.api.Arbitraries;
import net.jqwik.api.Arbitrary;
import net.jqwik.api.Combinators;
import net.jqwik.api.Tuple;
import net.jqwik.api.providers.ArbitraryProvider;
import net.jqwik.api.providers.TypeUsage;
import org.checkerframework.checker.nullness.qual.NonNull;

/** jqwik {@link ArbitraryProvider} that generates random-but-plausible {@link Object} protos. */
@ParametersAreNonnullByDefault
public final class ObjectArbitraryProvider implements ArbitraryProvider {

  @Override
  public boolean canProvideFor(TypeUsage targetType) {
    return targetType.isOfType(Object.class);
  }

  @NonNull
  @Override
  public Set<Arbitrary<?>> provideFor(TypeUsage targetType, SubtypeProvider subtypeProvider) {
    Arbitrary<Long> size = Arbitraries.longs().greaterOrEqual(0);
    // Combinators.combine accepts at most 8 arbitraries; object fields are grouped into four
    // tuples which are then combined into a single builder invocation.
    Arbitrary<Object> objectArbitrary =
        Combinators.combine(
                Combinators.combine(
                        StorageArbitraries.objects().name(),
                        StorageArbitraries.buckets().name(),
                        StorageArbitraries.generation(),
                        StorageArbitraries.metageneration(),
                        StorageArbitraries.storageClass(),
                        size,
                        StorageArbitraries.httpHeaders().contentEncoding(),
                        StorageArbitraries.httpHeaders().contentDisposition())
                    .as(Tuple::of),
                Combinators.combine(
                        StorageArbitraries.httpHeaders().cacheControl(),
                        StorageArbitraries.httpHeaders().contentLanguage(),
                        StorageArbitraries.timestamp().injectNull(0.25), // dtime
                        StorageArbitraries.httpHeaders().contentType(),
                        StorageArbitraries.timestamp().injectNull(0.25), // ctime
                        // componentCount is populated if the object is made from compose
                        Arbitraries.integers().greaterOrEqual(0).injectNull(0.85),
                        StorageArbitraries.objects().objectChecksums().injectNull(0.25))
                    .as(Tuple::of),
                Combinators.combine(
                        StorageArbitraries.timestamp().injectNull(0.25), // utime
                        StorageArbitraries.kmsKey(),
                        StorageArbitraries.timestamp().injectNull(0.25), // UpdateStorageClassTime
                        StorageArbitraries.bool().injectNull(0.25),
                        StorageArbitraries.timestamp().injectNull(0.25), // RetentionExpireTime
                        StorageArbitraries.bool().injectNull(0.25),
                        StorageArbitraries.objects().customerEncryption().injectNull(0.90),
                        StorageArbitraries.httpHeaders().customTime())
                    .as(Tuple::of),
                Combinators.combine(
                        StorageArbitraries.objects().customMetadata(),
                        StorageArbitraries.owner().injectNull(0.1),
                        StorageArbitraries.objects().objectAccessControl().injectNull(0.5),
                        StorageArbitraries.etag(),
                        StorageArbitraries.objects().objectContexts())
                    .as(Tuple::of))
            .as(
                (identity, content, holds, extras) -> {
                  Object.Builder b = Object.newBuilder();
                  ifNonNull(identity.get1(), b::setName);
                  ifNonNull(identity.get2(), BucketName::toString, b::setBucket);
                  ifNonNull(identity.get3(), b::setGeneration);
                  ifNonNull(identity.get4(), b::setMetageneration);
                  ifNonNull(identity.get5(), b::setStorageClass);
                  ifNonNull(identity.get6(), b::setSize);
                  ifNonNull(identity.get7(), b::setContentEncoding);
                  ifNonNull(identity.get8(), b::setContentDisposition);
                  ifNonNull(content.get1(), b::setCacheControl);
                  ifNonNull(extras.get3(), b::addAllAcl);
                  ifNonNull(content.get2(), b::setContentLanguage);
                  ifNonNull(content.get3(), b::setDeleteTime);
                  ifNonNull(content.get4(), b::setContentType);
                  ifNonNull(content.get5(), b::setCreateTime);
                  ifNonNull(content.get6(), b::setComponentCount);
                  ifNonNull(content.get7(), b::setChecksums);
                  ifNonNull(holds.get1(), b::setUpdateTime);
                  ifNonNull(holds.get2(), b::setKmsKey);
                  ifNonNull(holds.get3(), b::setUpdateStorageClassTime);
                  ifNonNull(holds.get4(), b::setTemporaryHold);
                  ifNonNull(holds.get5(), b::setRetentionExpireTime);
                  ifNonNull(extras.get1(), b::putAllMetadata);
                  ifNonNull(holds.get6(), b::setEventBasedHold);
                  ifNonNull(extras.get2(), b::setOwner);
                  ifNonNull(holds.get7(), b::setCustomerEncryption);
                  ifNonNull(holds.get8(), b::setCustomTime);
                  ifNonNull(extras.get4(), b::setEtag);
                  ifNonNull(extras.get5(), b::setContexts);
                  return b.build();
                });
    return Collections.singleton(objectArbitrary);
  }
}
+ */ + +package com.google.cloud.storage.jqwik; + +import static com.google.cloud.storage.PackagePrivateMethodWorkarounds.ifNonNull; +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.common.hash.HashCode; +import com.google.common.hash.Hashing; +import com.google.protobuf.ByteString; +import com.google.protobuf.Timestamp; +import com.google.storage.v2.Bucket; +import com.google.storage.v2.Bucket.Billing; +import com.google.storage.v2.Bucket.Encryption; +import com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig; +import com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig; +import com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig; +import com.google.storage.v2.Bucket.IpFilter; +import com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource; +import com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource; +import com.google.storage.v2.Bucket.Lifecycle.Rule.Condition; +import com.google.storage.v2.Bucket.Logging; +import com.google.storage.v2.Bucket.RetentionPolicy; +import com.google.storage.v2.Bucket.Versioning; +import com.google.storage.v2.Bucket.Website; +import com.google.storage.v2.BucketAccessControl; +import com.google.storage.v2.BucketName; +import com.google.storage.v2.CustomerEncryption; +import com.google.storage.v2.ObjectAccessControl; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.ObjectContexts; +import com.google.storage.v2.ObjectCustomContextPayload; +import com.google.storage.v2.Owner; +import com.google.storage.v2.ProjectName; +import com.google.storage.v2.ProjectTeam; +import com.google.type.Date; +import java.math.BigInteger; +import java.nio.charset.StandardCharsets; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.util.Locale; +import java.util.Map; +import java.util.stream.Stream; +import net.jqwik.api.Arbitraries; +import net.jqwik.api.Arbitrary; +import 
net.jqwik.api.Combinators; +import net.jqwik.api.Tuple; +import net.jqwik.api.arbitraries.CharacterArbitrary; +import net.jqwik.api.arbitraries.IntegerArbitrary; +import net.jqwik.api.arbitraries.ListArbitrary; +import net.jqwik.api.arbitraries.LongArbitrary; +import net.jqwik.api.arbitraries.StringArbitrary; +import net.jqwik.api.providers.TypeUsage; +import net.jqwik.time.api.DateTimes; +import net.jqwik.web.api.Web; +import org.checkerframework.checker.nullness.qual.Nullable; + +public final class StorageArbitraries { + + // Predefine some values for 128-bit integers. + // These are used in generation of md5 values. + private static final String hex_128_min = "80000000" + "00000000" + "00000000" + "00000000"; + private static final String hex_128_max = "7fffffff" + "ffffffff" + "ffffffff" + "ffffffff"; + // BigIntegers don't inherently have the sign-bit, so simulate it by calling .negate() + private static final BigInteger _128_bit_min = new BigInteger(hex_128_min, 16).negate(); + private static final BigInteger _128_bit_max = new BigInteger(hex_128_max, 16); + + private StorageArbitraries() {} + + public static Arbitrary timestamp() { + return Combinators.combine( + DateTimes.offsetDateTimes().offsetBetween(ZoneOffset.UTC, ZoneOffset.UTC), + millisecondsAsNanos()) + .as( + (odt, nanos) -> + Timestamp.newBuilder().setSeconds(odt.toEpochSecond()).setNanos(nanos).build()); + } + + public static Arbitrary duration() { + return Arbitraries.longs() + .between(0, 315_576_000_000L) + .map(seconds -> com.google.protobuf.Duration.newBuilder().setSeconds(seconds).build()); + } + + public static Arbitrary date() { + return DateTimes.offsetDateTimes() + .offsetBetween(ZoneOffset.UTC, ZoneOffset.UTC) + .map( + odt -> + Date.newBuilder() + .setYear(odt.getYear()) + .setMonth(odt.getMonthValue()) + .setDay(odt.getDayOfMonth()) + .build()); + } + + public static Arbitrary bool() { + return Arbitraries.defaultFor(TypeUsage.of(Boolean.class)); + } + + public static LongArbitrary 
metageneration() { + return Arbitraries.longs().greaterOrEqual(0); + } + + public static LongArbitrary generation() { + return Arbitraries.longs().greaterOrEqual(0); + } + + public static StringArbitrary randomString() { + return Arbitraries.strings().all().ofMinLength(1).ofMaxLength(1024); + } + + public static CharacterArbitrary alnum() { + return Arbitraries.chars().alpha().numeric(); + } + + public static StringArbitrary alphaString() { + return Arbitraries.strings().alpha(); + } + + public static Arbitrary projectID() { + return Combinators.combine( + // must start with a letter + Arbitraries.chars().range('a', 'z'), + // can only contain numbers, lowercase letters, and hyphens, and must be 6-30 chars + Arbitraries.strings() + .withCharRange('a', 'z') + .numeric() + .withChars('-') + .ofMinLength(4) + .ofMaxLength(28), + // must not end with a hyphen + Arbitraries.chars().range('a', 'z').numeric()) + .as((first, mid, last) -> new ProjectID(first + mid + last)); + } + + public static Arbitrary projectNumber() { + return Arbitraries.bigIntegers().greaterOrEqual(BigInteger.ONE).map(ProjectNumber::new); + } + + public static Arbitrary kmsKey() { + return Arbitraries.of("kms-key1", "kms-key2").injectNull(0.75); + } + + public static Buckets buckets() { + return Buckets.INSTANCE; + } + + public static AccessControl accessControl() { + return AccessControl.INSTANCE; + } + + public static Arbitrary storageClass() { + return Arbitraries.oneOf(storageClassWithoutEdgeCases(), Arbitraries.just("")) + .edgeCases(config -> config.add("")); + } + + public static Arbitrary owner() { + Arbitrary entity = alphaString().ofMinLength(1).ofMaxLength(1024); + return entity.map(e -> Owner.newBuilder().setEntity(e).build()); + } + + public static Arbitrary etag() { + return Arbitraries.strings() + .ascii() + .ofMinLength(0) + .ofMaxLength(8) + .edgeCases(config -> config.add("")); + } + + /** Arbitrary of ipv4 and ipv6 cidr ranges */ + public static Arbitrary cidr() { + return 
Arbitraries.oneOf(cidrIpv4(), cidrIpv6()); + } + + public static Arbitrary cidrIpv4() { + return Arbitraries.of("182.0.2.0/24"); + } + + public static Arbitrary cidrIpv6() { + return Arbitraries.of("2001:db8::/32"); + } + + public static final class Buckets { + private static final Buckets INSTANCE = new Buckets(); + + private Buckets() {} + + /** + * Generated bucket name based on the rules outlined in https://cloud.google.com/storage/docs/naming-buckets#requirements + */ + public Arbitrary name() { + return Combinators.combine( + Arbitraries.oneOf( + // projectID(), TODO: reinclude this once we support non-global scoped buckets + // Global buckets have prefix of projects/_ + Arbitraries.of(new ProjectID("_"))), + alnum(), + alnum().with('-', '_').list().ofMinSize(1).ofMaxSize(61), + alnum()) + .as( + (p, first, mid, last) -> { + final StringBuilder sb = new StringBuilder(); + sb.append(first); + mid.forEach(sb::append); + sb.append(last); + return BucketName.of(p.get(), sb.toString()); + }); + } + + public Arbitrary action() { + return Arbitraries.oneOf( + Arbitraries.just(Bucket.Lifecycle.Rule.Action.newBuilder().setType("Delete").build()), + storageClassWithoutEdgeCases() + .map( + c -> + Bucket.Lifecycle.Rule.Action.newBuilder() + .setType("SetStorageClass") + .setStorageClass(c) + .build())); + } + + public Arbitrary rule() { + IntegerArbitrary zeroThroughTen = Arbitraries.integers().between(0, 10); + + Arbitrary conditionIsLive = bool().injectNull(0.25); + Arbitrary conditionAgeDays = Arbitraries.integers().between(0, 100).injectNull(0.25); + Arbitrary conditionNumberOfNewVersions = zeroThroughTen.injectNull(0.25); + Arbitrary conditionCreatedBeforeTime = date().injectNull(0.25); + Arbitrary conditionDaysSinceNoncurrentTime = zeroThroughTen.injectNull(0.25); + Arbitrary conditionNoncurrentTime = date().injectNull(0.25); + Arbitrary conditionDaysSinceCustomTime = zeroThroughTen.injectNull(0.25); + Arbitrary conditionCustomTime = date().injectNull(0.25); + 
ListArbitrary storageClassMatches = + storageClassWithoutEdgeCases().list().uniqueElements(); + + return Combinators.combine( + action(), + Combinators.combine( + conditionIsLive, + conditionAgeDays, + conditionNumberOfNewVersions, + conditionCreatedBeforeTime, + conditionDaysSinceNoncurrentTime, + conditionNoncurrentTime, + conditionDaysSinceCustomTime, + conditionCustomTime) + .as(Tuple::of), + storageClassMatches) + .as( + (a, ct, s) -> { + Condition.Builder b = Condition.newBuilder(); + ifNonNull(ct.get1(), b::setIsLive); + ifNonNull(ct.get2(), b::setAgeDays); + ifNonNull(ct.get3(), b::setNumNewerVersions); + ifNonNull(ct.get4(), b::setCreatedBefore); + ifNonNull(ct.get5(), b::setDaysSinceNoncurrentTime); + ifNonNull(ct.get6(), b::setNoncurrentTimeBefore); + ifNonNull(ct.get7(), b::setDaysSinceCustomTime); + ifNonNull(ct.get8(), b::setCustomTimeBefore); + b.addAllMatchesStorageClass(s); + return Bucket.Lifecycle.Rule.newBuilder().setAction(a).setCondition(b).build(); + }); + } + + public Arbitrary lifecycle() { + return rule() + .list() + .ofMinSize(0) + .ofMaxSize(1) + .uniqueElements() + .map((r) -> Bucket.Lifecycle.newBuilder().addAllRule(r).build()); + } + + public Arbitrary website() { + // TODO: create a "URLEncodedString we can use here + Arbitrary indexPage = + Arbitraries.strings().all().ofMinLength(1).ofMaxLength(25).injectNull(0.75); + Arbitrary notFoundPage = + Arbitraries.strings().all().ofMinLength(1).ofMaxLength(25).injectNull(0.75); + return Combinators.combine(indexPage, notFoundPage) + .as( + (i, n) -> { + //noinspection ConstantConditions + if (i == null && n == null) { + return null; + } else { + Website.Builder b = Website.newBuilder(); + ifNonNull(i, b::setMainPageSuffix); + ifNonNull(n, b::setNotFoundPage); + return b.build(); + } + }); + } + + public Arbitrary logging() { + Arbitrary loggingBucketName = name(); + Arbitrary loggingPrefix = + Arbitraries.strings() + .all() + .ofMinLength(0) + .ofMaxLength(10) + .injectNull(0.25) + 
.edgeCases(config -> config.add("")); + return Combinators.combine(loggingBucketName, loggingPrefix) + .as( + (b, p) -> { + Logging.Builder bld = Logging.newBuilder(); + ifNonNull(p, bld::setLogObjectPrefix); + ifNonNull(b, BucketName::toString, bld::setLogBucket); + return bld.build(); + }); + } + + public ListArbitrary cors() { + Arbitrary maxAgeSeconds = + Arbitraries.integers().between(0, OffsetDateTime.MAX.getSecond()); + ListArbitrary methods = + Arbitraries.of("GET", "DELETE", "UPDATE", "PATCH").list().uniqueElements(); + ListArbitrary responseHeaders = + Arbitraries.of("Content-Type", "Origin").list().uniqueElements(); + ListArbitrary origins = Arbitraries.of("*", "google.com").list().uniqueElements(); + return Combinators.combine(methods, responseHeaders, origins, maxAgeSeconds) + .as( + (m, r, o, a) -> + Bucket.Cors.newBuilder() + .addAllMethod(m) + .addAllResponseHeader(r) + .addAllOrigin(o) + .setMaxAgeSeconds(a) + .build()) + .list() + .ofMinSize(0) + .ofMaxSize(10); + } + + public Arbitrary billing() { + return bool().map(b -> Billing.newBuilder().setRequesterPays(b).build()); + } + + public ListArbitrary objectAccessControl() { + return Combinators.combine(accessControl().entity(), accessControl().role(), etag()) + .as( + (entity, role, etag) -> { + ObjectAccessControl.Builder b = entity.newObjectBuilder(); + ifNonNull(role, b::setRole); + ifNonNull(etag, b::setEtag); + return b.build(); + }) + .list() + .ofMinSize(0) + .ofMaxSize(10); + } + + public ListArbitrary bucketAccessControl() { + return Combinators.combine(accessControl().entity(), accessControl().role(), etag()) + .as( + (entity, role, etag) -> { + BucketAccessControl.Builder b = entity.newBucketBuilder(); + ifNonNull(role, b::setRole); + ifNonNull(etag, b::setEtag); + return b.build(); + }) + .list() + .ofMinSize(0) + .ofMaxSize(10); + } + + public Arbitrary uniformBucketLevelAccess() { + return Combinators.combine(bool(), timestamp()) + .as( + (e, l) -> { + 
Bucket.IamConfig.UniformBucketLevelAccess.Builder ublaBuilder = + Bucket.IamConfig.UniformBucketLevelAccess.newBuilder(); + ublaBuilder.setEnabled(e); + if (e) { + ublaBuilder.setLockTime(l); + } + return ublaBuilder.build(); + }); + } + + public Arbitrary iamConfig() { + Arbitrary pap = Arbitraries.of("enforced", "inherited"); + return Combinators.combine(pap, uniformBucketLevelAccess()) + .as( + (p, u) -> { + Bucket.IamConfig.Builder iamConfigBuilder = Bucket.IamConfig.newBuilder(); + iamConfigBuilder.setUniformBucketLevelAccess(u); + if (u.getEnabled()) { + iamConfigBuilder.setPublicAccessPrevention(p); + } + return iamConfigBuilder.build(); + }); + } + + public Arbitrary encryption() { + return Combinators.combine( + Arbitraries.strings().all().ofMinLength(1).ofMaxLength(1024).injectNull(0.9), + googleManagedEncryptionEnforcementConfig(), + customerManagedEncryptionEnforcementConfig(), + customerSuppliedEncryptionEnforcementConfig()) + .flatAs( + (kmsKey, gmek, cmek, csek) -> { + if (Stream.of(kmsKey, gmek, cmek, csek).allMatch(java.util.Objects::isNull)) { + return Arbitraries.just(null); + } + Encryption.Builder b = Encryption.newBuilder(); + ifNonNull(kmsKey, b::setDefaultKmsKey); + ifNonNull(gmek, b::setGoogleManagedEncryptionEnforcementConfig); + ifNonNull(cmek, b::setCustomerManagedEncryptionEnforcementConfig); + ifNonNull(csek, b::setCustomerSuppliedEncryptionEnforcementConfig); + return Arbitraries.just(b.build()); + }); + } + + public Arbitrary retentionPolicy() { + return Combinators.combine(bool(), duration().injectNull(0.25), timestamp()) + .as( + (locked, duration, effectiveTime) -> { + RetentionPolicy.Builder retentionBuilder = RetentionPolicy.newBuilder(); + ifNonNull(duration, retentionBuilder::setRetentionDuration); + retentionBuilder.setIsLocked(locked); + if (locked) { + retentionBuilder.setEffectiveTime(effectiveTime); + } + return retentionBuilder.build(); + }); + } + + public Arbitrary versioning() { + return bool().map(b -> 
Versioning.newBuilder().setEnabled(b).build()); + } + + public Arbitrary rpo() { + return Arbitraries.of("DEFAULT", "ASYNC_TURBO", "") + .edgeCases(config -> config.add("")); // denote "" as an edge case + } + + public Arbitrary location() { + return Arbitraries.of( + "US", "US-CENTRAL1", "US-EAST1", "EUROPE-CENTRAL2", "SOUTHAMERICA-EAST1"); + } + + public Arbitrary locationType() { + return Arbitraries.of("region", "dual-region", "multi-region"); + } + + public Arbitrary> labels() { + return objects().customMetadata(); + } + + public Arbitrary ipFilter() { + return Combinators.combine( + Arbitraries.of("Enabled", "Disabled").injectNull(0.33), // mode + publicNetworkSource(), + vpcNetworkSource().list().ofMinSize(1).ofMaxSize(3).injectNull(0.5), + bool().injectNull(0.5), // allow_cross_org_vpcs + bool().injectNull(0.5) // allow_all_service_agent_access + ) + .as( + (mode, pns, vnss, allowCrossOrgVpcs, allowAllServiceAgentAccess) -> { + IpFilter.Builder b = IpFilter.newBuilder(); + ifNonNull(mode, b::setMode); + ifNonNull(pns, b::setPublicNetworkSource); + ifNonNull(vnss, b::addAllVpcNetworkSources); + ifNonNull(allowCrossOrgVpcs, b::setAllowCrossOrgVpcs); + ifNonNull(allowAllServiceAgentAccess, b::setAllowAllServiceAgentAccess); + return b.build(); + }); + } + + public Arbitrary publicNetworkSource() { + return Arbitraries.oneOf(cidr().list().ofMinSize(1).ofMaxSize(3).injectNull(0.5)) + .map( + ranges -> { + PublicNetworkSource.Builder b = PublicNetworkSource.newBuilder(); + ifNonNull(ranges, b::addAllAllowedIpCidrRanges); + return b.build(); + }); + } + + public Arbitrary vpcNetworkSource() { + return Combinators.combine( + networkResource().injectNull(0.25), + cidr().list().ofMinSize(1).ofMaxSize(3).injectNull(0.5)) + .as( + (network, ranges) -> { + VpcNetworkSource.Builder b = VpcNetworkSource.newBuilder(); + ifNonNull(network, b::setNetwork); + ifNonNull(ranges, b::addAllAllowedIpCidrRanges); + return b.build(); + }); + } + + Arbitrary networkResource() { + 
return Combinators.combine(projectID(), networkName()) + .as( + (projectId, networkName) -> + String.format( + Locale.US, "projects/%s/global/networks/%s", projectId, networkName)); + } + + Arbitrary<@Nullable String> networkName() { + return Arbitraries.strings() + .withCharRange('a', 'z') + .numeric() + .withChars('-') + .ofMinLength(1) + .ofMaxLength(10); + } + + public Arbitrary encryptionEnforcementRestrictionMode() { + return Arbitraries.of("NotRestricted", "FullyRestricted", "NOT_YET_DEFINED"); + } + + public Arbitrary encryptionEnforcementRestrictionModeWithoutEdgeCases() { + return Arbitraries.of("NotRestricted", "FullyRestricted"); + } + + public Arbitrary + googleManagedEncryptionEnforcementConfig() { + return encryptionEnforcementRestrictionMode() + .injectNull(0.9) + .flatMap( + mode -> { + if (mode == null) { + return Arbitraries.just(null); + } + return StorageArbitraries.timestamp() + .injectNull(0.5) + .map( + time -> { + GoogleManagedEncryptionEnforcementConfig.Builder b = + GoogleManagedEncryptionEnforcementConfig.newBuilder(); + ifNonNull(time, b::setEffectiveTime); + b.setRestrictionMode(mode); + return b.build(); + }); + }); + } + + public Arbitrary + customerManagedEncryptionEnforcementConfig() { + return encryptionEnforcementRestrictionMode() + .injectNull(0.9) + .flatMap( + mode -> { + if (mode == null) { + return Arbitraries.just(null); + } + return StorageArbitraries.timestamp() + .injectNull(0.5) + .map( + time -> { + CustomerManagedEncryptionEnforcementConfig.Builder b = + CustomerManagedEncryptionEnforcementConfig.newBuilder(); + ifNonNull(time, b::setEffectiveTime); + b.setRestrictionMode(mode); + return b.build(); + }); + }); + } + + public Arbitrary + customerSuppliedEncryptionEnforcementConfig() { + return encryptionEnforcementRestrictionMode() + .injectNull(0.9) + .flatMap( + mode -> { + if (mode == null) { + return Arbitraries.just(null); + } + return StorageArbitraries.timestamp() + .injectNull(0.5) + .map( + time -> { + 
CustomerSuppliedEncryptionEnforcementConfig.Builder b = + CustomerSuppliedEncryptionEnforcementConfig.newBuilder(); + ifNonNull(time, b::setEffectiveTime); + b.setRestrictionMode(mode); + return b.build(); + }); + }); + } + } + + public static final class ProjectID { + + private final String value; + + private ProjectID(String value) { + this.value = value; + } + + public String get() { + return value; + } + + public ProjectName toProjectName() { + return ProjectName.of(value); + } + } + + public static final class ProjectNumber { + + private final BigInteger value; + + private ProjectNumber(BigInteger value) { + this.value = value; + } + + public BigInteger get() { + return value; + } + + public ProjectName toProjectName() { + return ProjectName.of(value.toString()); + } + } + + public static Objects objects() { + return Objects.INSTANCE; + } + + public static final class Objects { + private static final Objects INSTANCE = new Objects(); + + private Objects() {} + + /** + * Generated object name based on the rules outlined in https://cloud.google.com/storage/docs/naming-objects#objectnames + */ + public Arbitrary name() { + return Arbitraries.strings() + .all() + .excludeChars('#', '[', ']', '*', '?') + .excludeChars(enumerate(0x7f, 0x84)) + .excludeChars(enumerate(0x86, 0x9f)) + .ofMinLength(1) + .ofMaxLength(1024) + .filter(s -> !s.equals(".")) + .filter(s -> !s.equals("..")) + .filter(s -> !s.startsWith(".well-known/acme-challenge/")); + } + + public Arbitrary objectChecksums() { + return Combinators.combine( + Arbitraries.integers().greaterOrEqual(0).injectNull(0.25), // crc32c + // md5s can be absent for composed objects, increase the nullness factor + Arbitraries.bigIntegers().between(_128_bit_min, _128_bit_max).injectNull(0.55) // md5 + ) + .as( + (crc32c, md5) -> { + ObjectChecksums.Builder b = ObjectChecksums.newBuilder(); + ifNonNull(crc32c, b::setCrc32C); + ifNonNull(md5, StorageArbitraries::md5ToByteString, b::setMd5Hash); + return b.build(); + }) + // 
make sure we don't yield an empty value, while theoretically possible, it isn't + // interesting from the standpoint of our tests, we explicitly need to test our handling + // of no checksum value being specified + .filter(oc -> oc.hasCrc32C() || !oc.getMd5Hash().isEmpty()); + } + + public Arbitrary customerEncryption() { + return Combinators.combine( + Arbitraries.strings().ofMinLength(1).ofMaxLength(1024), + Arbitraries.strings() + .map(s -> Hashing.sha256().hashString(s, StandardCharsets.UTF_8).asBytes()) + .map(ByteString::copyFrom)) + .as( + (algorithm, key) -> + CustomerEncryption.newBuilder() + .setEncryptionAlgorithm(algorithm) + .setKeySha256Bytes(key) + .build()); + } + + /** + * Custom metadata from https://cloud.google.com/storage/docs/metadata + */ + public Arbitrary> customMetadata() { + // TODO: are we going to need to care about non-url encoded characters? + // Not for grpc itself, but possibly for compatibility tests. + return Arbitraries.maps( + alphaString().ofMinLength(1).ofMaxLength(32), + alphaString().ofMinLength(1).ofMaxLength(128)) + .ofMinSize(0) + .ofMaxSize(15) + .injectNull(0.5); + } + + public ListArbitrary objectAccessControl() { + return buckets().objectAccessControl(); + } + + public Arbitrary objectCustomContextPayload() { + return Combinators.combine( + randomString().ofMinLength(1).ofMaxLength(128), + timestamp().injectNull(0.5), + timestamp().injectNull(0.5)) + .as( + (value, createTime, updateTime) -> { + ObjectCustomContextPayload.Builder builder = + ObjectCustomContextPayload.newBuilder().setValue(value); + if (createTime != null) { + builder.setCreateTime(createTime); + } + if (updateTime != null) { + builder.setUpdateTime(updateTime); + } + return builder.build(); + }); + } + + public Arbitrary objectContexts() { + Arbitrary key = alphaString().ofMinLength(1).ofMaxLength(32); + Arbitrary> customMap = + Arbitraries.maps(key, objectCustomContextPayload()).ofMinSize(0).ofMaxSize(5); + + return customMap + .map(c -> 
ObjectContexts.newBuilder().putAllCustom(c).build()) + .injectNull(0.5); + } + } + + public static HttpHeaders httpHeaders() { + return HttpHeaders.INSTANCE; + } + + /** + * Fixed-key metadata from https://cloud.google.com/storage/docs/metadata + */ + public static final class HttpHeaders { + private static final HttpHeaders INSTANCE = new HttpHeaders(); + + private HttpHeaders() {} + + public Arbitrary cacheControl() { + return Combinators.combine( + Arbitraries.of("public", "private", "no-cache", "no-store"), + // bound to 10K to ease exhaustion processing + Arbitraries.integers().between(0, 10_000).injectNull(0.5), + Arbitraries.of("no-transform").injectNull(0.5)) + .as( + (visibility, maxAge, transform) -> { + //noinspection ConstantConditions + if (maxAge == null && transform == null) { + return visibility; + } else { + //noinspection ConstantConditions + if (maxAge != null) { + return String.format(Locale.US, "%s, max-age=%d", visibility, maxAge); + } else if (transform != null) { + return String.format(Locale.US, "%s, %s", visibility, transform); + } else { + return String.format( + Locale.US, "%s, max-age=%d, %s", visibility, maxAge, transform); + } + } + }); + } + + public Arbitrary contentDisposition() { + return Arbitraries.of("inline", "attachment;filename=blob.bin").injectNull(0.75); + } + + public Arbitrary contentEncoding() { + return Arbitraries.of("gzip").injectNull(0.5); + } + + public Arbitrary contentLanguage() { + return Arbitraries.of("en", "es", "zh").injectNull(0.75); + } + + public Arbitrary contentType() { + return Arbitraries.of( + "text/plain", + "application/json", + "application/octet-stream", + "application/x-www-form-urlencoded") + .injectNull(0.33); + } + + public Arbitrary customTime() { + return timestamp().injectNull(0.75); + } + } + + public static final class AccessControl { + private static final AccessControl INSTANCE = new AccessControl(); + + private AccessControl() {} + + public Arbitrary id() { + return 
Arbitraries.shorts().greaterOrEqual((short) 1).map(s -> Short.toString(s)); + } + + /** + * https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls#resource + */ + public Arbitrary role() { + return Arbitraries.of("OWNER", "READER"); + } + + /** + * https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls#resource + */ + public Arbitrary team() { + return Arbitraries.of("owner", "editors", "viewers"); + } + + public Arbitrary entity() { + return Arbitraries.oneOf( + id().map(AclEntity::userId), + Web.emails().map(AclEntity::user), + id().map(AclEntity::groupId), + Web.emails().map(AclEntity::group), + Web.webDomains().map(AclEntity::domain), + projectTeam().map(AclEntity::project), + Arbitraries.just(new PredefinedEntity("allUsers")), + Arbitraries.just(new PredefinedEntity("allAuthenticatedUsers"))); + } + + public Arbitrary projectTeam() { + return Combinators.combine(id(), team()) + .as( + (projectNumber, team) -> + ProjectTeam.newBuilder().setProjectNumber(projectNumber).setTeam(team).build()); + } + + public abstract static class AclEntity { + + private AclEntity() {} + + abstract ObjectAccessControl.Builder newObjectBuilder(); + + abstract BucketAccessControl.Builder newBucketBuilder(); + + static EntityWithId userId(String id) { + return new EntityWithId(id); + } + + static EntityWithoutId user(String email) { + return new EntityWithoutId(String.format(Locale.US, "user-%s", email)); + } + + static EntityWithId groupId(String id) { + return new EntityWithId(id); + } + + static EntityWithoutId group(String email) { + return new EntityWithoutId(String.format(Locale.US, "group-%s", email)); + } + + static EntityWithoutId domain(String email) { + return new EntityWithoutId(String.format(Locale.US, "domain-%s", email)); + } + + static EntityWithoutId project(ProjectTeam projectTeam) { + return new EntityWithoutId( + String.format( + Locale.US, "project-%s-%s", projectTeam.getTeam(), projectTeam.getProjectNumber())); + } + } + + public 
static final class PredefinedEntity extends AclEntity { + private final String name; + + private PredefinedEntity(String name) { + this.name = name; + } + + @Override + ObjectAccessControl.Builder newObjectBuilder() { + return ObjectAccessControl.newBuilder().setEntity(name); + } + + @Override + BucketAccessControl.Builder newBucketBuilder() { + return BucketAccessControl.newBuilder().setEntity(name); + } + } + + public static final class EntityWithId extends AclEntity { + private final String id; + + private EntityWithId(String id) { + this.id = id; + } + + @Override + ObjectAccessControl.Builder newObjectBuilder() { + return ObjectAccessControl.newBuilder().setId(id); + } + + @Override + BucketAccessControl.Builder newBucketBuilder() { + return BucketAccessControl.newBuilder().setId(id); + } + } + + public static final class EntityWithoutId extends AclEntity { + private final String entity; + + private EntityWithoutId(String entity) { + this.entity = entity; + } + + @Override + ObjectAccessControl.Builder newObjectBuilder() { + return ObjectAccessControl.newBuilder().setEntity(entity); + } + + @Override + BucketAccessControl.Builder newBucketBuilder() { + return BucketAccessControl.newBuilder().setEntity(entity); + } + } + } + + private static char[] enumerate(int lower, int upperInclusive) { + checkArgument(lower <= upperInclusive, "lower <= upperInclusive"); + int length = upperInclusive - lower + 1; + char[] chars = new char[length]; + for (int i = 0; i < length; i++) { + chars[i] = (char) (i + lower); + } + return chars; + } + + /** + * gRPC has nanosecond level precision for timestamps, whereas JSON is limited to millisecond + * precision due to limitations in {@link com.google.api.client.util.DateTime}. + * + *

Define an arbitrary, which will always produce a nanosecond value that is in the range of + * milliseconds. + */ + private static Arbitrary millisecondsAsNanos() { + return Arbitraries.integers().between(0, 999).map(i -> i * 1_000_000); + } + + private static Arbitrary storageClassWithoutEdgeCases() { + return Arbitraries.of( + "STANDARD", + "NEARLINE", + "COLDLINE", + "ARCHIVE", + "MULTI_REGIONAL", + "REGIONAL", + "DURABLE_REDUCED_AVAILABILITY"); + } + + private static ByteString md5ToByteString(BigInteger md5) { + HashCode hashCode = Hashing.md5().hashBytes(md5.toByteArray()); + byte[] bytes = hashCode.asBytes(); + return ByteString.copyFrom(bytes); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/otel/TestExporter.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/otel/TestExporter.java new file mode 100644 index 000000000000..1532fa836b87 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/otel/TestExporter.java @@ -0,0 +1,50 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.otel; + +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SpanExporter; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public class TestExporter implements SpanExporter { + + public final List exportedSpans = Collections.synchronizedList(new ArrayList<>()); + + @Override + public CompletableResultCode export(Collection spans) { + exportedSpans.addAll(spans); + return CompletableResultCode.ofSuccess(); + } + + @Override + public CompletableResultCode flush() { + return null; + } + + @Override + public CompletableResultCode shutdown() { + return null; + } + + public List getExportedSpans() { + return exportedSpans; + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/spi/v1/AuditingHttpTransport.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/spi/v1/AuditingHttpTransport.java new file mode 100644 index 000000000000..edb7d64c14f5 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/spi/v1/AuditingHttpTransport.java @@ -0,0 +1,62 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.spi.v1; + +import com.google.api.client.http.HttpTransport; +import com.google.api.client.http.LowLevelHttpRequest; +import com.google.api.client.http.LowLevelHttpResponse; +import com.google.cloud.Tuple; +import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public final class AuditingHttpTransport extends HttpTransport { + private final LowLevelHttpResponse response; + private final List> buildRequestCalls; + private final List> addHeaderCalls; + + public AuditingHttpTransport(LowLevelHttpResponse response) { + this.response = response; + this.buildRequestCalls = Collections.synchronizedList(new ArrayList<>()); + this.addHeaderCalls = Collections.synchronizedList(new ArrayList<>()); + } + + public List> getBuildRequestCalls() { + return ImmutableList.copyOf(buildRequestCalls); + } + + public List> getAddHeaderCalls() { + return ImmutableList.copyOf(addHeaderCalls); + } + + @Override + protected LowLevelHttpRequest buildRequest(String method, String url) { + buildRequestCalls.add(Tuple.of(method, url)); + return new LowLevelHttpRequest() { + @Override + public void addHeader(String name, String value) { + addHeaderCalls.add(Tuple.of(name, value)); + } + + @Override + public LowLevelHttpResponse execute() { + return response; + } + }; + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/spi/v1/HttpRpcContextTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/spi/v1/HttpRpcContextTest.java new file mode 100644 index 000000000000..ce3f57f257dd --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/spi/v1/HttpRpcContextTest.java @@ -0,0 +1,128 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.spi.v1; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.api.client.testing.http.MockLowLevelHttpResponse; +import com.google.cloud.NoCredentials; +import com.google.cloud.TransportOptions; +import com.google.cloud.Tuple; +import com.google.cloud.WriteChannel; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.common.collect.ImmutableList; +import java.io.IOException; +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Optional; +import java.util.UUID; +import org.junit.Test; + +public class HttpRpcContextTest { + @Test + public void testNewInvocationId() { + UUID uuid = UUID.fromString("28220dff-1e8b-4770-9e10-022c2a99d8f3"); + HttpRpcContext testContext = new HttpRpcContext(() -> uuid); + + try { + assertThat(testContext.newInvocationId()).isEqualTo(uuid); + assertThat(testContext.getInvocationId()).isEqualTo(uuid); + // call again to ensure the id is consistent with our supplier + assertThat(testContext.newInvocationId()).isEqualTo(uuid); + assertThat(testContext.getInvocationId()).isEqualTo(uuid); + } finally { + testContext.clearInvocationId(); + } + } + + @Test + public void testInvocationIdIsPassedThrough() { + MockLowLevelHttpResponse response = + new MockLowLevelHttpResponse() + .setContentType("application/json") 
+ .setContent( + "{\n" + + " \"kind\": \"storage#serviceAccount\",\n" + + " \"email_address\":" + + " \"service-234234@gs-project-accounts.iam.gserviceaccount.com\"\n" + + "}\n") + .setStatusCode(200); + AuditingHttpTransport transport = new AuditingHttpTransport(response); + TransportOptions transportOptions = + HttpTransportOptions.newBuilder().setHttpTransportFactory(() -> transport).build(); + Storage service = + StorageOptions.getDefaultInstance().toBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .setTransportOptions(transportOptions) + .build() + .getService(); + service.getServiceAccount("test-project"); + Optional> anyXGoogApiClientWithGcclInvocationId = + transport.getAddHeaderCalls().stream() + .filter(t -> "x-goog-api-client".equals(t.x()) && t.y().contains("gccl-invocation-id/")) + .findFirst(); + assertTrue(anyXGoogApiClientWithGcclInvocationId.isPresent()); + assertThat(transport.getBuildRequestCalls()).hasSize(1); + } + + @Test + public void testInvocationIdNotInSignedURL_v2() throws IOException { + URL signedUrlV2 = + new URL( + "http://www.test.com/test-bucket/test1.txt?GoogleAccessId=testClient-test@test.com&Expires=1553839761&Signature=MJUBXAZ7"); + doTestInvocationIdNotInSignedURL(signedUrlV2); + } + + @Test + public void testInvocationIdNotInSignedURL_v4() throws IOException { + URL signedUrlV4 = + new URL( + "http://www.test.com/test-bucket/test1.txt?X-Goog-Algorithm=&X-Goog-Credential=&X-Goog-Date=&X-Goog-Expires=&X-Goog-SignedHeaders=&X-Goog-Signature=MJUBXAZ7"); + doTestInvocationIdNotInSignedURL(signedUrlV4); + } + + private void doTestInvocationIdNotInSignedURL(URL signedUrl) throws IOException { + MockLowLevelHttpResponse response = + new MockLowLevelHttpResponse() + .setContentType("text/plain") + .setHeaderNames(ImmutableList.of("Location")) + .setHeaderValues(ImmutableList.of("http://test")) + .setStatusCode(201); + AuditingHttpTransport transport = new AuditingHttpTransport(response); + 
TransportOptions transportOptions = + HttpTransportOptions.newBuilder().setHttpTransportFactory(() -> transport).build(); + Storage service = + StorageOptions.getDefaultInstance().toBuilder() + .setTransportOptions(transportOptions) + .build() + .getService(); + WriteChannel writer = service.writer(signedUrl); + writer.write(ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8))); + Optional> anyXGoogApiClientWithGcclInvocationId = + transport.getAddHeaderCalls().stream() + .filter(t -> "x-goog-api-client".equals(t.x()) && t.y().contains("gccl-invocation-id/")) + .findFirst(); + assertFalse(anyXGoogApiClientWithGcclInvocationId.isPresent()); + assertThat(transport.getBuildRequestCalls()).hasSize(1); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/testing/RemoteStorageHelperTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/testing/RemoteStorageHelperTest.java new file mode 100644 index 000000000000..877139be4b86 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/testing/RemoteStorageHelperTest.java @@ -0,0 +1,289 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.testing; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.gax.paging.Page; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobListOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import com.google.common.collect.ImmutableList; +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +public class RemoteStorageHelperTest { + + private static final String BUCKET_NAME = "bucket-name"; + private static final String PROJECT_ID = "project-id"; + private static final String JSON_KEY = + "{\n" + + " \"private_key_id\": \"somekeyid\",\n" + + " \"private_key\": \"-----BEGIN PRIVATE KEY-----\\n" + + "MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC+K2hSuFpAdrJI\\n" + + "nCgcDz2M7t7bjdlsadsasad+fvRSW6TjNQZ3p5LLQY1kSZRqBqylRkzteMOyHgaR\\n" + + "0Pmxh3ILCND5men43j3h4eDbrhQBuxfEMalkG92sL+PNQSETY2tnvXryOvmBRwa/\\n" + + "QP/9dJfIkIDJ9Fw9N4Bhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + "knddadwkwewcVxHFhcZJO+XWf6ofLUXpRwiTZakGMn8EE1uVa2LgczOjwWHGi99MFjxSer5m9\\n" + + "1tCa3/KEGKiS/YL71JvjwX3mb+cewlkcmweBKZHM2JPTk0ZednFSpVZMtycjkbLa\\n" + + "dYOS8V85AgMBewECggEBAKksaldajfDZDV6nGqbFjMiizAKJolr/M3OQw16K6o3/\\n" + + 
"0S31xIe3sSlgW0+UbYlF4U8KifhManD1apVSC3csafaspP4RZUHFhtBywLO9pR5c\\n" + + "r6S5aLp+gPWFyIp1pfXbWGvc5VY/v9x7ya1VEa6rXvLsKupSeWAW4tMj3eo/64ge\\n" + + "sdaceaLYw52KeBYiT6+vpsnYrEkAHO1fF/LavbLLOFJmFTMxmsNaG0tuiJHgjshB\\n" + + "82DpMCbXG9YcCgI/DbzuIjsdj2JC1cascSP//3PmefWysucBQe7Jryb6NQtASmnv\\n" + + "CdDw/0jmZTEjpe4S1lxfHplAhHFtdgYTvyYtaLZiVVkCgYEA8eVpof2rceecw/I6\\n" + + "5ng1q3Hl2usdWV/4mZMvR0fOemacLLfocX6IYxT1zA1FFJlbXSRsJMf/Qq39mOR2\\n" + + "SpW+hr4jCoHeRVYLgsbggtrevGmILAlNoqCMpGZ6vDmJpq6ECV9olliDvpPgWOP+\\n" + + "mYPDreFBGxWvQrADNbRt2dmGsrsCgYEAyUHqB2wvJHFqdmeBsaacewzV8x9WgmeX\\n" + + "gUIi9REwXlGDW0Mz50dxpxcKCAYn65+7TCnY5O/jmL0VRxU1J2mSWyWTo1C+17L0\\n" + + "3fUqjxL1pkefwecxwecvC+gFFYdJ4CQ/MHHXU81Lwl1iWdFCd2UoGddYaOF+KNeM\\n" + + "HC7cmqra+JsCgYEAlUNywzq8nUg7282E+uICfCB0LfwejuymR93CtsFgb7cRd6ak\\n" + + "ECR8FGfCpH8ruWJINllbQfcHVCX47ndLZwqv3oVFKh6pAS/vVI4dpOepP8++7y1u\\n" + + "coOvtreXCX6XqfrWDtKIvv0vjlHBhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + "kndj5uNl5SiuVxHFhcZJO+XWf6ofLUregtevZakGMn8EE1uVa2AY7eafmoU/nZPT\\n" + + "00YB0TBATdCbn/nBSuKDESkhSg9s2GEKQZG5hBmL5uCMfo09z3SfxZIhJdlerreP\\n" + + "J7gSidI12N+EZxYd4xIJh/HFDgp7RRO87f+WJkofMQKBgGTnClK1VMaCRbJZPriw\\n" + + "EfeFCoOX75MxKwXs6xgrw4W//AYGGUjDt83lD6AZP6tws7gJ2IwY/qP7+lyhjEqN\\n" + + "HtfPZRGFkGZsdaksdlaksd323423d+15/UvrlRSFPNj1tWQmNKkXyRDW4IG1Oa2p\\n" + + "rALStNBx5Y9t0/LQnFI4w3aG\\n" + + "-----END PRIVATE KEY-----\\n" + + "\",\n" + + " \"client_email\": \"someclientid@developer.gserviceaccount.com\",\n" + + " \"client_id\": \"someclientid.apps.googleusercontent.com\",\n" + + " \"type\": \"service_account\"\n" + + "}"; + private static final InputStream JSON_KEY_STREAM = new ByteArrayInputStream(JSON_KEY.getBytes()); + private static final StorageException RETRYABLE_EXCEPTION = new StorageException(409, ""); + private static final StorageException FATAL_EXCEPTION = new StorageException(500, ""); + private static final String BLOB_NAME2 = "n2"; + private static final BlobId BLOB_ID1 = BlobId.of(BUCKET_NAME, 
"n1"); + private static final BlobId BLOB_ID2 = BlobId.of(BUCKET_NAME, BLOB_NAME2); + + private Blob blob1; + private Blob blob2; + private List blobList; + private Page blobPage; + + @Before + public void setUp() { + blob1 = Mockito.mock(Blob.class); + blob2 = Mockito.mock(Blob.class); + blobList = ImmutableList.of(blob1, blob2); + blobPage = + new Page() { + + @Override + public boolean hasNextPage() { + return true; + } + + @Override + public String getNextPageToken() { + return "nextPageCursor"; + } + + @Override + public Page getNextPage() { + return null; + } + + @Override + public Iterable getValues() { + return blobList; + } + + @Override + public Iterable iterateAll() { + return blobList; + } + }; + } + + @Test + public void testForceDelete() throws InterruptedException, ExecutionException { + Storage storageMock = Mockito.mock(Storage.class); + when(blob1.getBlobId()).thenReturn(BLOB_ID1); + when(blob2.getBlobId()).thenReturn(BLOB_ID2); + + ArrayList ids = new ArrayList<>(); + ids.add(BLOB_ID1); + ids.add(BLOB_ID2); + when(storageMock.delete(ids)).thenReturn(Collections.nCopies(2, true)); + when(storageMock.list(BUCKET_NAME, BlobListOption.versions(true))).thenReturn(blobPage); + when(storageMock.delete(BUCKET_NAME)).thenReturn(true); + assertTrue(RemoteStorageHelper.forceDelete(storageMock, BUCKET_NAME, 5, TimeUnit.SECONDS)); + + verify(blob1).getBlobId(); + verify(blob2).getBlobId(); + verify(storageMock).delete(ids); + verify(storageMock).list(BUCKET_NAME, BlobListOption.versions(true)); + verify(storageMock).delete(BUCKET_NAME); + } + + @Test + public void testForceDeleteTimeout() throws InterruptedException, ExecutionException { + Storage storageMock = Mockito.mock(Storage.class); + + when(blob1.getBlobId()).thenReturn(BLOB_ID1); + when(blob2.getBlobId()).thenReturn(BLOB_ID2); + + ArrayList ids = new ArrayList<>(); + ids.add(BLOB_ID1); + ids.add(BLOB_ID2); + when(storageMock.delete(ids)).thenReturn(Collections.nCopies(2, true)); + + 
when(storageMock.list(BUCKET_NAME, BlobListOption.versions(true))).thenReturn(blobPage); + when(storageMock.delete(BUCKET_NAME)).thenThrow(RETRYABLE_EXCEPTION); + assertFalse( + RemoteStorageHelper.forceDelete(storageMock, BUCKET_NAME, 50, TimeUnit.MICROSECONDS)); + } + + @Test + public void testForceDeleteFail() throws InterruptedException, ExecutionException { + Storage storageMock = Mockito.mock(Storage.class); + when(blob1.getBlobId()).thenReturn(BLOB_ID1); + when(blob2.getBlobId()).thenReturn(BLOB_ID2); + ArrayList ids = new ArrayList<>(); + ids.add(BLOB_ID1); + ids.add(BLOB_ID2); + when(storageMock.delete(ids)).thenReturn(Collections.nCopies(2, true)); + when(storageMock.list(BUCKET_NAME, BlobListOption.versions(true))).thenReturn(blobPage); + when(storageMock.delete(BUCKET_NAME)).thenThrow(FATAL_EXCEPTION); + try { + RemoteStorageHelper.forceDelete(storageMock, BUCKET_NAME, 5, TimeUnit.SECONDS); + Assert.fail(); + } catch (ExecutionException ex) { + assertNotNull(ex.getMessage()); + } finally { + verify(blob1).getBlobId(); + verify(blob2).getBlobId(); + verify(storageMock).delete(ids); + verify(storageMock).list(BUCKET_NAME, BlobListOption.versions(true)); + verify(storageMock).delete(BUCKET_NAME); + } + } + + @Test + public void testForceDeleteNoTimeout() { + Storage storageMock = Mockito.mock(Storage.class); + when(blob1.getBlobId()).thenReturn(BLOB_ID1); + when(blob2.getBlobId()).thenReturn(BLOB_ID2); + ArrayList ids = new ArrayList<>(); + ids.add(BLOB_ID1); + ids.add(BLOB_ID2); + when(storageMock.delete(ids)).thenReturn(Collections.nCopies(2, true)); + when(storageMock.list(BUCKET_NAME, BlobListOption.versions(true))).thenReturn(blobPage); + when(storageMock.delete(BUCKET_NAME)).thenReturn(true); + RemoteStorageHelper.forceDelete(storageMock, BUCKET_NAME); + + verify(blob1).getBlobId(); + verify(blob2).getBlobId(); + verify(storageMock).delete(ids); + verify(storageMock).list(BUCKET_NAME, BlobListOption.versions(true)); + 
verify(storageMock).delete(BUCKET_NAME); + } + + @Test + public void testForceDeleteNoTimeoutFail() { + Storage storageMock = Mockito.mock(Storage.class); + when(blob1.getBlobId()).thenReturn(BLOB_ID1); + when(blob2.getBlobId()).thenReturn(BLOB_ID2); + ArrayList ids = new ArrayList<>(); + ids.add(BLOB_ID1); + ids.add(BLOB_ID2); + when(storageMock.delete(ids)).thenReturn(Collections.nCopies(2, true)); + when(storageMock.list(BUCKET_NAME, BlobListOption.versions(true))).thenReturn(blobPage); + when(storageMock.delete(BUCKET_NAME)).thenThrow(FATAL_EXCEPTION); + try { + RemoteStorageHelper.forceDelete(storageMock, BUCKET_NAME); + Assert.fail(); + } catch (StorageException ex) { + assertNotNull(ex.getMessage()); + } finally { + verify(blob1).getBlobId(); + verify(blob2).getBlobId(); + verify(storageMock).delete(ids); + verify(storageMock).list(BUCKET_NAME, BlobListOption.versions(true)); + verify(storageMock).delete(BUCKET_NAME); + } + } + + @Test + public void testForceDeleteRetriesWithUserProject() throws Exception { + final String USER_PROJECT = "user-project"; + Storage storageMock = Mockito.mock(Storage.class); + when(blob1.getBlobId()).thenReturn(BLOB_ID1); + when(blob2.getBlobId()).thenReturn(BLOB_ID2); + ArrayList ids = new ArrayList<>(); + ids.add(BLOB_ID1); + ids.add(BLOB_ID2); + when(storageMock.delete(ids)).thenReturn(ImmutableList.of(Boolean.TRUE, Boolean.FALSE)); + when(storageMock.delete( + BUCKET_NAME, BLOB_NAME2, Storage.BlobSourceOption.userProject(USER_PROJECT))) + .thenReturn(true); + when(storageMock.list( + BUCKET_NAME, BlobListOption.versions(true), BlobListOption.userProject(USER_PROJECT))) + .thenReturn(blobPage); + when(storageMock.delete(BUCKET_NAME, Storage.BucketSourceOption.userProject(USER_PROJECT))) + .thenReturn(true); + try { + RemoteStorageHelper.forceDelete(storageMock, BUCKET_NAME, 5, TimeUnit.SECONDS, USER_PROJECT); + } finally { + verify(blob1).getBlobId(); + verify(blob2).getBlobId(); + verify(storageMock).delete(ids); + 
verify(storageMock) + .delete(BUCKET_NAME, BLOB_NAME2, Storage.BlobSourceOption.userProject(USER_PROJECT)); + verify(storageMock) + .list( + BUCKET_NAME, BlobListOption.versions(true), BlobListOption.userProject(USER_PROJECT)); + verify(storageMock).delete(BUCKET_NAME, Storage.BucketSourceOption.userProject(USER_PROJECT)); + } + } + + @Test + public void testCreateFromStream() { + RemoteStorageHelper helper = RemoteStorageHelper.create(PROJECT_ID, JSON_KEY_STREAM); + StorageOptions options = helper.getOptions(); + assertEquals(PROJECT_ID, options.getProjectId()); + assertEquals(60000, ((HttpTransportOptions) options.getTransportOptions()).getConnectTimeout()); + assertEquals(60000, ((HttpTransportOptions) options.getTransportOptions()).getReadTimeout()); + assertEquals(10, options.getRetrySettings().getMaxAttempts()); + assertEquals(Duration.ofMillis(30000), options.getRetrySettings().getMaxRetryDelayDuration()); + assertEquals(Duration.ofMillis(120000), options.getRetrySettings().getTotalTimeoutDuration()); + assertEquals(Duration.ofMillis(250), options.getRetrySettings().getInitialRetryDelayDuration()); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/testing/StorageRpcTestBaseTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/testing/StorageRpcTestBaseTest.java new file mode 100644 index 000000000000..7feef9f18387 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/testing/StorageRpcTestBaseTest.java @@ -0,0 +1,612 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.testing; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +import com.google.api.services.storage.model.Bucket; +import com.google.api.services.storage.model.BucketAccessControl; +import com.google.api.services.storage.model.HmacKey; +import com.google.api.services.storage.model.HmacKeyMetadata; +import com.google.api.services.storage.model.Notification; +import com.google.api.services.storage.model.ObjectAccessControl; +import com.google.api.services.storage.model.Policy; +import com.google.api.services.storage.model.ServiceAccount; +import com.google.api.services.storage.model.StorageObject; +import com.google.api.services.storage.model.TestIamPermissionsResponse; +import com.google.cloud.Tuple; +import com.google.cloud.storage.spi.v1.RpcBatch; +import com.google.cloud.storage.spi.v1.StorageRpc; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class StorageRpcTestBaseTest { + + private Callable rpc; + + private static final StorageRpc STORAGE_RPC = new StorageRpcTestBase(); + private static final Map OPTIONS = new HashMap<>(); + private static final Bucket BUCKET = new Bucket().setName("fake-bucket"); + private static final byte[] BYTES = {0, 1, 2, 3, 4, 5, 6, 7}; + private static final StorageObject OBJECT = + new 
StorageObject().setName("object name").setBucket("bucket name"); + + @Before + public void setUp() { + rpc = null; + } + + @After + public void tearDown() throws Exception { + assertNotNull(rpc); + try { + rpc.call(); + fail("UnsupportedOperationException expected"); + } catch (UnsupportedOperationException e) { + // expected + } + } + + @Test + public void testCreateBucket() { + rpc = + new Callable() { + @Override + public Bucket call() { + return STORAGE_RPC.create(BUCKET, OPTIONS); + } + }; + } + + @Test + public void testCreateObject() { + rpc = + new Callable() { + @Override + public StorageObject call() { + return STORAGE_RPC.create(OBJECT, new ByteArrayInputStream(BYTES), OPTIONS); + } + }; + } + + @Test + public void testList() { + rpc = + new Callable>>() { + @Override + public Tuple> call() { + return STORAGE_RPC.list(OPTIONS); + } + }; + } + + @Test + public void testListBucket() { + rpc = + new Callable>>() { + @Override + public Tuple> call() { + return STORAGE_RPC.list(BUCKET.getName(), OPTIONS); + } + }; + } + + @Test + public void testGetBucket() { + rpc = + new Callable() { + @Override + public Bucket call() { + return STORAGE_RPC.get(BUCKET, OPTIONS); + } + }; + } + + @Test + public void testGetObject() { + rpc = + new Callable() { + @Override + public StorageObject call() { + return STORAGE_RPC.get(OBJECT, OPTIONS); + } + }; + } + + @Test + public void testPatchBucket() { + rpc = + new Callable() { + @Override + public Bucket call() { + return STORAGE_RPC.patch(BUCKET, OPTIONS); + } + }; + } + + @Test + public void testPatchObject() { + rpc = + new Callable() { + @Override + public StorageObject call() { + return STORAGE_RPC.patch(OBJECT, OPTIONS); + } + }; + } + + @Test + public void testDeleteBucket() { + rpc = + new Callable() { + @Override + public Boolean call() { + return STORAGE_RPC.delete(BUCKET, OPTIONS); + } + }; + } + + @Test + public void testDeleteObject() { + rpc = + new Callable() { + @Override + public Boolean call() { + return 
STORAGE_RPC.delete(OBJECT, OPTIONS); + } + }; + } + + @Test + public void testCreateBatch() { + rpc = + new Callable() { + @Override + public RpcBatch call() { + return STORAGE_RPC.createBatch(); + } + }; + } + + @Test + public void testCompose() { + rpc = + new Callable() { + @Override + public StorageObject call() { + return STORAGE_RPC.compose(null, OBJECT, OPTIONS); + } + }; + } + + @Test + public void testLoad() { + rpc = + new Callable() { + @Override + public byte[] call() { + return STORAGE_RPC.load(OBJECT, OPTIONS); + } + }; + } + + @Test + public void testReadBytes() { + rpc = + new Callable>() { + @Override + public Tuple call() { + return STORAGE_RPC.read(OBJECT, OPTIONS, 0, 0); + } + }; + } + + @Test + public void testReadOutputStream() { + rpc = + new Callable() { + @Override + public Long call() { + return STORAGE_RPC.read(OBJECT, OPTIONS, 0, new ByteArrayOutputStream(100)); + } + }; + } + + @Test + public void testOpenObject() { + rpc = + new Callable() { + @Override + public String call() { + return STORAGE_RPC.open(OBJECT, OPTIONS); + } + }; + } + + @Test + public void testOpenSignedURL() { + rpc = + new Callable() { + @Override + public String call() { + return STORAGE_RPC.open("signedURL"); + } + }; + } + + @Test + public void testWrite() { + rpc = + new Callable() { + @Override + public Void call() { + STORAGE_RPC.write("uploadId", new byte[10], 1, 2L, 3, false); + return null; + } + }; + } + + @Test + public void testOpenRewrite() { + rpc = + new Callable() { + @Override + public StorageRpc.RewriteResponse call() { + return STORAGE_RPC.openRewrite(null); + } + }; + } + + @Test + public void testContinueRewrite() { + rpc = + new Callable() { + @Override + public StorageRpc.RewriteResponse call() { + return STORAGE_RPC.continueRewrite(null); + } + }; + } + + @Test + public void testGetAclBucket() { + rpc = + new Callable() { + @Override + public BucketAccessControl call() { + return STORAGE_RPC.getAcl("bucket", "entity", OPTIONS); + } + }; + } + 
+ @Test + public void testGetAclObject() { + rpc = + new Callable() { + @Override + public ObjectAccessControl call() { + return STORAGE_RPC.getAcl("bucket", "object", 1L, "entity"); + } + }; + } + + @Test + public void testDeleteAclBucket() { + rpc = + new Callable() { + @Override + public Boolean call() { + return STORAGE_RPC.deleteAcl("bucketName", "entity", OPTIONS); + } + }; + } + + @Test + public void testDeleteAclObject() { + rpc = + new Callable() { + @Override + public Boolean call() { + return STORAGE_RPC.deleteAcl("bucketName", "object", 0L, "entity"); + } + }; + } + + @Test + public void testCreateAclBucket() { + rpc = + new Callable() { + @Override + public BucketAccessControl call() { + return STORAGE_RPC.createAcl(null, OPTIONS); + } + }; + } + + @Test + public void testCreateAclObject() { + rpc = + new Callable() { + @Override + public ObjectAccessControl call() { + return STORAGE_RPC.createAcl(null); + } + }; + } + + @Test + public void testPatchAclBucket() { + rpc = + new Callable() { + @Override + public ObjectAccessControl call() { + return STORAGE_RPC.createAcl(null); + } + }; + } + + @Test + public void testPatchAclObject() { + rpc = + new Callable() { + @Override + public ObjectAccessControl call() { + return STORAGE_RPC.patchAcl(null); + } + }; + } + + @Test + public void testListAclsBucket() { + rpc = + new Callable>() { + @Override + public List call() { + return STORAGE_RPC.listAcls("BUCKET_NAME", OPTIONS); + } + }; + } + + @Test + public void testListAclsObject() { + rpc = + new Callable>() { + @Override + public List call() { + return STORAGE_RPC.listAcls("BUCKET_NAME", "OBJECT_NAME", 100L); + } + }; + } + + @Test + public void testCreateHmacKey() { + rpc = + new Callable() { + @Override + public HmacKey call() { + return STORAGE_RPC.createHmacKey("account", OPTIONS); + } + }; + } + + @Test + public void testListHmacKeys() { + rpc = + new Callable>>() { + @Override + public Tuple> call() { + return STORAGE_RPC.listHmacKeys(OPTIONS); + } 
+ }; + } + + @Test + public void testUpdateHmacKey() { + rpc = + new Callable() { + @Override + public HmacKeyMetadata call() { + return STORAGE_RPC.updateHmacKey(null, OPTIONS); + } + }; + } + + @Test + public void testGetHmacKey() { + rpc = + new Callable() { + @Override + public HmacKeyMetadata call() { + return STORAGE_RPC.getHmacKey("account", OPTIONS); + } + }; + } + + @Test + public void testDeleteHmacKey() { + rpc = + new Callable() { + @Override + public Void call() { + STORAGE_RPC.deleteHmacKey(null, OPTIONS); + return null; + } + }; + } + + @Test + public void testGetDefaultAcl() { + rpc = + new Callable() { + @Override + public ObjectAccessControl call() { + return STORAGE_RPC.getDefaultAcl("bucket", "entity"); + } + }; + } + + @Test + public void testDeleteDefaultAcl() { + rpc = + new Callable() { + @Override + public Boolean call() { + return STORAGE_RPC.deleteDefaultAcl("bucket", "entity"); + } + }; + } + + @Test + public void testCreateDefaultAcl() { + rpc = + new Callable() { + @Override + public ObjectAccessControl call() { + return STORAGE_RPC.createDefaultAcl(null); + } + }; + } + + @Test + public void testPatchDefaultAcl() { + rpc = + new Callable() { + @Override + public ObjectAccessControl call() { + return STORAGE_RPC.patchDefaultAcl(null); + } + }; + } + + @Test + public void testListDefaultAcls() { + rpc = + new Callable>() { + @Override + public List call() { + return STORAGE_RPC.listDefaultAcls("bucket"); + } + }; + } + + @Test + public void testGetIamPolicy() { + rpc = + new Callable() { + @Override + public Policy call() { + return STORAGE_RPC.getIamPolicy("bucket", OPTIONS); + } + }; + } + + @Test + public void testSetIamPolicy() { + rpc = + new Callable() { + @Override + public Policy call() { + return STORAGE_RPC.setIamPolicy("bucket", null, OPTIONS); + } + }; + } + + @Test + public void testTestIamPermissions() { + rpc = + new Callable() { + @Override + public TestIamPermissionsResponse call() { + return 
STORAGE_RPC.testIamPermissions("bucket", null, OPTIONS); + } + }; + } + + @Test + public void testDeleteNotification() { + rpc = + new Callable() { + @Override + public Boolean call() { + return STORAGE_RPC.deleteNotification("bucket", "entity"); + } + }; + } + + @Test + public void testListNotifications() { + rpc = + new Callable>() { + @Override + public List call() { + return STORAGE_RPC.listNotifications("bucket"); + } + }; + } + + @Test + public void testCreateNotification() { + rpc = + new Callable() { + @Override + public Notification call() { + return STORAGE_RPC.createNotification("bucket", null); + } + }; + } + + @Test + public void testGetNotification() { + rpc = + new Callable() { + @Override + public Notification call() { + return STORAGE_RPC.getNotification("bucket", "notification"); + } + }; + } + + @Test + public void testLockRetentionPolicy() { + rpc = + new Callable() { + @Override + public Bucket call() { + return STORAGE_RPC.lockRetentionPolicy(BUCKET, OPTIONS); + } + }; + } + + @Test + public void testGetServiceAccount() { + rpc = + new Callable() { + @Override + public ServiceAccount call() { + return STORAGE_RPC.getServiceAccount("project"); + } + }; + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/transfermanager/TransferManagerConfigTestingInstances.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/transfermanager/TransferManagerConfigTestingInstances.java new file mode 100644 index 000000000000..da89ada5ae1c --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/transfermanager/TransferManagerConfigTestingInstances.java @@ -0,0 +1,36 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.transfermanager; + +import com.google.cloud.storage.StorageOptions; + +public final class TransferManagerConfigTestingInstances { + private TransferManagerConfigTestingInstances() {} + + public static TransferManagerConfig defaults() { + return defaults(StorageOptions.newBuilder().build()); + } + + public static TransferManagerConfig defaults(StorageOptions options) { + return TransferManagerConfig.newBuilder() + .setAllowDivideAndConquerDownload(false) + .setMaxWorkers(5) + .setPerWorkerBufferSize(512 * 1024) + .setStorageOptions(options) + .build(); + } +} diff --git a/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/transfermanager/TransferManagerTest.java b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/transfermanager/TransferManagerTest.java new file mode 100644 index 000000000000..cc489ca07afc --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/java/com/google/cloud/storage/transfermanager/TransferManagerTest.java @@ -0,0 +1,65 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.transfermanager; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.transfermanager.ParallelUploadConfig.UploadBlobInfoFactory; +import java.util.function.Function; +import org.junit.Test; + +public final class TransferManagerTest { + + @Test + public void uploadBlobInfoFactory_prefixObjectNames_leadingSlash() { + UploadBlobInfoFactory factory = UploadBlobInfoFactory.prefixObjectNames("asdf"); + + BlobInfo info = factory.apply("bucket", "/f/i/l/e/n/a/m/e.txt"); + assertThat(info.getBucket()).isEqualTo("bucket"); + assertThat(info.getName()).isEqualTo("asdf/f/i/l/e/n/a/m/e.txt"); + } + + @Test + public void uploadBlobInfoFactory_prefixObjectNames() { + UploadBlobInfoFactory factory = UploadBlobInfoFactory.prefixObjectNames("asdf"); + + BlobInfo info = factory.apply("bucket", "n/a/m/e.txt"); + assertThat(info.getBucket()).isEqualTo("bucket"); + assertThat(info.getName()).isEqualTo("asdf/n/a/m/e.txt"); + } + + @Test + public void uploadBlobInfoFactory_transformFileName() { + UploadBlobInfoFactory factory = + UploadBlobInfoFactory.transformFileName( + Function.identity().andThen(s -> s + "|").compose(s -> "|" + s)); + + BlobInfo info = factory.apply("bucket", "/e.txt"); + assertThat(info.getBucket()).isEqualTo("bucket"); + assertThat(info.getName()).isEqualTo("|/e.txt|"); + } + + @Test + public void uploadBlobInfoFactory_default_doesNotModify() { + UploadBlobInfoFactory factory = UploadBlobInfoFactory.defaultInstance(); + + BlobInfo info = factory.apply("bucket", "/e.txt"); + assertThat(info.getBucket()).isEqualTo("bucket"); + assertThat(info.getName()).isEqualTo("/e.txt"); + } +} diff --git a/java-storage/google-cloud-storage/src/test/resources/META-INF/native-image/com/google/cloud/storage/native-image.properties 
b/java-storage/google-cloud-storage/src/test/resources/META-INF/native-image/com/google/cloud/storage/native-image.properties new file mode 100644 index 000000000000..dd22e6513f50 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/resources/META-INF/native-image/com/google/cloud/storage/native-image.properties @@ -0,0 +1,8 @@ +# The JUnitFeature which is brought in by Graal explicitly initializes +# Parameterized at image build time. This causes ParallelParameterized and +# subsequently com.google.cloud.storage.conformance.retry.ITRetryConformanceTest +# and other classes ITRetryConformanceTest references to also be initialized at +# build time. Initializing these classes explicitly at build time results in a +# successful build. +Args = \ + --initialize-at-build-time=net.jqwik diff --git a/java-storage/google-cloud-storage/src/test/resources/META-INF/native-image/com/google/cloud/storage/reflect-config.json b/java-storage/google-cloud-storage/src/test/resources/META-INF/native-image/com/google/cloud/storage/reflect-config.json new file mode 100644 index 000000000000..500a610c113d --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/resources/META-INF/native-image/com/google/cloud/storage/reflect-config.json @@ -0,0 +1,1518 @@ +[ + { + "name":"org.apache.commons.logging.LogFactory", + "allDeclaredFields":true, + "allDeclaredMethods":true, + "allDeclaredConstructors": true + }, + { + "name":"org.apache.commons.logging.impl.Jdk14Logger", + "methods":[{"name":"","parameterTypes":["java.lang.String"] }] + }, + { + "name":"org.apache.commons.logging.impl.LogFactoryImpl", + "allDeclaredFields":true, + "allDeclaredMethods":true, + "methods":[{"name":"","parameterTypes":[] }] + }, + { "name": "com.google.protobuf.GeneratedMessageV3", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"com.google.protobuf.GeneratedMessageV3$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.AltsContext", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.AltsContext$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.Endpoint", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.Endpoint$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.HandshakeProtocol", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.HandshakerReq", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.HandshakerReq$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.HandshakerResp", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.HandshakerResp$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.HandshakerResult", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.HandshakerResult$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.HandshakerStatus", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.HandshakerStatus$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.Identity", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.Identity$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.NetworkProtocol", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.NextHandshakeMessageReq", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.NextHandshakeMessageReq$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.RpcProtocolVersions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.RpcProtocolVersions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.RpcProtocolVersions$Version", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.RpcProtocolVersions$Version$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.SecurityLevel", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.ServerHandshakeParameters", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.ServerHandshakeParameters$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.StartClientHandshakeReq", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.StartClientHandshakeReq$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.StartServerHandshakeReq", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.alts.internal.StartServerHandshakeReq$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.Address", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.Address$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.Address$Type", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.ClientHeader", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.ClientHeader$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.GrpcLogEntry", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.GrpcLogEntry$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.GrpcLogEntry$EventType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.GrpcLogEntry$Logger", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.Message", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.Message$Builder", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.Metadata", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.Metadata$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.MetadataEntry", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.MetadataEntry$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.ServerHeader", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.ServerHeader$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.Trailer", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.binarylog.v1.Trailer$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Address", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Address$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Address$OtherAddress", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Address$OtherAddress$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Address$TcpIpAddress", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Address$TcpIpAddress$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Address$UdsAddress", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Address$UdsAddress$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Channel", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Channel$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ChannelConnectivityState", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ChannelConnectivityState$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ChannelConnectivityState$State", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ChannelData", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ChannelData$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ChannelRef", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": 
true }, + { "name": "io.grpc.channelz.v1.ChannelRef$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ChannelTrace", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ChannelTrace$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ChannelTraceEvent", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ChannelTraceEvent$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ChannelTraceEvent$Severity", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetChannelRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetChannelRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.channelz.v1.GetChannelResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetChannelResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetServerRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetServerRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetServerResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetServerResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetServerSocketsRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetServerSocketsRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.channelz.v1.GetServerSocketsResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetServerSocketsResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetServersRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetServersRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetServersResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetServersResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetSocketRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetSocketRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.channelz.v1.GetSocketResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetSocketResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetSubchannelRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetSubchannelRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetSubchannelResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetSubchannelResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetTopChannelsRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetTopChannelsRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.channelz.v1.GetTopChannelsResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.GetTopChannelsResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Security", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Security$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Security$OtherSecurity", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Security$OtherSecurity$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Security$Tls", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Security$Tls$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Server", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Server$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ServerData", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ServerData$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ServerRef", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.ServerRef$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Socket", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Socket$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SocketData", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": 
true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SocketData$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SocketOption", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SocketOption$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SocketOptionLinger", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SocketOptionLinger$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SocketOptionTcpInfo", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SocketOptionTcpInfo$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SocketOptionTimeout", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SocketOptionTimeout$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SocketRef", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SocketRef$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Subchannel", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.Subchannel$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SubchannelRef", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.channelz.v1.SubchannelRef$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.health.v1.HealthCheckRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.health.v1.HealthCheckRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.health.v1.HealthCheckResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.health.v1.HealthCheckResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.health.v1.HealthCheckResponse$ServingStatus", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.ClientStats", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.ClientStats$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.ClientStatsPerToken", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.ClientStatsPerToken$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.FallbackResponse", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.FallbackResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.InitialLoadBalanceRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.InitialLoadBalanceRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.InitialLoadBalanceResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.InitialLoadBalanceResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.LoadBalanceRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.LoadBalanceRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.LoadBalanceResponse", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.LoadBalanceResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.Server", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.Server$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.ServerList", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lb.v1.ServerList$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.GrpcKeyBuilder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.GrpcKeyBuilder$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.GrpcKeyBuilder$ExtraKeys", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.GrpcKeyBuilder$ExtraKeys$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.GrpcKeyBuilder$Name", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.GrpcKeyBuilder$Name$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.HttpKeyBuilder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.HttpKeyBuilder$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.NameMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.NameMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.RouteLookupClusterSpecifier", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.RouteLookupClusterSpecifier$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.RouteLookupConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.RouteLookupConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.RouteLookupRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.RouteLookupRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.RouteLookupRequest$Reason", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.RouteLookupResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.lookup.v1.RouteLookupResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": 
true }, + { "name": "io.grpc.reflection.v1.ErrorResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.ErrorResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.ExtensionNumberResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.ExtensionNumberResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.ExtensionRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.ExtensionRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.FileDescriptorResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.FileDescriptorResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": 
true }, + { "name": "io.grpc.reflection.v1.ListServiceResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.ListServiceResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.ServerReflectionRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.ServerReflectionRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.ServerReflectionResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.ServerReflectionResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.ServiceResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1.ServiceResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ErrorResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ErrorResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ExtensionNumberResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ExtensionNumberResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ExtensionRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ExtensionRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.FileDescriptorResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.FileDescriptorResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ListServiceResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ListServiceResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ServerReflectionRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ServerReflectionRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ServerReflectionResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ServerReflectionResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ServiceResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.reflection.v1alpha.ServiceResponse$Builder", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.udpa.udpa.type.v1.TypedStruct", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.udpa.udpa.type.v1.TypedStruct$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.Authority", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.Authority$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.CidrRange", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.CidrRange$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.CollectionEntry", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { 
"name": "io.grpc.xds.shaded.com.github.xds.core.v3.CollectionEntry$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.CollectionEntry$InlineEntry", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.CollectionEntry$InlineEntry$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.ContextParams", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.ContextParams$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.ResourceLocator", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.ResourceLocator$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.ResourceLocator$Directive", "queryAllDeclaredConstructors": 
true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.ResourceLocator$Directive$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.ResourceLocator$Scheme", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.ResourceName", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.ResourceName$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.TypedExtensionConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.core.v3.TypedExtensionConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.data.orca.v3.OrcaLoadReport", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.data.orca.v3.OrcaLoadReport$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.service.orca.v3.OrcaLoadReportRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.service.orca.v3.OrcaLoadReportRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.CelMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.CelMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.HttpAttributesCelMatchInput", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.HttpAttributesCelMatchInput$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, 
+ { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.ListStringMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.ListStringMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherList", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherList$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherList$FieldMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherList$FieldMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherList$Predicate", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherList$Predicate$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherList$Predicate$PredicateList", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherList$Predicate$PredicateList$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherList$Predicate$SinglePredicate", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherList$Predicate$SinglePredicate$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherTree", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherTree$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherTree$MatchMap", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$MatcherTree$MatchMap$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$OnMatch", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.Matcher$OnMatch$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.RegexMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.RegexMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.RegexMatcher$GoogleRE2", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.RegexMatcher$GoogleRE2$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.StringMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.matcher.v3.StringMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.v3.CelExpression", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.v3.CelExpression$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.com.github.xds.type.v3.CelExtractString", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.v3.CelExtractString$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.v3.TypedStruct", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.github.xds.type.v3.TypedStruct$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.CheckedExpr", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.CheckedExpr$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Constant", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Constant$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, 
"queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Decl", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Decl$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Decl$FunctionDecl", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Decl$FunctionDecl$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Decl$FunctionDecl$Overload", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Decl$FunctionDecl$Overload$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Decl$IdentDecl", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Decl$IdentDecl$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$Call", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$Call$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$Comprehension", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$Comprehension$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$CreateList", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$CreateList$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$CreateStruct", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$CreateStruct$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$CreateStruct$Entry", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$CreateStruct$Entry$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$Ident", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$Ident$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, 
"queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$Select", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Expr$Select$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.ParsedExpr", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.ParsedExpr$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Reference", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Reference$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.SourceInfo", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.com.google.api.expr.v1alpha1.SourceInfo$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.SourcePosition", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.SourcePosition$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Type", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Type$AbstractType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Type$AbstractType$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Type$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Type$FunctionType", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Type$FunctionType$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Type$ListType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Type$ListType$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Type$MapType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Type$MapType$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Type$PrimitiveType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.com.google.api.expr.v1alpha1.Type$WellKnownType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.CheckedExpr", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.CheckedExpr$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Constant", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Constant$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Decl", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Decl$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Decl$FunctionDecl", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Decl$FunctionDecl$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Decl$FunctionDecl$Overload", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Decl$FunctionDecl$Overload$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Decl$IdentDecl", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Decl$IdentDecl$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$Call", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$Call$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": 
true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$Comprehension", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$Comprehension$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$CreateList", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$CreateList$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$CreateStruct", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$CreateStruct$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$CreateStruct$Entry", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.dev.cel.expr.Expr$CreateStruct$Entry$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$Ident", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$Ident$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$Select", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Expr$Select$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.ParsedExpr", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.ParsedExpr$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Reference", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Reference$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.SourceInfo", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.SourceInfo$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.SourceInfo$Extension", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.SourceInfo$Extension$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.SourceInfo$Extension$Component", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.SourceInfo$Extension$Version", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.SourceInfo$Extension$Version$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, 
"queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Type", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Type$AbstractType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Type$AbstractType$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Type$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Type$FunctionType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Type$FunctionType$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Type$ListType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Type$ListType$Builder", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Type$MapType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Type$MapType$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Type$PrimitiveType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.dev.cel.expr.Type$WellKnownType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.BootstrapConfigDump", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.BootstrapConfigDump$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ClientResourceStatus", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { 
"name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ClustersConfigDump", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ClustersConfigDump$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ClustersConfigDump$DynamicCluster", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ClustersConfigDump$DynamicCluster$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ClustersConfigDump$StaticCluster", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ClustersConfigDump$StaticCluster$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ConfigDump", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ConfigDump$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.EcdsConfigDump", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.EcdsConfigDump$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.EcdsConfigDump$EcdsFilterConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.EcdsConfigDump$EcdsFilterConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.EndpointsConfigDump", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.EndpointsConfigDump$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.EndpointsConfigDump$DynamicEndpointConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.EndpointsConfigDump$DynamicEndpointConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.EndpointsConfigDump$StaticEndpointConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.EndpointsConfigDump$StaticEndpointConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ListenersConfigDump", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ListenersConfigDump$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ListenersConfigDump$DynamicListener", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ListenersConfigDump$DynamicListener$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ListenersConfigDump$DynamicListenerState", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ListenersConfigDump$DynamicListenerState$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ListenersConfigDump$StaticListener", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ListenersConfigDump$StaticListener$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.RoutesConfigDump", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.RoutesConfigDump$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.RoutesConfigDump$DynamicRouteConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.RoutesConfigDump$DynamicRouteConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.RoutesConfigDump$StaticRouteConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.RoutesConfigDump$StaticRouteConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ScopedRoutesConfigDump", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ScopedRoutesConfigDump$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ScopedRoutesConfigDump$DynamicScopedRouteConfigs", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ScopedRoutesConfigDump$DynamicScopedRouteConfigs$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ScopedRoutesConfigDump$InlineScopedRouteConfigs", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.ScopedRoutesConfigDump$InlineScopedRouteConfigs$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.SecretsConfigDump", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.SecretsConfigDump$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.SecretsConfigDump$DynamicSecret", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.SecretsConfigDump$DynamicSecret$Builder", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.SecretsConfigDump$StaticSecret", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.SecretsConfigDump$StaticSecret$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.UpdateFailureState", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.admin.v3.UpdateFailureState$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.AccessLog", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.AccessLog$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.AccessLogFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, 
"queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.AccessLogFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.AndFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.AndFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.ComparisonFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.ComparisonFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.ComparisonFilter$Op", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.DurationFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": 
true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.DurationFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.ExtensionFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.ExtensionFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.GrpcStatusFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.GrpcStatusFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.GrpcStatusFilter$Status", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.HeaderFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.HeaderFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.LogTypeFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.LogTypeFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.MetadataFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.MetadataFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.NotHealthCheckFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.NotHealthCheckFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.OrFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.OrFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.ResponseFlagFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.ResponseFlagFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.RuntimeFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.RuntimeFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.StatusCodeFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.StatusCodeFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.TraceableFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.accesslog.v3.TraceableFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Admin", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Admin$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap$ApplicationLogConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap$ApplicationLogConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap$ApplicationLogConfig$LogFormat", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap$ApplicationLogConfig$LogFormat$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap$DeferredStatOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap$DeferredStatOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap$DynamicResources", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, 
"queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap$DynamicResources$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap$GrpcAsyncClientManagerConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap$GrpcAsyncClientManagerConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap$StaticResources", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Bootstrap$StaticResources$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.ClusterManager", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.ClusterManager$Builder", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.ClusterManager$OutlierDetection", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.ClusterManager$OutlierDetection$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.CustomInlineHeader", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.CustomInlineHeader$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.CustomInlineHeader$InlineHeaderType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.FatalAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.FatalAction$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.LayeredRuntime", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.LayeredRuntime$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.MemoryAllocatorManager", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.MemoryAllocatorManager$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Runtime", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Runtime$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.RuntimeLayer", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.RuntimeLayer$AdminLayer", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.RuntimeLayer$AdminLayer$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.RuntimeLayer$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.RuntimeLayer$DiskLayer", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.RuntimeLayer$DiskLayer$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.RuntimeLayer$RtdsLayer", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { 
"name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.RuntimeLayer$RtdsLayer$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Watchdog", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Watchdog$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Watchdog$WatchdogAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Watchdog$WatchdogAction$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Watchdog$WatchdogAction$WatchdogEvent", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Watchdogs", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + 
{ "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.bootstrap.v3.Watchdogs$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.CircuitBreakers", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.CircuitBreakers$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.CircuitBreakers$Thresholds", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.CircuitBreakers$Thresholds$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.CircuitBreakers$Thresholds$RetryBudget", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.CircuitBreakers$Thresholds$RetryBudget$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": 
true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$ClusterProtocolSelection", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$CommonLbConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$CommonLbConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$CommonLbConfig$ConsistentHashingLbConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$CommonLbConfig$ConsistentHashingLbConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$CommonLbConfig$LocalityWeightedLbConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$CommonLbConfig$LocalityWeightedLbConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$CommonLbConfig$ZoneAwareLbConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$CommonLbConfig$ZoneAwareLbConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$CustomClusterType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$CustomClusterType$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$DiscoveryType", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$DnsLookupFamily", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$EdsClusterConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$EdsClusterConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$LbPolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$LbSubsetConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$LbSubsetConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$LbSubsetConfig$LbSubsetFallbackPolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$LbSubsetConfig$LbSubsetMetadataFallbackPolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$LbSubsetConfig$LbSubsetSelector", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$LbSubsetConfig$LbSubsetSelector$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$LbSubsetConfig$LbSubsetSelector$LbSubsetSelectorFallbackPolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$LeastRequestLbConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$LeastRequestLbConfig$Builder", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$MaglevLbConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$MaglevLbConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$OriginalDstLbConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$OriginalDstLbConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$PreconnectPolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$PreconnectPolicy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$RefreshRate", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$RefreshRate$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$RingHashLbConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$RingHashLbConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$RingHashLbConfig$HashFunction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$RoundRobinLbConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$RoundRobinLbConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$SlowStartConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$SlowStartConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$TransportSocketMatch", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Cluster$TransportSocketMatch$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.ClusterCollection", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.ClusterCollection$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Filter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.Filter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.LoadBalancingPolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.LoadBalancingPolicy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.LoadBalancingPolicy$Policy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.LoadBalancingPolicy$Policy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.OutlierDetection", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.OutlierDetection$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.TrackClusterStats", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.TrackClusterStats$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.UpstreamConnectionOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.UpstreamConnectionOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.UpstreamConnectionOptions$FirstAddressFamilyVersion", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.UpstreamConnectionOptions$HappyEyeballsConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.cluster.v3.UpstreamConnectionOptions$HappyEyeballsConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Address", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Address$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.AggregatedConfigSource", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.AggregatedConfigSource$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.AlternateProtocolsCacheOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.AlternateProtocolsCacheOptions$AlternateProtocolsCacheEntry", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.AlternateProtocolsCacheOptions$AlternateProtocolsCacheEntry$Builder", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.AlternateProtocolsCacheOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ApiConfigSource", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ApiConfigSource$ApiType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ApiConfigSource$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ApiVersion", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.AsyncDataSource", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.AsyncDataSource$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, 
"queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.BackoffStrategy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.BackoffStrategy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.BindConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.BindConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.BuildVersion", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.BuildVersion$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.CidrRange", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.CidrRange$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ConfigSource", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ConfigSource$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ControlPlane", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ControlPlane$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.DataSource", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.DataSource$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.DnsResolutionConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.DnsResolutionConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.DnsResolverOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.DnsResolverOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.EnvoyInternalAddress", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.EnvoyInternalAddress$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.EventServiceConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.EventServiceConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Extension", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Extension$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ExtensionConfigSource", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ExtensionConfigSource$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ExtraSourceAddress", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ExtraSourceAddress$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcProtocolOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcProtocolOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$EnvoyGrpc", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$EnvoyGrpc$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$CallCredentials", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$CallCredentials$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$CallCredentials$GoogleIAMCredentials", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$CallCredentials$GoogleIAMCredentials$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$CallCredentials$MetadataCredentialsFromPlugin", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$CallCredentials$MetadataCredentialsFromPlugin$Builder", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$CallCredentials$ServiceAccountJWTAccessCredentials", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$CallCredentials$ServiceAccountJWTAccessCredentials$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$CallCredentials$StsService", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$CallCredentials$StsService$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$ChannelArgs", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$ChannelArgs$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$ChannelArgs$Value", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$ChannelArgs$Value$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$ChannelCredentials", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$ChannelCredentials$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$GoogleLocalCredentials", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$GoogleLocalCredentials$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$SslCredentials", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.GrpcService$GoogleGrpc$SslCredentials$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HeaderMap", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HeaderMap$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HeaderValue", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HeaderValue$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HeaderValueOption", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HeaderValueOption$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HeaderValueOption$HeaderAppendAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$CustomHealthCheck", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$CustomHealthCheck$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$GrpcHealthCheck", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$GrpcHealthCheck$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$HttpHealthCheck", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$HttpHealthCheck$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$Payload", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$Payload$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$RedisHealthCheck", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$RedisHealthCheck$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": 
true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$TcpHealthCheck", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$TcpHealthCheck$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$TlsOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthCheck$TlsOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthStatus", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthStatusSet", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HealthStatusSet$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Http1ProtocolOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Http1ProtocolOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Http1ProtocolOptions$HeaderKeyFormat", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Http1ProtocolOptions$HeaderKeyFormat$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Http1ProtocolOptions$HeaderKeyFormat$ProperCaseWords", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Http1ProtocolOptions$HeaderKeyFormat$ProperCaseWords$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Http2ProtocolOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": 
true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Http2ProtocolOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Http2ProtocolOptions$SettingsParameter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Http2ProtocolOptions$SettingsParameter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Http3ProtocolOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Http3ProtocolOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HttpProtocolOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HttpProtocolOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HttpProtocolOptions$HeadersWithUnderscoresAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HttpService", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HttpService$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HttpUri", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.HttpUri$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.JsonFormatOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.JsonFormatOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.KeepaliveSettings", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.KeepaliveSettings$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.KeyValue", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.KeyValue$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.KeyValueAppend", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.KeyValueAppend$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.KeyValueAppend$KeyValueAppendAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": 
true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.KeyValueMutation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.KeyValueMutation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.KeyValuePair", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.KeyValuePair$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Locality", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Locality$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Metadata", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Metadata$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Node", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Node$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.PathConfigSource", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.PathConfigSource$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.PerHostConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.PerHostConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Pipe", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.Pipe$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ProxyProtocolConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ProxyProtocolConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ProxyProtocolConfig$Version", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ProxyProtocolPassThroughTLVs", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ProxyProtocolPassThroughTLVs$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.ProxyProtocolPassThroughTLVs$PassTLVsMatchType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.QueryParameter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.QueryParameter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.QuicKeepAliveSettings", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.QuicKeepAliveSettings$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.QuicProtocolOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.QuicProtocolOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RateLimitSettings", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RateLimitSettings$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RemoteDataSource", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RemoteDataSource$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RequestMethod", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RetryPolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RetryPolicy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RetryPolicy$RetryHostPredicate", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RetryPolicy$RetryHostPredicate$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RetryPolicy$RetryPriority", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RetryPolicy$RetryPriority$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RoutingPriority", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RuntimeDouble", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RuntimeDouble$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RuntimeFeatureFlag", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RuntimeFeatureFlag$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RuntimeFractionalPercent", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RuntimeFractionalPercent$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RuntimePercent", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RuntimePercent$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RuntimeUInt32", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.RuntimeUInt32$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SchemeHeaderTransformation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SchemeHeaderTransformation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SelfConfigSource", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SelfConfigSource$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketAddress", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketAddress$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketAddress$Protocol", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketCmsgHeaders", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketCmsgHeaders$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketOption", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketOption$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketOption$SocketState", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketOption$SocketType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketOption$SocketType$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketOption$SocketType$Datagram", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketOption$SocketType$Datagram$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketOption$SocketType$Stream", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketOption$SocketType$Stream$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketOptionsOverride", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SocketOptionsOverride$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SubstitutionFormatString", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.SubstitutionFormatString$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.TcpKeepalive", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.TcpKeepalive$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.TcpProtocolOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.TcpProtocolOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.TlvEntry", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.TlvEntry$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.TrafficDirection", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.TransportSocket", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.TransportSocket$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.TypedExtensionConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.TypedExtensionConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.UdpSocketConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.UdpSocketConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.UpstreamHttpProtocolOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.UpstreamHttpProtocolOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.WatchedDirectory", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.core.v3.WatchedDirectory$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment$Policy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment$Policy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment$Policy$DropOverload", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment$Policy$DropOverload$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.ClusterStats", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.ClusterStats$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.ClusterStats$DroppedRequests", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.ClusterStats$DroppedRequests$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.Endpoint", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.Endpoint$AdditionalAddress", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.Endpoint$AdditionalAddress$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.Endpoint$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.Endpoint$HealthCheckConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.Endpoint$HealthCheckConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.EndpointLoadMetricStats", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.EndpointLoadMetricStats$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.LbEndpoint", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.LbEndpoint$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.LbEndpointCollection", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.LbEndpointCollection$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.LedsClusterLocalityConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.LedsClusterLocalityConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints$LbEndpointList", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints$LbEndpointList$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats$Builder", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.UpstreamEndpointStats", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.UpstreamEndpointStats$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.UpstreamLocalityStats", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.endpoint.v3.UpstreamLocalityStats$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ActiveRawUdpListenerConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ActiveRawUdpListenerConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.AdditionalAddress", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.AdditionalAddress$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ApiListener", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ApiListener$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ApiListenerManager", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ApiListenerManager$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Filter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Filter$Builder", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.FilterChain", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.FilterChain$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.FilterChainMatch", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.FilterChainMatch$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.FilterChainMatch$ConnectionSourceType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Listener", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Listener$Builder", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Listener$ConnectionBalanceConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Listener$ConnectionBalanceConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Listener$ConnectionBalanceConfig$ExactBalance", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Listener$ConnectionBalanceConfig$ExactBalance$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Listener$DeprecatedV1", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Listener$DeprecatedV1$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Listener$DrainType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Listener$FcdsConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Listener$FcdsConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Listener$InternalListenerConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.Listener$InternalListenerConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ListenerCollection", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ListenerCollection$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": 
true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ListenerFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ListenerFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ListenerFilterChainMatchPredicate", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ListenerFilterChainMatchPredicate$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ListenerFilterChainMatchPredicate$MatchSet", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ListenerFilterChainMatchPredicate$MatchSet$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ListenerManager", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ListenerManager$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.QuicProtocolOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.QuicProtocolOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.UdpListenerConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.UdpListenerConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ValidationListenerManager", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.listener.v3.ValidationListenerManager$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, 
"queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.DogStatsdSink", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.DogStatsdSink$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.HistogramBucketSettings", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.HistogramBucketSettings$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.HystrixSink", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.HystrixSink$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.StatsConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.StatsConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.StatsMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.StatsMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.StatsSink", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.StatsSink$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.StatsdSink", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.StatsdSink$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.TagSpecifier", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.metrics.v3.TagSpecifier$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.BufferFactoryConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.BufferFactoryConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.LoadShedPoint", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.LoadShedPoint$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.OverloadAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { 
"name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.OverloadAction$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.OverloadManager", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.OverloadManager$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.ResourceMonitor", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.ResourceMonitor$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.ScaleTimersOverloadActionConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.ScaleTimersOverloadActionConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": 
true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.ScaleTimersOverloadActionConfig$ScaleTimer", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.ScaleTimersOverloadActionConfig$ScaleTimer$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.ScaleTimersOverloadActionConfig$TimerType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.ScaledTrigger", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.ScaledTrigger$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.ThresholdTrigger", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.ThresholdTrigger$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.Trigger", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.overload.v3.Trigger$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Action", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Action$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.MetadataSource", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Permission", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Permission$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Permission$Set", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Permission$Set$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Policy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Policy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Principal", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Principal$Authenticated", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Principal$Authenticated$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Principal$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Principal$Set", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.Principal$Set$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.RBAC", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.RBAC$Action", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.RBAC$AuditLoggingOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.RBAC$AuditLoggingOptions$AuditCondition", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.RBAC$AuditLoggingOptions$AuditLoggerConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.RBAC$AuditLoggingOptions$AuditLoggerConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.RBAC$AuditLoggingOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.RBAC$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.SourcedMetadata", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.rbac.v3.SourcedMetadata$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.ClusterSpecifierPlugin", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + 
{ "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.ClusterSpecifierPlugin$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.CorsPolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.CorsPolicy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.Decorator", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.Decorator$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.DirectResponseAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.DirectResponseAction$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.FilterAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.FilterAction$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.FilterConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.FilterConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.HeaderMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.HeaderMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.HedgePolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.HedgePolicy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.InternalRedirectPolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.InternalRedirectPolicy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.NonForwardingAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.NonForwardingAction$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.QueryParameterMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.QueryParameterMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$DestinationCluster", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$DestinationCluster$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$DynamicMetaData", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$DynamicMetaData$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": 
true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$GenericKey", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$GenericKey$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$HeaderValueMatch", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$HeaderValueMatch$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$MaskedRemoteAddress", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$MaskedRemoteAddress$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$MetaData", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$MetaData$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$MetaData$Source", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$QueryParameterValueMatch", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$QueryParameterValueMatch$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$QueryParameters", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$QueryParameters$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$RemoteAddress", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$RemoteAddress$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$RequestHeaders", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$RequestHeaders$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$SourceCluster", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Action$SourceCluster$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$HitsAddend", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$HitsAddend$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Override", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Override$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Override$DynamicMetadata", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RateLimit$Override$DynamicMetadata$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RedirectAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { 
"name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RedirectAction$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RedirectAction$RedirectResponseCode", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RetryPolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RetryPolicy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RetryPolicy$RateLimitedRetryBackOff", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RetryPolicy$RateLimitedRetryBackOff$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RetryPolicy$ResetHeader", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": 
true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RetryPolicy$ResetHeader$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RetryPolicy$ResetHeaderFormat", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RetryPolicy$RetryBackOff", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RetryPolicy$RetryBackOff$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RetryPolicy$RetryHostPredicate", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RetryPolicy$RetryHostPredicate$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RetryPolicy$RetryPriority", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RetryPolicy$RetryPriority$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.Route", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.Route$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$ClusterNotFoundResponseCode", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy$ConnectionProperties", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy$ConnectionProperties$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy$Cookie", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy$Cookie$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy$CookieAttribute", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy$CookieAttribute$Builder", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy$FilterState", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy$FilterState$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy$Header", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy$Header$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy$QueryParameter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$HashPolicy$QueryParameter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$InternalRedirectAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$MaxStreamDuration", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$MaxStreamDuration$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$RequestMirrorPolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$RequestMirrorPolicy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$UpgradeConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$UpgradeConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$UpgradeConfig$ConnectConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteAction$UpgradeConfig$ConnectConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteConfiguration", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteConfiguration$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteList", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteList$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteMatch", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteMatch$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteMatch$ConnectMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteMatch$ConnectMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteMatch$GrpcRouteMatchOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteMatch$GrpcRouteMatchOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteMatch$TlsContextMatchOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.RouteMatch$TlsContextMatchOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, 
"queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.ScopedRouteConfiguration", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.ScopedRouteConfiguration$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.ScopedRouteConfiguration$Key", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.ScopedRouteConfiguration$Key$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.ScopedRouteConfiguration$Key$Fragment", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.ScopedRouteConfiguration$Key$Fragment$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.Tracing", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.Tracing$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.Vhds", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.Vhds$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.VirtualCluster", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.VirtualCluster$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.VirtualHost", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.VirtualHost$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.VirtualHost$TlsRequirementType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.WeightedCluster", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.WeightedCluster$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.WeightedCluster$ClusterWeight", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.route.v3.WeightedCluster$ClusterWeight$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.DatadogConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.DatadogConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.DatadogRemoteConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.DatadogRemoteConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.DynamicOtConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.DynamicOtConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.LightstepConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.LightstepConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.LightstepConfig$PropagationMode", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.OpenTelemetryConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.OpenTelemetryConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.TraceServiceConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.TraceServiceConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.Tracing", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.Tracing$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.Tracing$Http", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, 
+ { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.Tracing$Http$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.ZipkinConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.ZipkinConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.config.trace.v3.ZipkinConfig$CollectorEndpointVersion", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.AccessLogCommon", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.AccessLogCommon$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.AccessLogType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.ConnectionProperties", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.ConnectionProperties$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.HTTPAccessLogEntry", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.HTTPAccessLogEntry$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.HTTPAccessLogEntry$HTTPVersion", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.HTTPRequestProperties", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.HTTPRequestProperties$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { 
"name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.HTTPResponseProperties", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.HTTPResponseProperties$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.ResponseFlags", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.ResponseFlags$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.ResponseFlags$Unauthorized", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.ResponseFlags$Unauthorized$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.ResponseFlags$Unauthorized$Reason", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.TCPAccessLogEntry", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.TCPAccessLogEntry$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.TLSProperties", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.TLSProperties$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.TLSProperties$CertificateProperties", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.TLSProperties$CertificateProperties$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.TLSProperties$CertificateProperties$SubjectAltName", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.TLSProperties$CertificateProperties$SubjectAltName$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.data.accesslog.v3.TLSProperties$TLSVersion", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.clusters.aggregate.v3.AggregateClusterResource", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.clusters.aggregate.v3.AggregateClusterResource$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.clusters.aggregate.v3.ClusterConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.clusters.aggregate.v3.ClusterConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultDelay", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultDelay$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultDelay$FaultDelayType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultDelay$HeaderDelay", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultDelay$HeaderDelay$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultRateLimit", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultRateLimit$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { 
"name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultRateLimit$FixedLimit", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultRateLimit$FixedLimit$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultRateLimit$HeaderLimit", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultRateLimit$HeaderLimit$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.fault.v3.FaultAbort", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.fault.v3.FaultAbort$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.fault.v3.FaultAbort$HeaderAbort", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, 
"queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.fault.v3.FaultAbort$HeaderAbort$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.fault.v3.HTTPFault", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.fault.v3.HTTPFault$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.gcp_authn.v3.Audience", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.gcp_authn.v3.Audience$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.gcp_authn.v3.GcpAuthnFilterConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.gcp_authn.v3.GcpAuthnFilterConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.gcp_authn.v3.TokenCacheConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.gcp_authn.v3.TokenCacheConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.gcp_authn.v3.TokenHeader", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.gcp_authn.v3.TokenHeader$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings$BucketIdBuilder", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings$BucketIdBuilder$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings$BucketIdBuilder$ValueBuilder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings$BucketIdBuilder$ValueBuilder$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings$DenyResponseSettings", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings$DenyResponseSettings$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings$ExpiredAssignmentBehavior", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings$ExpiredAssignmentBehavior$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings$ExpiredAssignmentBehavior$ReuseLastAssignment", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings$ExpiredAssignmentBehavior$ReuseLastAssignment$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings$NoAssignmentBehavior", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, 
"queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings$NoAssignmentBehavior$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaFilterConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaFilterConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaOverride", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaOverride$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": 
true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBACPerRoute", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBACPerRoute$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.router.v3.Router", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.router.v3.Router$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.router.v3.Router$UpstreamAccessLogOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.http.router.v3.Router$UpstreamAccessLogOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$CodecType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$ForwardClientCertDetails", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$HcmAccessLogOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$HcmAccessLogOptions$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$InternalAddressConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$InternalAddressConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$PathNormalizationOptions", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$PathNormalizationOptions$Builder", "queryAllDeclaredConstructors": 
true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$PathWithEscapedSlashesAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$ProxyStatusConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$ProxyStatusConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$ServerHeaderTransformation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$SetCurrentClientCertDetails", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$SetCurrentClientCertDetails$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$Tracing", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$Tracing$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$Tracing$OperationName", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$UpgradeConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager$UpgradeConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.Rds", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.Rds$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes$ScopeKeyBuilder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes$ScopeKeyBuilder$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes$ScopeKeyBuilder$FragmentBuilder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes$ScopeKeyBuilder$FragmentBuilder$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes$ScopeKeyBuilder$FragmentBuilder$HeaderValueExtractor", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes$ScopeKeyBuilder$FragmentBuilder$HeaderValueExtractor$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes$ScopeKeyBuilder$FragmentBuilder$HeaderValueExtractor$KvElement", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes$ScopeKeyBuilder$FragmentBuilder$HeaderValueExtractor$KvElement$Builder", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig$Builder", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig$LocalityWeightedLbConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig$LocalityWeightedLbConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig$ZoneAwareLbConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig$ZoneAwareLbConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig$ZoneAwareLbConfig$ForceLocalZone", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig$ZoneAwareLbConfig$ForceLocalZone$Builder", "queryAllDeclaredConstructors": 
true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest$SelectionMethod", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash$HashFunction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.http_11_proxy.v3.Http11ProxyUpstreamTransport", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.http_11_proxy.v3.Http11ProxyUpstreamTransport$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, 
"queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext$SystemRootCerts", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext$SystemRootCerts$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext$TrustChainVerification", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CommonTlsContext", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CommonTlsContext$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CommonTlsContext$CertificateProvider", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CommonTlsContext$CertificateProvider$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CommonTlsContext$CertificateProviderInstance", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CommonTlsContext$CertificateProviderInstance$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CommonTlsContext$CombinedCertificateValidationContext", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CommonTlsContext$CombinedCertificateValidationContext$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + 
{ "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext$OcspStaplePolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.GenericSecret", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.GenericSecret$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": 
true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.Secret", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.Secret$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher$SanType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.TlsCertificate", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.TlsCertificate$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.TlsKeyLog", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.TlsKeyLog$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.TlsParameters", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.TlsParameters$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.TlsParameters$CompliancePolicy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.TlsParameters$TlsProtocol", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.AdsDummy", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.AdsDummy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DeltaDiscoveryRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DeltaDiscoveryRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DeltaDiscoveryResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DeltaDiscoveryResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DiscoveryRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DiscoveryRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DiscoveryResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DiscoveryResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DynamicParameterConstraints", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DynamicParameterConstraints$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DynamicParameterConstraints$ConstraintList", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DynamicParameterConstraints$ConstraintList$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, 
"allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DynamicParameterConstraints$SingleConstraint", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DynamicParameterConstraints$SingleConstraint$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DynamicParameterConstraints$SingleConstraint$Exists", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.DynamicParameterConstraints$SingleConstraint$Exists$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.Resource", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.Resource$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.Resource$CacheControl", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.Resource$CacheControl$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.ResourceError", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.ResourceError$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.ResourceLocator", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.ResourceLocator$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.ResourceName", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.discovery.v3.ResourceName$Builder", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.load_stats.v3.LoadStatsRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.load_stats.v3.LoadStatsRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.BucketId", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.BucketId$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaResponse", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaResponse$BucketAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaResponse$BucketAction$AbandonAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaResponse$BucketAction$AbandonAction$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaResponse$BucketAction$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaResponse$BucketAction$QuotaAssignmentAction", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaResponse$BucketAction$QuotaAssignmentAction$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaUsageReports", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaUsageReports$BucketQuotaUsage", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaUsageReports$BucketQuotaUsage$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaUsageReports$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.status.v3.ClientConfig", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.status.v3.ClientConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.status.v3.ClientConfig$GenericXdsConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.status.v3.ClientConfig$GenericXdsConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.status.v3.ClientConfigStatus", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.status.v3.ClientStatusRequest", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.status.v3.ClientStatusRequest$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.status.v3.ClientStatusResponse", "queryAllDeclaredConstructors": 
true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.status.v3.ClientStatusResponse$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.status.v3.ConfigStatus", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.status.v3.PerXdsConfig", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.service.status.v3.PerXdsConfig$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.http.v3.PathTransformation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.http.v3.PathTransformation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.http.v3.PathTransformation$Operation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": 
true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.http.v3.PathTransformation$Operation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.http.v3.PathTransformation$Operation$MergeSlashes", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.http.v3.PathTransformation$Operation$MergeSlashes$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.http.v3.PathTransformation$Operation$NormalizePathRFC3986", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.http.v3.PathTransformation$Operation$NormalizePathRFC3986$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.AddressMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.AddressMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.DoubleMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.DoubleMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.FilterStateMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.FilterStateMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.HttpRequestHeaderMatchInput", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.HttpRequestHeaderMatchInput$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.HttpRequestQueryParamMatchInput", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.HttpRequestQueryParamMatchInput$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.HttpRequestTrailerMatchInput", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.HttpRequestTrailerMatchInput$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.HttpResponseHeaderMatchInput", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.HttpResponseHeaderMatchInput$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.HttpResponseTrailerMatchInput", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.HttpResponseTrailerMatchInput$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.ListMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.ListMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.ListStringMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.ListStringMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.MetadataMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.MetadataMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, 
"allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.MetadataMatcher$PathSegment", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.MetadataMatcher$PathSegment$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.NodeMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.NodeMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.OrMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.OrMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.PathMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.PathMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.RegexMatchAndSubstitute", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.RegexMatchAndSubstitute$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.RegexMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.RegexMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.RegexMatcher$GoogleRE2", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.RegexMatcher$GoogleRE2$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.StringMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.StringMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.StructMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.StructMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.StructMatcher$PathSegment", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.StructMatcher$PathSegment$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.ValueMatcher", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.ValueMatcher$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.ValueMatcher$NullMatch", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.matcher.v3.ValueMatcher$NullMatch$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKey", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKey$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKey$PathSegment", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKey$PathSegment$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKind", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKind$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKind$Cluster", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKind$Cluster$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKind$Host", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKind$Host$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKind$Request", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKind$Request$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKind$Route", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.metadata.v3.MetadataKind$Route$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.tracing.v3.CustomTag", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.tracing.v3.CustomTag$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.tracing.v3.CustomTag$Environment", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.tracing.v3.CustomTag$Environment$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.type.tracing.v3.CustomTag$Header", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.tracing.v3.CustomTag$Header$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.tracing.v3.CustomTag$Literal", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.tracing.v3.CustomTag$Literal$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.tracing.v3.CustomTag$Metadata", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.tracing.v3.CustomTag$Metadata$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.CodecClientType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.DoubleRange", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.DoubleRange$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.FractionalPercent", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.FractionalPercent$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.FractionalPercent$DenominatorType", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.HttpStatus", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.HttpStatus$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.Int32Range", "queryAllDeclaredConstructors": true, 
"queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.Int32Range$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.Int64Range", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.Int64Range$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.Percent", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.Percent$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.RateLimitStrategy", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.RateLimitStrategy$BlanketRule", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": 
true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.RateLimitStrategy$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.RateLimitStrategy$RequestsPerTimeUnit", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.RateLimitStrategy$RequestsPerTimeUnit$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.RateLimitUnit", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.SemanticVersion", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.SemanticVersion$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.StatusCode", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.TokenBucket", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.envoy.type.v3.TokenBucket$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$AnyRules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$AnyRules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$BoolRules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$BoolRules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$BytesRules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$BytesRules$Builder", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$DoubleRules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$DoubleRules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$DurationRules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$DurationRules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$EnumRules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$EnumRules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$FieldRules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, 
"queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$FieldRules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$Fixed32Rules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$Fixed32Rules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$Fixed64Rules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$Fixed64Rules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$FloatRules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$FloatRules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$Int32Rules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$Int32Rules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$Int64Rules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$Int64Rules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$KnownRegex", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$MapRules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$MapRules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$MessageRules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$MessageRules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$RepeatedRules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$RepeatedRules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$SFixed32Rules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$SFixed32Rules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$SFixed64Rules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$SFixed64Rules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$SInt32Rules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$SInt32Rules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$SInt64Rules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$SInt64Rules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$StringRules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$StringRules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$TimestampRules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$TimestampRules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$UInt32Rules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$UInt32Rules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$UInt64Rules", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.io.envoyproxy.pgv.validate.Validate$UInt64Rules$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.udpa.annotations.Migrate$FieldMigrateAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.udpa.annotations.Migrate$FieldMigrateAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.udpa.annotations.Migrate$FileMigrateAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.udpa.annotations.Migrate$FileMigrateAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.udpa.annotations.Migrate$MigrateAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.udpa.annotations.Migrate$MigrateAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.udpa.annotations.Security$FieldSecurityAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.udpa.annotations.Security$FieldSecurityAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.udpa.annotations.Status$PackageVersionStatus", 
"queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.udpa.annotations.Status$StatusAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.udpa.annotations.Status$StatusAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.udpa.annotations.Versioning$VersioningAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.udpa.annotations.Versioning$VersioningAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Migrate$FieldMigrateAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Migrate$FieldMigrateAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Migrate$FileMigrateAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, 
"queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Migrate$FileMigrateAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Migrate$MigrateAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Migrate$MigrateAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Security$FieldSecurityAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Security$FieldSecurityAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Status$FieldStatusAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Status$FieldStatusAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Status$FileStatusAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Status$FileStatusAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Status$MessageStatusAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Status$MessageStatusAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Status$PackageVersionStatus", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Status$StatusAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Status$StatusAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": 
"io.grpc.xds.shaded.xds.annotations.v3.Versioning$VersioningAnnotation", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true }, + { "name": "io.grpc.xds.shaded.xds.annotations.v3.Versioning$VersioningAnnotation$Builder", "queryAllDeclaredConstructors": true, "queryAllPublicConstructors": true, "queryAllDeclaredMethods": true, "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true } +] diff --git a/java-storage/google-cloud-storage/src/test/resources/META-INF/native-image/com/google/cloud/storage/resource-config.json b/java-storage/google-cloud-storage/src/test/resources/META-INF/native-image/com/google/cloud/storage/resource-config.json new file mode 100644 index 000000000000..b9ddec8abf87 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/resources/META-INF/native-image/com/google/cloud/storage/resource-config.json @@ -0,0 +1,28 @@ +{ + "resources":{ + "includes":[ + {"pattern": ".*.txt"}, + { + "pattern":"\\QMETA-INF/maven/com.google.cloud/google-cloud-storage/pom.properties\\E" + }, { + "pattern":"\\QMETA-INF/native/libconscrypt_openjdk_jni-linux-x86_64.so\\E" + }, { + "pattern":"\\QMETA-INF/native/libio_grpc_netty_shaded_netty_tcnative_linux_x86_64.so\\E" + }, { + "pattern":"\\QMETA-INF/native/libio_grpc_netty_shaded_netty_transport_native_epoll_x86_64.so\\E" + }, { + "pattern":"\\QMETA-INF/services/io.grpc.LoadBalancerProvider\\E" + }, { + "pattern":"\\QMETA-INF/services/io.grpc.ManagedChannelProvider\\E" + }, { + "pattern":"\\QMETA-INF/services/io.grpc.NameResolverProvider\\E" + }, { + "pattern":"\\QMETA-INF/services/io.grpc.xds.XdsCredentialsProvider\\E" + }, { + "pattern":"\\Qcom/google/api/client/http/google-http-client.properties\\E" + }, { + "pattern":"\\Qdependencies.properties\\E" + }, { + "pattern":"\\Qorg/conscrypt/conscrypt.properties\\E" + }]} +} diff --git 
a/java-storage/google-cloud-storage/src/test/resources/META-INF/services/net.jqwik.api.providers.ArbitraryProvider b/java-storage/google-cloud-storage/src/test/resources/META-INF/services/net.jqwik.api.providers.ArbitraryProvider new file mode 100644 index 000000000000..a0c9abf011b6 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/resources/META-INF/services/net.jqwik.api.providers.ArbitraryProvider @@ -0,0 +1,19 @@ +# +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +com.google.cloud.storage.jqwik.ObjectArbitraryProvider +com.google.cloud.storage.jqwik.BucketArbitraryProvider +com.google.cloud.storage.jqwik.IamPolicyArbitraryProvider diff --git a/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/blobReadChannel.ser.properties b/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/blobReadChannel.ser.properties new file mode 100644 index 000000000000..c9d3dc5ff813 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/blobReadChannel.ser.properties @@ -0,0 +1,70 @@ +# +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Base 64 Encoded bytes of a BlobReadChannel circa v2.16.0 +# Generated using the following snippet: +# +# Storage s = StorageOptions.http() +# .setProjectId("proj") +# .setCredentials(NoCredentials.getInstance()) +# .build() +# .getService(); +# +# ReadChannel reader = s.reader(BlobId.of("buck", "obj", 1L)); +# RestorableState capture = reader.capture(); +# +# ByteArrayOutputStream baos = new ByteArrayOutputStream(); +# try (ObjectOutputStream oos = new ObjectOutputStream(baos)) { +# oos.writeObject(capture); +# } +# +# byte[] bytes = baos.toByteArray(); +# String b64Ser = Base64.getEncoder().encodeToString(bytes); +# +# System.out.println("b64Ser = " + b64Ser); +# +b64bytes=\ + rO0ABXNyADJjb20uZ29vZ2xlLmNsb3VkLnN0b3JhZ2UuQmxvYlJlYWRDaGFubmVsJFN0YXRlSW1wbGwJWjOFWbi1AgAJSQAJY2h1bmtTaXplWgALZW5kT2ZTdHJlYW1a\ + AAZpc09wZW5KAAVsaW1pdEoACHBvc2l0aW9uTAAEYmxvYnQAIUxjb20vZ29vZ2xlL2Nsb3VkL3N0b3JhZ2UvQmxvYklkO0wACGxhc3RFdGFndAASTGphdmEvbGFuZy9T\ + dHJpbmc7TAAOcmVxdWVzdE9wdGlvbnN0AA9MamF2YS91dGlsL01hcDtMAA5zZXJ2aWNlT3B0aW9uc3QALUxjb20vZ29vZ2xlL2Nsb3VkL3N0b3JhZ2UvSHR0cFN0b3Jh\ + Z2VPcHRpb25zO3hwACAAAAABf/////////8AAAAAAAAAAHNyAB9jb20uZ29vZ2xlLmNsb3VkLnN0b3JhZ2UuQmxvYklkcdHeVjWP2d0CAANMAAZidWNrZXRxAH4AAkwA\ + CmdlbmVyYXRpb250ABBMamF2YS9sYW5nL0xvbmc7TAAEbmFtZXEAfgACeHB0AARidWNrc3IADmphdmEubGFuZy5Mb25nO4vkkMyPI98CAAFKAAV2YWx1ZXhyABBqYXZh\ + LmxhbmcuTnVtYmVyhqyVHQuU4IsCAAB4cAAAAAAAAAABdAADb2JqcHNyADVjb20uZ29vZ2xlLmNvbW1vbi5jb2xsZWN0LkltbXV0YWJsZU1hcCRTZXJpYWxpemVkRm9y\ + 
bQAAAAAAAAAAAgACTAAEa2V5c3QAEkxqYXZhL2xhbmcvT2JqZWN0O0wABnZhbHVlc3EAfgAPeHB1cgATW0xqYXZhLmxhbmcuT2JqZWN0O5DOWJ8QcylsAgAAeHAAAAAA\ + dXEAfgARAAAAAHNyACtjb20uZ29vZ2xlLmNsb3VkLnN0b3JhZ2UuSHR0cFN0b3JhZ2VPcHRpb25ztmk+4Fw7cvMCAAFMABVyZXRyeUFsZ29yaXRobU1hbmFnZXJ0ADRM\ + Y29tL2dvb2dsZS9jbG91ZC9zdG9yYWdlL0h0dHBSZXRyeUFsZ29yaXRobU1hbmFnZXI7eHIAJ2NvbS5nb29nbGUuY2xvdWQuc3RvcmFnZS5TdG9yYWdlT3B0aW9uc5q/\ + 8jOW5d5PAgAAeHIAH2NvbS5nb29nbGUuY2xvdWQuU2VydmljZU9wdGlvbnN/qQsz9VFyfgIAC0wADmNsaWVudExpYlRva2VucQB+AAJMAAVjbG9ja3QAHkxjb20vZ29v\ + Z2xlL2FwaS9jb3JlL0FwaUNsb2NrO0wAC2NyZWRlbnRpYWxzdAAdTGNvbS9nb29nbGUvYXV0aC9DcmVkZW50aWFscztMAA5oZWFkZXJQcm92aWRlcnQAJ0xjb20vZ29v\ + Z2xlL2FwaS9nYXgvcnBjL0hlYWRlclByb3ZpZGVyO0wABGhvc3RxAH4AAkwACXByb2plY3RJZHEAfgACTAAOcXVvdGFQcm9qZWN0SWRxAH4AAkwADXJldHJ5U2V0dGlu\ + Z3N0ACtMY29tL2dvb2dsZS9hcGkvZ2F4L3JldHJ5aW5nL1JldHJ5U2V0dGluZ3M7TAAXc2VydmljZUZhY3RvcnlDbGFzc05hbWVxAH4AAkwAGnNlcnZpY2VScGNGYWN0\ + b3J5Q2xhc3NOYW1lcQB+AAJMABB0cmFuc3BvcnRPcHRpb25zdAAjTGNvbS9nb29nbGUvY2xvdWQvVHJhbnNwb3J0T3B0aW9uczt4cHQABGdjY2xzcgAmY29tLmdvb2ds\ + ZS5hcGkuY29yZS5DdXJyZW50TWlsbGlzQ2xvY2usd0sHJ9YTCwIAAHhwc3IAHmNvbS5nb29nbGUuY2xvdWQuTm9DcmVkZW50aWFsc6kR5wOeLAxAAgAAeHIAKGNvbS5n\ + b29nbGUuYXV0aC5vYXV0aDIuT0F1dGgyQ3JlZGVudGlhbHM/PX166aVRVwIABEwAEGV4cGlyYXRpb25NYXJnaW50ABRMamF2YS90aW1lL0R1cmF0aW9uO0wABGxvY2tx\ + AH4AD0wADXJlZnJlc2hNYXJnaW5xAH4AI0wABXZhbHVldAA1TGNvbS9nb29nbGUvYXV0aC9vYXV0aDIvT0F1dGgyQ3JlZGVudGlhbHMkT0F1dGhWYWx1ZTt4cgAbY29t\ + Lmdvb2dsZS5hdXRoLkNyZWRlbnRpYWxzCzii14w9kIECAAB4cHNyAA1qYXZhLnRpbWUuU2VylV2EuhsiSLIMAAB4cHcNAQAAAAAAAAEsAAAAAHh1cgACW0Ks8xf4BghU\ + 4AIAAHhwAAAAAHNxAH4AJ3cNAQAAAAAAAAFoAAAAAHhwc3IAJ2NvbS5nb29nbGUuYXBpLmdheC5ycGMuTm9IZWFkZXJQcm92aWRlcmWjEqhqxXthAgAAeHB0AB5odHRw\ + czovL3N0b3JhZ2UuZ29vZ2xlYXBpcy5jb210AARwcm9qcHNyADNjb20uZ29vZ2xlLmFwaS5nYXgucmV0cnlpbmcuQXV0b1ZhbHVlX1JldHJ5U2V0dGluZ3Nym/9/a0d0\ + swIACVoACGppdHRlcmVkSQALbWF4QXR0ZW1wdHNEABRyZXRyeURlbGF5TXVsdGlwbGllckQAFHJwY1RpbWVvdXRNdWx0aXBsaWVyTAARaW5pdGlhbFJldHJ5RGVsYXl0\ + 
ABpMb3JnL3RocmVldGVuL2JwL0R1cmF0aW9uO0wAEWluaXRpYWxScGNUaW1lb3V0cQB+ADFMAA1tYXhSZXRyeURlbGF5cQB+ADFMAA1tYXhScGNUaW1lb3V0cQB+ADFM\ + AAx0b3RhbFRpbWVvdXRxAH4AMXhyACljb20uZ29vZ2xlLmFwaS5nYXgucmV0cnlpbmcuUmV0cnlTZXR0aW5nc3Kb/39rR3SzAgAAeHABAAAABkAAAAAAAAAAP/AAAAAA\ + AABzcgATb3JnLnRocmVldGVuLmJwLlNlcpVdhLobIkiyDAAAeHB3DQEAAAAAAAAAAQAAAAB4c3EAfgA0dw0BAAAAAAAAADIAAAAAeHNxAH4ANHcNAQAAAAAAAAAgAAAA\ + AHhzcQB+ADR3DQEAAAAAAAAAMgAAAAB4c3EAfgA0dw0BAAAAAAAAADIAAAAAeHQAPmNvbS5nb29nbGUuY2xvdWQuc3RvcmFnZS5IdHRwU3RvcmFnZU9wdGlvbnMkSHR0\ + cFN0b3JhZ2VGYWN0b3J5dABBY29tLmdvb2dsZS5jbG91ZC5zdG9yYWdlLkh0dHBTdG9yYWdlT3B0aW9ucyRIdHRwU3RvcmFnZVJwY0ZhY3RvcnlzcgAqY29tLmdvb2ds\ + ZS5jbG91ZC5odHRwLkh0dHBUcmFuc3BvcnRPcHRpb25zbX9UTb2H/yICAANJAA5jb25uZWN0VGltZW91dEkAC3JlYWRUaW1lb3V0TAAdaHR0cFRyYW5zcG9ydEZhY3Rv\ + cnlDbGFzc05hbWVxAH4AAnhw//////////90AEZjb20uZ29vZ2xlLmNsb3VkLmh0dHAuSHR0cFRyYW5zcG9ydE9wdGlvbnMkRGVmYXVsdEh0dHBUcmFuc3BvcnRGYWN0\ + b3J5c3IAMmNvbS5nb29nbGUuY2xvdWQuc3RvcmFnZS5IdHRwUmV0cnlBbGdvcml0aG1NYW5hZ2Vy0i1ymVA0mEUCAAFMAA1yZXRyeVN0cmF0ZWd5dAAvTGNvbS9nb29n\ + bGUvY2xvdWQvc3RvcmFnZS9TdG9yYWdlUmV0cnlTdHJhdGVneTt4cHNyADRjb20uZ29vZ2xlLmNsb3VkLnN0b3JhZ2UuRGVmYXVsdFN0b3JhZ2VSZXRyeVN0cmF0ZWd5\ + bgaLnarjlYkCAAB4cA== diff --git a/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/blobWriteChannel.ser.properties b/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/blobWriteChannel.ser.properties new file mode 100644 index 000000000000..d60cb1749950 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/blobWriteChannel.ser.properties @@ -0,0 +1,80 @@ +# +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Base 64 Encoded bytes of a BlobReadChannel circa v2.16.0 +# Generated using the following snippet: +# +# Storage s = StorageOptions.http() +# .setProjectId("proj") +# .setCredentials(NoCredentials.getInstance()) +# .build() +# .getService(); +# +# WriteChannel reader = s.writer(BlobInfo.newBuilder("buck", "obj").build(), BlobWriteOption.doesNotExist()); +# RestorableState capture = reader.capture(); +# +# ByteArrayOutputStream baos = new ByteArrayOutputStream(); +# try (ObjectOutputStream oos = new ObjectOutputStream(baos)) { +# oos.writeObject(capture); +# } +# +# byte[] bytes = baos.toByteArray(); +# String b64Ser = Base64.getEncoder().encodeToString(bytes); +# +# System.out.println("b64Ser = " + b64Ser); +# +b64bytes=\ + rO0ABXNyADNjb20uZ29vZ2xlLmNsb3VkLnN0b3JhZ2UuQmxvYldyaXRlQ2hhbm5lbCRTdGF0ZUltcGyjA3jVYuVZZQIAAUwAEWFsZ29yaXRobUZvcldyaXRldAAyTGNv\ + bS9nb29nbGUvYXBpL2dheC9yZXRyeWluZy9SZXN1bHRSZXRyeUFsZ29yaXRobTt4cgArY29tLmdvb2dsZS5jbG91ZC5CYXNlV3JpdGVDaGFubmVsJEJhc2VTdGF0ZXaH\ + 8w86CHBzAgAHSQAJY2h1bmtTaXplWgAGaXNPcGVuSgAIcG9zaXRpb25bAAZidWZmZXJ0AAJbQkwABmVudGl0eXQAFkxqYXZhL2lvL1NlcmlhbGl6YWJsZTtMAA5zZXJ2\ + aWNlT3B0aW9uc3QAIUxjb20vZ29vZ2xlL2Nsb3VkL1NlcnZpY2VPcHRpb25zO0wACHVwbG9hZElkdAASTGphdmEvbGFuZy9TdHJpbmc7eHAA8AAAAQAAAAAAAAAAdXIA\ + AltCrPMX+AYIVOACAAB4cAAAAABwc3IAK2NvbS5nb29nbGUuY2xvdWQuc3RvcmFnZS5IdHRwU3RvcmFnZU9wdGlvbnO2aT7gXDty8wIAAUwAFXJldHJ5QWxnb3JpdGht\ + TWFuYWdlcnQANExjb20vZ29vZ2xlL2Nsb3VkL3N0b3JhZ2UvSHR0cFJldHJ5QWxnb3JpdGhtTWFuYWdlcjt4cgAnY29tLmdvb2dsZS5jbG91ZC5zdG9yYWdlLlN0b3Jh\ + 
Z2VPcHRpb25zmr/yM5bl3k8CAAB4cgAfY29tLmdvb2dsZS5jbG91ZC5TZXJ2aWNlT3B0aW9uc3+pCzP1UXJ+AgALTAAOY2xpZW50TGliVG9rZW5xAH4ABkwABWNsb2Nr\ + dAAeTGNvbS9nb29nbGUvYXBpL2NvcmUvQXBpQ2xvY2s7TAALY3JlZGVudGlhbHN0AB1MY29tL2dvb2dsZS9hdXRoL0NyZWRlbnRpYWxzO0wADmhlYWRlclByb3ZpZGVy\ + dAAnTGNvbS9nb29nbGUvYXBpL2dheC9ycGMvSGVhZGVyUHJvdmlkZXI7TAAEaG9zdHEAfgAGTAAJcHJvamVjdElkcQB+AAZMAA5xdW90YVByb2plY3RJZHEAfgAGTAAN\ + cmV0cnlTZXR0aW5nc3QAK0xjb20vZ29vZ2xlL2FwaS9nYXgvcmV0cnlpbmcvUmV0cnlTZXR0aW5ncztMABdzZXJ2aWNlRmFjdG9yeUNsYXNzTmFtZXEAfgAGTAAac2Vy\ + dmljZVJwY0ZhY3RvcnlDbGFzc05hbWVxAH4ABkwAEHRyYW5zcG9ydE9wdGlvbnN0ACNMY29tL2dvb2dsZS9jbG91ZC9UcmFuc3BvcnRPcHRpb25zO3hwdAAEZ2NjbHNy\ + ACZjb20uZ29vZ2xlLmFwaS5jb3JlLkN1cnJlbnRNaWxsaXNDbG9ja6x3Swcn1hMLAgAAeHBzcgAeY29tLmdvb2dsZS5jbG91ZC5Ob0NyZWRlbnRpYWxzqRHnA54sDEAC\ + AAB4cgAoY29tLmdvb2dsZS5hdXRoLm9hdXRoMi5PQXV0aDJDcmVkZW50aWFscz89fXrppVFXAgAETAAQZXhwaXJhdGlvbk1hcmdpbnQAFExqYXZhL3RpbWUvRHVyYXRp\ + b247TAAEbG9ja3QAEkxqYXZhL2xhbmcvT2JqZWN0O0wADXJlZnJlc2hNYXJnaW5xAH4AGUwABXZhbHVldAA1TGNvbS9nb29nbGUvYXV0aC9vYXV0aDIvT0F1dGgyQ3Jl\ + ZGVudGlhbHMkT0F1dGhWYWx1ZTt4cgAbY29tLmdvb2dsZS5hdXRoLkNyZWRlbnRpYWxzCzii14w9kIECAAB4cHNyAA1qYXZhLnRpbWUuU2VylV2EuhsiSLIMAAB4cHcN\ + AQAAAAAAAAEsAAAAAHh1cQB+AAgAAAAAc3EAfgAedw0BAAAAAAAAAWgAAAAAeHBzcgAnY29tLmdvb2dsZS5hcGkuZ2F4LnJwYy5Ob0hlYWRlclByb3ZpZGVyZaMSqGrF\ + e2ECAAB4cHQAFWh0dHA6Ly9sb2NhbGhvc3Q6OTAwMHQABHByb2pwc3IAM2NvbS5nb29nbGUuYXBpLmdheC5yZXRyeWluZy5BdXRvVmFsdWVfUmV0cnlTZXR0aW5nc3Kb\ + /39rR3SzAgAJWgAIaml0dGVyZWRJAAttYXhBdHRlbXB0c0QAFHJldHJ5RGVsYXlNdWx0aXBsaWVyRAAUcnBjVGltZW91dE11bHRpcGxpZXJMABFpbml0aWFsUmV0cnlE\ + ZWxheXQAGkxvcmcvdGhyZWV0ZW4vYnAvRHVyYXRpb247TAARaW5pdGlhbFJwY1RpbWVvdXRxAH4AJ0wADW1heFJldHJ5RGVsYXlxAH4AJ0wADW1heFJwY1RpbWVvdXRx\ + AH4AJ0wADHRvdGFsVGltZW91dHEAfgAneHIAKWNvbS5nb29nbGUuYXBpLmdheC5yZXRyeWluZy5SZXRyeVNldHRpbmdzcpv/f2tHdLMCAAB4cAEAAAAGQAAAAAAAAAA/\ + 8AAAAAAAAHNyABNvcmcudGhyZWV0ZW4uYnAuU2VylV2EuhsiSLIMAAB4cHcNAQAAAAAAAAABAAAAAHhzcQB+ACp3DQEAAAAAAAAAMgAAAAB4c3EAfgAqdw0BAAAAAAAA\ + 
ACAAAAAAeHNxAH4AKncNAQAAAAAAAAAyAAAAAHhzcQB+ACp3DQEAAAAAAAAAMgAAAAB4dAA+Y29tLmdvb2dsZS5jbG91ZC5zdG9yYWdlLkh0dHBTdG9yYWdlT3B0aW9u\ + cyRIdHRwU3RvcmFnZUZhY3Rvcnl0AEFjb20uZ29vZ2xlLmNsb3VkLnN0b3JhZ2UuSHR0cFN0b3JhZ2VPcHRpb25zJEh0dHBTdG9yYWdlUnBjRmFjdG9yeXNyACpjb20u\ + Z29vZ2xlLmNsb3VkLmh0dHAuSHR0cFRyYW5zcG9ydE9wdGlvbnNtf1RNvYf/IgIAA0kADmNvbm5lY3RUaW1lb3V0SQALcmVhZFRpbWVvdXRMAB1odHRwVHJhbnNwb3J0\ + RmFjdG9yeUNsYXNzTmFtZXEAfgAGeHD//////////3QARmNvbS5nb29nbGUuY2xvdWQuaHR0cC5IdHRwVHJhbnNwb3J0T3B0aW9ucyREZWZhdWx0SHR0cFRyYW5zcG9y\ + dEZhY3RvcnlzcgAyY29tLmdvb2dsZS5jbG91ZC5zdG9yYWdlLkh0dHBSZXRyeUFsZ29yaXRobU1hbmFnZXLSLXKZUDSYRQIAAUwADXJldHJ5U3RyYXRlZ3l0AC9MY29t\ + L2dvb2dsZS9jbG91ZC9zdG9yYWdlL1N0b3JhZ2VSZXRyeVN0cmF0ZWd5O3hwc3IANGNvbS5nb29nbGUuY2xvdWQuc3RvcmFnZS5EZWZhdWx0U3RvcmFnZVJldHJ5U3Ry\ + YXRlZ3luBoudquOViQIAAHhwdACQaHR0cDovL2xvY2FsaG9zdDo5MDAwL3VwbG9hZC9zdG9yYWdlL3YxL2IvYnVjay9vP3VwbG9hZFR5cGU9cmVzdW1hYmxlJnVwbG9h\ + ZF9pZD0xNzcyNzI1NDM5ZDEyZWUzNjNmZmRlNmNiZmNlYjEzMGYzZTIxMWJiM2NjMzBlNjFhNGQ2N2I2MTU0OTUxMjIxc3IAIWNvbS5nb29nbGUuY2xvdWQuRXhjZXB0\ + aW9uSGFuZGxlct3Z0AGsJj+JAgAETAAMaW50ZXJjZXB0b3JzdAApTGNvbS9nb29nbGUvY29tbW9uL2NvbGxlY3QvSW1tdXRhYmxlTGlzdDtMABZub25SZXRyaWFibGVF\ + eGNlcHRpb25zdAAoTGNvbS9nb29nbGUvY29tbW9uL2NvbGxlY3QvSW1tdXRhYmxlU2V0O0wAE3JldHJpYWJsZUV4Y2VwdGlvbnNxAH4APUwACXJldHJ5SW5mb3QAD0xq\ + YXZhL3V0aWwvU2V0O3hwc3IANmNvbS5nb29nbGUuY29tbW9uLmNvbGxlY3QuSW1tdXRhYmxlTGlzdCRTZXJpYWxpemVkRm9ybQAAAAAAAAAAAgABWwAIZWxlbWVudHN0\ + ABNbTGphdmEvbGFuZy9PYmplY3Q7eHB1cgATW0xqYXZhLmxhbmcuT2JqZWN0O5DOWJ8QcylsAgAAeHAAAAACc3IAWWNvbS5nb29nbGUuY2xvdWQuc3RvcmFnZS5EZWZh\ + dWx0U3RvcmFnZVJldHJ5U3RyYXRlZ3kkRW1wdHlKc29uUGFyc2luZ0V4Y2VwdGlvbkludGVyY2VwdG9yz+LSc1EB+RsCAAB4cHNyAERjb20uZ29vZ2xlLmNsb3VkLnN0\ + b3JhZ2UuRGVmYXVsdFN0b3JhZ2VSZXRyeVN0cmF0ZWd5JEludGVyY2VwdG9ySW1wbElTPf0EVOdoAgACWgAKaWRlbXBvdGVudEwAD3JldHJ5YWJsZUVycm9yc3EAfgA9\ + eHABc3IANWNvbS5nb29nbGUuY29tbW9uLmNvbGxlY3QuSW1tdXRhYmxlU2V0JFNlcmlhbGl6ZWRGb3JtAAAAAAAAAAACAAFbAAhlbGVtZW50c3EAfgBBeHB1cQB+AEMA\ + 
AAAIc3IAK2NvbS5nb29nbGUuY2xvdWQuQmFzZVNlcnZpY2VFeGNlcHRpb24kRXJyb3LIN4LqhDNMpwIAA1oACHJlamVjdGVkTAAEY29kZXQAE0xqYXZhL2xhbmcvSW50\ + ZWdlcjtMAAZyZWFzb25xAH4ABnhwAHNyABFqYXZhLmxhbmcuSW50ZWdlchLioKT3gYc4AgABSQAFdmFsdWV4cgAQamF2YS5sYW5nLk51bWJlcoaslR0LlOCLAgAAeHAA\ + AAH4cHNxAH4ATABzcQB+AE8AAAH3cHNxAH4ATABzcQB+AE8AAAH2cHNxAH4ATABzcQB+AE8AAAH0cHNxAH4ATABzcQB+AE8AAAGtcHNxAH4ATABzcQB+AE8AAAGYcHNx\ + AH4ATABwdAANaW50ZXJuYWxFcnJvcnNxAH4ATABwdAAbY29ubmVjdGlvbkNsb3NlZFByZW1hdHVyZWx5c3EAfgBJdXEAfgBDAAAAAHEAfgBgc3IAEWphdmEudXRpbC5I\ + YXNoU2V0ukSFlZa4tzQDAAB4cHcMAAAAED9AAAAAAAAAeA== diff --git a/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/composeRequest.ser.properties b/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/composeRequest.ser.properties new file mode 100644 index 000000000000..1396e5675068 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/composeRequest.ser.properties @@ -0,0 +1,67 @@ +# +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# Base 64 Encoded bytes of a BlobReadChannel circa v2.27.0 +# Generated using the following snippet: +# +# ComposeRequest.Builder b = ComposeRequest.newBuilder(); +# b.addSource("object1", 1L); +# b.addSource("object2", 2L); +# b.addSource("object3", 3L); +# b.addSource("object4", 4L); +# +# BlobInfo info = BlobInfo.newBuilder("buck", "comp").build(); +# b.setTarget(info); +# b.setTargetOptions(BlobTargetOption.doesNotExist()); +# +# ComposeRequest composeRequest = b.build(); +# +# ByteArrayOutputStream baos = new ByteArrayOutputStream(); +# try (ObjectOutputStream oos = new ObjectOutputStream(baos)) { +# oos.writeObject(composeRequest); +# } +# +# byte[] bytes = baos.toByteArray(); +# String b64bytes = Base64.getEncoder().encodeToString(bytes); +# +# System.out.println("b64bytes = " + b64bytes); +# +# +b64bytes = \ + rO0ABXNyAC9jb20uZ29vZ2xlLmNsb3VkLnN0b3JhZ2UuU3RvcmFnZSRDb21wb3NlUmVxdWVzdFvDRQIVwX+/AgADTAALc291cmNlQmxvYnN0ABBMamF2YS91dGlsL0x\ + pc3Q7TAAGdGFyZ2V0dAAjTGNvbS9nb29nbGUvY2xvdWQvc3RvcmFnZS9CbG9iSW5mbztMAA10YXJnZXRPcHRpb25zcQB+AAF4cHNyADZjb20uZ29vZ2xlLmNvbW1vbi\ + 5jb2xsZWN0LkltbXV0YWJsZUxpc3QkU2VyaWFsaXplZEZvcm0AAAAAAAAAAAIAAVsACGVsZW1lbnRzdAATW0xqYXZhL2xhbmcvT2JqZWN0O3hwdXIAE1tMamF2YS5sY\ + W5nLk9iamVjdDuQzlifEHMpbAIAAHhwAAAABHNyADpjb20uZ29vZ2xlLmNsb3VkLnN0b3JhZ2UuU3RvcmFnZSRDb21wb3NlUmVxdWVzdCRTb3VyY2VCbG9i/c/2djN5\ + tW4CAAJMAApnZW5lcmF0aW9udAAQTGphdmEvbGFuZy9Mb25nO0wABG5hbWV0ABJMamF2YS9sYW5nL1N0cmluZzt4cHNyAA5qYXZhLmxhbmcuTG9uZzuL5JDMjyPfAgA\ + BSgAFdmFsdWV4cgAQamF2YS5sYW5nLk51bWJlcoaslR0LlOCLAgAAeHAAAAAAAAAAAXQAB29iamVjdDFzcQB+AAlzcQB+AA0AAAAAAAAAAnQAB29iamVjdDJzcQB+AA\ + lzcQB+AA0AAAAAAAAAA3QAB29iamVjdDNzcQB+AAlzcQB+AA0AAAAAAAAABHQAB29iamVjdDRzcgAhY29tLmdvb2dsZS5jbG91ZC5zdG9yYWdlLkJsb2JJbmZv3XARn\ + 1ocC74CAB5aAAtpc0RpcmVjdG9yeUwAA2FjbHEAfgABTAAGYmxvYklkdAAhTGNvbS9nb29nbGUvY2xvdWQvc3RvcmFnZS9CbG9iSWQ7TAAMY2FjaGVDb250cm9scQB+\ + 
AAtMAA5jb21wb25lbnRDb3VudHQAE0xqYXZhL2xhbmcvSW50ZWdlcjtMABJjb250ZW50RGlzcG9zaXRpb25xAH4AC0wAD2NvbnRlbnRFbmNvZGluZ3EAfgALTAAPY29\ + udGVudExhbmd1YWdlcQB+AAtMAAtjb250ZW50VHlwZXEAfgALTAAGY3JjMzJjcQB+AAtMAApjcmVhdGVUaW1ldAAaTGphdmEvdGltZS9PZmZzZXREYXRlVGltZTtMAA\ + pjdXN0b21UaW1lcQB+AB1MABJjdXN0b21lckVuY3J5cHRpb250ADZMY29tL2dvb2dsZS9jbG91ZC9zdG9yYWdlL0Jsb2JJbmZvJEN1c3RvbWVyRW5jcnlwdGlvbjtMA\ + ApkZWxldGVUaW1lcQB+AB1MAARldGFncQB+AAtMAA5ldmVudEJhc2VkSG9sZHQAE0xqYXZhL2xhbmcvQm9vbGVhbjtMAAtnZW5lcmF0ZWRJZHEAfgALTAAKa21zS2V5\ + TmFtZXEAfgALTAADbWQ1cQB+AAtMAAltZWRpYUxpbmtxAH4AC0wACG1ldGFkYXRhdAAPTGphdmEvdXRpbC9NYXA7TAAObWV0YWdlbmVyYXRpb25xAH4ACkwABW93bmV\ + ydAAlTGNvbS9nb29nbGUvY2xvdWQvc3RvcmFnZS9BY2wkRW50aXR5O0wAF3JldGVudGlvbkV4cGlyYXRpb25UaW1lcQB+AB1MAAhzZWxmTGlua3EAfgALTAAEc2l6ZX\ + EAfgAKTAAMc3RvcmFnZUNsYXNzdAAnTGNvbS9nb29nbGUvY2xvdWQvc3RvcmFnZS9TdG9yYWdlQ2xhc3M7TAANdGVtcG9yYXJ5SG9sZHEAfgAfTAAXdGltZVN0b3JhZ\ + 2VDbGFzc1VwZGF0ZWRxAH4AHUwACnVwZGF0ZVRpbWVxAH4AHXhwAHBzcgAfY29tLmdvb2dsZS5jbG91ZC5zdG9yYWdlLkJsb2JJZHHR3lY1j9ndAgADTAAGYnVja2V0\ + cQB+AAtMAApnZW5lcmF0aW9ucQB+AApMAARuYW1lcQB+AAt4cHQABGJ1Y2twdAAEY29tcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHNxAH4ABHVxAH4ABwAAAAF\ + zcgAxY29tLmdvb2dsZS5jbG91ZC5zdG9yYWdlLlN0b3JhZ2UkQmxvYlRhcmdldE9wdGlvbrLpPC5jbC91AgAAeHIAH2NvbS5nb29nbGUuY2xvdWQuc3RvcmFnZS5PcH\ + Rpb26Wztg8QRDfQAIAAHhyAC9jb20uZ29vZ2xlLmNsb3VkLnN0b3JhZ2UuVW5pZmllZE9wdHMkT3B0aW9uU2hpbS9VbRFY0ia8AgABTAADb3B0dAAqTGNvbS9nb29nb\ + GUvY2xvdWQvc3RvcmFnZS9VbmlmaWVkT3B0cyRPcHQ7eHBzcgA0Y29tLmdvb2dsZS5jbG91ZC5zdG9yYWdlLlVuaWZpZWRPcHRzJEdlbmVyYXRpb25NYXRjaCS2w9uv\ + Zq0PAgAAeHIALmNvbS5nb29nbGUuY2xvdWQuc3RvcmFnZS5VbmlmaWVkT3B0cyRScGNPcHRWYWx/Q2Qd8xl6FAIAAkwAA2tleXQAM0xjb20vZ29vZ2xlL2Nsb3VkL3N\ + 0b3JhZ2Uvc3BpL3YxL1N0b3JhZ2VScGMkT3B0aW9uO0wAA3ZhbHQAEkxqYXZhL2xhbmcvT2JqZWN0O3hwfnIAMWNvbS5nb29nbGUuY2xvdWQuc3RvcmFnZS5zcGkudj\ + EuU3RvcmFnZVJwYyRPcHRpb24AAAAAAAAAABIAAHhyAA5qYXZhLmxhbmcuRW51bQAAAAAAAAAAEgAAeHB0ABNJRl9HRU5FUkFUSU9OX01BVENIc3EAfgANAAAAAAAAA\ + AA= diff --git 
a/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptor/golden/OUT_OF_RANGE.txt b/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptor/golden/OUT_OF_RANGE.txt new file mode 100644 index 000000000000..e4efe7aa9601 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptor/golden/OUT_OF_RANGE.txt @@ -0,0 +1,25 @@ +<<< status = { + code[4]=OUT_OF_RANGE +}, +trailers = { + grpc-status-details-bin[291]: google.rpc.Status{ + details { + type_url: type.googleapis.com/google.storage.v2.BidiReadObjectError + value: { + read_range_errors { + read_id: 3 + status { + code: 11 + } + } + } + } + details { + type_url: type.googleapis.com/google.rpc.DebugInfo + value: { + stack_entries: "read_object_spec { bucket: \"projects/_/buckets/b\" object: \"o\" generation: 1 } read_ranges { read_offset: 39 read_id: 3 }" + detail: "OUT_OF_RANGE read_offset = 39" + } + } + } +} \ No newline at end of file diff --git a/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/it/runner/registry/Dockerfile b/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/it/runner/registry/Dockerfile new file mode 100644 index 000000000000..dd1eaa38d2c7 --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/resources/com/google/cloud/storage/it/runner/registry/Dockerfile @@ -0,0 +1 @@ +FROM gcr.io/cloud-devrel-public-resources/storage-testbench:v0.60.0 diff --git a/java-storage/google-cloud-storage/src/test/resources/logback.xml b/java-storage/google-cloud-storage/src/test/resources/logback.xml new file mode 100644 index 000000000000..779e3112d3bf --- /dev/null +++ b/java-storage/google-cloud-storage/src/test/resources/logback.xml @@ -0,0 +1,95 @@ + + + + + true + + + + %date %-5.5level [%-24.24thread] %-45.45logger{45} - %message%n + + + + + %date 
%-5.5level [%-24.24thread] %-45.45logger{45} - %message%nopex%n + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/java-storage/grpc-google-cloud-storage-control-v2/clirr-ignored-differences.xml b/java-storage/grpc-google-cloud-storage-control-v2/clirr-ignored-differences.xml new file mode 100644 index 000000000000..a38fb38be169 --- /dev/null +++ b/java-storage/grpc-google-cloud-storage-control-v2/clirr-ignored-differences.xml @@ -0,0 +1,27 @@ + + + + + + + + 7012 + com/google/storage/control/v2/StorageControlGrpc$AsyncService + * *(*) + + diff --git a/java-storage/grpc-google-cloud-storage-control-v2/pom.xml b/java-storage/grpc-google-cloud-storage-control-v2/pom.xml new file mode 100644 index 000000000000..4195eb738f69 --- /dev/null +++ b/java-storage/grpc-google-cloud-storage-control-v2/pom.xml @@ -0,0 +1,73 @@ + + 4.0.0 + com.google.api.grpc + grpc-google-cloud-storage-control-v2 + 2.64.1-SNAPSHOT + grpc-google-cloud-storage-control-v2 + GRPC library for google-cloud-storage + + com.google.cloud + google-cloud-storage-parent + 2.64.1-SNAPSHOT + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api.grpc + proto-google-cloud-storage-control-v2 + + + com.google.guava + guava + + + com.google.api.grpc + proto-google-iam-v1 + + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + diff --git a/java-storage/grpc-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageControlGrpc.java b/java-storage/grpc-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageControlGrpc.java new file mode 100644 index 000000000000..c584ce15f72b --- /dev/null +++ 
b/java-storage/grpc-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageControlGrpc.java @@ -0,0 +1,4000 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.storage.control.v2; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *

+ * StorageControl service includes selected control plane operations.
+ * 
+ */ +@io.grpc.stub.annotations.GrpcGenerated +public final class StorageControlGrpc { + + private StorageControlGrpc() {} + + public static final java.lang.String SERVICE_NAME = "google.storage.control.v2.StorageControl"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.CreateFolderRequest, com.google.storage.control.v2.Folder> + getCreateFolderMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateFolder", + requestType = com.google.storage.control.v2.CreateFolderRequest.class, + responseType = com.google.storage.control.v2.Folder.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.CreateFolderRequest, com.google.storage.control.v2.Folder> + getCreateFolderMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.CreateFolderRequest, com.google.storage.control.v2.Folder> + getCreateFolderMethod; + if ((getCreateFolderMethod = StorageControlGrpc.getCreateFolderMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getCreateFolderMethod = StorageControlGrpc.getCreateFolderMethod) == null) { + StorageControlGrpc.getCreateFolderMethod = + getCreateFolderMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateFolder")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.CreateFolderRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.Folder.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("CreateFolder")) + .build(); + } + } + } + return getCreateFolderMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.DeleteFolderRequest, com.google.protobuf.Empty> + getDeleteFolderMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteFolder", + requestType = com.google.storage.control.v2.DeleteFolderRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.DeleteFolderRequest, com.google.protobuf.Empty> + getDeleteFolderMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.DeleteFolderRequest, com.google.protobuf.Empty> + getDeleteFolderMethod; + if ((getDeleteFolderMethod = StorageControlGrpc.getDeleteFolderMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getDeleteFolderMethod = StorageControlGrpc.getDeleteFolderMethod) == null) { + StorageControlGrpc.getDeleteFolderMethod = + getDeleteFolderMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "DeleteFolder")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.DeleteFolderRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("DeleteFolder")) + .build(); + } + } + } + return getDeleteFolderMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetFolderRequest, com.google.storage.control.v2.Folder> + getGetFolderMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetFolder", + requestType = com.google.storage.control.v2.GetFolderRequest.class, + responseType = com.google.storage.control.v2.Folder.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetFolderRequest, com.google.storage.control.v2.Folder> + getGetFolderMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetFolderRequest, com.google.storage.control.v2.Folder> + getGetFolderMethod; + if ((getGetFolderMethod = StorageControlGrpc.getGetFolderMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getGetFolderMethod = StorageControlGrpc.getGetFolderMethod) == null) { + StorageControlGrpc.getGetFolderMethod = + getGetFolderMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetFolder")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.GetFolderRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.Folder.getDefaultInstance())) + .setSchemaDescriptor(new StorageControlMethodDescriptorSupplier("GetFolder")) + .build(); + } + } + } + return getGetFolderMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.ListFoldersRequest, + com.google.storage.control.v2.ListFoldersResponse> + getListFoldersMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListFolders", + requestType = com.google.storage.control.v2.ListFoldersRequest.class, + responseType = com.google.storage.control.v2.ListFoldersResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.ListFoldersRequest, + com.google.storage.control.v2.ListFoldersResponse> + getListFoldersMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.ListFoldersRequest, + com.google.storage.control.v2.ListFoldersResponse> + getListFoldersMethod; + if ((getListFoldersMethod = StorageControlGrpc.getListFoldersMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getListFoldersMethod = StorageControlGrpc.getListFoldersMethod) == null) { + StorageControlGrpc.getListFoldersMethod = + getListFoldersMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListFolders")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.ListFoldersRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.ListFoldersResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("ListFolders")) + .build(); + } + } + } + return getListFoldersMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.RenameFolderRequest, com.google.longrunning.Operation> + getRenameFolderMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "RenameFolder", + requestType = com.google.storage.control.v2.RenameFolderRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.RenameFolderRequest, com.google.longrunning.Operation> + getRenameFolderMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.RenameFolderRequest, com.google.longrunning.Operation> + getRenameFolderMethod; + if ((getRenameFolderMethod = StorageControlGrpc.getRenameFolderMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getRenameFolderMethod = StorageControlGrpc.getRenameFolderMethod) == null) { + StorageControlGrpc.getRenameFolderMethod = + getRenameFolderMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "RenameFolder")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.RenameFolderRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("RenameFolder")) + .build(); + } + } + } + return getRenameFolderMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.DeleteFolderRecursiveRequest, + com.google.longrunning.Operation> + getDeleteFolderRecursiveMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteFolderRecursive", + requestType = com.google.storage.control.v2.DeleteFolderRecursiveRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.DeleteFolderRecursiveRequest, + com.google.longrunning.Operation> + getDeleteFolderRecursiveMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.DeleteFolderRecursiveRequest, + com.google.longrunning.Operation> + getDeleteFolderRecursiveMethod; + if ((getDeleteFolderRecursiveMethod = StorageControlGrpc.getDeleteFolderRecursiveMethod) + == null) { + synchronized (StorageControlGrpc.class) { + if ((getDeleteFolderRecursiveMethod = StorageControlGrpc.getDeleteFolderRecursiveMethod) + == null) { + StorageControlGrpc.getDeleteFolderRecursiveMethod = + getDeleteFolderRecursiveMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "DeleteFolderRecursive")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.DeleteFolderRecursiveRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("DeleteFolderRecursive")) + .build(); + } + } + } + return getDeleteFolderRecursiveMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetStorageLayoutRequest, + com.google.storage.control.v2.StorageLayout> + getGetStorageLayoutMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetStorageLayout", + requestType = com.google.storage.control.v2.GetStorageLayoutRequest.class, + responseType = com.google.storage.control.v2.StorageLayout.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetStorageLayoutRequest, + com.google.storage.control.v2.StorageLayout> + getGetStorageLayoutMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetStorageLayoutRequest, + com.google.storage.control.v2.StorageLayout> + getGetStorageLayoutMethod; + if ((getGetStorageLayoutMethod = StorageControlGrpc.getGetStorageLayoutMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getGetStorageLayoutMethod = StorageControlGrpc.getGetStorageLayoutMethod) == null) { + StorageControlGrpc.getGetStorageLayoutMethod = + getGetStorageLayoutMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetStorageLayout")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.GetStorageLayoutRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.StorageLayout.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("GetStorageLayout")) + .build(); + } + } + } + return getGetStorageLayoutMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.CreateManagedFolderRequest, + com.google.storage.control.v2.ManagedFolder> + getCreateManagedFolderMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateManagedFolder", + requestType = com.google.storage.control.v2.CreateManagedFolderRequest.class, + responseType = com.google.storage.control.v2.ManagedFolder.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.CreateManagedFolderRequest, + com.google.storage.control.v2.ManagedFolder> + getCreateManagedFolderMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.CreateManagedFolderRequest, + com.google.storage.control.v2.ManagedFolder> + getCreateManagedFolderMethod; + if ((getCreateManagedFolderMethod = StorageControlGrpc.getCreateManagedFolderMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getCreateManagedFolderMethod = StorageControlGrpc.getCreateManagedFolderMethod) + == null) { + StorageControlGrpc.getCreateManagedFolderMethod = + getCreateManagedFolderMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "CreateManagedFolder")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.CreateManagedFolderRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.ManagedFolder.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("CreateManagedFolder")) + .build(); + } + } + } + return getCreateManagedFolderMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.DeleteManagedFolderRequest, com.google.protobuf.Empty> + getDeleteManagedFolderMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteManagedFolder", + requestType = com.google.storage.control.v2.DeleteManagedFolderRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.DeleteManagedFolderRequest, com.google.protobuf.Empty> + getDeleteManagedFolderMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.DeleteManagedFolderRequest, com.google.protobuf.Empty> + getDeleteManagedFolderMethod; + if ((getDeleteManagedFolderMethod = StorageControlGrpc.getDeleteManagedFolderMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getDeleteManagedFolderMethod = StorageControlGrpc.getDeleteManagedFolderMethod) + == null) { + StorageControlGrpc.getDeleteManagedFolderMethod = + getDeleteManagedFolderMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "DeleteManagedFolder")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.DeleteManagedFolderRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("DeleteManagedFolder")) + .build(); + } + } + } + return getDeleteManagedFolderMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetManagedFolderRequest, + com.google.storage.control.v2.ManagedFolder> + getGetManagedFolderMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetManagedFolder", + requestType = com.google.storage.control.v2.GetManagedFolderRequest.class, + responseType = com.google.storage.control.v2.ManagedFolder.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetManagedFolderRequest, + com.google.storage.control.v2.ManagedFolder> + getGetManagedFolderMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetManagedFolderRequest, + com.google.storage.control.v2.ManagedFolder> + getGetManagedFolderMethod; + if ((getGetManagedFolderMethod = StorageControlGrpc.getGetManagedFolderMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getGetManagedFolderMethod = StorageControlGrpc.getGetManagedFolderMethod) == null) { + StorageControlGrpc.getGetManagedFolderMethod = + getGetManagedFolderMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetManagedFolder")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.GetManagedFolderRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.ManagedFolder.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("GetManagedFolder")) + .build(); + } + } + } + return getGetManagedFolderMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.ListManagedFoldersRequest, + com.google.storage.control.v2.ListManagedFoldersResponse> + getListManagedFoldersMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListManagedFolders", + requestType = com.google.storage.control.v2.ListManagedFoldersRequest.class, + responseType = com.google.storage.control.v2.ListManagedFoldersResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.ListManagedFoldersRequest, + com.google.storage.control.v2.ListManagedFoldersResponse> + getListManagedFoldersMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.ListManagedFoldersRequest, + com.google.storage.control.v2.ListManagedFoldersResponse> + getListManagedFoldersMethod; + if ((getListManagedFoldersMethod = StorageControlGrpc.getListManagedFoldersMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getListManagedFoldersMethod = StorageControlGrpc.getListManagedFoldersMethod) + == null) { + StorageControlGrpc.getListManagedFoldersMethod = + getListManagedFoldersMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListManagedFolders")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.ListManagedFoldersRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.ListManagedFoldersResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("ListManagedFolders")) + .build(); + } + } + } + return getListManagedFoldersMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.CreateAnywhereCacheRequest, + com.google.longrunning.Operation> + getCreateAnywhereCacheMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateAnywhereCache", + requestType = com.google.storage.control.v2.CreateAnywhereCacheRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.CreateAnywhereCacheRequest, + com.google.longrunning.Operation> + getCreateAnywhereCacheMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.CreateAnywhereCacheRequest, + com.google.longrunning.Operation> + getCreateAnywhereCacheMethod; + if ((getCreateAnywhereCacheMethod = StorageControlGrpc.getCreateAnywhereCacheMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getCreateAnywhereCacheMethod = StorageControlGrpc.getCreateAnywhereCacheMethod) + == null) { + StorageControlGrpc.getCreateAnywhereCacheMethod = + getCreateAnywhereCacheMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "CreateAnywhereCache")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.CreateAnywhereCacheRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("CreateAnywhereCache")) + .build(); + } + } + } + return getCreateAnywhereCacheMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.UpdateAnywhereCacheRequest, + com.google.longrunning.Operation> + getUpdateAnywhereCacheMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateAnywhereCache", + requestType = com.google.storage.control.v2.UpdateAnywhereCacheRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.UpdateAnywhereCacheRequest, + com.google.longrunning.Operation> + getUpdateAnywhereCacheMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.UpdateAnywhereCacheRequest, + com.google.longrunning.Operation> + getUpdateAnywhereCacheMethod; + if ((getUpdateAnywhereCacheMethod = StorageControlGrpc.getUpdateAnywhereCacheMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getUpdateAnywhereCacheMethod = StorageControlGrpc.getUpdateAnywhereCacheMethod) + == null) { + StorageControlGrpc.getUpdateAnywhereCacheMethod = + getUpdateAnywhereCacheMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "UpdateAnywhereCache")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.UpdateAnywhereCacheRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("UpdateAnywhereCache")) + .build(); + } + } + } + return getUpdateAnywhereCacheMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.DisableAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache> + getDisableAnywhereCacheMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DisableAnywhereCache", + requestType = com.google.storage.control.v2.DisableAnywhereCacheRequest.class, + responseType = com.google.storage.control.v2.AnywhereCache.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.DisableAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache> + getDisableAnywhereCacheMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.DisableAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache> + getDisableAnywhereCacheMethod; + if ((getDisableAnywhereCacheMethod = StorageControlGrpc.getDisableAnywhereCacheMethod) + == null) { + synchronized (StorageControlGrpc.class) { + if ((getDisableAnywhereCacheMethod = StorageControlGrpc.getDisableAnywhereCacheMethod) + == null) { + StorageControlGrpc.getDisableAnywhereCacheMethod = + getDisableAnywhereCacheMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "DisableAnywhereCache")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.DisableAnywhereCacheRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.AnywhereCache.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("DisableAnywhereCache")) + .build(); + } + } + } + return getDisableAnywhereCacheMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.PauseAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache> + getPauseAnywhereCacheMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "PauseAnywhereCache", + requestType = com.google.storage.control.v2.PauseAnywhereCacheRequest.class, + responseType = com.google.storage.control.v2.AnywhereCache.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.PauseAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache> + getPauseAnywhereCacheMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.PauseAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache> + getPauseAnywhereCacheMethod; + if ((getPauseAnywhereCacheMethod = StorageControlGrpc.getPauseAnywhereCacheMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getPauseAnywhereCacheMethod = StorageControlGrpc.getPauseAnywhereCacheMethod) + == null) { + StorageControlGrpc.getPauseAnywhereCacheMethod = + getPauseAnywhereCacheMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "PauseAnywhereCache")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.PauseAnywhereCacheRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.AnywhereCache.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("PauseAnywhereCache")) + .build(); + } + } + } + return getPauseAnywhereCacheMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.ResumeAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache> + getResumeAnywhereCacheMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ResumeAnywhereCache", + requestType = com.google.storage.control.v2.ResumeAnywhereCacheRequest.class, + responseType = com.google.storage.control.v2.AnywhereCache.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.ResumeAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache> + getResumeAnywhereCacheMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.ResumeAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache> + getResumeAnywhereCacheMethod; + if ((getResumeAnywhereCacheMethod = StorageControlGrpc.getResumeAnywhereCacheMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getResumeAnywhereCacheMethod = StorageControlGrpc.getResumeAnywhereCacheMethod) + == null) { + StorageControlGrpc.getResumeAnywhereCacheMethod = + getResumeAnywhereCacheMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ResumeAnywhereCache")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.ResumeAnywhereCacheRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.AnywhereCache.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("ResumeAnywhereCache")) + .build(); + } + } + } + return getResumeAnywhereCacheMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache> + getGetAnywhereCacheMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetAnywhereCache", + requestType = com.google.storage.control.v2.GetAnywhereCacheRequest.class, + responseType = com.google.storage.control.v2.AnywhereCache.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache> + getGetAnywhereCacheMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache> + getGetAnywhereCacheMethod; + if ((getGetAnywhereCacheMethod = StorageControlGrpc.getGetAnywhereCacheMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getGetAnywhereCacheMethod = StorageControlGrpc.getGetAnywhereCacheMethod) == null) { + StorageControlGrpc.getGetAnywhereCacheMethod = + getGetAnywhereCacheMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetAnywhereCache")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.GetAnywhereCacheRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.AnywhereCache.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("GetAnywhereCache")) + .build(); + } + } + } + return getGetAnywhereCacheMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.ListAnywhereCachesRequest, + com.google.storage.control.v2.ListAnywhereCachesResponse> + getListAnywhereCachesMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListAnywhereCaches", + requestType = com.google.storage.control.v2.ListAnywhereCachesRequest.class, + responseType = com.google.storage.control.v2.ListAnywhereCachesResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.ListAnywhereCachesRequest, + com.google.storage.control.v2.ListAnywhereCachesResponse> + getListAnywhereCachesMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.ListAnywhereCachesRequest, + com.google.storage.control.v2.ListAnywhereCachesResponse> + getListAnywhereCachesMethod; + if ((getListAnywhereCachesMethod = StorageControlGrpc.getListAnywhereCachesMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getListAnywhereCachesMethod = StorageControlGrpc.getListAnywhereCachesMethod) + == null) { + StorageControlGrpc.getListAnywhereCachesMethod = + getListAnywhereCachesMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListAnywhereCaches")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.ListAnywhereCachesRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.ListAnywhereCachesResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("ListAnywhereCaches")) + .build(); + } + } + } + return getListAnywhereCachesMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getGetProjectIntelligenceConfigMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetProjectIntelligenceConfig", + requestType = com.google.storage.control.v2.GetProjectIntelligenceConfigRequest.class, + responseType = com.google.storage.control.v2.IntelligenceConfig.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getGetProjectIntelligenceConfigMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getGetProjectIntelligenceConfigMethod; + if ((getGetProjectIntelligenceConfigMethod = + StorageControlGrpc.getGetProjectIntelligenceConfigMethod) + == null) { + synchronized (StorageControlGrpc.class) { + if ((getGetProjectIntelligenceConfigMethod = + StorageControlGrpc.getGetProjectIntelligenceConfigMethod) + == null) { + StorageControlGrpc.getGetProjectIntelligenceConfigMethod = + getGetProjectIntelligenceConfigMethod = + 
io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "GetProjectIntelligenceConfig")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.IntelligenceConfig + .getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier( + "GetProjectIntelligenceConfig")) + .build(); + } + } + } + return getGetProjectIntelligenceConfigMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getUpdateProjectIntelligenceConfigMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateProjectIntelligenceConfig", + requestType = com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest.class, + responseType = com.google.storage.control.v2.IntelligenceConfig.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getUpdateProjectIntelligenceConfigMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getUpdateProjectIntelligenceConfigMethod; + if ((getUpdateProjectIntelligenceConfigMethod = + StorageControlGrpc.getUpdateProjectIntelligenceConfigMethod) + == null) { + synchronized (StorageControlGrpc.class) { + if ((getUpdateProjectIntelligenceConfigMethod = + StorageControlGrpc.getUpdateProjectIntelligenceConfigMethod) + == null) { + 
StorageControlGrpc.getUpdateProjectIntelligenceConfigMethod = + getUpdateProjectIntelligenceConfigMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "UpdateProjectIntelligenceConfig")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.IntelligenceConfig + .getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier( + "UpdateProjectIntelligenceConfig")) + .build(); + } + } + } + return getUpdateProjectIntelligenceConfigMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getGetFolderIntelligenceConfigMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetFolderIntelligenceConfig", + requestType = com.google.storage.control.v2.GetFolderIntelligenceConfigRequest.class, + responseType = com.google.storage.control.v2.IntelligenceConfig.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getGetFolderIntelligenceConfigMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getGetFolderIntelligenceConfigMethod; + if ((getGetFolderIntelligenceConfigMethod = + StorageControlGrpc.getGetFolderIntelligenceConfigMethod) + == null) { + synchronized (StorageControlGrpc.class) { + if ((getGetFolderIntelligenceConfigMethod = + 
StorageControlGrpc.getGetFolderIntelligenceConfigMethod) + == null) { + StorageControlGrpc.getGetFolderIntelligenceConfigMethod = + getGetFolderIntelligenceConfigMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "GetFolderIntelligenceConfig")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.IntelligenceConfig + .getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("GetFolderIntelligenceConfig")) + .build(); + } + } + } + return getGetFolderIntelligenceConfigMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getUpdateFolderIntelligenceConfigMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateFolderIntelligenceConfig", + requestType = com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest.class, + responseType = com.google.storage.control.v2.IntelligenceConfig.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getUpdateFolderIntelligenceConfigMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getUpdateFolderIntelligenceConfigMethod; + if ((getUpdateFolderIntelligenceConfigMethod = + StorageControlGrpc.getUpdateFolderIntelligenceConfigMethod) + == null) { + synchronized 
(StorageControlGrpc.class) { + if ((getUpdateFolderIntelligenceConfigMethod = + StorageControlGrpc.getUpdateFolderIntelligenceConfigMethod) + == null) { + StorageControlGrpc.getUpdateFolderIntelligenceConfigMethod = + getUpdateFolderIntelligenceConfigMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "UpdateFolderIntelligenceConfig")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.IntelligenceConfig + .getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier( + "UpdateFolderIntelligenceConfig")) + .build(); + } + } + } + return getUpdateFolderIntelligenceConfigMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getGetOrganizationIntelligenceConfigMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetOrganizationIntelligenceConfig", + requestType = com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest.class, + responseType = com.google.storage.control.v2.IntelligenceConfig.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getGetOrganizationIntelligenceConfigMethod() { + io.grpc.MethodDescriptor< + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getGetOrganizationIntelligenceConfigMethod; + if 
((getGetOrganizationIntelligenceConfigMethod = + StorageControlGrpc.getGetOrganizationIntelligenceConfigMethod) + == null) { + synchronized (StorageControlGrpc.class) { + if ((getGetOrganizationIntelligenceConfigMethod = + StorageControlGrpc.getGetOrganizationIntelligenceConfigMethod) + == null) { + StorageControlGrpc.getGetOrganizationIntelligenceConfigMethod = + getGetOrganizationIntelligenceConfigMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "GetOrganizationIntelligenceConfig")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.IntelligenceConfig + .getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier( + "GetOrganizationIntelligenceConfig")) + .build(); + } + } + } + return getGetOrganizationIntelligenceConfigMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getUpdateOrganizationIntelligenceConfigMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateOrganizationIntelligenceConfig", + requestType = com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest.class, + responseType = com.google.storage.control.v2.IntelligenceConfig.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getUpdateOrganizationIntelligenceConfigMethod() { + io.grpc.MethodDescriptor< + 
com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig> + getUpdateOrganizationIntelligenceConfigMethod; + if ((getUpdateOrganizationIntelligenceConfigMethod = + StorageControlGrpc.getUpdateOrganizationIntelligenceConfigMethod) + == null) { + synchronized (StorageControlGrpc.class) { + if ((getUpdateOrganizationIntelligenceConfigMethod = + StorageControlGrpc.getUpdateOrganizationIntelligenceConfigMethod) + == null) { + StorageControlGrpc.getUpdateOrganizationIntelligenceConfigMethod = + getUpdateOrganizationIntelligenceConfigMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName( + SERVICE_NAME, "UpdateOrganizationIntelligenceConfig")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2 + .UpdateOrganizationIntelligenceConfigRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.control.v2.IntelligenceConfig + .getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier( + "UpdateOrganizationIntelligenceConfig")) + .build(); + } + } + } + return getUpdateOrganizationIntelligenceConfigMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.iam.v1.GetIamPolicyRequest, com.google.iam.v1.Policy> + getGetIamPolicyMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetIamPolicy", + requestType = com.google.iam.v1.GetIamPolicyRequest.class, + responseType = com.google.iam.v1.Policy.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.iam.v1.GetIamPolicyRequest, com.google.iam.v1.Policy> + getGetIamPolicyMethod() { + io.grpc.MethodDescriptor + getGetIamPolicyMethod; + if ((getGetIamPolicyMethod = 
StorageControlGrpc.getGetIamPolicyMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getGetIamPolicyMethod = StorageControlGrpc.getGetIamPolicyMethod) == null) { + StorageControlGrpc.getGetIamPolicyMethod = + getGetIamPolicyMethod = + io.grpc.MethodDescriptor + .newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetIamPolicy")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.GetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.Policy.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("GetIamPolicy")) + .build(); + } + } + } + return getGetIamPolicyMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.iam.v1.SetIamPolicyRequest, com.google.iam.v1.Policy> + getSetIamPolicyMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "SetIamPolicy", + requestType = com.google.iam.v1.SetIamPolicyRequest.class, + responseType = com.google.iam.v1.Policy.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.iam.v1.SetIamPolicyRequest, com.google.iam.v1.Policy> + getSetIamPolicyMethod() { + io.grpc.MethodDescriptor + getSetIamPolicyMethod; + if ((getSetIamPolicyMethod = StorageControlGrpc.getSetIamPolicyMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getSetIamPolicyMethod = StorageControlGrpc.getSetIamPolicyMethod) == null) { + StorageControlGrpc.getSetIamPolicyMethod = + getSetIamPolicyMethod = + io.grpc.MethodDescriptor + .newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "SetIamPolicy")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + 
io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.SetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.Policy.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("SetIamPolicy")) + .build(); + } + } + } + return getSetIamPolicyMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.iam.v1.TestIamPermissionsRequest, com.google.iam.v1.TestIamPermissionsResponse> + getTestIamPermissionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "TestIamPermissions", + requestType = com.google.iam.v1.TestIamPermissionsRequest.class, + responseType = com.google.iam.v1.TestIamPermissionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.iam.v1.TestIamPermissionsRequest, com.google.iam.v1.TestIamPermissionsResponse> + getTestIamPermissionsMethod() { + io.grpc.MethodDescriptor< + com.google.iam.v1.TestIamPermissionsRequest, + com.google.iam.v1.TestIamPermissionsResponse> + getTestIamPermissionsMethod; + if ((getTestIamPermissionsMethod = StorageControlGrpc.getTestIamPermissionsMethod) == null) { + synchronized (StorageControlGrpc.class) { + if ((getTestIamPermissionsMethod = StorageControlGrpc.getTestIamPermissionsMethod) + == null) { + StorageControlGrpc.getTestIamPermissionsMethod = + getTestIamPermissionsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "TestIamPermissions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.TestIamPermissionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.TestIamPermissionsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new StorageControlMethodDescriptorSupplier("TestIamPermissions")) + .build(); + } + } + } + return getTestIamPermissionsMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static StorageControlStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public StorageControlStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageControlStub(channel, callOptions); + } + }; + return StorageControlStub.newStub(factory, channel); + } + + /** Creates a new blocking-style stub that supports all types of calls on the service */ + public static StorageControlBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public StorageControlBlockingV2Stub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageControlBlockingV2Stub(channel, callOptions); + } + }; + return StorageControlBlockingV2Stub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static StorageControlBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public 
StorageControlBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageControlBlockingStub(channel, callOptions); + } + }; + return StorageControlBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static StorageControlFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public StorageControlFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageControlFutureStub(channel, callOptions); + } + }; + return StorageControlFutureStub.newStub(factory, channel); + } + + /** + * + * + *
+   * StorageControl service includes selected control plane operations.
+   * 
+ */ + public interface AsyncService { + + /** + * + * + *
+     * Creates a new folder. This operation is only applicable to a hierarchical
+     * namespace enabled bucket.
+     * 
+ */ + default void createFolder( + com.google.storage.control.v2.CreateFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateFolderMethod(), responseObserver); + } + + /** + * + * + *
+     * Permanently deletes an empty folder. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + default void deleteFolder( + com.google.storage.control.v2.DeleteFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteFolderMethod(), responseObserver); + } + + /** + * + * + *
+     * Returns metadata for the specified folder. This operation is only
+     * applicable to a hierarchical namespace enabled bucket.
+     * 
+ */ + default void getFolder( + com.google.storage.control.v2.GetFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetFolderMethod(), responseObserver); + } + + /** + * + * + *
+     * Retrieves a list of folders. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + default void listFolders( + com.google.storage.control.v2.ListFoldersRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListFoldersMethod(), responseObserver); + } + + /** + * + * + *
+     * Renames a source folder to a destination folder. This operation is only
+     * applicable to a hierarchical namespace enabled bucket. During a rename, the
+     * source and destination folders are locked until the long running operation
+     * completes.
+     * 
+ */ + default void renameFolder( + com.google.storage.control.v2.RenameFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getRenameFolderMethod(), responseObserver); + } + + /** + * + * + *
+     * Deletes a folder recursively. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + default void deleteFolderRecursive( + com.google.storage.control.v2.DeleteFolderRecursiveRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteFolderRecursiveMethod(), responseObserver); + } + + /** + * + * + *
+     * Returns the storage layout configuration for a given bucket.
+     * 
+ */ + default void getStorageLayout( + com.google.storage.control.v2.GetStorageLayoutRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetStorageLayoutMethod(), responseObserver); + } + + /** + * + * + *
+     * Creates a new managed folder.
+     * 
+ */ + default void createManagedFolder( + com.google.storage.control.v2.CreateManagedFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateManagedFolderMethod(), responseObserver); + } + + /** + * + * + *
+     * Permanently deletes an empty managed folder.
+     * 
+ */ + default void deleteManagedFolder( + com.google.storage.control.v2.DeleteManagedFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteManagedFolderMethod(), responseObserver); + } + + /** + * + * + *
+     * Returns metadata for the specified managed folder.
+     * 
+ */ + default void getManagedFolder( + com.google.storage.control.v2.GetManagedFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetManagedFolderMethod(), responseObserver); + } + + /** + * + * + *
+     * Retrieves a list of managed folders for a given bucket.
+     * 
+ */ + default void listManagedFolders( + com.google.storage.control.v2.ListManagedFoldersRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListManagedFoldersMethod(), responseObserver); + } + + /** + * + * + *
+     * Creates an Anywhere Cache instance.
+     * 
+ */ + default void createAnywhereCache( + com.google.storage.control.v2.CreateAnywhereCacheRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateAnywhereCacheMethod(), responseObserver); + } + + /** + * + * + *
+     * Updates an Anywhere Cache instance. Mutable fields include `ttl` and
+     * `admission_policy`.
+     * 
+ */ + default void updateAnywhereCache( + com.google.storage.control.v2.UpdateAnywhereCacheRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateAnywhereCacheMethod(), responseObserver); + } + + /** + * + * + *
+     * Disables an Anywhere Cache instance. A disabled instance is read-only. The
+     * disablement could be revoked by calling ResumeAnywhereCache. The cache
+     * instance will be deleted automatically if it remains in the disabled state
+     * for at least one hour.
+     * 
+ */ + default void disableAnywhereCache( + com.google.storage.control.v2.DisableAnywhereCacheRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDisableAnywhereCacheMethod(), responseObserver); + } + + /** + * + * + *
+     * Pauses an Anywhere Cache instance.
+     * 
+ */ + default void pauseAnywhereCache( + com.google.storage.control.v2.PauseAnywhereCacheRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getPauseAnywhereCacheMethod(), responseObserver); + } + + /** + * + * + *
+     * Resumes a disabled or paused Anywhere Cache instance.
+     * 
+ */ + default void resumeAnywhereCache( + com.google.storage.control.v2.ResumeAnywhereCacheRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getResumeAnywhereCacheMethod(), responseObserver); + } + + /** + * + * + *
+     * Gets an Anywhere Cache instance.
+     * 
+ */ + default void getAnywhereCache( + com.google.storage.control.v2.GetAnywhereCacheRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetAnywhereCacheMethod(), responseObserver); + } + + /** + * + * + *
+     * Lists Anywhere Cache instances for a given bucket.
+     * 
+ */ + default void listAnywhereCaches( + com.google.storage.control.v2.ListAnywhereCachesRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListAnywhereCachesMethod(), responseObserver); + } + + /** + * + * + *
+     * Returns the Project scoped singleton IntelligenceConfig resource.
+     * 
+ */ + default void getProjectIntelligenceConfig( + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetProjectIntelligenceConfigMethod(), responseObserver); + } + + /** + * + * + *
+     * Updates the Project scoped singleton IntelligenceConfig resource.
+     * 
+ */ + default void updateProjectIntelligenceConfig( + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateProjectIntelligenceConfigMethod(), responseObserver); + } + + /** + * + * + *
+     * Returns the Folder scoped singleton IntelligenceConfig resource.
+     * 
+ */ + default void getFolderIntelligenceConfig( + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetFolderIntelligenceConfigMethod(), responseObserver); + } + + /** + * + * + *
+     * Updates the Folder scoped singleton IntelligenceConfig resource.
+     * 
+ */ + default void updateFolderIntelligenceConfig( + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateFolderIntelligenceConfigMethod(), responseObserver); + } + + /** + * + * + *
+     * Returns the Organization scoped singleton IntelligenceConfig resource.
+     * 
+ */ + default void getOrganizationIntelligenceConfig( + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetOrganizationIntelligenceConfigMethod(), responseObserver); + } + + /** + * + * + *
+     * Updates the Organization scoped singleton IntelligenceConfig resource.
+     * 
+ */ + default void updateOrganizationIntelligenceConfig( + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateOrganizationIntelligenceConfigMethod(), responseObserver); + } + + /** + * + * + *
+     * Gets the IAM policy for a specified bucket.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + default void getIamPolicy( + com.google.iam.v1.GetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetIamPolicyMethod(), responseObserver); + } + + /** + * + * + *
+     * Updates an IAM policy for the specified bucket.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + default void setIamPolicy( + com.google.iam.v1.SetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getSetIamPolicyMethod(), responseObserver); + } + + /** + * + * + *
+     * Tests a set of permissions on the given bucket, object, or managed folder
+     * to see which, if any, are held by the caller.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket,
+     * `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + default void testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getTestIamPermissionsMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service StorageControl. + * + *
+   * StorageControl service includes selected control plane operations.
+   * 
+ */ + public abstract static class StorageControlImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return StorageControlGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service StorageControl. + * + *
+   * StorageControl service includes selected control plane operations.
+   * 
+ */ + public static final class StorageControlStub + extends io.grpc.stub.AbstractAsyncStub { + private StorageControlStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected StorageControlStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageControlStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new folder. This operation is only applicable to a hierarchical
+     * namespace enabled bucket.
+     * 
+ */ + public void createFolder( + com.google.storage.control.v2.CreateFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateFolderMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Permanently deletes an empty folder. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + public void deleteFolder( + com.google.storage.control.v2.DeleteFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteFolderMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Returns metadata for the specified folder. This operation is only
+     * applicable to a hierarchical namespace enabled bucket.
+     * 
+ */ + public void getFolder( + com.google.storage.control.v2.GetFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetFolderMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
+     * Retrieves a list of folders. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + public void listFolders( + com.google.storage.control.v2.ListFoldersRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListFoldersMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Renames a source folder to a destination folder. This operation is only
+     * applicable to a hierarchical namespace enabled bucket. During a rename, the
+     * source and destination folders are locked until the long running operation
+     * completes.
+     * 
+ */ + public void renameFolder( + com.google.storage.control.v2.RenameFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getRenameFolderMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Deletes a folder recursively. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + public void deleteFolderRecursive( + com.google.storage.control.v2.DeleteFolderRecursiveRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteFolderRecursiveMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Returns the storage layout configuration for a given bucket.
+     * 
+ */ + public void getStorageLayout( + com.google.storage.control.v2.GetStorageLayoutRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetStorageLayoutMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Creates a new managed folder.
+     * 
+ */ + public void createManagedFolder( + com.google.storage.control.v2.CreateManagedFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateManagedFolderMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Permanently deletes an empty managed folder.
+     * 
+ */ + public void deleteManagedFolder( + com.google.storage.control.v2.DeleteManagedFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteManagedFolderMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Returns metadata for the specified managed folder.
+     * 
+ */ + public void getManagedFolder( + com.google.storage.control.v2.GetManagedFolderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetManagedFolderMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Retrieves a list of managed folders for a given bucket.
+     * 
+ */ + public void listManagedFolders( + com.google.storage.control.v2.ListManagedFoldersRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListManagedFoldersMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Creates an Anywhere Cache instance.
+     * 
+ */ + public void createAnywhereCache( + com.google.storage.control.v2.CreateAnywhereCacheRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateAnywhereCacheMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Updates an Anywhere Cache instance. Mutable fields include `ttl` and
+     * `admission_policy`.
+     * 
+ */ + public void updateAnywhereCache( + com.google.storage.control.v2.UpdateAnywhereCacheRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateAnywhereCacheMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Disables an Anywhere Cache instance. A disabled instance is read-only. The
+     * disablement could be revoked by calling ResumeAnywhereCache. The cache
+     * instance will be deleted automatically if it remains in the disabled state
+     * for at least one hour.
+     * 
+ */ + public void disableAnywhereCache( + com.google.storage.control.v2.DisableAnywhereCacheRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDisableAnywhereCacheMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Pauses an Anywhere Cache instance.
+     * 
+ */ + public void pauseAnywhereCache( + com.google.storage.control.v2.PauseAnywhereCacheRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getPauseAnywhereCacheMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Resumes a disabled or paused Anywhere Cache instance.
+     * 
+ */ + public void resumeAnywhereCache( + com.google.storage.control.v2.ResumeAnywhereCacheRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getResumeAnywhereCacheMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Gets an Anywhere Cache instance.
+     * 
+ */ + public void getAnywhereCache( + com.google.storage.control.v2.GetAnywhereCacheRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetAnywhereCacheMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Lists Anywhere Cache instances for a given bucket.
+     * 
+ */ + public void listAnywhereCaches( + com.google.storage.control.v2.ListAnywhereCachesRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListAnywhereCachesMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Returns the Project scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public void getProjectIntelligenceConfig( + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetProjectIntelligenceConfigMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Updates the Project scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public void updateProjectIntelligenceConfig( + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateProjectIntelligenceConfigMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Returns the Folder scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public void getFolderIntelligenceConfig( + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetFolderIntelligenceConfigMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Updates the Folder scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public void updateFolderIntelligenceConfig( + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateFolderIntelligenceConfigMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Returns the Organization scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public void getOrganizationIntelligenceConfig( + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetOrganizationIntelligenceConfigMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Updates the Organization scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public void updateOrganizationIntelligenceConfig( + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateOrganizationIntelligenceConfigMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Gets the IAM policy for a specified bucket.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public void getIamPolicy( + com.google.iam.v1.GetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetIamPolicyMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Updates an IAM policy for the specified bucket.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public void setIamPolicy( + com.google.iam.v1.SetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getSetIamPolicyMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Tests a set of permissions on the given bucket, object, or managed folder
+     * to see which, if any, are held by the caller.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket,
+     * `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public void testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getTestIamPermissionsMethod(), getCallOptions()), + request, + responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service StorageControl. + * + *
+   * StorageControl service includes selected control plane operations.
+   * 
+ */ + public static final class StorageControlBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private StorageControlBlockingV2Stub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected StorageControlBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageControlBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new folder. This operation is only applicable to a hierarchical
+     * namespace enabled bucket.
+     * 
+ */ + public com.google.storage.control.v2.Folder createFolder( + com.google.storage.control.v2.CreateFolderRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Permanently deletes an empty folder. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + public com.google.protobuf.Empty deleteFolder( + com.google.storage.control.v2.DeleteFolderRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getDeleteFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns metadata for the specified folder. This operation is only
+     * applicable to a hierarchical namespace enabled bucket.
+     * 
+ */ + public com.google.storage.control.v2.Folder getFolder( + com.google.storage.control.v2.GetFolderRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Retrieves a list of folders. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + public com.google.storage.control.v2.ListFoldersResponse listFolders( + com.google.storage.control.v2.ListFoldersRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListFoldersMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Renames a source folder to a destination folder. This operation is only
+     * applicable to a hierarchical namespace enabled bucket. During a rename, the
+     * source and destination folders are locked until the long running operation
+     * completes.
+     * 
+ */ + public com.google.longrunning.Operation renameFolder( + com.google.storage.control.v2.RenameFolderRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getRenameFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Deletes a folder recursively. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + public com.google.longrunning.Operation deleteFolderRecursive( + com.google.storage.control.v2.DeleteFolderRecursiveRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getDeleteFolderRecursiveMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns the storage layout configuration for a given bucket.
+     * 
+ */ + public com.google.storage.control.v2.StorageLayout getStorageLayout( + com.google.storage.control.v2.GetStorageLayoutRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetStorageLayoutMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Creates a new managed folder.
+     * 
+ */ + public com.google.storage.control.v2.ManagedFolder createManagedFolder( + com.google.storage.control.v2.CreateManagedFolderRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateManagedFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Permanently deletes an empty managed folder.
+     * 
+ */ + public com.google.protobuf.Empty deleteManagedFolder( + com.google.storage.control.v2.DeleteManagedFolderRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getDeleteManagedFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns metadata for the specified managed folder.
+     * 
+ */ + public com.google.storage.control.v2.ManagedFolder getManagedFolder( + com.google.storage.control.v2.GetManagedFolderRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetManagedFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Retrieves a list of managed folders for a given bucket.
+     * 
+ */ + public com.google.storage.control.v2.ListManagedFoldersResponse listManagedFolders( + com.google.storage.control.v2.ListManagedFoldersRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListManagedFoldersMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Creates an Anywhere Cache instance.
+     * 
+ */ + public com.google.longrunning.Operation createAnywhereCache( + com.google.storage.control.v2.CreateAnywhereCacheRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateAnywhereCacheMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates an Anywhere Cache instance. Mutable fields include `ttl` and
+     * `admission_policy`.
+     * 
+ */ + public com.google.longrunning.Operation updateAnywhereCache( + com.google.storage.control.v2.UpdateAnywhereCacheRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateAnywhereCacheMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Disables an Anywhere Cache instance. A disabled instance is read-only. The
+     * disablement could be revoked by calling ResumeAnywhereCache. The cache
+     * instance will be deleted automatically if it remains in the disabled state
+     * for at least one hour.
+     * 
+ */ + public com.google.storage.control.v2.AnywhereCache disableAnywhereCache( + com.google.storage.control.v2.DisableAnywhereCacheRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getDisableAnywhereCacheMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Pauses an Anywhere Cache instance.
+     * 
+ */ + public com.google.storage.control.v2.AnywhereCache pauseAnywhereCache( + com.google.storage.control.v2.PauseAnywhereCacheRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getPauseAnywhereCacheMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Resumes a disabled or paused Anywhere Cache instance.
+     * 
+ */ + public com.google.storage.control.v2.AnywhereCache resumeAnywhereCache( + com.google.storage.control.v2.ResumeAnywhereCacheRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getResumeAnywhereCacheMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets an Anywhere Cache instance.
+     * 
+ */ + public com.google.storage.control.v2.AnywhereCache getAnywhereCache( + com.google.storage.control.v2.GetAnywhereCacheRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetAnywhereCacheMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Lists Anywhere Cache instances for a given bucket.
+     * 
+ */ + public com.google.storage.control.v2.ListAnywhereCachesResponse listAnywhereCaches( + com.google.storage.control.v2.ListAnywhereCachesRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListAnywhereCachesMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns the Project scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.storage.control.v2.IntelligenceConfig getProjectIntelligenceConfig( + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetProjectIntelligenceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates the Project scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.storage.control.v2.IntelligenceConfig updateProjectIntelligenceConfig( + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateProjectIntelligenceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns the Folder scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.storage.control.v2.IntelligenceConfig getFolderIntelligenceConfig( + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetFolderIntelligenceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates the Folder scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.storage.control.v2.IntelligenceConfig updateFolderIntelligenceConfig( + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateFolderIntelligenceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns the Organization scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.storage.control.v2.IntelligenceConfig getOrganizationIntelligenceConfig( + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetOrganizationIntelligenceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates the Organization scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.storage.control.v2.IntelligenceConfig updateOrganizationIntelligenceConfig( + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateOrganizationIntelligenceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets the IAM policy for a specified bucket.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.iam.v1.Policy getIamPolicy(com.google.iam.v1.GetIamPolicyRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates an IAM policy for the specified bucket.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.iam.v1.Policy setIamPolicy(com.google.iam.v1.SetIamPolicyRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getSetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Tests a set of permissions on the given bucket, object, or managed folder
+     * to see which, if any, are held by the caller.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket,
+     * `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.iam.v1.TestIamPermissionsResponse testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getTestIamPermissionsMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do limited synchronous rpc calls to service StorageControl. + * + *
+   * StorageControl service includes selected control plane operations.
+   * 
+ */ + public static final class StorageControlBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private StorageControlBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected StorageControlBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageControlBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new folder. This operation is only applicable to a hierarchical
+     * namespace enabled bucket.
+     * 
+ */ + public com.google.storage.control.v2.Folder createFolder( + com.google.storage.control.v2.CreateFolderRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Permanently deletes an empty folder. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + public com.google.protobuf.Empty deleteFolder( + com.google.storage.control.v2.DeleteFolderRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns metadata for the specified folder. This operation is only
+     * applicable to a hierarchical namespace enabled bucket.
+     * 
+ */ + public com.google.storage.control.v2.Folder getFolder( + com.google.storage.control.v2.GetFolderRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Retrieves a list of folders. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + public com.google.storage.control.v2.ListFoldersResponse listFolders( + com.google.storage.control.v2.ListFoldersRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListFoldersMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Renames a source folder to a destination folder. This operation is only
+     * applicable to a hierarchical namespace enabled bucket. During a rename, the
+     * source and destination folders are locked until the long running operation
+     * completes.
+     * 
+ */ + public com.google.longrunning.Operation renameFolder( + com.google.storage.control.v2.RenameFolderRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getRenameFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Deletes a folder recursively. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + public com.google.longrunning.Operation deleteFolderRecursive( + com.google.storage.control.v2.DeleteFolderRecursiveRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteFolderRecursiveMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns the storage layout configuration for a given bucket.
+     * 
+ */ + public com.google.storage.control.v2.StorageLayout getStorageLayout( + com.google.storage.control.v2.GetStorageLayoutRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetStorageLayoutMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Creates a new managed folder.
+     * 
+ */ + public com.google.storage.control.v2.ManagedFolder createManagedFolder( + com.google.storage.control.v2.CreateManagedFolderRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateManagedFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Permanently deletes an empty managed folder.
+     * 
+ */ + public com.google.protobuf.Empty deleteManagedFolder( + com.google.storage.control.v2.DeleteManagedFolderRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteManagedFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns metadata for the specified managed folder.
+     * 
+ */ + public com.google.storage.control.v2.ManagedFolder getManagedFolder( + com.google.storage.control.v2.GetManagedFolderRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetManagedFolderMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Retrieves a list of managed folders for a given bucket.
+     * 
+ */ + public com.google.storage.control.v2.ListManagedFoldersResponse listManagedFolders( + com.google.storage.control.v2.ListManagedFoldersRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListManagedFoldersMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Creates an Anywhere Cache instance.
+     * 
+ */ + public com.google.longrunning.Operation createAnywhereCache( + com.google.storage.control.v2.CreateAnywhereCacheRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateAnywhereCacheMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates an Anywhere Cache instance. Mutable fields include `ttl` and
+     * `admission_policy`.
+     * 
+ */ + public com.google.longrunning.Operation updateAnywhereCache( + com.google.storage.control.v2.UpdateAnywhereCacheRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateAnywhereCacheMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Disables an Anywhere Cache instance. A disabled instance is read-only. The
+     * disablement could be revoked by calling ResumeAnywhereCache. The cache
+     * instance will be deleted automatically if it remains in the disabled state
+     * for at least one hour.
+     * 
+ */ + public com.google.storage.control.v2.AnywhereCache disableAnywhereCache( + com.google.storage.control.v2.DisableAnywhereCacheRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDisableAnywhereCacheMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Pauses an Anywhere Cache instance.
+     * 
+ */ + public com.google.storage.control.v2.AnywhereCache pauseAnywhereCache( + com.google.storage.control.v2.PauseAnywhereCacheRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getPauseAnywhereCacheMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Resumes a disabled or paused Anywhere Cache instance.
+     * 
+ */ + public com.google.storage.control.v2.AnywhereCache resumeAnywhereCache( + com.google.storage.control.v2.ResumeAnywhereCacheRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getResumeAnywhereCacheMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets an Anywhere Cache instance.
+     * 
+ */ + public com.google.storage.control.v2.AnywhereCache getAnywhereCache( + com.google.storage.control.v2.GetAnywhereCacheRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetAnywhereCacheMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Lists Anywhere Cache instances for a given bucket.
+     * 
+ */ + public com.google.storage.control.v2.ListAnywhereCachesResponse listAnywhereCaches( + com.google.storage.control.v2.ListAnywhereCachesRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListAnywhereCachesMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns the Project scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.storage.control.v2.IntelligenceConfig getProjectIntelligenceConfig( + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetProjectIntelligenceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates the Project scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.storage.control.v2.IntelligenceConfig updateProjectIntelligenceConfig( + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateProjectIntelligenceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns the Folder scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.storage.control.v2.IntelligenceConfig getFolderIntelligenceConfig( + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetFolderIntelligenceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates the Folder scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.storage.control.v2.IntelligenceConfig updateFolderIntelligenceConfig( + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateFolderIntelligenceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns the Organization scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.storage.control.v2.IntelligenceConfig getOrganizationIntelligenceConfig( + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetOrganizationIntelligenceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates the Organization scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.storage.control.v2.IntelligenceConfig updateOrganizationIntelligenceConfig( + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateOrganizationIntelligenceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets the IAM policy for a specified bucket.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.iam.v1.Policy getIamPolicy(com.google.iam.v1.GetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates an IAM policy for the specified bucket.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.iam.v1.Policy setIamPolicy(com.google.iam.v1.SetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getSetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Tests a set of permissions on the given bucket, object, or managed folder
+     * to see which, if any, are held by the caller.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket,
+     * `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.iam.v1.TestIamPermissionsResponse testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getTestIamPermissionsMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service StorageControl. + * + *
+   * StorageControl service includes selected control plane operations.
+   * 
+ */ + public static final class StorageControlFutureStub + extends io.grpc.stub.AbstractFutureStub { + private StorageControlFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected StorageControlFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageControlFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new folder. This operation is only applicable to a hierarchical
+     * namespace enabled bucket.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + createFolder(com.google.storage.control.v2.CreateFolderRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateFolderMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Permanently deletes an empty folder. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + deleteFolder(com.google.storage.control.v2.DeleteFolderRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteFolderMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Returns metadata for the specified folder. This operation is only
+     * applicable to a hierarchical namespace enabled bucket.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + getFolder(com.google.storage.control.v2.GetFolderRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetFolderMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Retrieves a list of folders. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.ListFoldersResponse> + listFolders(com.google.storage.control.v2.ListFoldersRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListFoldersMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Renames a source folder to a destination folder. This operation is only
+     * applicable to a hierarchical namespace enabled bucket. During a rename, the
+     * source and destination folders are locked until the long running operation
+     * completes.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + renameFolder(com.google.storage.control.v2.RenameFolderRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getRenameFolderMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Deletes a folder recursively. This operation is only applicable to a
+     * hierarchical namespace enabled bucket.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + deleteFolderRecursive(com.google.storage.control.v2.DeleteFolderRecursiveRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteFolderRecursiveMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Returns the storage layout configuration for a given bucket.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.StorageLayout> + getStorageLayout(com.google.storage.control.v2.GetStorageLayoutRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetStorageLayoutMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Creates a new managed folder.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.ManagedFolder> + createManagedFolder(com.google.storage.control.v2.CreateManagedFolderRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateManagedFolderMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Permanently deletes an empty managed folder.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + deleteManagedFolder(com.google.storage.control.v2.DeleteManagedFolderRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteManagedFolderMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Returns metadata for the specified managed folder.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.ManagedFolder> + getManagedFolder(com.google.storage.control.v2.GetManagedFolderRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetManagedFolderMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Retrieves a list of managed folders for a given bucket.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.ListManagedFoldersResponse> + listManagedFolders(com.google.storage.control.v2.ListManagedFoldersRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListManagedFoldersMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Creates an Anywhere Cache instance.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + createAnywhereCache(com.google.storage.control.v2.CreateAnywhereCacheRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateAnywhereCacheMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Updates an Anywhere Cache instance. Mutable fields include `ttl` and
+     * `admission_policy`.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + updateAnywhereCache(com.google.storage.control.v2.UpdateAnywhereCacheRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateAnywhereCacheMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Disables an Anywhere Cache instance. A disabled instance is read-only. The
+     * disablement could be revoked by calling ResumeAnywhereCache. The cache
+     * instance will be deleted automatically if it remains in the disabled state
+     * for at least one hour.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.AnywhereCache> + disableAnywhereCache(com.google.storage.control.v2.DisableAnywhereCacheRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDisableAnywhereCacheMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Pauses an Anywhere Cache instance.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.AnywhereCache> + pauseAnywhereCache(com.google.storage.control.v2.PauseAnywhereCacheRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getPauseAnywhereCacheMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Resumes a disabled or paused Anywhere Cache instance.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.AnywhereCache> + resumeAnywhereCache(com.google.storage.control.v2.ResumeAnywhereCacheRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getResumeAnywhereCacheMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Gets an Anywhere Cache instance.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.AnywhereCache> + getAnywhereCache(com.google.storage.control.v2.GetAnywhereCacheRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetAnywhereCacheMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Lists Anywhere Cache instances for a given bucket.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.ListAnywhereCachesResponse> + listAnywhereCaches(com.google.storage.control.v2.ListAnywhereCachesRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListAnywhereCachesMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Returns the Project scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.IntelligenceConfig> + getProjectIntelligenceConfig( + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetProjectIntelligenceConfigMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Updates the Project scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.IntelligenceConfig> + updateProjectIntelligenceConfig( + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateProjectIntelligenceConfigMethod(), getCallOptions()), + request); + } + + /** + * + * + *
+     * Returns the Folder scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.IntelligenceConfig> + getFolderIntelligenceConfig( + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetFolderIntelligenceConfigMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Updates the Folder scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.IntelligenceConfig> + updateFolderIntelligenceConfig( + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateFolderIntelligenceConfigMethod(), getCallOptions()), + request); + } + + /** + * + * + *
+     * Returns the Organization scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.IntelligenceConfig> + getOrganizationIntelligenceConfig( + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetOrganizationIntelligenceConfigMethod(), getCallOptions()), + request); + } + + /** + * + * + *
+     * Updates the Organization scoped singleton IntelligenceConfig resource.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.control.v2.IntelligenceConfig> + updateOrganizationIntelligenceConfig( + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateOrganizationIntelligenceConfigMethod(), getCallOptions()), + request); + } + + /** + * + * + *
+     * Gets the IAM policy for a specified bucket.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + getIamPolicy(com.google.iam.v1.GetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetIamPolicyMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Updates an IAM policy for the specified bucket.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + setIamPolicy(com.google.iam.v1.SetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getSetIamPolicyMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Tests a set of permissions on the given bucket, object, or managed folder
+     * to see which, if any, are held by the caller.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket,
+     * `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.iam.v1.TestIamPermissionsResponse> + testIamPermissions(com.google.iam.v1.TestIamPermissionsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getTestIamPermissionsMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_CREATE_FOLDER = 0; + private static final int METHODID_DELETE_FOLDER = 1; + private static final int METHODID_GET_FOLDER = 2; + private static final int METHODID_LIST_FOLDERS = 3; + private static final int METHODID_RENAME_FOLDER = 4; + private static final int METHODID_DELETE_FOLDER_RECURSIVE = 5; + private static final int METHODID_GET_STORAGE_LAYOUT = 6; + private static final int METHODID_CREATE_MANAGED_FOLDER = 7; + private static final int METHODID_DELETE_MANAGED_FOLDER = 8; + private static final int METHODID_GET_MANAGED_FOLDER = 9; + private static final int METHODID_LIST_MANAGED_FOLDERS = 10; + private static final int METHODID_CREATE_ANYWHERE_CACHE = 11; + private static final int METHODID_UPDATE_ANYWHERE_CACHE = 12; + private static final int METHODID_DISABLE_ANYWHERE_CACHE = 13; + private static final int METHODID_PAUSE_ANYWHERE_CACHE = 14; + private static final int METHODID_RESUME_ANYWHERE_CACHE = 15; + private static final int METHODID_GET_ANYWHERE_CACHE = 16; + private static final int METHODID_LIST_ANYWHERE_CACHES = 17; + private static final int METHODID_GET_PROJECT_INTELLIGENCE_CONFIG = 18; + private static final int METHODID_UPDATE_PROJECT_INTELLIGENCE_CONFIG = 19; + private static final int METHODID_GET_FOLDER_INTELLIGENCE_CONFIG = 20; + private static final int METHODID_UPDATE_FOLDER_INTELLIGENCE_CONFIG = 21; + private static final int METHODID_GET_ORGANIZATION_INTELLIGENCE_CONFIG = 22; + private static final int METHODID_UPDATE_ORGANIZATION_INTELLIGENCE_CONFIG = 23; + private static final int METHODID_GET_IAM_POLICY = 24; + private static final int METHODID_SET_IAM_POLICY = 25; + 
private static final int METHODID_TEST_IAM_PERMISSIONS = 26; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_CREATE_FOLDER: + serviceImpl.createFolder( + (com.google.storage.control.v2.CreateFolderRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_DELETE_FOLDER: + serviceImpl.deleteFolder( + (com.google.storage.control.v2.DeleteFolderRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_FOLDER: + serviceImpl.getFolder( + (com.google.storage.control.v2.GetFolderRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_LIST_FOLDERS: + serviceImpl.listFolders( + (com.google.storage.control.v2.ListFoldersRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_RENAME_FOLDER: + serviceImpl.renameFolder( + (com.google.storage.control.v2.RenameFolderRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_DELETE_FOLDER_RECURSIVE: + serviceImpl.deleteFolderRecursive( + (com.google.storage.control.v2.DeleteFolderRecursiveRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_STORAGE_LAYOUT: + serviceImpl.getStorageLayout( + (com.google.storage.control.v2.GetStorageLayoutRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case 
METHODID_CREATE_MANAGED_FOLDER: + serviceImpl.createManagedFolder( + (com.google.storage.control.v2.CreateManagedFolderRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_DELETE_MANAGED_FOLDER: + serviceImpl.deleteManagedFolder( + (com.google.storage.control.v2.DeleteManagedFolderRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_MANAGED_FOLDER: + serviceImpl.getManagedFolder( + (com.google.storage.control.v2.GetManagedFolderRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_LIST_MANAGED_FOLDERS: + serviceImpl.listManagedFolders( + (com.google.storage.control.v2.ListManagedFoldersRequest) request, + (io.grpc.stub.StreamObserver< + com.google.storage.control.v2.ListManagedFoldersResponse>) + responseObserver); + break; + case METHODID_CREATE_ANYWHERE_CACHE: + serviceImpl.createAnywhereCache( + (com.google.storage.control.v2.CreateAnywhereCacheRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_UPDATE_ANYWHERE_CACHE: + serviceImpl.updateAnywhereCache( + (com.google.storage.control.v2.UpdateAnywhereCacheRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_DISABLE_ANYWHERE_CACHE: + serviceImpl.disableAnywhereCache( + (com.google.storage.control.v2.DisableAnywhereCacheRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_PAUSE_ANYWHERE_CACHE: + serviceImpl.pauseAnywhereCache( + (com.google.storage.control.v2.PauseAnywhereCacheRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_RESUME_ANYWHERE_CACHE: + serviceImpl.resumeAnywhereCache( + (com.google.storage.control.v2.ResumeAnywhereCacheRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_ANYWHERE_CACHE: + serviceImpl.getAnywhereCache( + 
(com.google.storage.control.v2.GetAnywhereCacheRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_LIST_ANYWHERE_CACHES: + serviceImpl.listAnywhereCaches( + (com.google.storage.control.v2.ListAnywhereCachesRequest) request, + (io.grpc.stub.StreamObserver< + com.google.storage.control.v2.ListAnywhereCachesResponse>) + responseObserver); + break; + case METHODID_GET_PROJECT_INTELLIGENCE_CONFIG: + serviceImpl.getProjectIntelligenceConfig( + (com.google.storage.control.v2.GetProjectIntelligenceConfigRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_UPDATE_PROJECT_INTELLIGENCE_CONFIG: + serviceImpl.updateProjectIntelligenceConfig( + (com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_FOLDER_INTELLIGENCE_CONFIG: + serviceImpl.getFolderIntelligenceConfig( + (com.google.storage.control.v2.GetFolderIntelligenceConfigRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_UPDATE_FOLDER_INTELLIGENCE_CONFIG: + serviceImpl.updateFolderIntelligenceConfig( + (com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_ORGANIZATION_INTELLIGENCE_CONFIG: + serviceImpl.getOrganizationIntelligenceConfig( + (com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_UPDATE_ORGANIZATION_INTELLIGENCE_CONFIG: + serviceImpl.updateOrganizationIntelligenceConfig( + (com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_IAM_POLICY: + serviceImpl.getIamPolicy( + (com.google.iam.v1.GetIamPolicyRequest) request, + (io.grpc.stub.StreamObserver) 
responseObserver); + break; + case METHODID_SET_IAM_POLICY: + serviceImpl.setIamPolicy( + (com.google.iam.v1.SetIamPolicyRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_TEST_IAM_PERMISSIONS: + serviceImpl.testIamPermissions( + (com.google.iam.v1.TestIamPermissionsRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateFolderMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.CreateFolderRequest, + com.google.storage.control.v2.Folder>(service, METHODID_CREATE_FOLDER))) + .addMethod( + getDeleteFolderMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.DeleteFolderRequest, com.google.protobuf.Empty>( + service, METHODID_DELETE_FOLDER))) + .addMethod( + getGetFolderMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.GetFolderRequest, + com.google.storage.control.v2.Folder>(service, METHODID_GET_FOLDER))) + .addMethod( + getListFoldersMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.ListFoldersRequest, + com.google.storage.control.v2.ListFoldersResponse>( + service, METHODID_LIST_FOLDERS))) + .addMethod( + getRenameFolderMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.RenameFolderRequest, + com.google.longrunning.Operation>(service, 
METHODID_RENAME_FOLDER))) + .addMethod( + getDeleteFolderRecursiveMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.DeleteFolderRecursiveRequest, + com.google.longrunning.Operation>(service, METHODID_DELETE_FOLDER_RECURSIVE))) + .addMethod( + getGetStorageLayoutMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.GetStorageLayoutRequest, + com.google.storage.control.v2.StorageLayout>( + service, METHODID_GET_STORAGE_LAYOUT))) + .addMethod( + getCreateManagedFolderMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.CreateManagedFolderRequest, + com.google.storage.control.v2.ManagedFolder>( + service, METHODID_CREATE_MANAGED_FOLDER))) + .addMethod( + getDeleteManagedFolderMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.DeleteManagedFolderRequest, + com.google.protobuf.Empty>(service, METHODID_DELETE_MANAGED_FOLDER))) + .addMethod( + getGetManagedFolderMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.GetManagedFolderRequest, + com.google.storage.control.v2.ManagedFolder>( + service, METHODID_GET_MANAGED_FOLDER))) + .addMethod( + getListManagedFoldersMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.ListManagedFoldersRequest, + com.google.storage.control.v2.ListManagedFoldersResponse>( + service, METHODID_LIST_MANAGED_FOLDERS))) + .addMethod( + getCreateAnywhereCacheMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.CreateAnywhereCacheRequest, + com.google.longrunning.Operation>(service, METHODID_CREATE_ANYWHERE_CACHE))) + .addMethod( + getUpdateAnywhereCacheMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + 
com.google.storage.control.v2.UpdateAnywhereCacheRequest, + com.google.longrunning.Operation>(service, METHODID_UPDATE_ANYWHERE_CACHE))) + .addMethod( + getDisableAnywhereCacheMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.DisableAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache>( + service, METHODID_DISABLE_ANYWHERE_CACHE))) + .addMethod( + getPauseAnywhereCacheMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.PauseAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache>( + service, METHODID_PAUSE_ANYWHERE_CACHE))) + .addMethod( + getResumeAnywhereCacheMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.ResumeAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache>( + service, METHODID_RESUME_ANYWHERE_CACHE))) + .addMethod( + getGetAnywhereCacheMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.GetAnywhereCacheRequest, + com.google.storage.control.v2.AnywhereCache>( + service, METHODID_GET_ANYWHERE_CACHE))) + .addMethod( + getListAnywhereCachesMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.ListAnywhereCachesRequest, + com.google.storage.control.v2.ListAnywhereCachesResponse>( + service, METHODID_LIST_ANYWHERE_CACHES))) + .addMethod( + getGetProjectIntelligenceConfigMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig>( + service, METHODID_GET_PROJECT_INTELLIGENCE_CONFIG))) + .addMethod( + getUpdateProjectIntelligenceConfigMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest, + 
com.google.storage.control.v2.IntelligenceConfig>( + service, METHODID_UPDATE_PROJECT_INTELLIGENCE_CONFIG))) + .addMethod( + getGetFolderIntelligenceConfigMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig>( + service, METHODID_GET_FOLDER_INTELLIGENCE_CONFIG))) + .addMethod( + getUpdateFolderIntelligenceConfigMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig>( + service, METHODID_UPDATE_FOLDER_INTELLIGENCE_CONFIG))) + .addMethod( + getGetOrganizationIntelligenceConfigMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig>( + service, METHODID_GET_ORGANIZATION_INTELLIGENCE_CONFIG))) + .addMethod( + getUpdateOrganizationIntelligenceConfigMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest, + com.google.storage.control.v2.IntelligenceConfig>( + service, METHODID_UPDATE_ORGANIZATION_INTELLIGENCE_CONFIG))) + .addMethod( + getGetIamPolicyMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers( + service, METHODID_GET_IAM_POLICY))) + .addMethod( + getSetIamPolicyMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers( + service, METHODID_SET_IAM_POLICY))) + .addMethod( + getTestIamPermissionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.iam.v1.TestIamPermissionsRequest, + com.google.iam.v1.TestIamPermissionsResponse>( + service, METHODID_TEST_IAM_PERMISSIONS))) + .build(); + } + + private abstract static class StorageControlBaseDescriptorSupplier + implements 
io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + StorageControlBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.storage.control.v2.StorageControlProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("StorageControl"); + } + } + + private static final class StorageControlFileDescriptorSupplier + extends StorageControlBaseDescriptorSupplier { + StorageControlFileDescriptorSupplier() {} + } + + private static final class StorageControlMethodDescriptorSupplier + extends StorageControlBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + StorageControlMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (StorageControlGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new StorageControlFileDescriptorSupplier()) + .addMethod(getCreateFolderMethod()) + .addMethod(getDeleteFolderMethod()) + .addMethod(getGetFolderMethod()) + .addMethod(getListFoldersMethod()) + .addMethod(getRenameFolderMethod()) + .addMethod(getDeleteFolderRecursiveMethod()) + .addMethod(getGetStorageLayoutMethod()) + .addMethod(getCreateManagedFolderMethod()) + 
.addMethod(getDeleteManagedFolderMethod()) + .addMethod(getGetManagedFolderMethod()) + .addMethod(getListManagedFoldersMethod()) + .addMethod(getCreateAnywhereCacheMethod()) + .addMethod(getUpdateAnywhereCacheMethod()) + .addMethod(getDisableAnywhereCacheMethod()) + .addMethod(getPauseAnywhereCacheMethod()) + .addMethod(getResumeAnywhereCacheMethod()) + .addMethod(getGetAnywhereCacheMethod()) + .addMethod(getListAnywhereCachesMethod()) + .addMethod(getGetProjectIntelligenceConfigMethod()) + .addMethod(getUpdateProjectIntelligenceConfigMethod()) + .addMethod(getGetFolderIntelligenceConfigMethod()) + .addMethod(getUpdateFolderIntelligenceConfigMethod()) + .addMethod(getGetOrganizationIntelligenceConfigMethod()) + .addMethod(getUpdateOrganizationIntelligenceConfigMethod()) + .addMethod(getGetIamPolicyMethod()) + .addMethod(getSetIamPolicyMethod()) + .addMethod(getTestIamPermissionsMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-storage/grpc-google-cloud-storage-v2/clirr-ignored-differences.xml b/java-storage/grpc-google-cloud-storage-v2/clirr-ignored-differences.xml new file mode 100644 index 000000000000..06a66a8c0aa9 --- /dev/null +++ b/java-storage/grpc-google-cloud-storage-v2/clirr-ignored-differences.xml @@ -0,0 +1,44 @@ + + + + + + 7002 + com/google/storage/v2/* + * *Notification*(*) + + + + 7002 + com/google/storage/v2/* + * *Hmac*(*) + + + + 7002 + com/google/storage/v2/* + * *ServiceAccount*(*) + + + + 7012 + com/google/storage/v2/* + * restoreObject(*) + + + 7012 + com/google/storage/v2/* + * bidiWriteObject(*) + + + 7012 + com/google/storage/v2/* + * moveObject(*) + + + 7012 + com/google/storage/v2/* + * bidiReadObject(*) + + + diff --git a/java-storage/grpc-google-cloud-storage-v2/pom.xml b/java-storage/grpc-google-cloud-storage-v2/pom.xml new file mode 100644 index 000000000000..8c38bb1def97 --- /dev/null +++ b/java-storage/grpc-google-cloud-storage-v2/pom.xml @@ -0,0 +1,60 @@ + + 4.0.0 + com.google.api.grpc + 
grpc-google-cloud-storage-v2 + 2.64.1-SNAPSHOT + grpc-google-cloud-storage-v2 + GRPC library for grpc-google-cloud-storage-v2 + + com.google.cloud + google-cloud-storage-parent + 2.64.1-SNAPSHOT + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-cloud-storage-v2 + + + com.google.guava + guava + + + com.google.api.grpc + proto-google-iam-v1 + + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + diff --git a/java-storage/grpc-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StorageGrpc.java b/java-storage/grpc-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StorageGrpc.java new file mode 100644 index 000000000000..e3eb0230fcda --- /dev/null +++ b/java-storage/grpc-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StorageGrpc.java @@ -0,0 +1,4451 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.storage.v2; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
+ * ## API Overview and Naming Syntax
+ * The Cloud Storage gRPC API allows applications to read and write data through
+ * the abstractions of buckets and objects. For a description of these
+ * abstractions please see [Cloud Storage
+ * documentation](https://cloud.google.com/storage/docs).
+ * Resources are named as follows:
+ *   - Projects are referred to as they are defined by the Resource Manager API,
+ *     using strings like `projects/123456` or `projects/my-string-id`.
+ *   - Buckets are named using string names of the form:
+ *     `projects/{project}/buckets/{bucket}`.
+ *     For globally unique buckets, `_` might be substituted for the project.
+ *   - Objects are uniquely identified by their name along with the name of the
+ *     bucket they belong to, as separate strings in this API. For example:
+ *         ```
+ *         ReadObjectRequest {
+ *         bucket: 'projects/_/buckets/my-bucket'
+ *         object: 'my-object'
+ *         }
+ *         ```
+ * Note that object names can contain `/` characters, which are treated as
+ * any other character (no special directory semantics).
+ * 
+ */ +@io.grpc.stub.annotations.GrpcGenerated +public final class StorageGrpc { + + private StorageGrpc() {} + + public static final java.lang.String SERVICE_NAME = "google.storage.v2.Storage"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.DeleteBucketRequest, com.google.protobuf.Empty> + getDeleteBucketMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteBucket", + requestType = com.google.storage.v2.DeleteBucketRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.DeleteBucketRequest, com.google.protobuf.Empty> + getDeleteBucketMethod() { + io.grpc.MethodDescriptor + getDeleteBucketMethod; + if ((getDeleteBucketMethod = StorageGrpc.getDeleteBucketMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getDeleteBucketMethod = StorageGrpc.getDeleteBucketMethod) == null) { + StorageGrpc.getDeleteBucketMethod = + getDeleteBucketMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "DeleteBucket")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.DeleteBucketRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("DeleteBucket")) + .build(); + } + } + } + return getDeleteBucketMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.GetBucketRequest, com.google.storage.v2.Bucket> + getGetBucketMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetBucket", + requestType = com.google.storage.v2.GetBucketRequest.class, + responseType = com.google.storage.v2.Bucket.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.GetBucketRequest, com.google.storage.v2.Bucket> + getGetBucketMethod() { + io.grpc.MethodDescriptor + getGetBucketMethod; + if ((getGetBucketMethod = StorageGrpc.getGetBucketMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getGetBucketMethod = StorageGrpc.getGetBucketMethod) == null) { + StorageGrpc.getGetBucketMethod = + getGetBucketMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetBucket")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.GetBucketRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.Bucket.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("GetBucket")) + .build(); + } + } + } + return getGetBucketMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.CreateBucketRequest, com.google.storage.v2.Bucket> + getCreateBucketMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateBucket", + requestType = com.google.storage.v2.CreateBucketRequest.class, + responseType = com.google.storage.v2.Bucket.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.CreateBucketRequest, com.google.storage.v2.Bucket> + getCreateBucketMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.CreateBucketRequest, com.google.storage.v2.Bucket> + getCreateBucketMethod; + if ((getCreateBucketMethod = StorageGrpc.getCreateBucketMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getCreateBucketMethod = StorageGrpc.getCreateBucketMethod) == null) { + StorageGrpc.getCreateBucketMethod = + getCreateBucketMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateBucket")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.CreateBucketRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.Bucket.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("CreateBucket")) + .build(); + } + } + } + return getCreateBucketMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.ListBucketsRequest, com.google.storage.v2.ListBucketsResponse> + getListBucketsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListBuckets", + requestType = com.google.storage.v2.ListBucketsRequest.class, + responseType = com.google.storage.v2.ListBucketsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.ListBucketsRequest, com.google.storage.v2.ListBucketsResponse> + getListBucketsMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.ListBucketsRequest, com.google.storage.v2.ListBucketsResponse> + getListBucketsMethod; + if ((getListBucketsMethod = StorageGrpc.getListBucketsMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getListBucketsMethod = StorageGrpc.getListBucketsMethod) == null) { + StorageGrpc.getListBucketsMethod = + getListBucketsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListBuckets")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.ListBucketsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.ListBucketsResponse.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("ListBuckets")) + .build(); + } + } + } + return getListBucketsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.LockBucketRetentionPolicyRequest, com.google.storage.v2.Bucket> + getLockBucketRetentionPolicyMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "LockBucketRetentionPolicy", + requestType = com.google.storage.v2.LockBucketRetentionPolicyRequest.class, + responseType = com.google.storage.v2.Bucket.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.LockBucketRetentionPolicyRequest, com.google.storage.v2.Bucket> + getLockBucketRetentionPolicyMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.LockBucketRetentionPolicyRequest, com.google.storage.v2.Bucket> + getLockBucketRetentionPolicyMethod; + if ((getLockBucketRetentionPolicyMethod = StorageGrpc.getLockBucketRetentionPolicyMethod) + == null) { + synchronized (StorageGrpc.class) { + if ((getLockBucketRetentionPolicyMethod = StorageGrpc.getLockBucketRetentionPolicyMethod) + == null) { + StorageGrpc.getLockBucketRetentionPolicyMethod = + getLockBucketRetentionPolicyMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "LockBucketRetentionPolicy")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.LockBucketRetentionPolicyRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.Bucket.getDefaultInstance())) + .setSchemaDescriptor( + new StorageMethodDescriptorSupplier("LockBucketRetentionPolicy")) + .build(); + } + } + } + return getLockBucketRetentionPolicyMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.iam.v1.GetIamPolicyRequest, com.google.iam.v1.Policy> + getGetIamPolicyMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetIamPolicy", + requestType = com.google.iam.v1.GetIamPolicyRequest.class, + responseType = com.google.iam.v1.Policy.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.iam.v1.GetIamPolicyRequest, com.google.iam.v1.Policy> + getGetIamPolicyMethod() { + io.grpc.MethodDescriptor + getGetIamPolicyMethod; + if ((getGetIamPolicyMethod = StorageGrpc.getGetIamPolicyMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getGetIamPolicyMethod = StorageGrpc.getGetIamPolicyMethod) == null) { + StorageGrpc.getGetIamPolicyMethod = + getGetIamPolicyMethod = + io.grpc.MethodDescriptor + .newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetIamPolicy")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.GetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.Policy.getDefaultInstance())) + .setSchemaDescriptor(new 
StorageMethodDescriptorSupplier("GetIamPolicy")) + .build(); + } + } + } + return getGetIamPolicyMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.iam.v1.SetIamPolicyRequest, com.google.iam.v1.Policy> + getSetIamPolicyMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "SetIamPolicy", + requestType = com.google.iam.v1.SetIamPolicyRequest.class, + responseType = com.google.iam.v1.Policy.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.iam.v1.SetIamPolicyRequest, com.google.iam.v1.Policy> + getSetIamPolicyMethod() { + io.grpc.MethodDescriptor + getSetIamPolicyMethod; + if ((getSetIamPolicyMethod = StorageGrpc.getSetIamPolicyMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getSetIamPolicyMethod = StorageGrpc.getSetIamPolicyMethod) == null) { + StorageGrpc.getSetIamPolicyMethod = + getSetIamPolicyMethod = + io.grpc.MethodDescriptor + .newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "SetIamPolicy")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.SetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.Policy.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("SetIamPolicy")) + .build(); + } + } + } + return getSetIamPolicyMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.iam.v1.TestIamPermissionsRequest, com.google.iam.v1.TestIamPermissionsResponse> + getTestIamPermissionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "TestIamPermissions", + requestType = com.google.iam.v1.TestIamPermissionsRequest.class, + responseType = com.google.iam.v1.TestIamPermissionsResponse.class, + methodType = 
io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.iam.v1.TestIamPermissionsRequest, com.google.iam.v1.TestIamPermissionsResponse> + getTestIamPermissionsMethod() { + io.grpc.MethodDescriptor< + com.google.iam.v1.TestIamPermissionsRequest, + com.google.iam.v1.TestIamPermissionsResponse> + getTestIamPermissionsMethod; + if ((getTestIamPermissionsMethod = StorageGrpc.getTestIamPermissionsMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getTestIamPermissionsMethod = StorageGrpc.getTestIamPermissionsMethod) == null) { + StorageGrpc.getTestIamPermissionsMethod = + getTestIamPermissionsMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "TestIamPermissions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.TestIamPermissionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.TestIamPermissionsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new StorageMethodDescriptorSupplier("TestIamPermissions")) + .build(); + } + } + } + return getTestIamPermissionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.UpdateBucketRequest, com.google.storage.v2.Bucket> + getUpdateBucketMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateBucket", + requestType = com.google.storage.v2.UpdateBucketRequest.class, + responseType = com.google.storage.v2.Bucket.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.UpdateBucketRequest, com.google.storage.v2.Bucket> + getUpdateBucketMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.UpdateBucketRequest, com.google.storage.v2.Bucket> + getUpdateBucketMethod; + 
if ((getUpdateBucketMethod = StorageGrpc.getUpdateBucketMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getUpdateBucketMethod = StorageGrpc.getUpdateBucketMethod) == null) { + StorageGrpc.getUpdateBucketMethod = + getUpdateBucketMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UpdateBucket")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.UpdateBucketRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.Bucket.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("UpdateBucket")) + .build(); + } + } + } + return getUpdateBucketMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.ComposeObjectRequest, com.google.storage.v2.Object> + getComposeObjectMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ComposeObject", + requestType = com.google.storage.v2.ComposeObjectRequest.class, + responseType = com.google.storage.v2.Object.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.ComposeObjectRequest, com.google.storage.v2.Object> + getComposeObjectMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.ComposeObjectRequest, com.google.storage.v2.Object> + getComposeObjectMethod; + if ((getComposeObjectMethod = StorageGrpc.getComposeObjectMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getComposeObjectMethod = StorageGrpc.getComposeObjectMethod) == null) { + StorageGrpc.getComposeObjectMethod = + getComposeObjectMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ComposeObject")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.ComposeObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.Object.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("ComposeObject")) + .build(); + } + } + } + return getComposeObjectMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.DeleteObjectRequest, com.google.protobuf.Empty> + getDeleteObjectMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteObject", + requestType = com.google.storage.v2.DeleteObjectRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.DeleteObjectRequest, com.google.protobuf.Empty> + getDeleteObjectMethod() { + io.grpc.MethodDescriptor + getDeleteObjectMethod; + if ((getDeleteObjectMethod = StorageGrpc.getDeleteObjectMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getDeleteObjectMethod = StorageGrpc.getDeleteObjectMethod) == null) { + StorageGrpc.getDeleteObjectMethod = + getDeleteObjectMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "DeleteObject")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.DeleteObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("DeleteObject")) + .build(); + } + } + } + return getDeleteObjectMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.RestoreObjectRequest, com.google.storage.v2.Object> + getRestoreObjectMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "RestoreObject", + requestType = com.google.storage.v2.RestoreObjectRequest.class, + responseType = com.google.storage.v2.Object.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.RestoreObjectRequest, com.google.storage.v2.Object> + getRestoreObjectMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.RestoreObjectRequest, com.google.storage.v2.Object> + getRestoreObjectMethod; + if ((getRestoreObjectMethod = StorageGrpc.getRestoreObjectMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getRestoreObjectMethod = StorageGrpc.getRestoreObjectMethod) == null) { + StorageGrpc.getRestoreObjectMethod = + getRestoreObjectMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "RestoreObject")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.RestoreObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.Object.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("RestoreObject")) + .build(); + } + } + } + return getRestoreObjectMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.CancelResumableWriteRequest, + com.google.storage.v2.CancelResumableWriteResponse> + getCancelResumableWriteMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CancelResumableWrite", + requestType = com.google.storage.v2.CancelResumableWriteRequest.class, + responseType = com.google.storage.v2.CancelResumableWriteResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.CancelResumableWriteRequest, + com.google.storage.v2.CancelResumableWriteResponse> + getCancelResumableWriteMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.CancelResumableWriteRequest, + com.google.storage.v2.CancelResumableWriteResponse> + getCancelResumableWriteMethod; + if ((getCancelResumableWriteMethod = StorageGrpc.getCancelResumableWriteMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getCancelResumableWriteMethod = StorageGrpc.getCancelResumableWriteMethod) == null) { + StorageGrpc.getCancelResumableWriteMethod = + getCancelResumableWriteMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "CancelResumableWrite")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.CancelResumableWriteRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.CancelResumableWriteResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new StorageMethodDescriptorSupplier("CancelResumableWrite")) + .build(); + } + } + } + return getCancelResumableWriteMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.GetObjectRequest, com.google.storage.v2.Object> + getGetObjectMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetObject", + requestType = com.google.storage.v2.GetObjectRequest.class, + responseType = com.google.storage.v2.Object.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.GetObjectRequest, com.google.storage.v2.Object> + getGetObjectMethod() { + io.grpc.MethodDescriptor + getGetObjectMethod; + if ((getGetObjectMethod = StorageGrpc.getGetObjectMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getGetObjectMethod = StorageGrpc.getGetObjectMethod) == null) { + StorageGrpc.getGetObjectMethod = + getGetObjectMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetObject")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.GetObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.Object.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("GetObject")) + .build(); + } + } + } + return getGetObjectMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.ReadObjectRequest, com.google.storage.v2.ReadObjectResponse> + getReadObjectMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ReadObject", + requestType = com.google.storage.v2.ReadObjectRequest.class, + responseType = com.google.storage.v2.ReadObjectResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.ReadObjectRequest, com.google.storage.v2.ReadObjectResponse> + getReadObjectMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.ReadObjectRequest, com.google.storage.v2.ReadObjectResponse> + getReadObjectMethod; + if ((getReadObjectMethod = StorageGrpc.getReadObjectMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getReadObjectMethod = StorageGrpc.getReadObjectMethod) == null) { + StorageGrpc.getReadObjectMethod = + getReadObjectMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ReadObject")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.ReadObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.ReadObjectResponse.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("ReadObject")) + .build(); + } + } + } + return getReadObjectMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.BidiReadObjectRequest, com.google.storage.v2.BidiReadObjectResponse> + getBidiReadObjectMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BidiReadObject", + requestType = com.google.storage.v2.BidiReadObjectRequest.class, + responseType = com.google.storage.v2.BidiReadObjectResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.BidiReadObjectRequest, com.google.storage.v2.BidiReadObjectResponse> + getBidiReadObjectMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.BidiReadObjectRequest, + com.google.storage.v2.BidiReadObjectResponse> + getBidiReadObjectMethod; + if ((getBidiReadObjectMethod = StorageGrpc.getBidiReadObjectMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getBidiReadObjectMethod = StorageGrpc.getBidiReadObjectMethod) == null) { + StorageGrpc.getBidiReadObjectMethod = + getBidiReadObjectMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "BidiReadObject")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.BidiReadObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.BidiReadObjectResponse.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("BidiReadObject")) + .build(); + } + } + } + return getBidiReadObjectMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.UpdateObjectRequest, com.google.storage.v2.Object> + getUpdateObjectMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateObject", + requestType = com.google.storage.v2.UpdateObjectRequest.class, + responseType = com.google.storage.v2.Object.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.UpdateObjectRequest, com.google.storage.v2.Object> + getUpdateObjectMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.UpdateObjectRequest, com.google.storage.v2.Object> + getUpdateObjectMethod; + if ((getUpdateObjectMethod = StorageGrpc.getUpdateObjectMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getUpdateObjectMethod = StorageGrpc.getUpdateObjectMethod) == null) { + StorageGrpc.getUpdateObjectMethod = + getUpdateObjectMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UpdateObject")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.UpdateObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.Object.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("UpdateObject")) + .build(); + } + } + } + return getUpdateObjectMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.WriteObjectRequest, com.google.storage.v2.WriteObjectResponse> + getWriteObjectMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "WriteObject", + requestType = com.google.storage.v2.WriteObjectRequest.class, + responseType = com.google.storage.v2.WriteObjectResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.CLIENT_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.WriteObjectRequest, com.google.storage.v2.WriteObjectResponse> + getWriteObjectMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.WriteObjectRequest, com.google.storage.v2.WriteObjectResponse> + getWriteObjectMethod; + if ((getWriteObjectMethod = StorageGrpc.getWriteObjectMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getWriteObjectMethod = StorageGrpc.getWriteObjectMethod) == null) { + StorageGrpc.getWriteObjectMethod = + getWriteObjectMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.CLIENT_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "WriteObject")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.WriteObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.WriteObjectResponse.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("WriteObject")) + .build(); + } + } + } + return getWriteObjectMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.BidiWriteObjectRequest, + com.google.storage.v2.BidiWriteObjectResponse> + getBidiWriteObjectMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BidiWriteObject", + requestType = com.google.storage.v2.BidiWriteObjectRequest.class, + responseType = com.google.storage.v2.BidiWriteObjectResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.BidiWriteObjectRequest, + com.google.storage.v2.BidiWriteObjectResponse> + getBidiWriteObjectMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.BidiWriteObjectRequest, + com.google.storage.v2.BidiWriteObjectResponse> + getBidiWriteObjectMethod; + if ((getBidiWriteObjectMethod = StorageGrpc.getBidiWriteObjectMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getBidiWriteObjectMethod = StorageGrpc.getBidiWriteObjectMethod) == null) { + StorageGrpc.getBidiWriteObjectMethod = + getBidiWriteObjectMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "BidiWriteObject")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.BidiWriteObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.BidiWriteObjectResponse.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("BidiWriteObject")) + .build(); + } + } + } + return getBidiWriteObjectMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.ListObjectsRequest, com.google.storage.v2.ListObjectsResponse> + getListObjectsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListObjects", + requestType = com.google.storage.v2.ListObjectsRequest.class, + responseType = com.google.storage.v2.ListObjectsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.ListObjectsRequest, com.google.storage.v2.ListObjectsResponse> + getListObjectsMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.ListObjectsRequest, com.google.storage.v2.ListObjectsResponse> + getListObjectsMethod; + if ((getListObjectsMethod = StorageGrpc.getListObjectsMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getListObjectsMethod = StorageGrpc.getListObjectsMethod) == null) { + StorageGrpc.getListObjectsMethod = + getListObjectsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListObjects")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.ListObjectsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.ListObjectsResponse.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("ListObjects")) + .build(); + } + } + } + return getListObjectsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.RewriteObjectRequest, com.google.storage.v2.RewriteResponse> + getRewriteObjectMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "RewriteObject", + requestType = com.google.storage.v2.RewriteObjectRequest.class, + responseType = com.google.storage.v2.RewriteResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.RewriteObjectRequest, com.google.storage.v2.RewriteResponse> + getRewriteObjectMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.RewriteObjectRequest, com.google.storage.v2.RewriteResponse> + getRewriteObjectMethod; + if ((getRewriteObjectMethod = StorageGrpc.getRewriteObjectMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getRewriteObjectMethod = StorageGrpc.getRewriteObjectMethod) == null) { + StorageGrpc.getRewriteObjectMethod = + getRewriteObjectMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "RewriteObject")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.RewriteObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.RewriteResponse.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("RewriteObject")) + .build(); + } + } + } + return getRewriteObjectMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.StartResumableWriteRequest, + com.google.storage.v2.StartResumableWriteResponse> + getStartResumableWriteMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "StartResumableWrite", + requestType = com.google.storage.v2.StartResumableWriteRequest.class, + responseType = com.google.storage.v2.StartResumableWriteResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.StartResumableWriteRequest, + com.google.storage.v2.StartResumableWriteResponse> + getStartResumableWriteMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.StartResumableWriteRequest, + com.google.storage.v2.StartResumableWriteResponse> + getStartResumableWriteMethod; + if ((getStartResumableWriteMethod = StorageGrpc.getStartResumableWriteMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getStartResumableWriteMethod = StorageGrpc.getStartResumableWriteMethod) == null) { + StorageGrpc.getStartResumableWriteMethod = + getStartResumableWriteMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "StartResumableWrite")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.StartResumableWriteRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.StartResumableWriteResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new StorageMethodDescriptorSupplier("StartResumableWrite")) + .build(); + } + } + } + return getStartResumableWriteMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.QueryWriteStatusRequest, + com.google.storage.v2.QueryWriteStatusResponse> + getQueryWriteStatusMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "QueryWriteStatus", + requestType = com.google.storage.v2.QueryWriteStatusRequest.class, + responseType = com.google.storage.v2.QueryWriteStatusResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.QueryWriteStatusRequest, + com.google.storage.v2.QueryWriteStatusResponse> + getQueryWriteStatusMethod() { + io.grpc.MethodDescriptor< + com.google.storage.v2.QueryWriteStatusRequest, + com.google.storage.v2.QueryWriteStatusResponse> + getQueryWriteStatusMethod; + if ((getQueryWriteStatusMethod = StorageGrpc.getQueryWriteStatusMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getQueryWriteStatusMethod = StorageGrpc.getQueryWriteStatusMethod) == null) { + StorageGrpc.getQueryWriteStatusMethod = + getQueryWriteStatusMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "QueryWriteStatus")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.QueryWriteStatusRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.QueryWriteStatusResponse.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("QueryWriteStatus")) + .build(); + } + } + } + return getQueryWriteStatusMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.storage.v2.MoveObjectRequest, com.google.storage.v2.Object> + getMoveObjectMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "MoveObject", + requestType = com.google.storage.v2.MoveObjectRequest.class, + responseType = com.google.storage.v2.Object.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.storage.v2.MoveObjectRequest, com.google.storage.v2.Object> + getMoveObjectMethod() { + io.grpc.MethodDescriptor + getMoveObjectMethod; + if ((getMoveObjectMethod = StorageGrpc.getMoveObjectMethod) == null) { + synchronized (StorageGrpc.class) { + if ((getMoveObjectMethod = StorageGrpc.getMoveObjectMethod) == null) { + StorageGrpc.getMoveObjectMethod = + getMoveObjectMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "MoveObject")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.MoveObjectRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.storage.v2.Object.getDefaultInstance())) + .setSchemaDescriptor(new StorageMethodDescriptorSupplier("MoveObject")) + .build(); + } + } + } + return getMoveObjectMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static StorageStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public StorageStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageStub(channel, callOptions); + } + }; + return StorageStub.newStub(factory, channel); + } + + /** Creates a new blocking-style stub that supports all types of calls on the service */ + public static StorageBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public StorageBlockingV2Stub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageBlockingV2Stub(channel, callOptions); + } + }; + return StorageBlockingV2Stub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static StorageBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public StorageBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new 
StorageBlockingStub(channel, callOptions); + } + }; + return StorageBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static StorageFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public StorageFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageFutureStub(channel, callOptions); + } + }; + return StorageFutureStub.newStub(factory, channel); + } + + /** + * + * + *
+   * ## API Overview and Naming Syntax
+   * The Cloud Storage gRPC API allows applications to read and write data through
+   * the abstractions of buckets and objects. For a description of these
+   * abstractions please see [Cloud Storage
+   * documentation](https://cloud.google.com/storage/docs).
+   * Resources are named as follows:
+   *   - Projects are referred to as they are defined by the Resource Manager API,
+   *     using strings like `projects/123456` or `projects/my-string-id`.
+   *   - Buckets are named using string names of the form:
+   *     `projects/{project}/buckets/{bucket}`.
+   *     For globally unique buckets, `_` might be substituted for the project.
+   *   - Objects are uniquely identified by their name along with the name of the
+   *     bucket they belong to, as separate strings in this API. For example:
+   *         ```
+   *         ReadObjectRequest {
+   *         bucket: 'projects/_/buckets/my-bucket'
+   *         object: 'my-object'
+   *         }
+   *         ```
+   * Note that object names can contain `/` characters, which are treated as
+   * any other character (no special directory semantics).
+   * 
+ */ + public interface AsyncService { + + /** + * + * + *
+     * Permanently deletes an empty bucket.
+     * The request fails if there are any live or
+     * noncurrent objects in the bucket, but the request succeeds if the
+     * bucket only contains soft-deleted objects or incomplete uploads, such
+     * as ongoing XML API multipart uploads. Does not permanently delete
+     * soft-deleted objects.
+     * When this API is used to delete a bucket containing an object that has a
+     * soft delete policy
+     * enabled, the object becomes soft deleted, and the
+     * `softDeleteTime` and `hardDeleteTime` properties are set on the
+     * object.
+     * Objects and multipart uploads that were in the bucket at the time of
+     * deletion are also retained for the specified retention duration. When
+     * a soft-deleted bucket reaches the end of its retention duration, it
+     * is permanently deleted. The `hardDeleteTime` of the bucket always
+     * equals
+     * or exceeds the expiration time of the last soft-deleted object in the
+     * bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.delete` IAM permission on the bucket.
+     * 
+ */ + default void deleteBucket( + com.google.storage.v2.DeleteBucketRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteBucketMethod(), responseObserver); + } + + /** + * + * + *
+     * Returns metadata for the specified bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.get`
+     * IAM permission on
+     * the bucket. Additionally, to return specific bucket metadata, the
+     * authenticated user must have the following permissions:
+     * - To return the IAM policies: `storage.buckets.getIamPolicy`
+     * - To return the bucket IP filtering rules: `storage.buckets.getIpFilter`
+     * 
+ */ + default void getBucket( + com.google.storage.v2.GetBucketRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetBucketMethod(), responseObserver); + } + + /** + * + * + *
+     * Creates a new bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.create` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated user
+     * must have the following permissions:
+     * - To enable object retention using the `enableObjectRetention` query
+     * parameter: `storage.buckets.enableObjectRetention`
+     * - To set the bucket IP filtering rules: `storage.buckets.setIpFilter`
+     * 
+ */ + default void createBucket( + com.google.storage.v2.CreateBucketRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateBucketMethod(), responseObserver); + } + + /** + * + * + *
+     * Retrieves a list of buckets for a given project, ordered
+     * lexicographically by name.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.list` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated
+     * user must have the following permissions:
+     * - To list the IAM policies: `storage.buckets.getIamPolicy`
+     * - To list the bucket IP filtering rules: `storage.buckets.getIpFilter`
+     * 
+ */ + default void listBuckets( + com.google.storage.v2.ListBucketsRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListBucketsMethod(), responseObserver); + } + + /** + * + * + *
+     * Permanently locks the retention
+     * policy that is
+     * currently applied to the specified bucket.
+     * Caution: Locking a bucket is an
+     * irreversible action. Once you lock a bucket:
+     * - You cannot remove the retention policy from the bucket.
+     * - You cannot decrease the retention period for the policy.
+     * Once locked, you must delete the entire bucket in order to remove the
+     * bucket's retention policy. However, before you can delete the bucket, you
+     * must delete all the objects in the bucket, which is only
+     * possible if all the objects have reached the retention period set by the
+     * retention policy.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.update` IAM permission on the bucket.
+     * 
+ */ + default void lockBucketRetentionPolicy( + com.google.storage.v2.LockBucketRetentionPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getLockBucketRetentionPolicyMethod(), responseObserver); + } + + /** + * + * + *
+     * Gets the IAM policy for a specified bucket or managed folder.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.getIamPolicy` on the bucket or
+     * `storage.managedFolders.getIamPolicy` IAM permission on the
+     * managed folder.
+     * 
+ */ + default void getIamPolicy( + com.google.iam.v1.GetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetIamPolicyMethod(), responseObserver); + } + + /** + * + * + *
+     * Updates an IAM policy for the specified bucket or managed folder.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + default void setIamPolicy( + com.google.iam.v1.SetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getSetIamPolicyMethod(), responseObserver); + } + + /** + * + * + *
+     * Tests a set of permissions on the given bucket, object, or managed folder
+     * to see which, if any, are held by the caller. The `resource` field in the
+     * request should be `projects/_/buckets/{bucket}` for a bucket,
+     * `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + default void testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getTestIamPermissionsMethod(), responseObserver); + } + + /** + * + * + *
+     * Updates a bucket. Changes to the bucket are readable immediately after
+     * writing, but configuration changes might take time to propagate. This
+     * method supports `patch` semantics.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.update` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated user
+     * must have the following permissions:
+     * - To set bucket IP filtering rules: `storage.buckets.setIpFilter`
+     * - To update public access prevention policies or access control lists
+     * (ACLs): `storage.buckets.setIamPolicy`
+     * 
+ */ + default void updateBucket( + com.google.storage.v2.UpdateBucketRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateBucketMethod(), responseObserver); + } + + /** + * + * + *
+     * Concatenates a list of existing objects into a new object in the same
+     * bucket. The existing source objects are unaffected by this operation.
+     * **IAM Permissions**:
+     * Requires the `storage.objects.create` and `storage.objects.get` IAM
+     * permissions to use this method. If the new composite object
+     * overwrites an existing object, the authenticated user must also have
+     * the `storage.objects.delete` permission. If the request body includes
+     * the retention property, the authenticated user must also have the
+     * `storage.objects.setRetention` IAM permission.
+     * 
+ */ + default void composeObject( + com.google.storage.v2.ComposeObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getComposeObjectMethod(), responseObserver); + } + + /** + * + * + *
+     * Deletes an object and its metadata. Deletions are permanent if versioning
+     * is not enabled for the bucket, or if the generation parameter is used, or
+     * if soft delete is not
+     * enabled for the bucket.
+     * When this API is used to delete an object from a bucket that has soft
+     * delete policy enabled, the object becomes soft deleted, and the
+     * `softDeleteTime` and `hardDeleteTime` properties are set on the object.
+     * This API cannot be used to permanently delete soft-deleted objects.
+     * Soft-deleted objects are permanently deleted according to their
+     * `hardDeleteTime`.
+     * You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject]
+     * API to restore soft-deleted objects until the soft delete retention period
+     * has passed.
+     * **IAM Permissions**:
+     * Requires `storage.objects.delete` IAM permission on the bucket.
+     * 
+ */ + default void deleteObject( + com.google.storage.v2.DeleteObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteObjectMethod(), responseObserver); + } + + /** + * + * + *
+     * Restores a
+     * soft-deleted object.
+     * When a soft-deleted object is restored, a new copy of that object is
+     * created in the same bucket and inherits the same metadata as the
+     * soft-deleted object. The inherited metadata is the metadata that existed
+     * when the original object became soft deleted, with the following
+     * exceptions:
+     *   - The `createTime` of the new object is set to the time at which the
+     *   soft-deleted object was restored.
+     *   - The `softDeleteTime` and `hardDeleteTime` values are cleared.
+     *   - A new generation is assigned and the metageneration is reset to 1.
+     *   - If the soft-deleted object was in a bucket that had Autoclass enabled,
+     *   the new object is
+     *     restored to Standard storage.
+     *   - The restored object inherits the bucket's default object ACL, unless
+     *   `copySourceAcl` is `true`.
+     * If a live object using the same name already exists in the bucket and
+     * becomes overwritten, the live object becomes a noncurrent object if Object
+     * Versioning is enabled on the bucket. If Object Versioning is not enabled,
+     * the live object becomes soft deleted.
+     * **IAM Permissions**:
+     * Requires the following IAM permissions to use this method:
+     *   - `storage.objects.restore`
+     *   - `storage.objects.create`
+     *   - `storage.objects.delete` (only required if overwriting an existing
+     *   object)
+     *   - `storage.objects.getIamPolicy` (only required if `projection` is `full`
+     *   and the relevant bucket
+     *     has uniform bucket-level access disabled)
+     *   - `storage.objects.setIamPolicy` (only required if `copySourceAcl` is
+     *   `true` and the relevant
+     *     bucket has uniform bucket-level access disabled)
+     * 
+ */ + default void restoreObject( + com.google.storage.v2.RestoreObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getRestoreObjectMethod(), responseObserver); + } + + /** + * + * + *
+     * Cancels an in-progress resumable upload.
+     * Any attempts to write to the resumable upload after cancelling the upload
+     * fail.
+     * The behavior for any in-progress write operations is not guaranteed;
+     * they could either complete before the cancellation or fail if the
+     * cancellation completes first.
+     * 
+ */ + default void cancelResumableWrite( + com.google.storage.v2.CancelResumableWriteRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCancelResumableWriteMethod(), responseObserver); + } + + /** + * + * + *
+     * Retrieves object metadata.
+     * **IAM Permissions**:
+     * Requires `storage.objects.get` IAM permission on the bucket.
+     * To return object ACLs, the authenticated user must also have
+     * the `storage.objects.getIamPolicy` permission.
+     * 
+ */ + default void getObject( + com.google.storage.v2.GetObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetObjectMethod(), responseObserver); + } + + /** + * + * + *
+     * Retrieves object data.
+     * **IAM Permissions**:
+     * Requires `storage.objects.get` IAM permission on the bucket.
+     * 
+ */ + default void readObject( + com.google.storage.v2.ReadObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getReadObjectMethod(), responseObserver); + } + + /** + * + * + *
+     * Reads an object's data.
+     * This bi-directional API reads data from an object, allowing you to request
+     * multiple data ranges within a single stream, even across several messages.
+     * If an error occurs with any request, the stream closes with a relevant
+     * error code. Since you can have multiple outstanding requests, the error
+     * response includes a `BidiReadObjectError` proto in its `details` field,
+     * reporting the specific error, if any, for each pending `read_id`.
+     * **IAM Permissions**:
+     * Requires `storage.objects.get` IAM permission on the bucket.
+     * 
+ */ + default io.grpc.stub.StreamObserver bidiReadObject( + io.grpc.stub.StreamObserver + responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall( + getBidiReadObjectMethod(), responseObserver); + } + + /** + * + * + *
+     * Updates an object's metadata.
+     * Equivalent to JSON API's `storage.objects.patch` method.
+     * **IAM Permissions**:
+     * Requires `storage.objects.update` IAM permission on the bucket.
+     * 
+ */ + default void updateObject( + com.google.storage.v2.UpdateObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateObjectMethod(), responseObserver); + } + + /** + * + * + *
+     * Stores a new object and metadata.
+     * An object can be written either in a single message stream or in a
+     * resumable sequence of message streams. To write using a single stream,
+     * the client should include in the first message of the stream an
+     * `WriteObjectSpec` describing the destination bucket, object, and any
+     * preconditions. Additionally, the final message must set 'finish_write' to
+     * true, or else it is an error.
+     * For a resumable write, the client should instead call
+     * `StartResumableWrite()`, populating a `WriteObjectSpec` into that request.
+     * They should then attach the returned `upload_id` to the first message of
+     * each following call to `WriteObject`. If the stream is closed before
+     * finishing the upload (either explicitly by the client or due to a network
+     * error or an error response from the server), the client should do as
+     * follows:
+     *   - Check the result Status of the stream, to determine if writing can be
+     *     resumed on this stream or must be restarted from scratch (by calling
+     *     `StartResumableWrite()`). The resumable errors are `DEADLINE_EXCEEDED`,
+     *     `INTERNAL`, and `UNAVAILABLE`. For each case, the client should use
+     *     binary exponential backoff before retrying.  Additionally, writes can
+     *     be resumed after `RESOURCE_EXHAUSTED` errors, but only after taking
+     *     appropriate measures, which might include reducing aggregate send rate
+     *     across clients and/or requesting a quota increase for your project.
+     *   - If the call to `WriteObject` returns `ABORTED`, that indicates
+     *     concurrent attempts to update the resumable write, caused either by
+     *     multiple racing clients or by a single client where the previous
+     *     request was timed out on the client side but nonetheless reached the
+     *     server. In this case the client should take steps to prevent further
+     *     concurrent writes. For example, increase the timeouts and stop using
+     *     more than one process to perform the upload. Follow the steps below for
+     *     resuming the upload.
+     *   - For resumable errors, the client should call `QueryWriteStatus()` and
+     *     then continue writing from the returned `persisted_size`. This might be
+     *     less than the amount of data the client previously sent. Note also that
+     *     it is acceptable to send data starting at an offset earlier than the
+     *     returned `persisted_size`; in this case, the service skips data at
+     *     offsets that were already persisted (without checking that it matches
+     *     the previously written data), and write only the data starting from the
+     *     persisted offset. Even though the data isn't written, it might still
+     *     incur a performance cost over resuming at the correct write offset.
+     *     This behavior can make client-side handling simpler in some cases.
+     *   - Clients must only send data that is a multiple of 256 KiB per message,
+     *     unless the object is being finished with `finish_write` set to `true`.
+     * The service does not view the object as complete until the client has
+     * sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any
+     * requests on a stream after sending a request with `finish_write` set to
+     * `true` causes an error. The client must check the response it
+     * receives to determine how much data the service is able to commit and
+     * whether the service views the object as complete.
+     * Attempting to resume an already finalized object results in an `OK`
+     * status, with a `WriteObjectResponse` containing the finalized object's
+     * metadata.
+     * Alternatively, you can use the `BidiWriteObject` operation to write an
+     * object with controls over flushing and the ability to fetch the ability to
+     * determine the current persisted size.
+     * **IAM Permissions**:
+     * Requires `storage.objects.create`
+     * IAM permission on
+     * the bucket.
+     * 
+ */ + default io.grpc.stub.StreamObserver writeObject( + io.grpc.stub.StreamObserver responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall( + getWriteObjectMethod(), responseObserver); + } + + /** + * + * + *
+     * Stores a new object and metadata.
+     * This is similar to the `WriteObject` call with the added support for
+     * manual flushing of persisted state, and the ability to determine current
+     * persisted size without closing the stream.
+     * The client might specify one or both of the `state_lookup` and `flush`
+     * fields in each `BidiWriteObjectRequest`. If `flush` is specified, the data
+     * written so far is persisted to storage. If `state_lookup` is specified, the
+     * service responds with a `BidiWriteObjectResponse` that contains the
+     * persisted size. If both `flush` and `state_lookup` are specified, the flush
+     * always occurs before a `state_lookup`, so that both might be set in the
+     * same request and the returned state is the state of the object
+     * post-flush. When the stream is closed, a `BidiWriteObjectResponse`
+     * is always sent to the client, regardless of the value of `state_lookup`.
+     * 
+ */ + default io.grpc.stub.StreamObserver + bidiWriteObject( + io.grpc.stub.StreamObserver + responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall( + getBidiWriteObjectMethod(), responseObserver); + } + + /** + * + * + *
+     * Retrieves a list of objects matching the criteria.
+     * **IAM Permissions**:
+     * The authenticated user requires `storage.objects.list`
+     * IAM permission to use this method. To return object ACLs, the
+     * authenticated user must also
+     * have the `storage.objects.getIamPolicy` permission.
+     * 
+ */ + default void listObjects( + com.google.storage.v2.ListObjectsRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListObjectsMethod(), responseObserver); + } + + /** + * + * + *
+     * Rewrites a source object to a destination object. Optionally overrides
+     * metadata.
+     * 
+ */ + default void rewriteObject( + com.google.storage.v2.RewriteObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getRewriteObjectMethod(), responseObserver); + } + + /** + * + * + *
+     * Starts a resumable write operation. This
+     * method is part of the Resumable
+     * upload feature.
+     * This allows you to upload large objects in multiple chunks, which is more
+     * resilient to network interruptions than a single upload. The validity
+     * duration of the write operation, and the consequences of it becoming
+     * invalid, are service-dependent.
+     * **IAM Permissions**:
+     * Requires `storage.objects.create` IAM permission on the bucket.
+     * 
+ */ + default void startResumableWrite( + com.google.storage.v2.StartResumableWriteRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getStartResumableWriteMethod(), responseObserver); + } + + /** + * + * + *
+     * Determines the `persisted_size` of an object that is being written. This
+     * method is part of the resumable
+     * upload feature.
+     * The returned value is the size of the object that has been persisted so
+     * far. The value can be used as the `write_offset` for the next `Write()`
+     * call.
+     * If the object does not exist, meaning if it was deleted, or the
+     * first `Write()` has not yet reached the service, this method returns the
+     * error `NOT_FOUND`.
+     * This method is useful for clients that buffer data and need to know which
+     * data can be safely evicted. The client can call `QueryWriteStatus()` at any
+     * time to determine how much data has been logged for this object.
+     * For any sequence of `QueryWriteStatus()` calls for a given
+     * object name, the sequence of returned `persisted_size` values are
+     * non-decreasing.
+     * 
+ */ + default void queryWriteStatus( + com.google.storage.v2.QueryWriteStatusRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getQueryWriteStatusMethod(), responseObserver); + } + + /** + * + * + *
+     * Moves the source object to the destination object in the same bucket.
+     * This operation moves a source object to a destination object in the
+     * same bucket by renaming the object. The move itself is an atomic
+     * transaction, ensuring all steps either complete successfully or no
+     * changes are made.
+     * **IAM Permissions**:
+     * Requires the following IAM permissions to use this method:
+     *   - `storage.objects.move`
+     *   - `storage.objects.create`
+     *   - `storage.objects.delete` (only required if overwriting an existing
+     *   object)
+     * 
+ */ + default void moveObject( + com.google.storage.v2.MoveObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getMoveObjectMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service Storage. + * + *
+   * ## API Overview and Naming Syntax
+   * The Cloud Storage gRPC API allows applications to read and write data through
+   * the abstractions of buckets and objects. For a description of these
+   * abstractions please see [Cloud Storage
+   * documentation](https://cloud.google.com/storage/docs).
+   * Resources are named as follows:
+   *   - Projects are referred to as they are defined by the Resource Manager API,
+   *     using strings like `projects/123456` or `projects/my-string-id`.
+   *   - Buckets are named using string names of the form:
+   *     `projects/{project}/buckets/{bucket}`.
+   *     For globally unique buckets, `_` might be substituted for the project.
+   *   - Objects are uniquely identified by their name along with the name of the
+   *     bucket they belong to, as separate strings in this API. For example:
+   *         ```
+   *         ReadObjectRequest {
+   *         bucket: 'projects/_/buckets/my-bucket'
+   *         object: 'my-object'
+   *         }
+   *         ```
+   * Note that object names can contain `/` characters, which are treated as
+   * any other character (no special directory semantics).
+   * 
+ */ + public abstract static class StorageImplBase implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return StorageGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service Storage. + * + *
+   * ## API Overview and Naming Syntax
+   * The Cloud Storage gRPC API allows applications to read and write data through
+   * the abstractions of buckets and objects. For a description of these
+   * abstractions please see [Cloud Storage
+   * documentation](https://cloud.google.com/storage/docs).
+   * Resources are named as follows:
+   *   - Projects are referred to as they are defined by the Resource Manager API,
+   *     using strings like `projects/123456` or `projects/my-string-id`.
+   *   - Buckets are named using string names of the form:
+   *     `projects/{project}/buckets/{bucket}`.
+   *     For globally unique buckets, `_` might be substituted for the project.
+   *   - Objects are uniquely identified by their name along with the name of the
+   *     bucket they belong to, as separate strings in this API. For example:
+   *         ```
+   *         ReadObjectRequest {
+   *         bucket: 'projects/_/buckets/my-bucket'
+   *         object: 'my-object'
+   *         }
+   *         ```
+   * Note that object names can contain `/` characters, which are treated as
+   * any other character (no special directory semantics).
+   * 
+ */ + public static final class StorageStub extends io.grpc.stub.AbstractAsyncStub { + private StorageStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected StorageStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageStub(channel, callOptions); + } + + /** + * + * + *
+     * Permanently deletes an empty bucket.
+     * The request fails if there are any live or
+     * noncurrent objects in the bucket, but the request succeeds if the
+     * bucket only contains soft-deleted objects or incomplete uploads, such
+     * as ongoing XML API multipart uploads. Does not permanently delete
+     * soft-deleted objects.
+     * When this API is used to delete a bucket containing an object that has a
+     * soft delete policy
+     * enabled, the object becomes soft deleted, and the
+     * `softDeleteTime` and `hardDeleteTime` properties are set on the
+     * object.
+     * Objects and multipart uploads that were in the bucket at the time of
+     * deletion are also retained for the specified retention duration. When
+     * a soft-deleted bucket reaches the end of its retention duration, it
+     * is permanently deleted. The `hardDeleteTime` of the bucket always
+     * equals
+     * or exceeds the expiration time of the last soft-deleted object in the
+     * bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.delete` IAM permission on the bucket.
+     * 
+ */ + public void deleteBucket( + com.google.storage.v2.DeleteBucketRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteBucketMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Returns metadata for the specified bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.get`
+     * IAM permission on
+     * the bucket. Additionally, to return specific bucket metadata, the
+     * authenticated user must have the following permissions:
+     * - To return the IAM policies: `storage.buckets.getIamPolicy`
+     * - To return the bucket IP filtering rules: `storage.buckets.getIpFilter`
+     * 
+ */ + public void getBucket( + com.google.storage.v2.GetBucketRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetBucketMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
+     * Creates a new bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.create` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated user
+     * must have the following permissions:
+     * - To enable object retention using the `enableObjectRetention` query
+     * parameter: `storage.buckets.enableObjectRetention`
+     * - To set the bucket IP filtering rules: `storage.buckets.setIpFilter`
+     * 
+ */ + public void createBucket( + com.google.storage.v2.CreateBucketRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateBucketMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Retrieves a list of buckets for a given project, ordered
+     * lexicographically by name.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.list` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated
+     * user must have the following permissions:
+     * - To list the IAM policies: `storage.buckets.getIamPolicy`
+     * - To list the bucket IP filtering rules: `storage.buckets.getIpFilter`
+     * 
+ */ + public void listBuckets( + com.google.storage.v2.ListBucketsRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListBucketsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Permanently locks the retention
+     * policy that is
+     * currently applied to the specified bucket.
+     * Caution: Locking a bucket is an
+     * irreversible action. Once you lock a bucket:
+     * - You cannot remove the retention policy from the bucket.
+     * - You cannot decrease the retention period for the policy.
+     * Once locked, you must delete the entire bucket in order to remove the
+     * bucket's retention policy. However, before you can delete the bucket, you
+     * must delete all the objects in the bucket, which is only
+     * possible if all the objects have reached the retention period set by the
+     * retention policy.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.update` IAM permission on the bucket.
+     * 
+ */ + public void lockBucketRetentionPolicy( + com.google.storage.v2.LockBucketRetentionPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getLockBucketRetentionPolicyMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Gets the IAM policy for a specified bucket or managed folder.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.getIamPolicy` on the bucket or
+     * `storage.managedFolders.getIamPolicy` IAM permission on the
+     * managed folder.
+     * 
+ */ + public void getIamPolicy( + com.google.iam.v1.GetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetIamPolicyMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Updates an IAM policy for the specified bucket or managed folder.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public void setIamPolicy( + com.google.iam.v1.SetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getSetIamPolicyMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Tests a set of permissions on the given bucket, object, or managed folder
+     * to see which, if any, are held by the caller. The `resource` field in the
+     * request should be `projects/_/buckets/{bucket}` for a bucket,
+     * `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public void testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getTestIamPermissionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Updates a bucket. Changes to the bucket are readable immediately after
+     * writing, but configuration changes might take time to propagate. This
+     * method supports `patch` semantics.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.update` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated user
+     * must have the following permissions:
+     * - To set bucket IP filtering rules: `storage.buckets.setIpFilter`
+     * - To update public access prevention policies or access control lists
+     * (ACLs): `storage.buckets.setIamPolicy`
+     * 
+ */ + public void updateBucket( + com.google.storage.v2.UpdateBucketRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateBucketMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Concatenates a list of existing objects into a new object in the same
+     * bucket. The existing source objects are unaffected by this operation.
+     * **IAM Permissions**:
+     * Requires the `storage.objects.create` and `storage.objects.get` IAM
+     * permissions to use this method. If the new composite object
+     * overwrites an existing object, the authenticated user must also have
+     * the `storage.objects.delete` permission. If the request body includes
+     * the retention property, the authenticated user must also have the
+     * `storage.objects.setRetention` IAM permission.
+     * 
+ */ + public void composeObject( + com.google.storage.v2.ComposeObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getComposeObjectMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Deletes an object and its metadata. Deletions are permanent if versioning
+     * is not enabled for the bucket, or if the generation parameter is used, or
+     * if soft delete is not
+     * enabled for the bucket.
+     * When this API is used to delete an object from a bucket that has soft
+     * delete policy enabled, the object becomes soft deleted, and the
+     * `softDeleteTime` and `hardDeleteTime` properties are set on the object.
+     * This API cannot be used to permanently delete soft-deleted objects.
+     * Soft-deleted objects are permanently deleted according to their
+     * `hardDeleteTime`.
+     * You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject]
+     * API to restore soft-deleted objects until the soft delete retention period
+     * has passed.
+     * **IAM Permissions**:
+     * Requires `storage.objects.delete` IAM permission on the bucket.
+     * 
+ */ + public void deleteObject( + com.google.storage.v2.DeleteObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteObjectMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Restores a
+     * soft-deleted object.
+     * When a soft-deleted object is restored, a new copy of that object is
+     * created in the same bucket and inherits the same metadata as the
+     * soft-deleted object. The inherited metadata is the metadata that existed
+     * when the original object became soft deleted, with the following
+     * exceptions:
+     *   - The `createTime` of the new object is set to the time at which the
+     *   soft-deleted object was restored.
+     *   - The `softDeleteTime` and `hardDeleteTime` values are cleared.
+     *   - A new generation is assigned and the metageneration is reset to 1.
+     *   - If the soft-deleted object was in a bucket that had Autoclass enabled,
+     *   the new object is
+     *     restored to Standard storage.
+     *   - The restored object inherits the bucket's default object ACL, unless
+     *   `copySourceAcl` is `true`.
+     * If a live object using the same name already exists in the bucket and
+     * becomes overwritten, the live object becomes a noncurrent object if Object
+     * Versioning is enabled on the bucket. If Object Versioning is not enabled,
+     * the live object becomes soft deleted.
+     * **IAM Permissions**:
+     * Requires the following IAM permissions to use this method:
+     *   - `storage.objects.restore`
+     *   - `storage.objects.create`
+     *   - `storage.objects.delete` (only required if overwriting an existing
+     *   object)
+     *   - `storage.objects.getIamPolicy` (only required if `projection` is `full`
+     *   and the relevant bucket
+     *     has uniform bucket-level access disabled)
+     *   - `storage.objects.setIamPolicy` (only required if `copySourceAcl` is
+     *   `true` and the relevant
+     *     bucket has uniform bucket-level access disabled)
+     * 
+ */ + public void restoreObject( + com.google.storage.v2.RestoreObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getRestoreObjectMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Cancels an in-progress resumable upload.
+     * Any attempts to write to the resumable upload after cancelling the upload
+     * fail.
+     * The behavior for any in-progress write operations is not guaranteed;
+     * they could either complete before the cancellation or fail if the
+     * cancellation completes first.
+     * 
+ */ + public void cancelResumableWrite( + com.google.storage.v2.CancelResumableWriteRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCancelResumableWriteMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Retrieves object metadata.
+     * **IAM Permissions**:
+     * Requires `storage.objects.get` IAM permission on the bucket.
+     * To return object ACLs, the authenticated user must also have
+     * the `storage.objects.getIamPolicy` permission.
+     * 
+ */ + public void getObject( + com.google.storage.v2.GetObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetObjectMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
+     * Retrieves object data.
+     * **IAM Permissions**:
+     * Requires `storage.objects.get` IAM permission on the bucket.
+     * 
+ */ + public void readObject( + com.google.storage.v2.ReadObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncServerStreamingCall( + getChannel().newCall(getReadObjectMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
+     * Reads an object's data.
+     * This bi-directional API reads data from an object, allowing you to request
+     * multiple data ranges within a single stream, even across several messages.
+     * If an error occurs with any request, the stream closes with a relevant
+     * error code. Since you can have multiple outstanding requests, the error
+     * response includes a `BidiReadObjectError` proto in its `details` field,
+     * reporting the specific error, if any, for each pending `read_id`.
+     * **IAM Permissions**:
+     * Requires `storage.objects.get` IAM permission on the bucket.
+     * 
+ */ + public io.grpc.stub.StreamObserver bidiReadObject( + io.grpc.stub.StreamObserver + responseObserver) { + return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( + getChannel().newCall(getBidiReadObjectMethod(), getCallOptions()), responseObserver); + } + + /** + * + * + *
+     * Updates an object's metadata.
+     * Equivalent to JSON API's `storage.objects.patch` method.
+     * **IAM Permissions**:
+     * Requires `storage.objects.update` IAM permission on the bucket.
+     * 
+ */ + public void updateObject( + com.google.storage.v2.UpdateObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateObjectMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Stores a new object and metadata.
+     * An object can be written either in a single message stream or in a
+     * resumable sequence of message streams. To write using a single stream,
+     * the client should include in the first message of the stream an
+     * `WriteObjectSpec` describing the destination bucket, object, and any
+     * preconditions. Additionally, the final message must set 'finish_write' to
+     * true, or else it is an error.
+     * For a resumable write, the client should instead call
+     * `StartResumableWrite()`, populating a `WriteObjectSpec` into that request.
+     * They should then attach the returned `upload_id` to the first message of
+     * each following call to `WriteObject`. If the stream is closed before
+     * finishing the upload (either explicitly by the client or due to a network
+     * error or an error response from the server), the client should do as
+     * follows:
+     *   - Check the result Status of the stream, to determine if writing can be
+     *     resumed on this stream or must be restarted from scratch (by calling
+     *     `StartResumableWrite()`). The resumable errors are `DEADLINE_EXCEEDED`,
+     *     `INTERNAL`, and `UNAVAILABLE`. For each case, the client should use
+     *     binary exponential backoff before retrying.  Additionally, writes can
+     *     be resumed after `RESOURCE_EXHAUSTED` errors, but only after taking
+     *     appropriate measures, which might include reducing aggregate send rate
+     *     across clients and/or requesting a quota increase for your project.
+     *   - If the call to `WriteObject` returns `ABORTED`, that indicates
+     *     concurrent attempts to update the resumable write, caused either by
+     *     multiple racing clients or by a single client where the previous
+     *     request was timed out on the client side but nonetheless reached the
+     *     server. In this case the client should take steps to prevent further
+     *     concurrent writes. For example, increase the timeouts and stop using
+     *     more than one process to perform the upload. Follow the steps below for
+     *     resuming the upload.
+     *   - For resumable errors, the client should call `QueryWriteStatus()` and
+     *     then continue writing from the returned `persisted_size`. This might be
+     *     less than the amount of data the client previously sent. Note also that
+     *     it is acceptable to send data starting at an offset earlier than the
+     *     returned `persisted_size`; in this case, the service skips data at
+     *     offsets that were already persisted (without checking that it matches
+     *     the previously written data), and writes only the data starting from the
+     *     persisted offset. Even though the data isn't written, it might still
+     *     incur a performance cost over resuming at the correct write offset.
+     *     This behavior can make client-side handling simpler in some cases.
+     *   - Clients must only send data that is a multiple of 256 KiB per message,
+     *     unless the object is being finished with `finish_write` set to `true`.
+     * The service does not view the object as complete until the client has
+     * sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any
+     * requests on a stream after sending a request with `finish_write` set to
+     * `true` causes an error. The client must check the response it
+     * receives to determine how much data the service is able to commit and
+     * whether the service views the object as complete.
+     * Attempting to resume an already finalized object results in an `OK`
+     * status, with a `WriteObjectResponse` containing the finalized object's
+     * metadata.
+     * Alternatively, you can use the `BidiWriteObject` operation to write an
+     * object with controls over flushing and the ability to determine the
+     * current persisted size.
+     * **IAM Permissions**:
+     * Requires `storage.objects.create`
+     * IAM permission on
+     * the bucket.
+     * 
+ */ + public io.grpc.stub.StreamObserver writeObject( + io.grpc.stub.StreamObserver responseObserver) { + return io.grpc.stub.ClientCalls.asyncClientStreamingCall( + getChannel().newCall(getWriteObjectMethod(), getCallOptions()), responseObserver); + } + + /** + * + * + *
+     * Stores a new object and metadata.
+     * This is similar to the `WriteObject` call with the added support for
+     * manual flushing of persisted state, and the ability to determine current
+     * persisted size without closing the stream.
+     * The client might specify one or both of the `state_lookup` and `flush`
+     * fields in each `BidiWriteObjectRequest`. If `flush` is specified, the data
+     * written so far is persisted to storage. If `state_lookup` is specified, the
+     * service responds with a `BidiWriteObjectResponse` that contains the
+     * persisted size. If both `flush` and `state_lookup` are specified, the flush
+     * always occurs before a `state_lookup`, so that both might be set in the
+     * same request and the returned state is the state of the object
+     * post-flush. When the stream is closed, a `BidiWriteObjectResponse`
+     * is always sent to the client, regardless of the value of `state_lookup`.
+     * 
+ */ + public io.grpc.stub.StreamObserver + bidiWriteObject( + io.grpc.stub.StreamObserver + responseObserver) { + return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( + getChannel().newCall(getBidiWriteObjectMethod(), getCallOptions()), responseObserver); + } + + /** + * + * + *
+     * Retrieves a list of objects matching the criteria.
+     * **IAM Permissions**:
+     * The authenticated user requires `storage.objects.list`
+     * IAM permission to use this method. To return object ACLs, the
+     * authenticated user must also
+     * have the `storage.objects.getIamPolicy` permission.
+     * 
+ */ + public void listObjects( + com.google.storage.v2.ListObjectsRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListObjectsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Rewrites a source object to a destination object. Optionally overrides
+     * metadata.
+     * 
+ */ + public void rewriteObject( + com.google.storage.v2.RewriteObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getRewriteObjectMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Starts a resumable write operation. This
+     * method is part of the resumable
+     * upload feature.
+     * This allows you to upload large objects in multiple chunks, which is more
+     * resilient to network interruptions than a single upload. The validity
+     * duration of the write operation, and the consequences of it becoming
+     * invalid, are service-dependent.
+     * **IAM Permissions**:
+     * Requires `storage.objects.create` IAM permission on the bucket.
+     * 
+ */ + public void startResumableWrite( + com.google.storage.v2.StartResumableWriteRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getStartResumableWriteMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Determines the `persisted_size` of an object that is being written. This
+     * method is part of the resumable
+     * upload feature.
+     * The returned value is the size of the object that has been persisted so
+     * far. The value can be used as the `write_offset` for the next `Write()`
+     * call.
+     * If the object does not exist, meaning if it was deleted, or the
+     * first `Write()` has not yet reached the service, this method returns the
+     * error `NOT_FOUND`.
+     * This method is useful for clients that buffer data and need to know which
+     * data can be safely evicted. The client can call `QueryWriteStatus()` at any
+     * time to determine how much data has been logged for this object.
+     * For any sequence of `QueryWriteStatus()` calls for a given
+     * object name, the sequence of returned `persisted_size` values are
+     * non-decreasing.
+     * 
+ */ + public void queryWriteStatus( + com.google.storage.v2.QueryWriteStatusRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getQueryWriteStatusMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Moves the source object to the destination object in the same bucket.
+     * This operation moves a source object to a destination object in the
+     * same bucket by renaming the object. The move itself is an atomic
+     * transaction, ensuring all steps either complete successfully or no
+     * changes are made.
+     * **IAM Permissions**:
+     * Requires the following IAM permissions to use this method:
+     *   - `storage.objects.move`
+     *   - `storage.objects.create`
+     *   - `storage.objects.delete` (only required if overwriting an existing
+     *   object)
+     * 
+ */ + public void moveObject( + com.google.storage.v2.MoveObjectRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getMoveObjectMethod(), getCallOptions()), request, responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service Storage. + * + *
+   * ## API Overview and Naming Syntax
+   * The Cloud Storage gRPC API allows applications to read and write data through
+   * the abstractions of buckets and objects. For a description of these
+   * abstractions please see [Cloud Storage
+   * documentation](https://cloud.google.com/storage/docs).
+   * Resources are named as follows:
+   *   - Projects are referred to as they are defined by the Resource Manager API,
+   *     using strings like `projects/123456` or `projects/my-string-id`.
+   *   - Buckets are named using string names of the form:
+   *     `projects/{project}/buckets/{bucket}`.
+   *     For globally unique buckets, `_` might be substituted for the project.
+   *   - Objects are uniquely identified by their name along with the name of the
+   *     bucket they belong to, as separate strings in this API. For example:
+   *         ```
+   *         ReadObjectRequest {
+   *         bucket: 'projects/_/buckets/my-bucket'
+   *         object: 'my-object'
+   *         }
+   *         ```
+   * Note that object names can contain `/` characters, which are treated as
+   * any other character (no special directory semantics).
+   * 
+ */ + public static final class StorageBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private StorageBlockingV2Stub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected StorageBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
+     * Permanently deletes an empty bucket.
+     * The request fails if there are any live or
+     * noncurrent objects in the bucket, but the request succeeds if the
+     * bucket only contains soft-deleted objects or incomplete uploads, such
+     * as ongoing XML API multipart uploads. Does not permanently delete
+     * soft-deleted objects.
+     * When this API is used to delete a bucket containing an object that has a
+     * soft delete policy
+     * enabled, the object becomes soft deleted, and the
+     * `softDeleteTime` and `hardDeleteTime` properties are set on the
+     * object.
+     * Objects and multipart uploads that were in the bucket at the time of
+     * deletion are also retained for the specified retention duration. When
+     * a soft-deleted bucket reaches the end of its retention duration, it
+     * is permanently deleted. The `hardDeleteTime` of the bucket always
+     * equals
+     * or exceeds the expiration time of the last soft-deleted object in the
+     * bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.delete` IAM permission on the bucket.
+     * 
+ */ + public com.google.protobuf.Empty deleteBucket(com.google.storage.v2.DeleteBucketRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getDeleteBucketMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns metadata for the specified bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.get`
+     * IAM permission on
+     * the bucket. Additionally, to return specific bucket metadata, the
+     * authenticated user must have the following permissions:
+     * - To return the IAM policies: `storage.buckets.getIamPolicy`
+     * - To return the bucket IP filtering rules: `storage.buckets.getIpFilter`
+     * 
+ */ + public com.google.storage.v2.Bucket getBucket(com.google.storage.v2.GetBucketRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetBucketMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Creates a new bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.create` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated user
+     * must have the following permissions:
+     * - To enable object retention using the `enableObjectRetention` query
+     * parameter: `storage.buckets.enableObjectRetention`
+     * - To set the bucket IP filtering rules: `storage.buckets.setIpFilter`
+     * 
+ */ + public com.google.storage.v2.Bucket createBucket( + com.google.storage.v2.CreateBucketRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateBucketMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Retrieves a list of buckets for a given project, ordered
+     * lexicographically by name.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.list` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated
+     * user must have the following permissions:
+     * - To list the IAM policies: `storage.buckets.getIamPolicy`
+     * - To list the bucket IP filtering rules: `storage.buckets.getIpFilter`
+     * 
+ */ + public com.google.storage.v2.ListBucketsResponse listBuckets( + com.google.storage.v2.ListBucketsRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListBucketsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Permanently locks the retention
+     * policy that is
+     * currently applied to the specified bucket.
+     * Caution: Locking a bucket is an
+     * irreversible action. Once you lock a bucket:
+     * - You cannot remove the retention policy from the bucket.
+     * - You cannot decrease the retention period for the policy.
+     * Once locked, you must delete the entire bucket in order to remove the
+     * bucket's retention policy. However, before you can delete the bucket, you
+     * must delete all the objects in the bucket, which is only
+     * possible if all the objects have reached the retention period set by the
+     * retention policy.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.update` IAM permission on the bucket.
+     * 
+ */ + public com.google.storage.v2.Bucket lockBucketRetentionPolicy( + com.google.storage.v2.LockBucketRetentionPolicyRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getLockBucketRetentionPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets the IAM policy for a specified bucket or managed folder.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.getIamPolicy` on the bucket or
+     * `storage.managedFolders.getIamPolicy` IAM permission on the
+     * managed folder.
+     * 
+ */ + public com.google.iam.v1.Policy getIamPolicy(com.google.iam.v1.GetIamPolicyRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates an IAM policy for the specified bucket or managed folder.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.iam.v1.Policy setIamPolicy(com.google.iam.v1.SetIamPolicyRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getSetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Tests a set of permissions on the given bucket, object, or managed folder
+     * to see which, if any, are held by the caller. The `resource` field in the
+     * request should be `projects/_/buckets/{bucket}` for a bucket,
+     * `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.iam.v1.TestIamPermissionsResponse testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getTestIamPermissionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates a bucket. Changes to the bucket are readable immediately after
+     * writing, but configuration changes might take time to propagate. This
+     * method supports `patch` semantics.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.update` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated user
+     * must have the following permissions:
+     * - To set bucket IP filtering rules: `storage.buckets.setIpFilter`
+     * - To update public access prevention policies or access control lists
+     * (ACLs): `storage.buckets.setIamPolicy`
+     * 
+ */ + public com.google.storage.v2.Bucket updateBucket( + com.google.storage.v2.UpdateBucketRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateBucketMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Concatenates a list of existing objects into a new object in the same
+     * bucket. The existing source objects are unaffected by this operation.
+     * **IAM Permissions**:
+     * Requires the `storage.objects.create` and `storage.objects.get` IAM
+     * permissions to use this method. If the new composite object
+     * overwrites an existing object, the authenticated user must also have
+     * the `storage.objects.delete` permission. If the request body includes
+     * the retention property, the authenticated user must also have the
+     * `storage.objects.setRetention` IAM permission.
+     * 
+ */ + public com.google.storage.v2.Object composeObject( + com.google.storage.v2.ComposeObjectRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getComposeObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Deletes an object and its metadata. Deletions are permanent if versioning
+     * is not enabled for the bucket, or if the generation parameter is used, or
+     * if soft delete is not
+     * enabled for the bucket.
+     * When this API is used to delete an object from a bucket that has soft
+     * delete policy enabled, the object becomes soft deleted, and the
+     * `softDeleteTime` and `hardDeleteTime` properties are set on the object.
+     * This API cannot be used to permanently delete soft-deleted objects.
+     * Soft-deleted objects are permanently deleted according to their
+     * `hardDeleteTime`.
+     * You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject]
+     * API to restore soft-deleted objects until the soft delete retention period
+     * has passed.
+     * **IAM Permissions**:
+     * Requires `storage.objects.delete` IAM permission on the bucket.
+     * 
+ */ + public com.google.protobuf.Empty deleteObject(com.google.storage.v2.DeleteObjectRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getDeleteObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Restores a
+     * soft-deleted object.
+     * When a soft-deleted object is restored, a new copy of that object is
+     * created in the same bucket and inherits the same metadata as the
+     * soft-deleted object. The inherited metadata is the metadata that existed
+     * when the original object became soft deleted, with the following
+     * exceptions:
+     *   - The `createTime` of the new object is set to the time at which the
+     *   soft-deleted object was restored.
+     *   - The `softDeleteTime` and `hardDeleteTime` values are cleared.
+     *   - A new generation is assigned and the metageneration is reset to 1.
+     *   - If the soft-deleted object was in a bucket that had Autoclass enabled,
+     *   the new object is
+     *     restored to Standard storage.
+     *   - The restored object inherits the bucket's default object ACL, unless
+     *   `copySourceAcl` is `true`.
+     * If a live object using the same name already exists in the bucket and
+     * becomes overwritten, the live object becomes a noncurrent object if Object
+     * Versioning is enabled on the bucket. If Object Versioning is not enabled,
+     * the live object becomes soft deleted.
+     * **IAM Permissions**:
+     * Requires the following IAM permissions to use this method:
+     *   - `storage.objects.restore`
+     *   - `storage.objects.create`
+     *   - `storage.objects.delete` (only required if overwriting an existing
+     *   object)
+     *   - `storage.objects.getIamPolicy` (only required if `projection` is `full`
+     *   and the relevant bucket
+     *     has uniform bucket-level access disabled)
+     *   - `storage.objects.setIamPolicy` (only required if `copySourceAcl` is
+     *   `true` and the relevant
+     *     bucket has uniform bucket-level access disabled)
+     * 
+ */ + public com.google.storage.v2.Object restoreObject( + com.google.storage.v2.RestoreObjectRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getRestoreObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Cancels an in-progress resumable upload.
+     * Any attempts to write to the resumable upload after cancelling the upload
+     * fail.
+     * The behavior for any in-progress write operations is not guaranteed;
+     * they could either complete before the cancellation or fail if the
+     * cancellation completes first.
+     * 
+ */ + public com.google.storage.v2.CancelResumableWriteResponse cancelResumableWrite( + com.google.storage.v2.CancelResumableWriteRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCancelResumableWriteMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Retrieves object metadata.
+     * **IAM Permissions**:
+     * Requires `storage.objects.get` IAM permission on the bucket.
+     * To return object ACLs, the authenticated user must also have
+     * the `storage.objects.getIamPolicy` permission.
+     * 
+ */ + public com.google.storage.v2.Object getObject(com.google.storage.v2.GetObjectRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Retrieves object data.
+     * **IAM Permissions**:
+     * Requires `storage.objects.get` IAM permission on the bucket.
+     * 
+ */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall readObject( + com.google.storage.v2.ReadObjectRequest request) { + return io.grpc.stub.ClientCalls.blockingV2ServerStreamingCall( + getChannel(), getReadObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Reads an object's data.
+     * This bi-directional API reads data from an object, allowing you to request
+     * multiple data ranges within a single stream, even across several messages.
+     * If an error occurs with any request, the stream closes with a relevant
+     * error code. Since you can have multiple outstanding requests, the error
+     * response includes a `BidiReadObjectError` proto in its `details` field,
+     * reporting the specific error, if any, for each pending `read_id`.
+     * **IAM Permissions**:
+     * Requires `storage.objects.get` IAM permission on the bucket.
+     * 
+ */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall< + com.google.storage.v2.BidiReadObjectRequest, + com.google.storage.v2.BidiReadObjectResponse> + bidiReadObject() { + return io.grpc.stub.ClientCalls.blockingBidiStreamingCall( + getChannel(), getBidiReadObjectMethod(), getCallOptions()); + } + + /** + * + * + *
+     * Updates an object's metadata.
+     * Equivalent to JSON API's `storage.objects.patch` method.
+     * **IAM Permissions**:
+     * Requires `storage.objects.update` IAM permission on the bucket.
+     * 
+ */ + public com.google.storage.v2.Object updateObject( + com.google.storage.v2.UpdateObjectRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Stores a new object and metadata.
+     * An object can be written either in a single message stream or in a
+     * resumable sequence of message streams. To write using a single stream,
+     * the client should include in the first message of the stream an
+     * `WriteObjectSpec` describing the destination bucket, object, and any
+     * preconditions. Additionally, the final message must set 'finish_write' to
+     * true, or else it is an error.
+     * For a resumable write, the client should instead call
+     * `StartResumableWrite()`, populating a `WriteObjectSpec` into that request.
+     * They should then attach the returned `upload_id` to the first message of
+     * each following call to `WriteObject`. If the stream is closed before
+     * finishing the upload (either explicitly by the client or due to a network
+     * error or an error response from the server), the client should do as
+     * follows:
+     *   - Check the result Status of the stream, to determine if writing can be
+     *     resumed on this stream or must be restarted from scratch (by calling
+     *     `StartResumableWrite()`). The resumable errors are `DEADLINE_EXCEEDED`,
+     *     `INTERNAL`, and `UNAVAILABLE`. For each case, the client should use
+     *     binary exponential backoff before retrying.  Additionally, writes can
+     *     be resumed after `RESOURCE_EXHAUSTED` errors, but only after taking
+     *     appropriate measures, which might include reducing aggregate send rate
+     *     across clients and/or requesting a quota increase for your project.
+     *   - If the call to `WriteObject` returns `ABORTED`, that indicates
+     *     concurrent attempts to update the resumable write, caused either by
+     *     multiple racing clients or by a single client where the previous
+     *     request was timed out on the client side but nonetheless reached the
+     *     server. In this case the client should take steps to prevent further
+     *     concurrent writes. For example, increase the timeouts and stop using
+     *     more than one process to perform the upload. Follow the steps below for
+     *     resuming the upload.
+     *   - For resumable errors, the client should call `QueryWriteStatus()` and
+     *     then continue writing from the returned `persisted_size`. This might be
+     *     less than the amount of data the client previously sent. Note also that
+     *     it is acceptable to send data starting at an offset earlier than the
+     *     returned `persisted_size`; in this case, the service skips data at
+     *     offsets that were already persisted (without checking that it matches
+     *     the previously written data), and writes only the data starting from the
+     *     persisted offset. Even though the data isn't written, it might still
+     *     incur a performance cost over resuming at the correct write offset.
+     *     This behavior can make client-side handling simpler in some cases.
+     *   - Clients must only send data that is a multiple of 256 KiB per message,
+     *     unless the object is being finished with `finish_write` set to `true`.
+     * The service does not view the object as complete until the client has
+     * sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any
+     * requests on a stream after sending a request with `finish_write` set to
+     * `true` causes an error. The client must check the response it
+     * receives to determine how much data the service is able to commit and
+     * whether the service views the object as complete.
+     * Attempting to resume an already finalized object results in an `OK`
+     * status, with a `WriteObjectResponse` containing the finalized object's
+     * metadata.
+     * Alternatively, you can use the `BidiWriteObject` operation to write an
+     * object with controls over flushing and the ability to determine the
+     * current persisted size.
+     * **IAM Permissions**:
+     * Requires `storage.objects.create`
+     * IAM permission on
+     * the bucket.
+     * 
+ */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall< + com.google.storage.v2.WriteObjectRequest, com.google.storage.v2.WriteObjectResponse> + writeObject() { + return io.grpc.stub.ClientCalls.blockingClientStreamingCall( + getChannel(), getWriteObjectMethod(), getCallOptions()); + } + + /** + * + * + *
+     * Stores a new object and metadata.
+     * This is similar to the `WriteObject` call with the added support for
+     * manual flushing of persisted state, and the ability to determine current
+     * persisted size without closing the stream.
+     * The client might specify one or both of the `state_lookup` and `flush`
+     * fields in each `BidiWriteObjectRequest`. If `flush` is specified, the data
+     * written so far is persisted to storage. If `state_lookup` is specified, the
+     * service responds with a `BidiWriteObjectResponse` that contains the
+     * persisted size. If both `flush` and `state_lookup` are specified, the flush
+     * always occurs before a `state_lookup`, so that both might be set in the
+     * same request and the returned state is the state of the object
+     * post-flush. When the stream is closed, a `BidiWriteObjectResponse`
+     * is always sent to the client, regardless of the value of `state_lookup`.
+     * 
+ */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall< + com.google.storage.v2.BidiWriteObjectRequest, + com.google.storage.v2.BidiWriteObjectResponse> + bidiWriteObject() { + return io.grpc.stub.ClientCalls.blockingBidiStreamingCall( + getChannel(), getBidiWriteObjectMethod(), getCallOptions()); + } + + /** + * + * + *
+     * Retrieves a list of objects matching the criteria.
+     * **IAM Permissions**:
+     * The authenticated user requires `storage.objects.list`
+     * IAM permission to use this method. To return object ACLs, the
+     * authenticated user must also
+     * have the `storage.objects.getIamPolicy` permission.
+     * 
+ */ + public com.google.storage.v2.ListObjectsResponse listObjects( + com.google.storage.v2.ListObjectsRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListObjectsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Rewrites a source object to a destination object. Optionally overrides
+     * metadata.
+     * 
+ */ + public com.google.storage.v2.RewriteResponse rewriteObject( + com.google.storage.v2.RewriteObjectRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getRewriteObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Starts a resumable write operation. This
+     * method is part of the Resumable
+     * upload feature.
+     * This allows you to upload large objects in multiple chunks, which is more
+     * resilient to network interruptions than a single upload. The validity
+     * duration of the write operation, and the consequences of it becoming
+     * invalid, are service-dependent.
+     * **IAM Permissions**:
+     * Requires `storage.objects.create` IAM permission on the bucket.
+     * 
+ */ + public com.google.storage.v2.StartResumableWriteResponse startResumableWrite( + com.google.storage.v2.StartResumableWriteRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getStartResumableWriteMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Determines the `persisted_size` of an object that is being written. This
+     * method is part of the resumable
+     * upload feature.
+     * The returned value is the size of the object that has been persisted so
+     * far. The value can be used as the `write_offset` for the next `Write()`
+     * call.
+     * If the object does not exist, meaning if it was deleted, or the
+     * first `Write()` has not yet reached the service, this method returns the
+     * error `NOT_FOUND`.
+     * This method is useful for clients that buffer data and need to know which
+     * data can be safely evicted. The client can call `QueryWriteStatus()` at any
+     * time to determine how much data has been logged for this object.
+     * For any sequence of `QueryWriteStatus()` calls for a given
+     * object name, the sequence of returned `persisted_size` values are
+     * non-decreasing.
+     * 
+ */ + public com.google.storage.v2.QueryWriteStatusResponse queryWriteStatus( + com.google.storage.v2.QueryWriteStatusRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getQueryWriteStatusMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Moves the source object to the destination object in the same bucket.
+     * This operation moves a source object to a destination object in the
+     * same bucket by renaming the object. The move itself is an atomic
+     * transaction, ensuring all steps either complete successfully or no
+     * changes are made.
+     * **IAM Permissions**:
+     * Requires the following IAM permissions to use this method:
+     *   - `storage.objects.move`
+     *   - `storage.objects.create`
+     *   - `storage.objects.delete` (only required if overwriting an existing
+     *   object)
+     * 
+ */ + public com.google.storage.v2.Object moveObject(com.google.storage.v2.MoveObjectRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getMoveObjectMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do limited synchronous rpc calls to service Storage. + * + *
+   * ## API Overview and Naming Syntax
+   * The Cloud Storage gRPC API allows applications to read and write data through
+   * the abstractions of buckets and objects. For a description of these
+   * abstractions please see [Cloud Storage
+   * documentation](https://cloud.google.com/storage/docs).
+   * Resources are named as follows:
+   *   - Projects are referred to as they are defined by the Resource Manager API,
+   *     using strings like `projects/123456` or `projects/my-string-id`.
+   *   - Buckets are named using string names of the form:
+   *     `projects/{project}/buckets/{bucket}`.
+   *     For globally unique buckets, `_` might be substituted for the project.
+   *   - Objects are uniquely identified by their name along with the name of the
+   *     bucket they belong to, as separate strings in this API. For example:
+   *         ```
+   *         ReadObjectRequest {
+   *         bucket: 'projects/_/buckets/my-bucket'
+   *         object: 'my-object'
+   *         }
+   *         ```
+   * Note that object names can contain `/` characters, which are treated as
+   * any other character (no special directory semantics).
+   * 
+ */ + public static final class StorageBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private StorageBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected StorageBlockingStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Permanently deletes an empty bucket.
+     * The request fails if there are any live or
+     * noncurrent objects in the bucket, but the request succeeds if the
+     * bucket only contains soft-deleted objects or incomplete uploads, such
+     * as ongoing XML API multipart uploads. Does not permanently delete
+     * soft-deleted objects.
+     * When this API is used to delete a bucket containing an object that has a
+     * soft delete policy
+     * enabled, the object becomes soft deleted, and the
+     * `softDeleteTime` and `hardDeleteTime` properties are set on the
+     * object.
+     * Objects and multipart uploads that were in the bucket at the time of
+     * deletion are also retained for the specified retention duration. When
+     * a soft-deleted bucket reaches the end of its retention duration, it
+     * is permanently deleted. The `hardDeleteTime` of the bucket always
+     * equals
+     * or exceeds the expiration time of the last soft-deleted object in the
+     * bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.delete` IAM permission on the bucket.
+     * 
+ */ + public com.google.protobuf.Empty deleteBucket( + com.google.storage.v2.DeleteBucketRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteBucketMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns metadata for the specified bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.get`
+     * IAM permission on
+     * the bucket. Additionally, to return specific bucket metadata, the
+     * authenticated user must have the following permissions:
+     * - To return the IAM policies: `storage.buckets.getIamPolicy`
+     * - To return the bucket IP filtering rules: `storage.buckets.getIpFilter`
+     * 
+ */ + public com.google.storage.v2.Bucket getBucket(com.google.storage.v2.GetBucketRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetBucketMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Creates a new bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.create` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated user
+     * must have the following permissions:
+     * - To enable object retention using the `enableObjectRetention` query
+     * parameter: `storage.buckets.enableObjectRetention`
+     * - To set the bucket IP filtering rules: `storage.buckets.setIpFilter`
+     * 
+ */ + public com.google.storage.v2.Bucket createBucket( + com.google.storage.v2.CreateBucketRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateBucketMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Retrieves a list of buckets for a given project, ordered
+     * lexicographically by name.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.list` IAM permission on the project.
+     * Additionally, to return specific bucket metadata, the authenticated
+     * user must have the following permissions:
+     * - To list the IAM policies: `storage.buckets.getIamPolicy`
+     * - To list the bucket IP filtering rules: `storage.buckets.getIpFilter`
+     * 
+ */ + public com.google.storage.v2.ListBucketsResponse listBuckets( + com.google.storage.v2.ListBucketsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListBucketsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Permanently locks the retention
+     * policy that is
+     * currently applied to the specified bucket.
+     * Caution: Locking a bucket is an
+     * irreversible action. Once you lock a bucket:
+     * - You cannot remove the retention policy from the bucket.
+     * - You cannot decrease the retention period for the policy.
+     * Once locked, you must delete the entire bucket in order to remove the
+     * bucket's retention policy. However, before you can delete the bucket, you
+     * must delete all the objects in the bucket, which is only
+     * possible if all the objects have reached the retention period set by the
+     * retention policy.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.update` IAM permission on the bucket.
+     * 
+ */ + public com.google.storage.v2.Bucket lockBucketRetentionPolicy( + com.google.storage.v2.LockBucketRetentionPolicyRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getLockBucketRetentionPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets the IAM policy for a specified bucket or managed folder.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.getIamPolicy` on the bucket or
+     * `storage.managedFolders.getIamPolicy` IAM permission on the
+     * managed folder.
+     * 
+ */ + public com.google.iam.v1.Policy getIamPolicy(com.google.iam.v1.GetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates an IAM policy for the specified bucket or managed folder.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.iam.v1.Policy setIamPolicy(com.google.iam.v1.SetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getSetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Tests a set of permissions on the given bucket, object, or managed folder
+     * to see which, if any, are held by the caller. The `resource` field in the
+     * request should be `projects/_/buckets/{bucket}` for a bucket,
+     * `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.iam.v1.TestIamPermissionsResponse testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getTestIamPermissionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates a bucket. Changes to the bucket are readable immediately after
+     * writing, but configuration changes might take time to propagate. This
+     * method supports `patch` semantics.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.update` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated user
+     * must have the following permissions:
+     * - To set bucket IP filtering rules: `storage.buckets.setIpFilter`
+     * - To update public access prevention policies or access control lists
+     * (ACLs): `storage.buckets.setIamPolicy`
+     * 
+ */ + public com.google.storage.v2.Bucket updateBucket( + com.google.storage.v2.UpdateBucketRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateBucketMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Concatenates a list of existing objects into a new object in the same
+     * bucket. The existing source objects are unaffected by this operation.
+     * **IAM Permissions**:
+     * Requires the `storage.objects.create` and `storage.objects.get` IAM
+     * permissions to use this method. If the new composite object
+     * overwrites an existing object, the authenticated user must also have
+     * the `storage.objects.delete` permission. If the request body includes
+     * the retention property, the authenticated user must also have the
+     * `storage.objects.setRetention` IAM permission.
+     * 
+ */ + public com.google.storage.v2.Object composeObject( + com.google.storage.v2.ComposeObjectRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getComposeObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Deletes an object and its metadata. Deletions are permanent if versioning
+     * is not enabled for the bucket, or if the generation parameter is used, or
+     * if soft delete is not
+     * enabled for the bucket.
+     * When this API is used to delete an object from a bucket that has soft
+     * delete policy enabled, the object becomes soft deleted, and the
+     * `softDeleteTime` and `hardDeleteTime` properties are set on the object.
+     * This API cannot be used to permanently delete soft-deleted objects.
+     * Soft-deleted objects are permanently deleted according to their
+     * `hardDeleteTime`.
+     * You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject]
+     * API to restore soft-deleted objects until the soft delete retention period
+     * has passed.
+     * **IAM Permissions**:
+     * Requires `storage.objects.delete` IAM permission on the bucket.
+     * 
+ */ + public com.google.protobuf.Empty deleteObject( + com.google.storage.v2.DeleteObjectRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Restores a
+     * soft-deleted object.
+     * When a soft-deleted object is restored, a new copy of that object is
+     * created in the same bucket and inherits the same metadata as the
+     * soft-deleted object. The inherited metadata is the metadata that existed
+     * when the original object became soft deleted, with the following
+     * exceptions:
+     *   - The `createTime` of the new object is set to the time at which the
+     *   soft-deleted object was restored.
+     *   - The `softDeleteTime` and `hardDeleteTime` values are cleared.
+     *   - A new generation is assigned and the metageneration is reset to 1.
+     *   - If the soft-deleted object was in a bucket that had Autoclass enabled,
+     *   the new object is
+     *     restored to Standard storage.
+     *   - The restored object inherits the bucket's default object ACL, unless
+     *   `copySourceAcl` is `true`.
+     * If a live object using the same name already exists in the bucket and
+     * becomes overwritten, the live object becomes a noncurrent object if Object
+     * Versioning is enabled on the bucket. If Object Versioning is not enabled,
+     * the live object becomes soft deleted.
+     * **IAM Permissions**:
+     * Requires the following IAM permissions to use this method:
+     *   - `storage.objects.restore`
+     *   - `storage.objects.create`
+     *   - `storage.objects.delete` (only required if overwriting an existing
+     *   object)
+     *   - `storage.objects.getIamPolicy` (only required if `projection` is `full`
+     *   and the relevant bucket
+     *     has uniform bucket-level access disabled)
+     *   - `storage.objects.setIamPolicy` (only required if `copySourceAcl` is
+     *   `true` and the relevant
+     *     bucket has uniform bucket-level access disabled)
+     * 
+ */ + public com.google.storage.v2.Object restoreObject( + com.google.storage.v2.RestoreObjectRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getRestoreObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Cancels an in-progress resumable upload.
+     * Any attempts to write to the resumable upload after cancelling the upload
+     * fail.
+     * The behavior for any in-progress write operations is not guaranteed;
+     * they could either complete before the cancellation or fail if the
+     * cancellation completes first.
+     * 
+ */ + public com.google.storage.v2.CancelResumableWriteResponse cancelResumableWrite( + com.google.storage.v2.CancelResumableWriteRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCancelResumableWriteMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Retrieves object metadata.
+     * **IAM Permissions**:
+     * Requires `storage.objects.get` IAM permission on the bucket.
+     * To return object ACLs, the authenticated user must also have
+     * the `storage.objects.getIamPolicy` permission.
+     * 
+ */ + public com.google.storage.v2.Object getObject(com.google.storage.v2.GetObjectRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Retrieves object data.
+     * **IAM Permissions**:
+     * Requires `storage.objects.get` IAM permission on the bucket.
+     * 
+ */ + public java.util.Iterator readObject( + com.google.storage.v2.ReadObjectRequest request) { + return io.grpc.stub.ClientCalls.blockingServerStreamingCall( + getChannel(), getReadObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates an object's metadata.
+     * Equivalent to JSON API's `storage.objects.patch` method.
+     * **IAM Permissions**:
+     * Requires `storage.objects.update` IAM permission on the bucket.
+     * 
+ */ + public com.google.storage.v2.Object updateObject( + com.google.storage.v2.UpdateObjectRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Retrieves a list of objects matching the criteria.
+     * **IAM Permissions**:
+     * The authenticated user requires `storage.objects.list`
+     * IAM permission to use this method. To return object ACLs, the
+     * authenticated user must also
+     * have the `storage.objects.getIamPolicy` permission.
+     * 
+ */ + public com.google.storage.v2.ListObjectsResponse listObjects( + com.google.storage.v2.ListObjectsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListObjectsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Rewrites a source object to a destination object. Optionally overrides
+     * metadata.
+     * 
+ */ + public com.google.storage.v2.RewriteResponse rewriteObject( + com.google.storage.v2.RewriteObjectRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getRewriteObjectMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Starts a resumable write operation. This
+     * method is part of the Resumable
+     * upload feature.
+     * This allows you to upload large objects in multiple chunks, which is more
+     * resilient to network interruptions than a single upload. The validity
+     * duration of the write operation, and the consequences of it becoming
+     * invalid, are service-dependent.
+     * **IAM Permissions**:
+     * Requires `storage.objects.create` IAM permission on the bucket.
+     * 
+ */ + public com.google.storage.v2.StartResumableWriteResponse startResumableWrite( + com.google.storage.v2.StartResumableWriteRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getStartResumableWriteMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Determines the `persisted_size` of an object that is being written. This
+     * method is part of the resumable
+     * upload feature.
+     * The returned value is the size of the object that has been persisted so
+     * far. The value can be used as the `write_offset` for the next `Write()`
+     * call.
+     * If the object does not exist, meaning if it was deleted, or the
+     * first `Write()` has not yet reached the service, this method returns the
+     * error `NOT_FOUND`.
+     * This method is useful for clients that buffer data and need to know which
+     * data can be safely evicted. The client can call `QueryWriteStatus()` at any
+     * time to determine how much data has been logged for this object.
+     * For any sequence of `QueryWriteStatus()` calls for a given
+     * object name, the sequence of returned `persisted_size` values are
+     * non-decreasing.
+     * 
+ */ + public com.google.storage.v2.QueryWriteStatusResponse queryWriteStatus( + com.google.storage.v2.QueryWriteStatusRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getQueryWriteStatusMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Moves the source object to the destination object in the same bucket.
+     * This operation moves a source object to a destination object in the
+     * same bucket by renaming the object. The move itself is an atomic
+     * transaction, ensuring all steps either complete successfully or no
+     * changes are made.
+     * **IAM Permissions**:
+     * Requires the following IAM permissions to use this method:
+     *   - `storage.objects.move`
+     *   - `storage.objects.create`
+     *   - `storage.objects.delete` (only required if overwriting an existing
+     *   object)
+     * 
+ */ + public com.google.storage.v2.Object moveObject( + com.google.storage.v2.MoveObjectRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getMoveObjectMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service Storage. + * + *
+   * ## API Overview and Naming Syntax
+   * The Cloud Storage gRPC API allows applications to read and write data through
+   * the abstractions of buckets and objects. For a description of these
+   * abstractions please see [Cloud Storage
+   * documentation](https://cloud.google.com/storage/docs).
+   * Resources are named as follows:
+   *   - Projects are referred to as they are defined by the Resource Manager API,
+   *     using strings like `projects/123456` or `projects/my-string-id`.
+   *   - Buckets are named using string names of the form:
+   *     `projects/{project}/buckets/{bucket}`.
+   *     For globally unique buckets, `_` might be substituted for the project.
+   *   - Objects are uniquely identified by their name along with the name of the
+   *     bucket they belong to, as separate strings in this API. For example:
+   *         ```
+   *         ReadObjectRequest {
+   *         bucket: 'projects/_/buckets/my-bucket'
+   *         object: 'my-object'
+   *         }
+   *         ```
+   * Note that object names can contain `/` characters, which are treated as
+   * any other character (no special directory semantics).
+   * 
+ */ + public static final class StorageFutureStub + extends io.grpc.stub.AbstractFutureStub { + private StorageFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected StorageFutureStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new StorageFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Permanently deletes an empty bucket.
+     * The request fails if there are any live or
+     * noncurrent objects in the bucket, but the request succeeds if the
+     * bucket only contains soft-deleted objects or incomplete uploads, such
+     * as ongoing XML API multipart uploads. Does not permanently delete
+     * soft-deleted objects.
+     * When this API is used to delete a bucket containing an object that has a
+     * soft delete policy
+     * enabled, the object becomes soft deleted, and the
+     * `softDeleteTime` and `hardDeleteTime` properties are set on the
+     * object.
+     * Objects and multipart uploads that were in the bucket at the time of
+     * deletion are also retained for the specified retention duration. When
+     * a soft-deleted bucket reaches the end of its retention duration, it
+     * is permanently deleted. The `hardDeleteTime` of the bucket always
+     * equals
+     * or exceeds the expiration time of the last soft-deleted object in the
+     * bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.delete` IAM permission on the bucket.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + deleteBucket(com.google.storage.v2.DeleteBucketRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteBucketMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Returns metadata for the specified bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.get`
+     * IAM permission on
+     * the bucket. Additionally, to return specific bucket metadata, the
+     * authenticated user must have the following permissions:
+     * - To return the IAM policies: `storage.buckets.getIamPolicy`
+     * - To return the bucket IP filtering rules: `storage.buckets.getIpFilter`
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + getBucket(com.google.storage.v2.GetBucketRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetBucketMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Creates a new bucket.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.create` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated user
+     * must have the following permissions:
+     * - To enable object retention using the `enableObjectRetention` query
+     * parameter: `storage.buckets.enableObjectRetention`
+     * - To set the bucket IP filtering rules: `storage.buckets.setIpFilter`
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + createBucket(com.google.storage.v2.CreateBucketRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateBucketMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Retrieves a list of buckets for a given project, ordered
+     * lexicographically by name.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.list` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated
+     * user must have the following permissions:
+     * - To list the IAM policies: `storage.buckets.getIamPolicy`
+     * - To list the bucket IP filtering rules: `storage.buckets.getIpFilter`
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.v2.ListBucketsResponse> + listBuckets(com.google.storage.v2.ListBucketsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListBucketsMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Permanently locks the retention
+     * policy that is
+     * currently applied to the specified bucket.
+     * Caution: Locking a bucket is an
+     * irreversible action. Once you lock a bucket:
+     * - You cannot remove the retention policy from the bucket.
+     * - You cannot decrease the retention period for the policy.
+     * Once locked, you must delete the entire bucket in order to remove the
+     * bucket's retention policy. However, before you can delete the bucket, you
+     * must delete all the objects in the bucket, which is only
+     * possible if all the objects have reached the retention period set by the
+     * retention policy.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.update` IAM permission on the bucket.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + lockBucketRetentionPolicy(com.google.storage.v2.LockBucketRetentionPolicyRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getLockBucketRetentionPolicyMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Gets the IAM policy for a specified bucket or managed folder.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.getIamPolicy` on the bucket or
+     * `storage.managedFolders.getIamPolicy` IAM permission on the
+     * managed folder.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + getIamPolicy(com.google.iam.v1.GetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetIamPolicyMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Updates an IAM policy for the specified bucket or managed folder.
+     * The `resource` field in the request should be
+     * `projects/_/buckets/{bucket}` for a bucket, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + setIamPolicy(com.google.iam.v1.SetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getSetIamPolicyMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Tests a set of permissions on the given bucket, object, or managed folder
+     * to see which, if any, are held by the caller. The `resource` field in the
+     * request should be `projects/_/buckets/{bucket}` for a bucket,
+     * `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+     * `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+     * for a managed folder.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.iam.v1.TestIamPermissionsResponse> + testIamPermissions(com.google.iam.v1.TestIamPermissionsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getTestIamPermissionsMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Updates a bucket. Changes to the bucket are readable immediately after
+     * writing, but configuration changes might take time to propagate. This
+     * method supports `patch` semantics.
+     * **IAM Permissions**:
+     * Requires `storage.buckets.update` IAM permission on the bucket.
+     * Additionally, to enable specific bucket features, the authenticated user
+     * must have the following permissions:
+     * - To set bucket IP filtering rules: `storage.buckets.setIpFilter`
+     * - To update public access prevention policies or access control lists
+     * (ACLs): `storage.buckets.setIamPolicy`
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + updateBucket(com.google.storage.v2.UpdateBucketRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateBucketMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Concatenates a list of existing objects into a new object in the same
+     * bucket. The existing source objects are unaffected by this operation.
+     * **IAM Permissions**:
+     * Requires the `storage.objects.create` and `storage.objects.get` IAM
+     * permissions to use this method. If the new composite object
+     * overwrites an existing object, the authenticated user must also have
+     * the `storage.objects.delete` permission. If the request body includes
+     * the retention property, the authenticated user must also have the
+     * `storage.objects.setRetention` IAM permission.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + composeObject(com.google.storage.v2.ComposeObjectRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getComposeObjectMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Deletes an object and its metadata. Deletions are permanent if versioning
+     * is not enabled for the bucket, or if the generation parameter is used, or
+     * if soft delete is not
+     * enabled for the bucket.
+     * When this API is used to delete an object from a bucket that has soft
+     * delete policy enabled, the object becomes soft deleted, and the
+     * `softDeleteTime` and `hardDeleteTime` properties are set on the object.
+     * This API cannot be used to permanently delete soft-deleted objects.
+     * Soft-deleted objects are permanently deleted according to their
+     * `hardDeleteTime`.
+     * You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject]
+     * API to restore soft-deleted objects until the soft delete retention period
+     * has passed.
+     * **IAM Permissions**:
+     * Requires `storage.objects.delete` IAM permission on the bucket.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + deleteObject(com.google.storage.v2.DeleteObjectRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteObjectMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Restores a
+     * soft-deleted object.
+     * When a soft-deleted object is restored, a new copy of that object is
+     * created in the same bucket and inherits the same metadata as the
+     * soft-deleted object. The inherited metadata is the metadata that existed
+     * when the original object became soft deleted, with the following
+     * exceptions:
+     *   - The `createTime` of the new object is set to the time at which the
+     *   soft-deleted object was restored.
+     *   - The `softDeleteTime` and `hardDeleteTime` values are cleared.
+     *   - A new generation is assigned and the metageneration is reset to 1.
+     *   - If the soft-deleted object was in a bucket that had Autoclass enabled,
+     *   the new object is
+     *     restored to Standard storage.
+     *   - The restored object inherits the bucket's default object ACL, unless
+     *   `copySourceAcl` is `true`.
+     * If a live object using the same name already exists in the bucket and
+     * becomes overwritten, the live object becomes a noncurrent object if Object
+     * Versioning is enabled on the bucket. If Object Versioning is not enabled,
+     * the live object becomes soft deleted.
+     * **IAM Permissions**:
+     * Requires the following IAM permissions to use this method:
+     *   - `storage.objects.restore`
+     *   - `storage.objects.create`
+     *   - `storage.objects.delete` (only required if overwriting an existing
+     *   object)
+     *   - `storage.objects.getIamPolicy` (only required if `projection` is `full`
+     *   and the relevant bucket
+     *     has uniform bucket-level access disabled)
+     *   - `storage.objects.setIamPolicy` (only required if `copySourceAcl` is
+     *   `true` and the relevant
+     *     bucket has uniform bucket-level access disabled)
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + restoreObject(com.google.storage.v2.RestoreObjectRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getRestoreObjectMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Cancels an in-progress resumable upload.
+     * Any attempts to write to the resumable upload after cancelling the upload
+     * fail.
+     * The behavior for any in-progress write operations is not guaranteed;
+     * they could either complete before the cancellation or fail if the
+     * cancellation completes first.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.v2.CancelResumableWriteResponse> + cancelResumableWrite(com.google.storage.v2.CancelResumableWriteRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCancelResumableWriteMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Retrieves object metadata.
+     * **IAM Permissions**:
+     * Requires `storage.objects.get` IAM permission on the bucket.
+     * To return object ACLs, the authenticated user must also have
+     * the `storage.objects.getIamPolicy` permission.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + getObject(com.google.storage.v2.GetObjectRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetObjectMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Updates an object's metadata.
+     * Equivalent to JSON API's `storage.objects.patch` method.
+     * **IAM Permissions**:
+     * Requires `storage.objects.update` IAM permission on the bucket.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + updateObject(com.google.storage.v2.UpdateObjectRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateObjectMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Retrieves a list of objects matching the criteria.
+     * **IAM Permissions**:
+     * The authenticated user requires `storage.objects.list`
+     * IAM permission to use this method. To return object ACLs, the
+     * authenticated user must also
+     * have the `storage.objects.getIamPolicy` permission.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.v2.ListObjectsResponse> + listObjects(com.google.storage.v2.ListObjectsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListObjectsMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Rewrites a source object to a destination object. Optionally overrides
+     * metadata.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + rewriteObject(com.google.storage.v2.RewriteObjectRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getRewriteObjectMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Starts a resumable write operation. This
+     * method is part of the Resumable
+     * upload feature.
+     * This allows you to upload large objects in multiple chunks, which is more
+     * resilient to network interruptions than a single upload. The validity
+     * duration of the write operation, and the consequences of it becoming
+     * invalid, are service-dependent.
+     * **IAM Permissions**:
+     * Requires `storage.objects.create` IAM permission on the bucket.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.v2.StartResumableWriteResponse> + startResumableWrite(com.google.storage.v2.StartResumableWriteRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getStartResumableWriteMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Determines the `persisted_size` of an object that is being written. This
+     * method is part of the resumable
+     * upload feature.
+     * The returned value is the size of the object that has been persisted so
+     * far. The value can be used as the `write_offset` for the next `Write()`
+     * call.
+     * If the object does not exist, meaning if it was deleted, or the
+     * first `Write()` has not yet reached the service, this method returns the
+     * error `NOT_FOUND`.
+     * This method is useful for clients that buffer data and need to know which
+     * data can be safely evicted. The client can call `QueryWriteStatus()` at any
+     * time to determine how much data has been logged for this object.
+     * For any sequence of `QueryWriteStatus()` calls for a given
+     * object name, the sequence of returned `persisted_size` values are
+     * non-decreasing.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.storage.v2.QueryWriteStatusResponse> + queryWriteStatus(com.google.storage.v2.QueryWriteStatusRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getQueryWriteStatusMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Moves the source object to the destination object in the same bucket.
+     * This operation moves a source object to a destination object in the
+     * same bucket by renaming the object. The move itself is an atomic
+     * transaction, ensuring all steps either complete successfully or no
+     * changes are made.
+     * **IAM Permissions**:
+     * Requires the following IAM permissions to use this method:
+     *   - `storage.objects.move`
+     *   - `storage.objects.create`
+     *   - `storage.objects.delete` (only required if overwriting an existing
+     *   object)
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + moveObject(com.google.storage.v2.MoveObjectRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getMoveObjectMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_DELETE_BUCKET = 0; + private static final int METHODID_GET_BUCKET = 1; + private static final int METHODID_CREATE_BUCKET = 2; + private static final int METHODID_LIST_BUCKETS = 3; + private static final int METHODID_LOCK_BUCKET_RETENTION_POLICY = 4; + private static final int METHODID_GET_IAM_POLICY = 5; + private static final int METHODID_SET_IAM_POLICY = 6; + private static final int METHODID_TEST_IAM_PERMISSIONS = 7; + private static final int METHODID_UPDATE_BUCKET = 8; + private static final int METHODID_COMPOSE_OBJECT = 9; + private static final int METHODID_DELETE_OBJECT = 10; + private static final int METHODID_RESTORE_OBJECT = 11; + private static final int METHODID_CANCEL_RESUMABLE_WRITE = 12; + private static final int METHODID_GET_OBJECT = 13; + private static final int METHODID_READ_OBJECT = 14; + private static final int METHODID_UPDATE_OBJECT = 15; + private static final int METHODID_LIST_OBJECTS = 16; + private static final int METHODID_REWRITE_OBJECT = 17; + private static final int METHODID_START_RESUMABLE_WRITE = 18; + private static final int METHODID_QUERY_WRITE_STATUS = 19; + private static final int METHODID_MOVE_OBJECT = 20; + private static final int METHODID_BIDI_READ_OBJECT = 21; + private static final int METHODID_WRITE_OBJECT = 22; + private static final int METHODID_BIDI_WRITE_OBJECT = 23; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService 
serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_DELETE_BUCKET: + serviceImpl.deleteBucket( + (com.google.storage.v2.DeleteBucketRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_BUCKET: + serviceImpl.getBucket( + (com.google.storage.v2.GetBucketRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_CREATE_BUCKET: + serviceImpl.createBucket( + (com.google.storage.v2.CreateBucketRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_LIST_BUCKETS: + serviceImpl.listBuckets( + (com.google.storage.v2.ListBucketsRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_LOCK_BUCKET_RETENTION_POLICY: + serviceImpl.lockBucketRetentionPolicy( + (com.google.storage.v2.LockBucketRetentionPolicyRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_IAM_POLICY: + serviceImpl.getIamPolicy( + (com.google.iam.v1.GetIamPolicyRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_SET_IAM_POLICY: + serviceImpl.setIamPolicy( + (com.google.iam.v1.SetIamPolicyRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_TEST_IAM_PERMISSIONS: + serviceImpl.testIamPermissions( + (com.google.iam.v1.TestIamPermissionsRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_UPDATE_BUCKET: + serviceImpl.updateBucket( + (com.google.storage.v2.UpdateBucketRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_COMPOSE_OBJECT: + serviceImpl.composeObject( + (com.google.storage.v2.ComposeObjectRequest) request, + 
(io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_DELETE_OBJECT: + serviceImpl.deleteObject( + (com.google.storage.v2.DeleteObjectRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_RESTORE_OBJECT: + serviceImpl.restoreObject( + (com.google.storage.v2.RestoreObjectRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_CANCEL_RESUMABLE_WRITE: + serviceImpl.cancelResumableWrite( + (com.google.storage.v2.CancelResumableWriteRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_OBJECT: + serviceImpl.getObject( + (com.google.storage.v2.GetObjectRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_READ_OBJECT: + serviceImpl.readObject( + (com.google.storage.v2.ReadObjectRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_UPDATE_OBJECT: + serviceImpl.updateObject( + (com.google.storage.v2.UpdateObjectRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_LIST_OBJECTS: + serviceImpl.listObjects( + (com.google.storage.v2.ListObjectsRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_REWRITE_OBJECT: + serviceImpl.rewriteObject( + (com.google.storage.v2.RewriteObjectRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_START_RESUMABLE_WRITE: + serviceImpl.startResumableWrite( + (com.google.storage.v2.StartResumableWriteRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_QUERY_WRITE_STATUS: + serviceImpl.queryWriteStatus( + (com.google.storage.v2.QueryWriteStatusRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_MOVE_OBJECT: + serviceImpl.moveObject( + (com.google.storage.v2.MoveObjectRequest) request, + (io.grpc.stub.StreamObserver) 
responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_BIDI_READ_OBJECT: + return (io.grpc.stub.StreamObserver) + serviceImpl.bidiReadObject( + (io.grpc.stub.StreamObserver) + responseObserver); + case METHODID_WRITE_OBJECT: + return (io.grpc.stub.StreamObserver) + serviceImpl.writeObject( + (io.grpc.stub.StreamObserver) + responseObserver); + case METHODID_BIDI_WRITE_OBJECT: + return (io.grpc.stub.StreamObserver) + serviceImpl.bidiWriteObject( + (io.grpc.stub.StreamObserver) + responseObserver); + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getDeleteBucketMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.DeleteBucketRequest, com.google.protobuf.Empty>( + service, METHODID_DELETE_BUCKET))) + .addMethod( + getGetBucketMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.GetBucketRequest, com.google.storage.v2.Bucket>( + service, METHODID_GET_BUCKET))) + .addMethod( + getCreateBucketMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.CreateBucketRequest, com.google.storage.v2.Bucket>( + service, METHODID_CREATE_BUCKET))) + .addMethod( + getListBucketsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.ListBucketsRequest, + com.google.storage.v2.ListBucketsResponse>(service, METHODID_LIST_BUCKETS))) + .addMethod( + getLockBucketRetentionPolicyMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.LockBucketRetentionPolicyRequest, + 
com.google.storage.v2.Bucket>(service, METHODID_LOCK_BUCKET_RETENTION_POLICY))) + .addMethod( + getGetIamPolicyMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers( + service, METHODID_GET_IAM_POLICY))) + .addMethod( + getSetIamPolicyMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers( + service, METHODID_SET_IAM_POLICY))) + .addMethod( + getTestIamPermissionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.iam.v1.TestIamPermissionsRequest, + com.google.iam.v1.TestIamPermissionsResponse>( + service, METHODID_TEST_IAM_PERMISSIONS))) + .addMethod( + getUpdateBucketMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.UpdateBucketRequest, com.google.storage.v2.Bucket>( + service, METHODID_UPDATE_BUCKET))) + .addMethod( + getComposeObjectMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.ComposeObjectRequest, com.google.storage.v2.Object>( + service, METHODID_COMPOSE_OBJECT))) + .addMethod( + getDeleteObjectMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.DeleteObjectRequest, com.google.protobuf.Empty>( + service, METHODID_DELETE_OBJECT))) + .addMethod( + getRestoreObjectMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.RestoreObjectRequest, com.google.storage.v2.Object>( + service, METHODID_RESTORE_OBJECT))) + .addMethod( + getCancelResumableWriteMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.CancelResumableWriteRequest, + com.google.storage.v2.CancelResumableWriteResponse>( + service, METHODID_CANCEL_RESUMABLE_WRITE))) + .addMethod( + getGetObjectMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.GetObjectRequest, com.google.storage.v2.Object>( + service, METHODID_GET_OBJECT))) + .addMethod( + 
getReadObjectMethod(), + io.grpc.stub.ServerCalls.asyncServerStreamingCall( + new MethodHandlers< + com.google.storage.v2.ReadObjectRequest, + com.google.storage.v2.ReadObjectResponse>(service, METHODID_READ_OBJECT))) + .addMethod( + getBidiReadObjectMethod(), + io.grpc.stub.ServerCalls.asyncBidiStreamingCall( + new MethodHandlers< + com.google.storage.v2.BidiReadObjectRequest, + com.google.storage.v2.BidiReadObjectResponse>( + service, METHODID_BIDI_READ_OBJECT))) + .addMethod( + getUpdateObjectMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.UpdateObjectRequest, com.google.storage.v2.Object>( + service, METHODID_UPDATE_OBJECT))) + .addMethod( + getWriteObjectMethod(), + io.grpc.stub.ServerCalls.asyncClientStreamingCall( + new MethodHandlers< + com.google.storage.v2.WriteObjectRequest, + com.google.storage.v2.WriteObjectResponse>(service, METHODID_WRITE_OBJECT))) + .addMethod( + getBidiWriteObjectMethod(), + io.grpc.stub.ServerCalls.asyncBidiStreamingCall( + new MethodHandlers< + com.google.storage.v2.BidiWriteObjectRequest, + com.google.storage.v2.BidiWriteObjectResponse>( + service, METHODID_BIDI_WRITE_OBJECT))) + .addMethod( + getListObjectsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.ListObjectsRequest, + com.google.storage.v2.ListObjectsResponse>(service, METHODID_LIST_OBJECTS))) + .addMethod( + getRewriteObjectMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.RewriteObjectRequest, + com.google.storage.v2.RewriteResponse>(service, METHODID_REWRITE_OBJECT))) + .addMethod( + getStartResumableWriteMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.StartResumableWriteRequest, + com.google.storage.v2.StartResumableWriteResponse>( + service, METHODID_START_RESUMABLE_WRITE))) + .addMethod( + getQueryWriteStatusMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new 
MethodHandlers< + com.google.storage.v2.QueryWriteStatusRequest, + com.google.storage.v2.QueryWriteStatusResponse>( + service, METHODID_QUERY_WRITE_STATUS))) + .addMethod( + getMoveObjectMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.storage.v2.MoveObjectRequest, com.google.storage.v2.Object>( + service, METHODID_MOVE_OBJECT))) + .build(); + } + + private abstract static class StorageBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + StorageBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.storage.v2.StorageProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("Storage"); + } + } + + private static final class StorageFileDescriptorSupplier extends StorageBaseDescriptorSupplier { + StorageFileDescriptorSupplier() {} + } + + private static final class StorageMethodDescriptorSupplier extends StorageBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + StorageMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (StorageGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new 
StorageFileDescriptorSupplier()) + .addMethod(getDeleteBucketMethod()) + .addMethod(getGetBucketMethod()) + .addMethod(getCreateBucketMethod()) + .addMethod(getListBucketsMethod()) + .addMethod(getLockBucketRetentionPolicyMethod()) + .addMethod(getGetIamPolicyMethod()) + .addMethod(getSetIamPolicyMethod()) + .addMethod(getTestIamPermissionsMethod()) + .addMethod(getUpdateBucketMethod()) + .addMethod(getComposeObjectMethod()) + .addMethod(getDeleteObjectMethod()) + .addMethod(getRestoreObjectMethod()) + .addMethod(getCancelResumableWriteMethod()) + .addMethod(getGetObjectMethod()) + .addMethod(getReadObjectMethod()) + .addMethod(getBidiReadObjectMethod()) + .addMethod(getUpdateObjectMethod()) + .addMethod(getWriteObjectMethod()) + .addMethod(getBidiWriteObjectMethod()) + .addMethod(getListObjectsMethod()) + .addMethod(getRewriteObjectMethod()) + .addMethod(getStartResumableWriteMethod()) + .addMethod(getQueryWriteStatusMethod()) + .addMethod(getMoveObjectMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-storage/owlbot.py b/java-storage/owlbot.py new file mode 100644 index 000000000000..cdbd175afff0 --- /dev/null +++ b/java-storage/owlbot.py @@ -0,0 +1,39 @@ +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import synthtool as s +from synthtool.languages import java + +'This script is used to synthesize generated parts of this library.' 
+import os + +for library in s.get_staging_dirs(): + # put any special-case replacements here + if os.path.exists('owl-bot-staging/v2/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/gapic_metadata.json'): + os.remove('owl-bot-staging/v2/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/gapic_metadata.json') + s.move(library) +s.remove_staging_dirs() +java.common_templates(monorepo=True, excludes=[ + ".github/*", + ".kokoro/*", + "samples/*", + "CODE_OF_CONDUCT.md", + "CONTRIBUTING.md", + "LICENSE", + "SECURITY.md", + "java.header", + "license-checks.xml", + "renovate.json", + ".gitignore" +]) diff --git a/java-storage/pom.xml b/java-storage/pom.xml new file mode 100644 index 000000000000..9600543ebab0 --- /dev/null +++ b/java-storage/pom.xml @@ -0,0 +1,187 @@ + + + 4.0.0 + com.google.cloud + google-cloud-storage-parent + pom + 2.64.1-SNAPSHOT + Storage Parent + https://github.com/googleapis/google-cloud-java + + Java idiomatic client for Google Cloud Platform services. 
+ + + + com.google.cloud + google-cloud-jar-parent + 1.82.0-SNAPSHOT + ../google-cloud-jar-parent/pom.xml + + + + + chingor + Jeff Ching + chingor@google.com + Google + + Developer + + + + + Google LLC + + + scm:git:git@github.com:googleapis/google-cloud-java.git + scm:git:git@github.com:googleapis/google-cloud-java.git + https://github.com/googleapis/google-cloud-java + HEAD + + + https://github.com/googleapis/google-cloud-java/issues + GitHub Issues + + + + + Apache-2.0 + https://www.apache.org/licenses/LICENSE-2.0.txt + + + + + UTF-8 + UTF-8 + github + google-cloud-storage-parent + 3.31.0 + + + + + + org.junit + junit-bom + 5.14.3 + pom + import + + + io.opentelemetry.contrib + opentelemetry-gcp-resources + 1.37.0-alpha + + + + com.google.cloud + google-cloud-storage + 2.64.1-SNAPSHOT + + + com.google.cloud + google-cloud-pubsub + 1.148.0 + test + + + com.google.api.grpc + proto-google-cloud-storage-v2 + 2.64.1-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-storage-v2 + 2.64.1-SNAPSHOT + + + com.google.api.grpc + gapic-google-cloud-storage-v2 + 2.64.1-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-storage-control-v2 + 2.64.1-SNAPSHOT + + + com.google.api.grpc + proto-google-cloud-storage-control-v2 + 2.64.1-SNAPSHOT + + + com.google.cloud + google-cloud-storage-control + 2.64.1-SNAPSHOT + + + com.google.cloud + google-cloud-conformance-tests + 0.3.7 + test + + + org.checkerframework + checker-qual + + + + + org.apache.httpcomponents + httpmime + 4.5.14 + test + + + + com.google.truth + truth + 1.4.5 + test + + + org.checkerframework + checker-qual + + + + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + + + + + google-cloud-storage + grpc-google-cloud-storage-v2 + grpc-google-cloud-storage-control-v2 + proto-google-cloud-storage-v2 + proto-google-cloud-storage-control-v2 + google-cloud-storage-control + gapic-google-cloud-storage-v2 + google-cloud-storage-bom + + + + + include-samples + + samples + + + + 
include-storage-shared-benchmarking + + storage-shared-benchmarking + + + + diff --git a/java-storage/proto-google-cloud-storage-control-v2/clirr-ignored-differences.xml b/java-storage/proto-google-cloud-storage-control-v2/clirr-ignored-differences.xml new file mode 100644 index 000000000000..81de990e40ed --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/clirr-ignored-differences.xml @@ -0,0 +1,81 @@ + + + + + 7012 + com/google/storage/control/v2/*OrBuilder + * get*(*) + + + 7012 + com/google/storage/control/v2/*OrBuilder + boolean contains*(*) + + + 7012 + com/google/storage/control/v2/*OrBuilder + boolean has*(*) + + + + + 7006 + com/google/storage/control/v2/** + * getDefaultInstanceForType() + ** + + + 7006 + com/google/storage/control/v2/** + * addRepeatedField(*) + ** + + + 7006 + com/google/storage/control/v2/** + * clear() + ** + + + 7006 + com/google/storage/control/v2/** + * clearField(*) + ** + + + 7006 + com/google/storage/control/v2/** + * clearOneof(*) + ** + + + 7006 + com/google/storage/control/v2/** + * clone() + ** + + + 7006 + com/google/storage/control/v2/** + * mergeUnknownFields(*) + ** + + + 7006 + com/google/storage/control/v2/** + * setField(*) + ** + + + 7006 + com/google/storage/control/v2/** + * setRepeatedField(*) + ** + + + 7006 + com/google/storage/control/v2/** + * setUnknownFields(*) + ** + + diff --git a/java-storage/proto-google-cloud-storage-control-v2/pom.xml b/java-storage/proto-google-cloud-storage-control-v2/pom.xml new file mode 100644 index 000000000000..190f0403d7ef --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/pom.xml @@ -0,0 +1,46 @@ + + 4.0.0 + com.google.api.grpc + proto-google-cloud-storage-control-v2 + 2.64.1-SNAPSHOT + proto-google-cloud-storage-control-v2 + Proto library for proto-google-cloud-storage-control-v2 + + com.google.cloud + google-cloud-storage-parent + 2.64.1-SNAPSHOT + + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + 
proto-google-common-protos + + + com.google.api.grpc + proto-google-iam-v1 + + + com.google.api + api-common + + + com.google.guava + guava + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/AnywhereCache.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/AnywhereCache.java new file mode 100644 index 000000000000..1b5bf71a132e --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/AnywhereCache.java @@ -0,0 +1,2235 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * An Anywhere Cache Instance.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.AnywhereCache} + */ +@com.google.protobuf.Generated +public final class AnywhereCache extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.AnywhereCache) + AnywhereCacheOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "AnywhereCache"); + } + + // Use AnywhereCache.newBuilder() to construct. + private AnywhereCache(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private AnywhereCache() { + name_ = ""; + zone_ = ""; + admissionPolicy_ = ""; + state_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_AnywhereCache_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_AnywhereCache_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.AnywhereCache.class, + com.google.storage.control.v2.AnywhereCache.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Immutable. The resource name of this AnywhereCache.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Immutable. The resource name of this AnywhereCache.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ZONE_FIELD_NUMBER = 10; + + @SuppressWarnings("serial") + private volatile java.lang.Object zone_ = ""; + + /** + * + * + *
+   * Immutable. The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * string zone = 10 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The zone. + */ + @java.lang.Override + public java.lang.String getZone() { + java.lang.Object ref = zone_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + zone_ = s; + return s; + } + } + + /** + * + * + *
+   * Immutable. The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * string zone = 10 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The bytes for zone. + */ + @java.lang.Override + public com.google.protobuf.ByteString getZoneBytes() { + java.lang.Object ref = zone_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + zone_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TTL_FIELD_NUMBER = 3; + private com.google.protobuf.Duration ttl_; + + /** + * + * + *
+   * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+   * that defines how long a cache entry can live. Default ttl value (24hrs)
+   * is applied if not specified in the create request. TTL must be in whole
+   * seconds.
+   * 
+ * + * .google.protobuf.Duration ttl = 3; + * + * @return Whether the ttl field is set. + */ + @java.lang.Override + public boolean hasTtl() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+   * that defines how long a cache entry can live. Default ttl value (24hrs)
+   * is applied if not specified in the create request. TTL must be in whole
+   * seconds.
+   * 
+ * + * .google.protobuf.Duration ttl = 3; + * + * @return The ttl. + */ + @java.lang.Override + public com.google.protobuf.Duration getTtl() { + return ttl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : ttl_; + } + + /** + * + * + *
+   * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+   * that defines how long a cache entry can live. Default ttl value (24hrs)
+   * is applied if not specified in the create request. TTL must be in whole
+   * seconds.
+   * 
+ * + * .google.protobuf.Duration ttl = 3; + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getTtlOrBuilder() { + return ttl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : ttl_; + } + + public static final int ADMISSION_POLICY_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private volatile java.lang.Object admissionPolicy_ = ""; + + /** + * + * + *
+   * Cache admission policy. Valid policies includes:
+   * `admit-on-first-miss` and `admit-on-second-miss`. Defaults to
+   * `admit-on-first-miss`. Default value is applied if not specified in the
+   * create request.
+   * 
+ * + * string admission_policy = 9; + * + * @return The admissionPolicy. + */ + @java.lang.Override + public java.lang.String getAdmissionPolicy() { + java.lang.Object ref = admissionPolicy_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + admissionPolicy_ = s; + return s; + } + } + + /** + * + * + *
+   * Cache admission policy. Valid policies includes:
+   * `admit-on-first-miss` and `admit-on-second-miss`. Defaults to
+   * `admit-on-first-miss`. Default value is applied if not specified in the
+   * create request.
+   * 
+ * + * string admission_policy = 9; + * + * @return The bytes for admissionPolicy. + */ + @java.lang.Override + public com.google.protobuf.ByteString getAdmissionPolicyBytes() { + java.lang.Object ref = admissionPolicy_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + admissionPolicy_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATE_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object state_ = ""; + + /** + * + * + *
+   * Output only. Cache state including RUNNING, CREATING, DISABLED and PAUSED.
+   * 
+ * + * string state = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The state. + */ + @java.lang.Override + public java.lang.String getState() { + java.lang.Object ref = state_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + state_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. Cache state including RUNNING, CREATING, DISABLED and PAUSED.
+   * 
+ * + * string state = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for state. + */ + @java.lang.Override + public com.google.protobuf.ByteString getStateBytes() { + java.lang.Object ref = state_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + state_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CREATE_TIME_FIELD_NUMBER = 6; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
+   * Output only. Time when Anywhere cache instance is allocated.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Output only. Time when Anywhere cache instance is allocated.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
+   * Output only. Time when Anywhere cache instance is allocated.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 7; + private com.google.protobuf.Timestamp updateTime_; + + /** + * + * + *
+   * Output only. Time when Anywhere cache instance is last updated, including
+   * creation.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + @java.lang.Override + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Output only. Time when Anywhere cache instance is last updated, including
+   * creation.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + /** + * + * + *
+   * Output only. Time when Anywhere cache instance is last updated, including
+   * creation.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + public static final int PENDING_UPDATE_FIELD_NUMBER = 8; + private boolean pendingUpdate_ = false; + + /** + * + * + *
+   * Output only. True if there is an active update operation against this cache
+   * instance. Subsequential update requests will be rejected if this field is
+   * true. Output only.
+   * 
+ * + * bool pending_update = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The pendingUpdate. + */ + @java.lang.Override + public boolean getPendingUpdate() { + return pendingUpdate_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getTtl()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(state_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, state_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(6, getCreateTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(7, getUpdateTime()); + } + if (pendingUpdate_ != false) { + output.writeBool(8, pendingUpdate_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(admissionPolicy_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 9, admissionPolicy_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(zone_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 10, zone_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getTtl()); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(state_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, state_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getCreateTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getUpdateTime()); + } + if (pendingUpdate_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(8, pendingUpdate_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(admissionPolicy_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(9, admissionPolicy_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(zone_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(10, zone_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.AnywhereCache)) { + return super.equals(obj); + } + com.google.storage.control.v2.AnywhereCache other = + (com.google.storage.control.v2.AnywhereCache) obj; + + if (!getName().equals(other.getName())) return false; + if (!getZone().equals(other.getZone())) return false; + if (hasTtl() != other.hasTtl()) return false; + if (hasTtl()) { + if (!getTtl().equals(other.getTtl())) return false; + } + if (!getAdmissionPolicy().equals(other.getAdmissionPolicy())) return false; + if (!getState().equals(other.getState())) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasUpdateTime() != other.hasUpdateTime()) return false; + if (hasUpdateTime()) { + if (!getUpdateTime().equals(other.getUpdateTime())) return false; + } + if (getPendingUpdate() != 
other.getPendingUpdate()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + ZONE_FIELD_NUMBER; + hash = (53 * hash) + getZone().hashCode(); + if (hasTtl()) { + hash = (37 * hash) + TTL_FIELD_NUMBER; + hash = (53 * hash) + getTtl().hashCode(); + } + hash = (37 * hash) + ADMISSION_POLICY_FIELD_NUMBER; + hash = (53 * hash) + getAdmissionPolicy().hashCode(); + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + getState().hashCode(); + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasUpdateTime()) { + hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getUpdateTime().hashCode(); + } + hash = (37 * hash) + PENDING_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getPendingUpdate()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.AnywhereCache parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.AnywhereCache parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.AnywhereCache parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.storage.control.v2.AnywhereCache parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.AnywhereCache parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.AnywhereCache parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.AnywhereCache parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.AnywhereCache parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.AnywhereCache parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.AnywhereCache parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.AnywhereCache parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.AnywhereCache parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.control.v2.AnywhereCache prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * An Anywhere Cache Instance.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.AnywhereCache} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.AnywhereCache) + com.google.storage.control.v2.AnywhereCacheOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_AnywhereCache_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_AnywhereCache_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.AnywhereCache.class, + com.google.storage.control.v2.AnywhereCache.Builder.class); + } + + // Construct using com.google.storage.control.v2.AnywhereCache.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetTtlFieldBuilder(); + internalGetCreateTimeFieldBuilder(); + internalGetUpdateTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + zone_ = ""; + ttl_ = null; + if (ttlBuilder_ != null) { + ttlBuilder_.dispose(); + ttlBuilder_ = null; + } + admissionPolicy_ = ""; + state_ = ""; + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + pendingUpdate_ = 
false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_AnywhereCache_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.AnywhereCache getDefaultInstanceForType() { + return com.google.storage.control.v2.AnywhereCache.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.AnywhereCache build() { + com.google.storage.control.v2.AnywhereCache result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.AnywhereCache buildPartial() { + com.google.storage.control.v2.AnywhereCache result = + new com.google.storage.control.v2.AnywhereCache(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.AnywhereCache result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.zone_ = zone_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.ttl_ = ttlBuilder_ == null ? ttl_ : ttlBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.admissionPolicy_ = admissionPolicy_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.state_ = state_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.updateTime_ = updateTimeBuilder_ == null ? 
updateTime_ : updateTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.pendingUpdate_ = pendingUpdate_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.AnywhereCache) { + return mergeFrom((com.google.storage.control.v2.AnywhereCache) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.AnywhereCache other) { + if (other == com.google.storage.control.v2.AnywhereCache.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getZone().isEmpty()) { + zone_ = other.zone_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasTtl()) { + mergeTtl(other.getTtl()); + } + if (!other.getAdmissionPolicy().isEmpty()) { + admissionPolicy_ = other.admissionPolicy_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (!other.getState().isEmpty()) { + state_ = other.state_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasUpdateTime()) { + mergeUpdateTime(other.getUpdateTime()); + } + if (other.getPendingUpdate() != false) { + setPendingUpdate(other.getPendingUpdate()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + 
done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 26: + { + input.readMessage(internalGetTtlFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 42: + { + state_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 58: + { + input.readMessage( + internalGetUpdateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 58 + case 64: + { + pendingUpdate_ = input.readBool(); + bitField0_ |= 0x00000080; + break; + } // case 64 + case 74: + { + admissionPolicy_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 74 + case 82: + { + zone_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 82 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Immutable. The resource name of this AnywhereCache.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Immutable. The resource name of this AnywhereCache.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Immutable. The resource name of this AnywhereCache.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. The resource name of this AnywhereCache.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. The resource name of this AnywhereCache.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object zone_ = ""; + + /** + * + * + *
+     * Immutable. The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * string zone = 10 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The zone. + */ + public java.lang.String getZone() { + java.lang.Object ref = zone_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + zone_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Immutable. The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * string zone = 10 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The bytes for zone. + */ + public com.google.protobuf.ByteString getZoneBytes() { + java.lang.Object ref = zone_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + zone_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Immutable. The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * string zone = 10 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @param value The zone to set. + * @return This builder for chaining. + */ + public Builder setZone(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + zone_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * string zone = 10 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return This builder for chaining. + */ + public Builder clearZone() { + zone_ = getDefaultInstance().getZone(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * string zone = 10 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @param value The bytes for zone to set. + * @return This builder for chaining. + */ + public Builder setZoneBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + zone_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.Duration ttl_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + ttlBuilder_; + + /** + * + * + *
+     * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+     * that defines how long a cache entry can live. Default ttl value (24hrs)
+     * is applied if not specified in the create request. TTL must be in whole
+     * seconds.
+     * 
+ * + * .google.protobuf.Duration ttl = 3; + * + * @return Whether the ttl field is set. + */ + public boolean hasTtl() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+     * that defines how long a cache entry can live. Default ttl value (24hrs)
+     * is applied if not specified in the create request. TTL must be in whole
+     * seconds.
+     * 
+ * + * .google.protobuf.Duration ttl = 3; + * + * @return The ttl. + */ + public com.google.protobuf.Duration getTtl() { + if (ttlBuilder_ == null) { + return ttl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : ttl_; + } else { + return ttlBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+     * that defines how long a cache entry can live. Default ttl value (24hrs)
+     * is applied if not specified in the create request. TTL must be in whole
+     * seconds.
+     * 
+ * + * .google.protobuf.Duration ttl = 3; + */ + public Builder setTtl(com.google.protobuf.Duration value) { + if (ttlBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ttl_ = value; + } else { + ttlBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+     * that defines how long a cache entry can live. Default ttl value (24hrs)
+     * is applied if not specified in the create request. TTL must be in whole
+     * seconds.
+     * 
+ * + * .google.protobuf.Duration ttl = 3; + */ + public Builder setTtl(com.google.protobuf.Duration.Builder builderForValue) { + if (ttlBuilder_ == null) { + ttl_ = builderForValue.build(); + } else { + ttlBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+     * that defines how long a cache entry can live. Default ttl value (24hrs)
+     * is applied if not specified in the create request. TTL must be in whole
+     * seconds.
+     * 
+ * + * .google.protobuf.Duration ttl = 3; + */ + public Builder mergeTtl(com.google.protobuf.Duration value) { + if (ttlBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && ttl_ != null + && ttl_ != com.google.protobuf.Duration.getDefaultInstance()) { + getTtlBuilder().mergeFrom(value); + } else { + ttl_ = value; + } + } else { + ttlBuilder_.mergeFrom(value); + } + if (ttl_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+     * that defines how long a cache entry can live. Default ttl value (24hrs)
+     * is applied if not specified in the create request. TTL must be in whole
+     * seconds.
+     * 
+ * + * .google.protobuf.Duration ttl = 3; + */ + public Builder clearTtl() { + bitField0_ = (bitField0_ & ~0x00000004); + ttl_ = null; + if (ttlBuilder_ != null) { + ttlBuilder_.dispose(); + ttlBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+     * that defines how long a cache entry can live. Default ttl value (24hrs)
+     * is applied if not specified in the create request. TTL must be in whole
+     * seconds.
+     * 
+ * + * .google.protobuf.Duration ttl = 3; + */ + public com.google.protobuf.Duration.Builder getTtlBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetTtlFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+     * that defines how long a cache entry can live. Default ttl value (24hrs)
+     * is applied if not specified in the create request. TTL must be in whole
+     * seconds.
+     * 
+ * + * .google.protobuf.Duration ttl = 3; + */ + public com.google.protobuf.DurationOrBuilder getTtlOrBuilder() { + if (ttlBuilder_ != null) { + return ttlBuilder_.getMessageOrBuilder(); + } else { + return ttl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : ttl_; + } + } + + /** + * + * + *
+     * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+     * that defines how long a cache entry can live. Default ttl value (24hrs)
+     * is applied if not specified in the create request. TTL must be in whole
+     * seconds.
+     * 
+ * + * .google.protobuf.Duration ttl = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + internalGetTtlFieldBuilder() { + if (ttlBuilder_ == null) { + ttlBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>(getTtl(), getParentForChildren(), isClean()); + ttl_ = null; + } + return ttlBuilder_; + } + + private java.lang.Object admissionPolicy_ = ""; + + /** + * + * + *
+     * Cache admission policy. Valid policies includes:
+     * `admit-on-first-miss` and `admit-on-second-miss`. Defaults to
+     * `admit-on-first-miss`. Default value is applied if not specified in the
+     * create request.
+     * 
+ * + * string admission_policy = 9; + * + * @return The admissionPolicy. + */ + public java.lang.String getAdmissionPolicy() { + java.lang.Object ref = admissionPolicy_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + admissionPolicy_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Cache admission policy. Valid policies includes:
+     * `admit-on-first-miss` and `admit-on-second-miss`. Defaults to
+     * `admit-on-first-miss`. Default value is applied if not specified in the
+     * create request.
+     * 
+ * + * string admission_policy = 9; + * + * @return The bytes for admissionPolicy. + */ + public com.google.protobuf.ByteString getAdmissionPolicyBytes() { + java.lang.Object ref = admissionPolicy_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + admissionPolicy_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Cache admission policy. Valid policies includes:
+     * `admit-on-first-miss` and `admit-on-second-miss`. Defaults to
+     * `admit-on-first-miss`. Default value is applied if not specified in the
+     * create request.
+     * 
+ * + * string admission_policy = 9; + * + * @param value The admissionPolicy to set. + * @return This builder for chaining. + */ + public Builder setAdmissionPolicy(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + admissionPolicy_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Cache admission policy. Valid policies includes:
+     * `admit-on-first-miss` and `admit-on-second-miss`. Defaults to
+     * `admit-on-first-miss`. Default value is applied if not specified in the
+     * create request.
+     * 
+ * + * string admission_policy = 9; + * + * @return This builder for chaining. + */ + public Builder clearAdmissionPolicy() { + admissionPolicy_ = getDefaultInstance().getAdmissionPolicy(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Cache admission policy. Valid policies includes:
+     * `admit-on-first-miss` and `admit-on-second-miss`. Defaults to
+     * `admit-on-first-miss`. Default value is applied if not specified in the
+     * create request.
+     * 
+ * + * string admission_policy = 9; + * + * @param value The bytes for admissionPolicy to set. + * @return This builder for chaining. + */ + public Builder setAdmissionPolicyBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + admissionPolicy_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private java.lang.Object state_ = ""; + + /** + * + * + *
+     * Output only. Cache state including RUNNING, CREATING, DISABLED and PAUSED.
+     * 
+ * + * string state = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The state. + */ + public java.lang.String getState() { + java.lang.Object ref = state_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + state_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. Cache state including RUNNING, CREATING, DISABLED and PAUSED.
+     * 
+ * + * string state = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for state. + */ + public com.google.protobuf.ByteString getStateBytes() { + java.lang.Object ref = state_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + state_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. Cache state including RUNNING, CREATING, DISABLED and PAUSED.
+     * 
+ * + * string state = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The state to set. + * @return This builder for chaining. + */ + public Builder setState(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + state_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Cache state including RUNNING, CREATING, DISABLED and PAUSED.
+     * 
+ * + * string state = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearState() { + state_ = getDefaultInstance().getState(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Cache state including RUNNING, CREATING, DISABLED and PAUSED.
+     * 
+ * + * string state = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for state to set. + * @return This builder for chaining. + */ + public Builder setStateBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + state_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is allocated.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is allocated.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is allocated.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is allocated.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is allocated.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is allocated.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000020); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is allocated.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is allocated.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is allocated.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp updateTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateTimeBuilder_; + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is last updated, including
+     * creation.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is last updated, including
+     * creation.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is last updated, including
+     * creation.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + } else { + updateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is last updated, including
+     * creation.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is last updated, including
+     * creation.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && updateTime_ != null + && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateTimeBuilder().mergeFrom(value); + } else { + updateTime_ = value; + } + } else { + updateTimeBuilder_.mergeFrom(value); + } + if (updateTime_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is last updated, including
+     * creation.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateTime() { + bitField0_ = (bitField0_ & ~0x00000040); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is last updated, including
+     * creation.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return internalGetUpdateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is last updated, including
+     * creation.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } + } + + /** + * + * + *
+     * Output only. Time when Anywhere cache instance is last updated, including
+     * creation.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetUpdateTimeFieldBuilder() { + if (updateTimeBuilder_ == null) { + updateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpdateTime(), getParentForChildren(), isClean()); + updateTime_ = null; + } + return updateTimeBuilder_; + } + + private boolean pendingUpdate_; + + /** + * + * + *
+     * Output only. True if there is an active update operation against this cache
+     * instance. Subsequential update requests will be rejected if this field is
+     * true. Output only.
+     * 
+ * + * bool pending_update = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The pendingUpdate. + */ + @java.lang.Override + public boolean getPendingUpdate() { + return pendingUpdate_; + } + + /** + * + * + *
+     * Output only. True if there is an active update operation against this cache
+     * instance. Subsequential update requests will be rejected if this field is
+     * true. Output only.
+     * 
+ * + * bool pending_update = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The pendingUpdate to set. + * @return This builder for chaining. + */ + public Builder setPendingUpdate(boolean value) { + + pendingUpdate_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. True if there is an active update operation against this cache
+     * instance. Subsequential update requests will be rejected if this field is
+     * true. Output only.
+     * 
+ * + * bool pending_update = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearPendingUpdate() { + bitField0_ = (bitField0_ & ~0x00000080); + pendingUpdate_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.AnywhereCache) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.AnywhereCache) + private static final com.google.storage.control.v2.AnywhereCache DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.AnywhereCache(); + } + + public static com.google.storage.control.v2.AnywhereCache getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AnywhereCache parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.AnywhereCache getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/AnywhereCacheName.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/AnywhereCacheName.java new file mode 100644 index 000000000000..f56f02918761 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/AnywhereCacheName.java @@ -0,0 +1,227 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.control.v2; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class AnywhereCacheName implements ResourceName { + private static final PathTemplate PROJECT_BUCKET_ANYWHERE_CACHE = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}"); + private volatile Map fieldValuesMap; + private final String project; + private final String bucket; + private final String anywhereCache; + + @Deprecated + protected AnywhereCacheName() { + project = null; + bucket = null; + anywhereCache = null; + } + + private AnywhereCacheName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + bucket = Preconditions.checkNotNull(builder.getBucket()); + anywhereCache = Preconditions.checkNotNull(builder.getAnywhereCache()); + } + + public String getProject() { + return project; + } + + public String getBucket() { + return bucket; + } + + public String getAnywhereCache() { + return anywhereCache; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static AnywhereCacheName of(String project, String bucket, String anywhereCache) { + return newBuilder() + .setProject(project) + .setBucket(bucket) + .setAnywhereCache(anywhereCache) + .build(); + } + + public static String format(String project, String bucket, String anywhereCache) { + return newBuilder() + .setProject(project) + .setBucket(bucket) + .setAnywhereCache(anywhereCache) + .build() + .toString(); + } + + public static AnywhereCacheName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_BUCKET_ANYWHERE_CACHE.validatedMatch( + formattedString, "AnywhereCacheName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("bucket"), matchMap.get("anywhere_cache")); + } + + public static List parseList(List formattedStrings) { + List list = new 
ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (AnywhereCacheName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_BUCKET_ANYWHERE_CACHE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (bucket != null) { + fieldMapBuilder.put("bucket", bucket); + } + if (anywhereCache != null) { + fieldMapBuilder.put("anywhere_cache", anywhereCache); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_BUCKET_ANYWHERE_CACHE.instantiate( + "project", project, "bucket", bucket, "anywhere_cache", anywhereCache); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + AnywhereCacheName that = ((AnywhereCacheName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.bucket, that.bucket) + && Objects.equals(this.anywhereCache, that.anywhereCache); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(bucket); + h *= 1000003; + h ^= Objects.hashCode(anywhereCache); + return h; + } + + /** Builder for 
projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}. */ + public static class Builder { + private String project; + private String bucket; + private String anywhereCache; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getBucket() { + return bucket; + } + + public String getAnywhereCache() { + return anywhereCache; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setBucket(String bucket) { + this.bucket = bucket; + return this; + } + + public Builder setAnywhereCache(String anywhereCache) { + this.anywhereCache = anywhereCache; + return this; + } + + private Builder(AnywhereCacheName anywhereCacheName) { + this.project = anywhereCacheName.project; + this.bucket = anywhereCacheName.bucket; + this.anywhereCache = anywhereCacheName.anywhereCache; + } + + public AnywhereCacheName build() { + return new AnywhereCacheName(this); + } + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/AnywhereCacheOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/AnywhereCacheOrBuilder.java new file mode 100644 index 000000000000..874457393300 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/AnywhereCacheOrBuilder.java @@ -0,0 +1,288 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface AnywhereCacheOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.AnywhereCache) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Immutable. The resource name of this AnywhereCache.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Immutable. The resource name of this AnywhereCache.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Immutable. The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * string zone = 10 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The zone. + */ + java.lang.String getZone(); + + /** + * + * + *
+   * Immutable. The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * string zone = 10 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The bytes for zone. + */ + com.google.protobuf.ByteString getZoneBytes(); + + /** + * + * + *
+   * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+   * that defines how long a cache entry can live. Default ttl value (24hrs)
+   * is applied if not specified in the create request. TTL must be in whole
+   * seconds.
+   * 
+ * + * .google.protobuf.Duration ttl = 3; + * + * @return Whether the ttl field is set. + */ + boolean hasTtl(); + + /** + * + * + *
+   * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+   * that defines how long a cache entry can live. Default ttl value (24hrs)
+   * is applied if not specified in the create request. TTL must be in whole
+   * seconds.
+   * 
+ * + * .google.protobuf.Duration ttl = 3; + * + * @return The ttl. + */ + com.google.protobuf.Duration getTtl(); + + /** + * + * + *
+   * Cache entry TTL (ranges between 1h to 7d). This is a cache-level config
+   * that defines how long a cache entry can live. Default ttl value (24hrs)
+   * is applied if not specified in the create request. TTL must be in whole
+   * seconds.
+   * 
+ * + * .google.protobuf.Duration ttl = 3; + */ + com.google.protobuf.DurationOrBuilder getTtlOrBuilder(); + + /** + * + * + *
+   * Cache admission policy. Valid policies includes:
+   * `admit-on-first-miss` and `admit-on-second-miss`. Defaults to
+   * `admit-on-first-miss`. Default value is applied if not specified in the
+   * create request.
+   * 
+ * + * string admission_policy = 9; + * + * @return The admissionPolicy. + */ + java.lang.String getAdmissionPolicy(); + + /** + * + * + *
+   * Cache admission policy. Valid policies includes:
+   * `admit-on-first-miss` and `admit-on-second-miss`. Defaults to
+   * `admit-on-first-miss`. Default value is applied if not specified in the
+   * create request.
+   * 
+ * + * string admission_policy = 9; + * + * @return The bytes for admissionPolicy. + */ + com.google.protobuf.ByteString getAdmissionPolicyBytes(); + + /** + * + * + *
+   * Output only. Cache state including RUNNING, CREATING, DISABLED and PAUSED.
+   * 
+ * + * string state = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The state. + */ + java.lang.String getState(); + + /** + * + * + *
+   * Output only. Cache state including RUNNING, CREATING, DISABLED and PAUSED.
+   * 
+ * + * string state = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for state. + */ + com.google.protobuf.ByteString getStateBytes(); + + /** + * + * + *
+   * Output only. Time when Anywhere cache instance is allocated.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
+   * Output only. Time when Anywhere cache instance is allocated.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
+   * Output only. Time when Anywhere cache instance is allocated.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. Time when Anywhere cache instance is last updated, including
+   * creation.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + boolean hasUpdateTime(); + + /** + * + * + *
+   * Output only. Time when Anywhere cache instance is last updated, including
+   * creation.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + com.google.protobuf.Timestamp getUpdateTime(); + + /** + * + * + *
+   * Output only. Time when Anywhere cache instance is last updated, including
+   * creation.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. True if there is an active update operation against this cache
+   * instance. Subsequential update requests will be rejected if this field is
+   * true. Output only.
+   * 
+ * + * bool pending_update = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The pendingUpdate. + */ + boolean getPendingUpdate(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/BucketName.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/BucketName.java new file mode 100644 index 000000000000..17822379adba --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/BucketName.java @@ -0,0 +1,191 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.control.v2; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class BucketName implements ResourceName { + private static final PathTemplate PROJECT_BUCKET = + PathTemplate.createWithoutUrlEncoding("projects/{project}/buckets/{bucket}"); + private volatile Map fieldValuesMap; + private final String project; + private final String bucket; + + @Deprecated + protected BucketName() { + project = null; + bucket = null; + } + + private BucketName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + bucket = Preconditions.checkNotNull(builder.getBucket()); + } + + public String getProject() { + return project; + } + + public String getBucket() { + return bucket; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static BucketName of(String project, String bucket) { + return newBuilder().setProject(project).setBucket(bucket).build(); + } + + public static String format(String project, String bucket) { + return newBuilder().setProject(project).setBucket(bucket).build().toString(); + } + + public static BucketName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_BUCKET.validatedMatch( + formattedString, "BucketName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("bucket")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (BucketName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_BUCKET.matches(formattedString); + } + 
+ @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (bucket != null) { + fieldMapBuilder.put("bucket", bucket); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_BUCKET.instantiate("project", project, "bucket", bucket); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + BucketName that = ((BucketName) o); + return Objects.equals(this.project, that.project) && Objects.equals(this.bucket, that.bucket); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(bucket); + return h; + } + + /** Builder for projects/{project}/buckets/{bucket}. 
*/ + public static class Builder { + private String project; + private String bucket; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getBucket() { + return bucket; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setBucket(String bucket) { + this.bucket = bucket; + return this; + } + + private Builder(BucketName bucketName) { + this.project = bucketName.project; + this.bucket = bucketName.bucket; + } + + public BucketName build() { + return new BucketName(this); + } + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CommonLongRunningOperationMetadata.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CommonLongRunningOperationMetadata.java new file mode 100644 index 000000000000..e9a292063e09 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CommonLongRunningOperationMetadata.java @@ -0,0 +1,1698 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * The message contains metadata that is common to all Storage Control
+ * long-running operations, present in its `google.longrunning.Operation`
+ * messages, and accessible via `metadata.common_metadata`.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.CommonLongRunningOperationMetadata} + */ +@com.google.protobuf.Generated +public final class CommonLongRunningOperationMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.CommonLongRunningOperationMetadata) + CommonLongRunningOperationMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CommonLongRunningOperationMetadata"); + } + + // Use CommonLongRunningOperationMetadata.newBuilder() to construct. + private CommonLongRunningOperationMetadata( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CommonLongRunningOperationMetadata() { + type_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CommonLongRunningOperationMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CommonLongRunningOperationMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.CommonLongRunningOperationMetadata.class, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder.class); + } + + private int bitField0_; + public static final int CREATE_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
+   * Output only. The time the operation was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Output only. The time the operation was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
+   * Output only. The time the operation was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int END_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp endTime_; + + /** + * + * + *
+   * Output only. The time the operation finished running.
+   * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the endTime field is set. + */ + @java.lang.Override + public boolean hasEndTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Output only. The time the operation finished running.
+   * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The endTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEndTime() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + /** + * + * + *
+   * Output only. The time the operation finished running.
+   * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp updateTime_; + + /** + * + * + *
+   * Output only. The time the operation was last modified.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + @java.lang.Override + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Output only. The time the operation was last modified.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + /** + * + * + *
+   * Output only. The time the operation was last modified.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + public static final int TYPE_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object type_ = ""; + + /** + * + * + *
+   * Output only. The type of operation invoked.
+   * 
+ * + * string type = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The type. + */ + @java.lang.Override + public java.lang.String getType() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The type of operation invoked.
+   * 
+ * + * string type = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for type. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUESTED_CANCELLATION_FIELD_NUMBER = 5; + private boolean requestedCancellation_ = false; + + /** + * + * + *
+   * Output only. Identifies whether the user has requested cancellation.
+   * 
+ * + * bool requested_cancellation = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The requestedCancellation. + */ + @java.lang.Override + public boolean getRequestedCancellation() { + return requestedCancellation_; + } + + public static final int PROGRESS_PERCENT_FIELD_NUMBER = 6; + private int progressPercent_ = 0; + + /** + * + * + *
+   * Output only. The estimated progress of the operation in percentage [0,
+   * 100]. The value -1 means the progress is unknown.
+   * 
+ * + * int32 progress_percent = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The progressPercent. + */ + @java.lang.Override + public int getProgressPercent() { + return progressPercent_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getEndTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(3, getUpdateTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(type_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, type_); + } + if (requestedCancellation_ != false) { + output.writeBool(5, requestedCancellation_); + } + if (progressPercent_ != 0) { + output.writeInt32(6, progressPercent_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getEndTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getUpdateTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(type_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, type_); + } + if (requestedCancellation_ != false) { + size += 
com.google.protobuf.CodedOutputStream.computeBoolSize(5, requestedCancellation_); + } + if (progressPercent_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(6, progressPercent_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.CommonLongRunningOperationMetadata)) { + return super.equals(obj); + } + com.google.storage.control.v2.CommonLongRunningOperationMetadata other = + (com.google.storage.control.v2.CommonLongRunningOperationMetadata) obj; + + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasEndTime() != other.hasEndTime()) return false; + if (hasEndTime()) { + if (!getEndTime().equals(other.getEndTime())) return false; + } + if (hasUpdateTime() != other.hasUpdateTime()) return false; + if (hasUpdateTime()) { + if (!getUpdateTime().equals(other.getUpdateTime())) return false; + } + if (!getType().equals(other.getType())) return false; + if (getRequestedCancellation() != other.getRequestedCancellation()) return false; + if (getProgressPercent() != other.getProgressPercent()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasEndTime()) { + hash = (37 * hash) + END_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEndTime().hashCode(); + } + if (hasUpdateTime()) { + hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + 
getUpdateTime().hashCode(); + } + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType().hashCode(); + hash = (37 * hash) + REQUESTED_CANCELLATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getRequestedCancellation()); + hash = (37 * hash) + PROGRESS_PERCENT_FIELD_NUMBER; + hash = (53 * hash) + getProgressPercent(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.CommonLongRunningOperationMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CommonLongRunningOperationMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CommonLongRunningOperationMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CommonLongRunningOperationMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CommonLongRunningOperationMetadata parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CommonLongRunningOperationMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CommonLongRunningOperationMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CommonLongRunningOperationMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.CommonLongRunningOperationMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CommonLongRunningOperationMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.CommonLongRunningOperationMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CommonLongRunningOperationMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static 
Builder newBuilder( + com.google.storage.control.v2.CommonLongRunningOperationMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * The message contains metadata that is common to all Storage Control
+   * long-running operations, present in its `google.longrunning.Operation`
+   * messages, and accessible via `metadata.common_metadata`.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.CommonLongRunningOperationMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.CommonLongRunningOperationMetadata) + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CommonLongRunningOperationMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CommonLongRunningOperationMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.CommonLongRunningOperationMetadata.class, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder.class); + } + + // Construct using com.google.storage.control.v2.CommonLongRunningOperationMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCreateTimeFieldBuilder(); + internalGetEndTimeFieldBuilder(); + internalGetUpdateTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + updateTime_ = null; + 
if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + type_ = ""; + requestedCancellation_ = false; + progressPercent_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CommonLongRunningOperationMetadata_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.CommonLongRunningOperationMetadata + getDefaultInstanceForType() { + return com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.CommonLongRunningOperationMetadata build() { + com.google.storage.control.v2.CommonLongRunningOperationMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.CommonLongRunningOperationMetadata buildPartial() { + com.google.storage.control.v2.CommonLongRunningOperationMetadata result = + new com.google.storage.control.v2.CommonLongRunningOperationMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.control.v2.CommonLongRunningOperationMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.endTime_ = endTimeBuilder_ == null ? endTime_ : endTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.updateTime_ = updateTimeBuilder_ == null ? 
updateTime_ : updateTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.type_ = type_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.requestedCancellation_ = requestedCancellation_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.progressPercent_ = progressPercent_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.CommonLongRunningOperationMetadata) { + return mergeFrom((com.google.storage.control.v2.CommonLongRunningOperationMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.control.v2.CommonLongRunningOperationMetadata other) { + if (other + == com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance()) + return this; + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasEndTime()) { + mergeEndTime(other.getEndTime()); + } + if (other.hasUpdateTime()) { + mergeUpdateTime(other.getUpdateTime()); + } + if (!other.getType().isEmpty()) { + type_ = other.type_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (other.getRequestedCancellation() != false) { + setRequestedCancellation(other.getRequestedCancellation()); + } + if (other.getProgressPercent() != 0) { + setProgressPercent(other.getProgressPercent()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + 
int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(internalGetEndTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetUpdateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + type_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 40: + { + requestedCancellation_ = input.readBool(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 48: + { + progressPercent_ = input.readInt32(); + bitField0_ |= 0x00000020; + break; + } // case 48 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
+     * Output only. The time the operation was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Output only. The time the operation was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The time the operation was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time the operation was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time the operation was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The time the operation was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000001); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time the operation was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The time the operation was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
+     * Output only. The time the operation was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp endTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + endTimeBuilder_; + + /** + * + * + *
+     * Output only. The time the operation finished running.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the endTime field is set. + */ + public boolean hasEndTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Output only. The time the operation finished running.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The endTime. + */ + public com.google.protobuf.Timestamp getEndTime() { + if (endTimeBuilder_ == null) { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } else { + return endTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The time the operation finished running.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + endTime_ = value; + } else { + endTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time the operation finished running.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEndTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (endTimeBuilder_ == null) { + endTime_ = builderForValue.build(); + } else { + endTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time the operation finished running.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && endTime_ != null + && endTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEndTimeBuilder().mergeFrom(value); + } else { + endTime_ = value; + } + } else { + endTimeBuilder_.mergeFrom(value); + } + if (endTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The time the operation finished running.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearEndTime() { + bitField0_ = (bitField0_ & ~0x00000002); + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time the operation finished running.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getEndTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetEndTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The time the operation finished running.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + if (endTimeBuilder_ != null) { + return endTimeBuilder_.getMessageOrBuilder(); + } else { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + } + + /** + * + * + *
+     * Output only. The time the operation finished running.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEndTimeFieldBuilder() { + if (endTimeBuilder_ == null) { + endTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEndTime(), getParentForChildren(), isClean()); + endTime_ = null; + } + return endTimeBuilder_; + } + + private com.google.protobuf.Timestamp updateTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateTimeBuilder_; + + /** + * + * + *
+     * Output only. The time the operation was last modified.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Output only. The time the operation was last modified.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The time the operation was last modified.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + } else { + updateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time the operation was last modified.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time the operation was last modified.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && updateTime_ != null + && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateTimeBuilder().mergeFrom(value); + } else { + updateTime_ = value; + } + } else { + updateTimeBuilder_.mergeFrom(value); + } + if (updateTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The time the operation was last modified.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateTime() { + bitField0_ = (bitField0_ & ~0x00000004); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time the operation was last modified.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetUpdateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The time the operation was last modified.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } + } + + /** + * + * + *
+     * Output only. The time the operation was last modified.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetUpdateTimeFieldBuilder() { + if (updateTimeBuilder_ == null) { + updateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpdateTime(), getParentForChildren(), isClean()); + updateTime_ = null; + } + return updateTimeBuilder_; + } + + private java.lang.Object type_ = ""; + + /** + * + * + *
+     * Output only. The type of operation invoked.
+     * 
+ * + * string type = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The type. + */ + public java.lang.String getType() { + java.lang.Object ref = type_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. The type of operation invoked.
+     * 
+ * + * string type = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for type. + */ + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. The type of operation invoked.
+     * 
+ * + * string type = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + type_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The type of operation invoked.
+     * 
+ * + * string type = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearType() { + type_ = getDefaultInstance().getType(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The type of operation invoked.
+     * 
+ * + * string type = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for type to set. + * @return This builder for chaining. + */ + public Builder setTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + type_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private boolean requestedCancellation_; + + /** + * + * + *
+     * Output only. Identifies whether the user has requested cancellation.
+     * 
+ * + * bool requested_cancellation = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The requestedCancellation. + */ + @java.lang.Override + public boolean getRequestedCancellation() { + return requestedCancellation_; + } + + /** + * + * + *
+     * Output only. Identifies whether the user has requested cancellation.
+     * 
+ * + * bool requested_cancellation = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The requestedCancellation to set. + * @return This builder for chaining. + */ + public Builder setRequestedCancellation(boolean value) { + + requestedCancellation_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Identifies whether the user has requested cancellation.
+     * 
+ * + * bool requested_cancellation = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearRequestedCancellation() { + bitField0_ = (bitField0_ & ~0x00000010); + requestedCancellation_ = false; + onChanged(); + return this; + } + + private int progressPercent_; + + /** + * + * + *
+     * Output only. The estimated progress of the operation in percentage [0,
+     * 100]. The value -1 means the progress is unknown.
+     * 
+ * + * int32 progress_percent = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The progressPercent. + */ + @java.lang.Override + public int getProgressPercent() { + return progressPercent_; + } + + /** + * + * + *
+     * Output only. The estimated progress of the operation in percentage [0,
+     * 100]. The value -1 means the progress is unknown.
+     * 
+ * + * int32 progress_percent = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The progressPercent to set. + * @return This builder for chaining. + */ + public Builder setProgressPercent(int value) { + + progressPercent_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The estimated progress of the operation in percentage [0,
+     * 100]. The value -1 means the progress is unknown.
+     * 
+ * + * int32 progress_percent = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearProgressPercent() { + bitField0_ = (bitField0_ & ~0x00000020); + progressPercent_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.CommonLongRunningOperationMetadata) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.CommonLongRunningOperationMetadata) + private static final com.google.storage.control.v2.CommonLongRunningOperationMetadata + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.CommonLongRunningOperationMetadata(); + } + + public static com.google.storage.control.v2.CommonLongRunningOperationMetadata + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CommonLongRunningOperationMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.storage.control.v2.CommonLongRunningOperationMetadata + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CommonLongRunningOperationMetadataOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CommonLongRunningOperationMetadataOrBuilder.java new file mode 100644 index 000000000000..0a917a7db2c4 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CommonLongRunningOperationMetadataOrBuilder.java @@ -0,0 +1,201 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface CommonLongRunningOperationMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.CommonLongRunningOperationMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. The time the operation was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
+   * Output only. The time the operation was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
+   * Output only. The time the operation was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. The time the operation finished running.
+   * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the endTime field is set. + */ + boolean hasEndTime(); + + /** + * + * + *
+   * Output only. The time the operation finished running.
+   * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The endTime. + */ + com.google.protobuf.Timestamp getEndTime(); + + /** + * + * + *
+   * Output only. The time the operation finished running.
+   * 
+ * + * .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder(); + + /** + * + * + *
+   * Output only. The time the operation was last modified.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + boolean hasUpdateTime(); + + /** + * + * + *
+   * Output only. The time the operation was last modified.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + com.google.protobuf.Timestamp getUpdateTime(); + + /** + * + * + *
+   * Output only. The time the operation was last modified.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. The type of operation invoked.
+   * 
+ * + * string type = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The type. + */ + java.lang.String getType(); + + /** + * + * + *
+   * Output only. The type of operation invoked.
+   * 
+ * + * string type = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for type. + */ + com.google.protobuf.ByteString getTypeBytes(); + + /** + * + * + *
+   * Output only. Identifies whether the user has requested cancellation.
+   * 
+ * + * bool requested_cancellation = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The requestedCancellation. + */ + boolean getRequestedCancellation(); + + /** + * + * + *
+   * Output only. The estimated progress of the operation in percentage [0,
+   * 100]. The value -1 means the progress is unknown.
+   * 
+ * + * int32 progress_percent = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The progressPercent. + */ + int getProgressPercent(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateAnywhereCacheMetadata.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateAnywhereCacheMetadata.java new file mode 100644 index 000000000000..f1c0620e3c00 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateAnywhereCacheMetadata.java @@ -0,0 +1,1720 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Message returned in the metadata field of the Operation resource for
+ * CreateAnywhereCache operations.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.CreateAnywhereCacheMetadata} + */ +@com.google.protobuf.Generated +public final class CreateAnywhereCacheMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.CreateAnywhereCacheMetadata) + CreateAnywhereCacheMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateAnywhereCacheMetadata"); + } + + // Use CreateAnywhereCacheMetadata.newBuilder() to construct. + private CreateAnywhereCacheMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateAnywhereCacheMetadata() { + anywhereCacheId_ = ""; + zone_ = ""; + admissionPolicy_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateAnywhereCacheMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateAnywhereCacheMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.CreateAnywhereCacheMetadata.class, + com.google.storage.control.v2.CreateAnywhereCacheMetadata.Builder.class); + } + + private int bitField0_; + public static final int COMMON_METADATA_FIELD_NUMBER = 1; + private com.google.storage.control.v2.CommonLongRunningOperationMetadata commonMetadata_; + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return Whether the commonMetadata field is set. + */ + @java.lang.Override + public boolean hasCommonMetadata() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return The commonMetadata. + */ + @java.lang.Override + public com.google.storage.control.v2.CommonLongRunningOperationMetadata getCommonMetadata() { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + */ + @java.lang.Override + public com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder + getCommonMetadataOrBuilder() { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } + + public static final int ANYWHERE_CACHE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object anywhereCacheId_ = ""; + + /** + * + * + *
+   * Anywhere Cache ID.
+   * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return Whether the anywhereCacheId field is set. + */ + @java.lang.Override + public boolean hasAnywhereCacheId() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Anywhere Cache ID.
+   * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return The anywhereCacheId. + */ + @java.lang.Override + public java.lang.String getAnywhereCacheId() { + java.lang.Object ref = anywhereCacheId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + anywhereCacheId_ = s; + return s; + } + } + + /** + * + * + *
+   * Anywhere Cache ID.
+   * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return The bytes for anywhereCacheId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getAnywhereCacheIdBytes() { + java.lang.Object ref = anywhereCacheId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + anywhereCacheId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ZONE_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object zone_ = ""; + + /** + * + * + *
+   * The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * optional string zone = 6; + * + * @return Whether the zone field is set. + */ + @java.lang.Override + public boolean hasZone() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * optional string zone = 6; + * + * @return The zone. + */ + @java.lang.Override + public java.lang.String getZone() { + java.lang.Object ref = zone_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + zone_ = s; + return s; + } + } + + /** + * + * + *
+   * The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * optional string zone = 6; + * + * @return The bytes for zone. + */ + @java.lang.Override + public com.google.protobuf.ByteString getZoneBytes() { + java.lang.Object ref = zone_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + zone_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TTL_FIELD_NUMBER = 3; + private com.google.protobuf.Duration ttl_; + + /** + * + * + *
+   * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+   * cache entries on admission. Default ttl value (24hrs) is applied if not
+   * specified in the create request.
+   * 
+ * + * optional .google.protobuf.Duration ttl = 3; + * + * @return Whether the ttl field is set. + */ + @java.lang.Override + public boolean hasTtl() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+   * cache entries on admission. Default ttl value (24hrs) is applied if not
+   * specified in the create request.
+   * 
+ * + * optional .google.protobuf.Duration ttl = 3; + * + * @return The ttl. + */ + @java.lang.Override + public com.google.protobuf.Duration getTtl() { + return ttl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : ttl_; + } + + /** + * + * + *
+   * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+   * cache entries on admission. Default ttl value (24hrs) is applied if not
+   * specified in the create request.
+   * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getTtlOrBuilder() { + return ttl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : ttl_; + } + + public static final int ADMISSION_POLICY_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object admissionPolicy_ = ""; + + /** + * + * + *
+   * Anywhere Cache entry Admission Policy in kebab-case (e.g.,
+   * "admit-on-first-miss"). Default admission policy (admit-on-first-miss) is
+   * applied if not specified in the create request.
+   * 
+ * + * optional string admission_policy = 5; + * + * @return Whether the admissionPolicy field is set. + */ + @java.lang.Override + public boolean hasAdmissionPolicy() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * Anywhere Cache entry Admission Policy in kebab-case (e.g.,
+   * "admit-on-first-miss"). Default admission policy (admit-on-first-miss) is
+   * applied if not specified in the create request.
+   * 
+ * + * optional string admission_policy = 5; + * + * @return The admissionPolicy. + */ + @java.lang.Override + public java.lang.String getAdmissionPolicy() { + java.lang.Object ref = admissionPolicy_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + admissionPolicy_ = s; + return s; + } + } + + /** + * + * + *
+   * Anywhere Cache entry Admission Policy in kebab-case (e.g.,
+   * "admit-on-first-miss"). Default admission policy (admit-on-first-miss) is
+   * applied if not specified in the create request.
+   * 
+ * + * optional string admission_policy = 5; + * + * @return The bytes for admissionPolicy. + */ + @java.lang.Override + public com.google.protobuf.ByteString getAdmissionPolicyBytes() { + java.lang.Object ref = admissionPolicy_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + admissionPolicy_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getCommonMetadata()); + } + if (((bitField0_ & 0x00000002) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, anywhereCacheId_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(3, getTtl()); + } + if (((bitField0_ & 0x00000010) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, admissionPolicy_); + } + if (((bitField0_ & 0x00000004) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, zone_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCommonMetadata()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, anywhereCacheId_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getTtl()); 
+ } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, admissionPolicy_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, zone_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.CreateAnywhereCacheMetadata)) { + return super.equals(obj); + } + com.google.storage.control.v2.CreateAnywhereCacheMetadata other = + (com.google.storage.control.v2.CreateAnywhereCacheMetadata) obj; + + if (hasCommonMetadata() != other.hasCommonMetadata()) return false; + if (hasCommonMetadata()) { + if (!getCommonMetadata().equals(other.getCommonMetadata())) return false; + } + if (hasAnywhereCacheId() != other.hasAnywhereCacheId()) return false; + if (hasAnywhereCacheId()) { + if (!getAnywhereCacheId().equals(other.getAnywhereCacheId())) return false; + } + if (hasZone() != other.hasZone()) return false; + if (hasZone()) { + if (!getZone().equals(other.getZone())) return false; + } + if (hasTtl() != other.hasTtl()) return false; + if (hasTtl()) { + if (!getTtl().equals(other.getTtl())) return false; + } + if (hasAdmissionPolicy() != other.hasAdmissionPolicy()) return false; + if (hasAdmissionPolicy()) { + if (!getAdmissionPolicy().equals(other.getAdmissionPolicy())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasCommonMetadata()) { + hash = (37 * hash) + COMMON_METADATA_FIELD_NUMBER; + hash = (53 * hash) + getCommonMetadata().hashCode(); + } + if (hasAnywhereCacheId()) { + hash = (37 * hash) + 
ANYWHERE_CACHE_ID_FIELD_NUMBER; + hash = (53 * hash) + getAnywhereCacheId().hashCode(); + } + if (hasZone()) { + hash = (37 * hash) + ZONE_FIELD_NUMBER; + hash = (53 * hash) + getZone().hashCode(); + } + if (hasTtl()) { + hash = (37 * hash) + TTL_FIELD_NUMBER; + hash = (53 * hash) + getTtl().hashCode(); + } + if (hasAdmissionPolicy()) { + hash = (37 * hash) + ADMISSION_POLICY_FIELD_NUMBER; + hash = (53 * hash) + getAdmissionPolicy().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.CreateAnywhereCacheMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + 
} + + public static Builder newBuilder( + com.google.storage.control.v2.CreateAnywhereCacheMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Message returned in the metadata field of the Operation resource for
+   * CreateAnywhereCache operations.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.CreateAnywhereCacheMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.CreateAnywhereCacheMetadata) + com.google.storage.control.v2.CreateAnywhereCacheMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateAnywhereCacheMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateAnywhereCacheMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.CreateAnywhereCacheMetadata.class, + com.google.storage.control.v2.CreateAnywhereCacheMetadata.Builder.class); + } + + // Construct using com.google.storage.control.v2.CreateAnywhereCacheMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCommonMetadataFieldBuilder(); + internalGetTtlFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + commonMetadata_ = null; + if (commonMetadataBuilder_ != null) { + commonMetadataBuilder_.dispose(); + commonMetadataBuilder_ = null; + } + anywhereCacheId_ = ""; + zone_ = ""; + ttl_ = null; + if (ttlBuilder_ != null) { + ttlBuilder_.dispose(); + ttlBuilder_ = null; + } + admissionPolicy_ = ""; + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateAnywhereCacheMetadata_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.CreateAnywhereCacheMetadata getDefaultInstanceForType() { + return com.google.storage.control.v2.CreateAnywhereCacheMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.CreateAnywhereCacheMetadata build() { + com.google.storage.control.v2.CreateAnywhereCacheMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.CreateAnywhereCacheMetadata buildPartial() { + com.google.storage.control.v2.CreateAnywhereCacheMetadata result = + new com.google.storage.control.v2.CreateAnywhereCacheMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.CreateAnywhereCacheMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.commonMetadata_ = + commonMetadataBuilder_ == null ? commonMetadata_ : commonMetadataBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.anywhereCacheId_ = anywhereCacheId_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.zone_ = zone_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.ttl_ = ttlBuilder_ == null ? 
ttl_ : ttlBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.admissionPolicy_ = admissionPolicy_; + to_bitField0_ |= 0x00000010; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.CreateAnywhereCacheMetadata) { + return mergeFrom((com.google.storage.control.v2.CreateAnywhereCacheMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.CreateAnywhereCacheMetadata other) { + if (other == com.google.storage.control.v2.CreateAnywhereCacheMetadata.getDefaultInstance()) + return this; + if (other.hasCommonMetadata()) { + mergeCommonMetadata(other.getCommonMetadata()); + } + if (other.hasAnywhereCacheId()) { + anywhereCacheId_ = other.anywhereCacheId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasZone()) { + zone_ = other.zone_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasTtl()) { + mergeTtl(other.getTtl()); + } + if (other.hasAdmissionPolicy()) { + admissionPolicy_ = other.admissionPolicy_; + bitField0_ |= 0x00000010; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetCommonMetadataFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + 
case 18: + { + anywhereCacheId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage(internalGetTtlFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 26 + case 42: + { + admissionPolicy_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + zone_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.control.v2.CommonLongRunningOperationMetadata commonMetadata_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.CommonLongRunningOperationMetadata, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder, + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder> + commonMetadataBuilder_; + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * + * @return Whether the commonMetadata field is set. + */ + public boolean hasCommonMetadata() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * + * @return The commonMetadata. + */ + public com.google.storage.control.v2.CommonLongRunningOperationMetadata getCommonMetadata() { + if (commonMetadataBuilder_ == null) { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } else { + return commonMetadataBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder setCommonMetadata( + com.google.storage.control.v2.CommonLongRunningOperationMetadata value) { + if (commonMetadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonMetadata_ = value; + } else { + commonMetadataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder setCommonMetadata( + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder builderForValue) { + if (commonMetadataBuilder_ == null) { + commonMetadata_ = builderForValue.build(); + } else { + commonMetadataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder mergeCommonMetadata( + com.google.storage.control.v2.CommonLongRunningOperationMetadata value) { + if (commonMetadataBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && commonMetadata_ != null + && commonMetadata_ + != com.google.storage.control.v2.CommonLongRunningOperationMetadata + .getDefaultInstance()) { + getCommonMetadataBuilder().mergeFrom(value); + } else { + commonMetadata_ = value; + } + } else { + commonMetadataBuilder_.mergeFrom(value); + } + if (commonMetadata_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder clearCommonMetadata() { + bitField0_ = (bitField0_ & ~0x00000001); + commonMetadata_ = null; + if (commonMetadataBuilder_ != null) { + commonMetadataBuilder_.dispose(); + commonMetadataBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder + getCommonMetadataBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetCommonMetadataFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder + getCommonMetadataOrBuilder() { + if (commonMetadataBuilder_ != null) { + return commonMetadataBuilder_.getMessageOrBuilder(); + } else { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.CommonLongRunningOperationMetadata, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder, + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder> + internalGetCommonMetadataFieldBuilder() { + if (commonMetadataBuilder_ == null) { + commonMetadataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.CommonLongRunningOperationMetadata, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder, + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder>( + getCommonMetadata(), getParentForChildren(), isClean()); + commonMetadata_ = null; + } + return commonMetadataBuilder_; + } + + private java.lang.Object anywhereCacheId_ = ""; + + /** + * + * + *
+     * Anywhere Cache ID.
+     * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return Whether the anywhereCacheId field is set. + */ + public boolean hasAnywhereCacheId() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Anywhere Cache ID.
+     * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return The anywhereCacheId. + */ + public java.lang.String getAnywhereCacheId() { + java.lang.Object ref = anywhereCacheId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + anywhereCacheId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Anywhere Cache ID.
+     * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return The bytes for anywhereCacheId. + */ + public com.google.protobuf.ByteString getAnywhereCacheIdBytes() { + java.lang.Object ref = anywhereCacheId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + anywhereCacheId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Anywhere Cache ID.
+     * 
+ * + * optional string anywhere_cache_id = 2; + * + * @param value The anywhereCacheId to set. + * @return This builder for chaining. + */ + public Builder setAnywhereCacheId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + anywhereCacheId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Anywhere Cache ID.
+     * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearAnywhereCacheId() { + anywhereCacheId_ = getDefaultInstance().getAnywhereCacheId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Anywhere Cache ID.
+     * 
+ * + * optional string anywhere_cache_id = 2; + * + * @param value The bytes for anywhereCacheId to set. + * @return This builder for chaining. + */ + public Builder setAnywhereCacheIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + anywhereCacheId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object zone_ = ""; + + /** + * + * + *
+     * The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * optional string zone = 6; + * + * @return Whether the zone field is set. + */ + public boolean hasZone() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * optional string zone = 6; + * + * @return The zone. + */ + public java.lang.String getZone() { + java.lang.Object ref = zone_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + zone_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * optional string zone = 6; + * + * @return The bytes for zone. + */ + public com.google.protobuf.ByteString getZoneBytes() { + java.lang.Object ref = zone_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + zone_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * optional string zone = 6; + * + * @param value The zone to set. + * @return This builder for chaining. + */ + public Builder setZone(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + zone_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * optional string zone = 6; + * + * @return This builder for chaining. + */ + public Builder clearZone() { + zone_ = getDefaultInstance().getZone(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * optional string zone = 6; + * + * @param value The bytes for zone to set. + * @return This builder for chaining. + */ + public Builder setZoneBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + zone_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.Duration ttl_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + ttlBuilder_; + + /** + * + * + *
+     * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+     * cache entries on admission. Default ttl value (24hrs) is applied if not
+     * specified in the create request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + * + * @return Whether the ttl field is set. + */ + public boolean hasTtl() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+     * cache entries on admission. Default ttl value (24hrs) is applied if not
+     * specified in the create request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + * + * @return The ttl. + */ + public com.google.protobuf.Duration getTtl() { + if (ttlBuilder_ == null) { + return ttl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : ttl_; + } else { + return ttlBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+     * cache entries on admission. Default ttl value (24hrs) is applied if not
+     * specified in the create request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + public Builder setTtl(com.google.protobuf.Duration value) { + if (ttlBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ttl_ = value; + } else { + ttlBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+     * cache entries on admission. Default ttl value (24hrs) is applied if not
+     * specified in the create request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + public Builder setTtl(com.google.protobuf.Duration.Builder builderForValue) { + if (ttlBuilder_ == null) { + ttl_ = builderForValue.build(); + } else { + ttlBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+     * cache entries on admission. Default ttl value (24hrs) is applied if not
+     * specified in the create request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + public Builder mergeTtl(com.google.protobuf.Duration value) { + if (ttlBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && ttl_ != null + && ttl_ != com.google.protobuf.Duration.getDefaultInstance()) { + getTtlBuilder().mergeFrom(value); + } else { + ttl_ = value; + } + } else { + ttlBuilder_.mergeFrom(value); + } + if (ttl_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+     * cache entries on admission. Default ttl value (24hrs) is applied if not
+     * specified in the create request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + public Builder clearTtl() { + bitField0_ = (bitField0_ & ~0x00000008); + ttl_ = null; + if (ttlBuilder_ != null) { + ttlBuilder_.dispose(); + ttlBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+     * cache entries on admission. Default ttl value (24hrs) is applied if not
+     * specified in the create request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + public com.google.protobuf.Duration.Builder getTtlBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetTtlFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+     * cache entries on admission. Default ttl value (24hrs) is applied if not
+     * specified in the create request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + public com.google.protobuf.DurationOrBuilder getTtlOrBuilder() { + if (ttlBuilder_ != null) { + return ttlBuilder_.getMessageOrBuilder(); + } else { + return ttl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : ttl_; + } + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+     * cache entries on admission. Default ttl value (24hrs) is applied if not
+     * specified in the create request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + internalGetTtlFieldBuilder() { + if (ttlBuilder_ == null) { + ttlBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>(getTtl(), getParentForChildren(), isClean()); + ttl_ = null; + } + return ttlBuilder_; + } + + private java.lang.Object admissionPolicy_ = ""; + + /** + * + * + *
+     * Anywhere Cache entry Admission Policy in kebab-case (e.g.,
+     * "admit-on-first-miss"). Default admission policy (admit-on-first-miss) is
+     * applied if not specified in the create request.
+     * 
+ * + * optional string admission_policy = 5; + * + * @return Whether the admissionPolicy field is set. + */ + public boolean hasAdmissionPolicy() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Anywhere Cache entry Admission Policy in kebab-case (e.g.,
+     * "admit-on-first-miss"). Default admission policy (admit-on-first-miss) is
+     * applied if not specified in the create request.
+     * 
+ * + * optional string admission_policy = 5; + * + * @return The admissionPolicy. + */ + public java.lang.String getAdmissionPolicy() { + java.lang.Object ref = admissionPolicy_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + admissionPolicy_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Anywhere Cache entry Admission Policy in kebab-case (e.g.,
+     * "admit-on-first-miss"). Default admission policy (admit-on-first-miss) is
+     * applied if not specified in the create request.
+     * 
+ * + * optional string admission_policy = 5; + * + * @return The bytes for admissionPolicy. + */ + public com.google.protobuf.ByteString getAdmissionPolicyBytes() { + java.lang.Object ref = admissionPolicy_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + admissionPolicy_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Anywhere Cache entry Admission Policy in kebab-case (e.g.,
+     * "admit-on-first-miss"). Default admission policy (admit-on-first-miss) is
+     * applied if not specified in the create request.
+     * 
+ * + * optional string admission_policy = 5; + * + * @param value The admissionPolicy to set. + * @return This builder for chaining. + */ + public Builder setAdmissionPolicy(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + admissionPolicy_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Anywhere Cache entry Admission Policy in kebab-case (e.g.,
+     * "admit-on-first-miss"). Default admission policy (admit-on-first-miss) is
+     * applied if not specified in the create request.
+     * 
+ * + * optional string admission_policy = 5; + * + * @return This builder for chaining. + */ + public Builder clearAdmissionPolicy() { + admissionPolicy_ = getDefaultInstance().getAdmissionPolicy(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Anywhere Cache entry Admission Policy in kebab-case (e.g.,
+     * "admit-on-first-miss"). Default admission policy (admit-on-first-miss) is
+     * applied if not specified in the create request.
+     * 
+ * + * optional string admission_policy = 5; + * + * @param value The bytes for admissionPolicy to set. + * @return This builder for chaining. + */ + public Builder setAdmissionPolicyBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + admissionPolicy_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.CreateAnywhereCacheMetadata) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.CreateAnywhereCacheMetadata) + private static final com.google.storage.control.v2.CreateAnywhereCacheMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.CreateAnywhereCacheMetadata(); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateAnywhereCacheMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.CreateAnywhereCacheMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateAnywhereCacheMetadataOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateAnywhereCacheMetadataOrBuilder.java new file mode 100644 index 000000000000..d4fe37dfcdac --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateAnywhereCacheMetadataOrBuilder.java @@ -0,0 +1,235 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface CreateAnywhereCacheMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.CreateAnywhereCacheMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return Whether the commonMetadata field is set. + */ + boolean hasCommonMetadata(); + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return The commonMetadata. + */ + com.google.storage.control.v2.CommonLongRunningOperationMetadata getCommonMetadata(); + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + */ + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder + getCommonMetadataOrBuilder(); + + /** + * + * + *
+   * Anywhere Cache ID.
+   * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return Whether the anywhereCacheId field is set. + */ + boolean hasAnywhereCacheId(); + + /** + * + * + *
+   * Anywhere Cache ID.
+   * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return The anywhereCacheId. + */ + java.lang.String getAnywhereCacheId(); + + /** + * + * + *
+   * Anywhere Cache ID.
+   * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return The bytes for anywhereCacheId. + */ + com.google.protobuf.ByteString getAnywhereCacheIdBytes(); + + /** + * + * + *
+   * The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * optional string zone = 6; + * + * @return Whether the zone field is set. + */ + boolean hasZone(); + + /** + * + * + *
+   * The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * optional string zone = 6; + * + * @return The zone. + */ + java.lang.String getZone(); + + /** + * + * + *
+   * The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * optional string zone = 6; + * + * @return The bytes for zone. + */ + com.google.protobuf.ByteString getZoneBytes(); + + /** + * + * + *
+   * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+   * cache entries on admission. Default ttl value (24hrs) is applied if not
+   * specified in the create request.
+   * 
+ * + * optional .google.protobuf.Duration ttl = 3; + * + * @return Whether the ttl field is set. + */ + boolean hasTtl(); + + /** + * + * + *
+   * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+   * cache entries on admission. Default ttl value (24hrs) is applied if not
+   * specified in the create request.
+   * 
+ * + * optional .google.protobuf.Duration ttl = 3; + * + * @return The ttl. + */ + com.google.protobuf.Duration getTtl(); + + /** + * + * + *
+   * Anywhere Cache entry's TTL. A cache-level config that is applied to all new
+   * cache entries on admission. Default ttl value (24hrs) is applied if not
+   * specified in the create request.
+   * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + com.google.protobuf.DurationOrBuilder getTtlOrBuilder(); + + /** + * + * + *
+   * Anywhere Cache entry Admission Policy in kebab-case (e.g.,
+   * "admit-on-first-miss"). Default admission policy (admit-on-first-miss) is
+   * applied if not specified in the create request.
+   * 
+ * + * optional string admission_policy = 5; + * + * @return Whether the admissionPolicy field is set. + */ + boolean hasAdmissionPolicy(); + + /** + * + * + *
+   * Anywhere Cache entry Admission Policy in kebab-case (e.g.,
+   * "admit-on-first-miss"). Default admission policy (admit-on-first-miss) is
+   * applied if not specified in the create request.
+   * 
+ * + * optional string admission_policy = 5; + * + * @return The admissionPolicy. + */ + java.lang.String getAdmissionPolicy(); + + /** + * + * + *
+   * Anywhere Cache entry Admission Policy in kebab-case (e.g.,
+   * "admit-on-first-miss"). Default admission policy (admit-on-first-miss) is
+   * applied if not specified in the create request.
+   * 
+ * + * optional string admission_policy = 5; + * + * @return The bytes for admissionPolicy. + */ + com.google.protobuf.ByteString getAdmissionPolicyBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateAnywhereCacheRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateAnywhereCacheRequest.java new file mode 100644 index 000000000000..b08c48d4c6c3 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateAnywhereCacheRequest.java @@ -0,0 +1,1189 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for CreateAnywhereCache.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.CreateAnywhereCacheRequest} + */ +@com.google.protobuf.Generated +public final class CreateAnywhereCacheRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.CreateAnywhereCacheRequest) + CreateAnywhereCacheRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateAnywhereCacheRequest"); + } + + // Use CreateAnywhereCacheRequest.newBuilder() to construct. + private CreateAnywhereCacheRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateAnywhereCacheRequest() { + parent_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateAnywhereCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.CreateAnywhereCacheRequest.class, + com.google.storage.control.v2.CreateAnywhereCacheRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. The bucket to which this cache belongs.
+   * Format: `projects/{project}/buckets/{bucket}`
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The bucket to which this cache belongs.
+   * Format: `projects/{project}/buckets/{bucket}`
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ANYWHERE_CACHE_FIELD_NUMBER = 3; + private com.google.storage.control.v2.AnywhereCache anywhereCache_; + + /** + * + * + *
+   * Required. Properties of the Anywhere Cache instance being created.
+   * The parent bucket name is specified in the `parent` field. Server uses the
+   * default value of `ttl` or `admission_policy` if not specified in
+   * request.
+   * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the anywhereCache field is set. + */ + @java.lang.Override + public boolean hasAnywhereCache() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. Properties of the Anywhere Cache instance being created.
+   * The parent bucket name is specified in the `parent` field. Server uses the
+   * default value of `ttl` or `admission_policy` if not specified in
+   * request.
+   * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The anywhereCache. + */ + @java.lang.Override + public com.google.storage.control.v2.AnywhereCache getAnywhereCache() { + return anywhereCache_ == null + ? com.google.storage.control.v2.AnywhereCache.getDefaultInstance() + : anywhereCache_; + } + + /** + * + * + *
+   * Required. Properties of the Anywhere Cache instance being created.
+   * The parent bucket name is specified in the `parent` field. Server uses the
+   * default value of `ttl` or `admission_policy` if not specified in
+   * request.
+   * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.storage.control.v2.AnywhereCacheOrBuilder getAnywhereCacheOrBuilder() { + return anywhereCache_ == null + ? com.google.storage.control.v2.AnywhereCache.getDefaultInstance() + : anywhereCache_; + } + + public static final int REQUEST_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getAnywhereCache()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getAnywhereCache()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, requestId_); + } + size += getUnknownFields().getSerializedSize(); + 
memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.CreateAnywhereCacheRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.CreateAnywhereCacheRequest other = + (com.google.storage.control.v2.CreateAnywhereCacheRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasAnywhereCache() != other.hasAnywhereCache()) return false; + if (hasAnywhereCache()) { + if (!getAnywhereCache().equals(other.getAnywhereCache())) return false; + } + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasAnywhereCache()) { + hash = (37 * hash) + ANYWHERE_CACHE_FIELD_NUMBER; + hash = (53 * hash) + getAnywhereCache().hashCode(); + } + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.CreateAnywhereCacheRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheRequest parseFrom( 
+ com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.CreateAnywhereCacheRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for CreateAnywhereCache.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.CreateAnywhereCacheRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.CreateAnywhereCacheRequest) + com.google.storage.control.v2.CreateAnywhereCacheRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateAnywhereCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.CreateAnywhereCacheRequest.class, + com.google.storage.control.v2.CreateAnywhereCacheRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.CreateAnywhereCacheRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetAnywhereCacheFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + anywhereCache_ = null; + if (anywhereCacheBuilder_ != null) { + anywhereCacheBuilder_.dispose(); + anywhereCacheBuilder_ = null; + } + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + 
.internal_static_google_storage_control_v2_CreateAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.CreateAnywhereCacheRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.CreateAnywhereCacheRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.CreateAnywhereCacheRequest build() { + com.google.storage.control.v2.CreateAnywhereCacheRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.CreateAnywhereCacheRequest buildPartial() { + com.google.storage.control.v2.CreateAnywhereCacheRequest result = + new com.google.storage.control.v2.CreateAnywhereCacheRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.CreateAnywhereCacheRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.anywhereCache_ = + anywhereCacheBuilder_ == null ? 
anywhereCache_ : anywhereCacheBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.CreateAnywhereCacheRequest) { + return mergeFrom((com.google.storage.control.v2.CreateAnywhereCacheRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.CreateAnywhereCacheRequest other) { + if (other == com.google.storage.control.v2.CreateAnywhereCacheRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasAnywhereCache()) { + mergeAnywhereCache(other.getAnywhereCache()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 26: + { + input.readMessage( + internalGetAnywhereCacheFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 26 + case 34: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; 
+ } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. The bucket to which this cache belongs.
+     * Format: `projects/{project}/buckets/{bucket}`
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The bucket to which this cache belongs.
+     * Format: `projects/{project}/buckets/{bucket}`
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The bucket to which this cache belongs.
+     * Format: `projects/{project}/buckets/{bucket}`
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The bucket to which this cache belongs.
+     * Format: `projects/{project}/buckets/{bucket}`
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The bucket to which this cache belongs.
+     * Format: `projects/{project}/buckets/{bucket}`
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.storage.control.v2.AnywhereCache anywhereCache_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.AnywhereCache, + com.google.storage.control.v2.AnywhereCache.Builder, + com.google.storage.control.v2.AnywhereCacheOrBuilder> + anywhereCacheBuilder_; + + /** + * + * + *
+     * Required. Properties of the Anywhere Cache instance being created.
+     * The parent bucket name is specified in the `parent` field. Server uses the
+     * default value of `ttl` or `admission_policy` if not specified in
+     * request.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the anywhereCache field is set. + */ + public boolean hasAnywhereCache() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. Properties of the Anywhere Cache instance being created.
+     * The parent bucket name is specified in the `parent` field. Server uses the
+     * default value of `ttl` or `admission_policy` if not specified in
+     * request.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The anywhereCache. + */ + public com.google.storage.control.v2.AnywhereCache getAnywhereCache() { + if (anywhereCacheBuilder_ == null) { + return anywhereCache_ == null + ? com.google.storage.control.v2.AnywhereCache.getDefaultInstance() + : anywhereCache_; + } else { + return anywhereCacheBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. Properties of the Anywhere Cache instance being created.
+     * The parent bucket name is specified in the `parent` field. Server uses the
+     * default value of `ttl` or `admission_policy` if not specified in
+     * request.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setAnywhereCache(com.google.storage.control.v2.AnywhereCache value) { + if (anywhereCacheBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + anywhereCache_ = value; + } else { + anywhereCacheBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Properties of the Anywhere Cache instance being created.
+     * The parent bucket name is specified in the `parent` field. Server uses the
+     * default value of `ttl` or `admission_policy` if not specified in
+     * request.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setAnywhereCache( + com.google.storage.control.v2.AnywhereCache.Builder builderForValue) { + if (anywhereCacheBuilder_ == null) { + anywhereCache_ = builderForValue.build(); + } else { + anywhereCacheBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Properties of the Anywhere Cache instance being created.
+     * The parent bucket name is specified in the `parent` field. Server uses the
+     * default value of `ttl` or `admission_policy` if not specified in
+     * request.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeAnywhereCache(com.google.storage.control.v2.AnywhereCache value) { + if (anywhereCacheBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && anywhereCache_ != null + && anywhereCache_ != com.google.storage.control.v2.AnywhereCache.getDefaultInstance()) { + getAnywhereCacheBuilder().mergeFrom(value); + } else { + anywhereCache_ = value; + } + } else { + anywhereCacheBuilder_.mergeFrom(value); + } + if (anywhereCache_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. Properties of the Anywhere Cache instance being created.
+     * The parent bucket name is specified in the `parent` field. Server uses the
+     * default value of `ttl` or `admission_policy` if not specified in
+     * request.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearAnywhereCache() { + bitField0_ = (bitField0_ & ~0x00000002); + anywhereCache_ = null; + if (anywhereCacheBuilder_ != null) { + anywhereCacheBuilder_.dispose(); + anywhereCacheBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Properties of the Anywhere Cache instance being created.
+     * The parent bucket name is specified in the `parent` field. Server uses the
+     * default value of `ttl` or `admission_policy` if not specified in
+     * request.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.AnywhereCache.Builder getAnywhereCacheBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetAnywhereCacheFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. Properties of the Anywhere Cache instance being created.
+     * The parent bucket name is specified in the `parent` field. Server uses the
+     * default value of `ttl` or `admission_policy` if not specified in
+     * request.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.AnywhereCacheOrBuilder getAnywhereCacheOrBuilder() { + if (anywhereCacheBuilder_ != null) { + return anywhereCacheBuilder_.getMessageOrBuilder(); + } else { + return anywhereCache_ == null + ? com.google.storage.control.v2.AnywhereCache.getDefaultInstance() + : anywhereCache_; + } + } + + /** + * + * + *
+     * Required. Properties of the Anywhere Cache instance being created.
+     * The parent bucket name is specified in the `parent` field. Server uses the
+     * default value of `ttl` or `admission_policy` if not specified in
+     * request.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.AnywhereCache, + com.google.storage.control.v2.AnywhereCache.Builder, + com.google.storage.control.v2.AnywhereCacheOrBuilder> + internalGetAnywhereCacheFieldBuilder() { + if (anywhereCacheBuilder_ == null) { + anywhereCacheBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.AnywhereCache, + com.google.storage.control.v2.AnywhereCache.Builder, + com.google.storage.control.v2.AnywhereCacheOrBuilder>( + getAnywhereCache(), getParentForChildren(), isClean()); + anywhereCache_ = null; + } + return anywhereCacheBuilder_; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.CreateAnywhereCacheRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.CreateAnywhereCacheRequest) + private static final com.google.storage.control.v2.CreateAnywhereCacheRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.CreateAnywhereCacheRequest(); + } + + public static com.google.storage.control.v2.CreateAnywhereCacheRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateAnywhereCacheRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.CreateAnywhereCacheRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateAnywhereCacheRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateAnywhereCacheRequestOrBuilder.java new file mode 100644 index 000000000000..bcf501a7f029 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateAnywhereCacheRequestOrBuilder.java @@ -0,0 +1,146 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface CreateAnywhereCacheRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.CreateAnywhereCacheRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The bucket to which this cache belongs.
+   * Format: `projects/{project}/buckets/{bucket}`
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. The bucket to which this cache belongs.
+   * Format: `projects/{project}/buckets/{bucket}`
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Properties of the Anywhere Cache instance being created.
+   * The parent bucket name is specified in the `parent` field. Server uses the
+   * default value of `ttl` or `admission_policy` if not specified in
+   * request.
+   * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the anywhereCache field is set. + */ + boolean hasAnywhereCache(); + + /** + * + * + *
+   * Required. Properties of the Anywhere Cache instance being created.
+   * The parent bucket name is specified in the `parent` field. Server uses the
+   * default value of `ttl` or `admission_policy` if not specified in
+   * request.
+   * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The anywhereCache. + */ + com.google.storage.control.v2.AnywhereCache getAnywhereCache(); + + /** + * + * + *
+   * Required. Properties of the Anywhere Cache instance being created.
+   * The parent bucket name is specified in the `parent` field. Server uses the
+   * default value of `ttl` or `admission_policy` if not specified in
+   * request.
+   * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.storage.control.v2.AnywhereCacheOrBuilder getAnywhereCacheOrBuilder(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateFolderRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateFolderRequest.java new file mode 100644 index 000000000000..d2cfc8527762 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateFolderRequest.java @@ -0,0 +1,1482 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for CreateFolder. This operation is only applicable to a
+ * hierarchical namespace enabled bucket.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.CreateFolderRequest} + */ +@com.google.protobuf.Generated +public final class CreateFolderRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.CreateFolderRequest) + CreateFolderRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateFolderRequest"); + } + + // Use CreateFolderRequest.newBuilder() to construct. + private CreateFolderRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateFolderRequest() { + parent_ = ""; + folderId_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.CreateFolderRequest.class, + com.google.storage.control.v2.CreateFolderRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Name of the bucket in which the folder will reside. The bucket
+   * must be a hierarchical namespace enabled bucket.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the bucket in which the folder will reside. The bucket
+   * must be a hierarchical namespace enabled bucket.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FOLDER_FIELD_NUMBER = 2; + private com.google.storage.control.v2.Folder folder_; + + /** + * + * + *
+   * Required. Properties of the new folder being created.
+   * The bucket and name of the folder are specified in the parent and folder_id
+   * fields, respectively. Populating those fields in `folder` will result in an
+   * error.
+   * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the folder field is set. + */ + @java.lang.Override + public boolean hasFolder() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. Properties of the new folder being created.
+   * The bucket and name of the folder are specified in the parent and folder_id
+   * fields, respectively. Populating those fields in `folder` will result in an
+   * error.
+   * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The folder. + */ + @java.lang.Override + public com.google.storage.control.v2.Folder getFolder() { + return folder_ == null ? com.google.storage.control.v2.Folder.getDefaultInstance() : folder_; + } + + /** + * + * + *
+   * Required. Properties of the new folder being created.
+   * The bucket and name of the folder are specified in the parent and folder_id
+   * fields, respectively. Populating those fields in `folder` will result in an
+   * error.
+   * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.storage.control.v2.FolderOrBuilder getFolderOrBuilder() { + return folder_ == null ? com.google.storage.control.v2.Folder.getDefaultInstance() : folder_; + } + + public static final int FOLDER_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object folderId_ = ""; + + /** + * + * + *
+   * Required. The full name of a folder, including all its parent folders.
+   * Folders use single '/' characters as a delimiter.
+   * The folder_id must end with a slash.
+   * For example, the folder_id of "books/biographies/" would create a new
+   * "biographies/" folder under the "books/" folder.
+   * 
+ * + * string folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The folderId. + */ + @java.lang.Override + public java.lang.String getFolderId() { + java.lang.Object ref = folderId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + folderId_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The full name of a folder, including all its parent folders.
+   * Folders use single '/' characters as a delimiter.
+   * The folder_id must end with a slash.
+   * For example, the folder_id of "books/biographies/" would create a new
+   * "biographies/" folder under the "books/" folder.
+   * 
+ * + * string folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for folderId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFolderIdBytes() { + java.lang.Object ref = folderId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + folderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int RECURSIVE_FIELD_NUMBER = 4; + private boolean recursive_ = false; + + /** + * + * + *
+   * Optional. If true, parent folder doesn't have to be present and all missing
+   * ancestor folders will be created atomically.
+   * 
+ * + * bool recursive = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The recursive. + */ + @java.lang.Override + public boolean getRecursive() { + return recursive_; + } + + public static final int REQUEST_ID_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getFolder()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(folderId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, folderId_); + } + if (recursive_ != false) { + output.writeBool(4, recursive_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getFolder()); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(folderId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, folderId_); + } + if (recursive_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(4, recursive_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.CreateFolderRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.CreateFolderRequest other = + (com.google.storage.control.v2.CreateFolderRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasFolder() != other.hasFolder()) return false; + if (hasFolder()) { + if (!getFolder().equals(other.getFolder())) return false; + } + if (!getFolderId().equals(other.getFolderId())) return false; + if (getRecursive() != other.getRecursive()) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasFolder()) { + hash = (37 * hash) + FOLDER_FIELD_NUMBER; + hash = (53 * hash) + getFolder().hashCode(); + } + hash = (37 * hash) + FOLDER_ID_FIELD_NUMBER; + hash = (53 * hash) + getFolderId().hashCode(); + hash = (37 * hash) + RECURSIVE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getRecursive()); + hash = (37 * hash) + 
REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.CreateFolderRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CreateFolderRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateFolderRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CreateFolderRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateFolderRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CreateFolderRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateFolderRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CreateFolderRequest parseFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateFolderRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CreateFolderRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CreateFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.control.v2.CreateFolderRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for CreateFolder. This operation is only applicable to a
+   * hierarchical namespace enabled bucket.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.CreateFolderRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.CreateFolderRequest) + com.google.storage.control.v2.CreateFolderRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.CreateFolderRequest.class, + com.google.storage.control.v2.CreateFolderRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.CreateFolderRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetFolderFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + folder_ = null; + if (folderBuilder_ != null) { + folderBuilder_.dispose(); + folderBuilder_ = null; + } + folderId_ = ""; + recursive_ = false; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateFolderRequest_descriptor; + } + + 
@java.lang.Override + public com.google.storage.control.v2.CreateFolderRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.CreateFolderRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.CreateFolderRequest build() { + com.google.storage.control.v2.CreateFolderRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.CreateFolderRequest buildPartial() { + com.google.storage.control.v2.CreateFolderRequest result = + new com.google.storage.control.v2.CreateFolderRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.CreateFolderRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.folder_ = folderBuilder_ == null ? 
folder_ : folderBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.folderId_ = folderId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.recursive_ = recursive_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.CreateFolderRequest) { + return mergeFrom((com.google.storage.control.v2.CreateFolderRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.CreateFolderRequest other) { + if (other == com.google.storage.control.v2.CreateFolderRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasFolder()) { + mergeFolder(other.getFolder()); + } + if (!other.getFolderId().isEmpty()) { + folderId_ = other.folderId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.getRecursive() != false) { + setRecursive(other.getRecursive()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000010; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + 
bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(internalGetFolderFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + folderId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + recursive_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 42: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Name of the bucket in which the folder will reside. The bucket
+     * must be a hierarchical namespace enabled bucket.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which the folder will reside. The bucket
+     * must be a hierarchical namespace enabled bucket.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which the folder will reside. The bucket
+     * must be a hierarchical namespace enabled bucket.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which the folder will reside. The bucket
+     * must be a hierarchical namespace enabled bucket.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which the folder will reside. The bucket
+     * must be a hierarchical namespace enabled bucket.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.storage.control.v2.Folder folder_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.Folder, + com.google.storage.control.v2.Folder.Builder, + com.google.storage.control.v2.FolderOrBuilder> + folderBuilder_; + + /** + * + * + *
+     * Required. Properties of the new folder being created.
+     * The bucket and name of the folder are specified in the parent and folder_id
+     * fields, respectively. Populating those fields in `folder` will result in an
+     * error.
+     * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the folder field is set. + */ + public boolean hasFolder() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. Properties of the new folder being created.
+     * The bucket and name of the folder are specified in the parent and folder_id
+     * fields, respectively. Populating those fields in `folder` will result in an
+     * error.
+     * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The folder. + */ + public com.google.storage.control.v2.Folder getFolder() { + if (folderBuilder_ == null) { + return folder_ == null + ? com.google.storage.control.v2.Folder.getDefaultInstance() + : folder_; + } else { + return folderBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. Properties of the new folder being created.
+     * The bucket and name of the folder are specified in the parent and folder_id
+     * fields, respectively. Populating those fields in `folder` will result in an
+     * error.
+     * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setFolder(com.google.storage.control.v2.Folder value) { + if (folderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + folder_ = value; + } else { + folderBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Properties of the new folder being created.
+     * The bucket and name of the folder are specified in the parent and folder_id
+     * fields, respectively. Populating those fields in `folder` will result in an
+     * error.
+     * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setFolder(com.google.storage.control.v2.Folder.Builder builderForValue) { + if (folderBuilder_ == null) { + folder_ = builderForValue.build(); + } else { + folderBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Properties of the new folder being created.
+     * The bucket and name of the folder are specified in the parent and folder_id
+     * fields, respectively. Populating those fields in `folder` will result in an
+     * error.
+     * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeFolder(com.google.storage.control.v2.Folder value) { + if (folderBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && folder_ != null + && folder_ != com.google.storage.control.v2.Folder.getDefaultInstance()) { + getFolderBuilder().mergeFrom(value); + } else { + folder_ = value; + } + } else { + folderBuilder_.mergeFrom(value); + } + if (folder_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. Properties of the new folder being created.
+     * The bucket and name of the folder are specified in the parent and folder_id
+     * fields, respectively. Populating those fields in `folder` will result in an
+     * error.
+     * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearFolder() { + bitField0_ = (bitField0_ & ~0x00000002); + folder_ = null; + if (folderBuilder_ != null) { + folderBuilder_.dispose(); + folderBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Properties of the new folder being created.
+     * The bucket and name of the folder are specified in the parent and folder_id
+     * fields, respectively. Populating those fields in `folder` will result in an
+     * error.
+     * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.Folder.Builder getFolderBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetFolderFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. Properties of the new folder being created.
+     * The bucket and name of the folder are specified in the parent and folder_id
+     * fields, respectively. Populating those fields in `folder` will result in an
+     * error.
+     * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.FolderOrBuilder getFolderOrBuilder() { + if (folderBuilder_ != null) { + return folderBuilder_.getMessageOrBuilder(); + } else { + return folder_ == null + ? com.google.storage.control.v2.Folder.getDefaultInstance() + : folder_; + } + } + + /** + * + * + *
+     * Required. Properties of the new folder being created.
+     * The bucket and name of the folder are specified in the parent and folder_id
+     * fields, respectively. Populating those fields in `folder` will result in an
+     * error.
+     * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.Folder, + com.google.storage.control.v2.Folder.Builder, + com.google.storage.control.v2.FolderOrBuilder> + internalGetFolderFieldBuilder() { + if (folderBuilder_ == null) { + folderBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.Folder, + com.google.storage.control.v2.Folder.Builder, + com.google.storage.control.v2.FolderOrBuilder>( + getFolder(), getParentForChildren(), isClean()); + folder_ = null; + } + return folderBuilder_; + } + + private java.lang.Object folderId_ = ""; + + /** + * + * + *
+     * Required. The full name of a folder, including all its parent folders.
+     * Folders use single '/' characters as a delimiter.
+     * The folder_id must end with a slash.
+     * For example, the folder_id of "books/biographies/" would create a new
+     * "biographies/" folder under the "books/" folder.
+     * 
+ * + * string folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The folderId. + */ + public java.lang.String getFolderId() { + java.lang.Object ref = folderId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + folderId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The full name of a folder, including all its parent folders.
+     * Folders use single '/' characters as a delimiter.
+     * The folder_id must end with a slash.
+     * For example, the folder_id of "books/biographies/" would create a new
+     * "biographies/" folder under the "books/" folder.
+     * 
+ * + * string folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for folderId. + */ + public com.google.protobuf.ByteString getFolderIdBytes() { + java.lang.Object ref = folderId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + folderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The full name of a folder, including all its parent folders.
+     * Folders use single '/' characters as a delimiter.
+     * The folder_id must end with a slash.
+     * For example, the folder_id of "books/biographies/" would create a new
+     * "biographies/" folder under the "books/" folder.
+     * 
+ * + * string folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The folderId to set. + * @return This builder for chaining. + */ + public Builder setFolderId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + folderId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The full name of a folder, including all its parent folders.
+     * Folders use single '/' characters as a delimiter.
+     * The folder_id must end with a slash.
+     * For example, the folder_id of "books/biographies/" would create a new
+     * "biographies/" folder under the "books/" folder.
+     * 
+ * + * string folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearFolderId() { + folderId_ = getDefaultInstance().getFolderId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The full name of a folder, including all its parent folders.
+     * Folders use single '/' characters as a delimiter.
+     * The folder_id must end with a slash.
+     * For example, the folder_id of "books/biographies/" would create a new
+     * "biographies/" folder under the "books/" folder.
+     * 
+ * + * string folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for folderId to set. + * @return This builder for chaining. + */ + public Builder setFolderIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + folderId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private boolean recursive_; + + /** + * + * + *
+     * Optional. If true, parent folder doesn't have to be present and all missing
+     * ancestor folders will be created atomically.
+     * 
+ * + * bool recursive = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The recursive. + */ + @java.lang.Override + public boolean getRecursive() { + return recursive_; + } + + /** + * + * + *
+     * Optional. If true, parent folder doesn't have to be present and all missing
+     * ancestor folders will be created atomically.
+     * 
+ * + * bool recursive = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The recursive to set. + * @return This builder for chaining. + */ + public Builder setRecursive(boolean value) { + + recursive_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If true, parent folder doesn't have to be present and all missing
+     * ancestor folders will be created atomically.
+     * 
+ * + * bool recursive = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearRecursive() { + bitField0_ = (bitField0_ & ~0x00000008); + recursive_ = false; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.CreateFolderRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.CreateFolderRequest) + private static final com.google.storage.control.v2.CreateFolderRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.CreateFolderRequest(); + } + + public static com.google.storage.control.v2.CreateFolderRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateFolderRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.CreateFolderRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateFolderRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateFolderRequestOrBuilder.java new file mode 100644 index 000000000000..1d671db4cd22 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateFolderRequestOrBuilder.java @@ -0,0 +1,189 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface CreateFolderRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.CreateFolderRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the bucket in which the folder will reside. The bucket
+   * must be a hierarchical namespace enabled bucket.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Name of the bucket in which the folder will reside. The bucket
+   * must be a hierarchical namespace enabled bucket.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Properties of the new folder being created.
+   * The bucket and name of the folder are specified in the parent and folder_id
+   * fields, respectively. Populating those fields in `folder` will result in an
+   * error.
+   * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the folder field is set. + */ + boolean hasFolder(); + + /** + * + * + *
+   * Required. Properties of the new folder being created.
+   * The bucket and name of the folder are specified in the parent and folder_id
+   * fields, respectively. Populating those fields in `folder` will result in an
+   * error.
+   * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The folder. + */ + com.google.storage.control.v2.Folder getFolder(); + + /** + * + * + *
+   * Required. Properties of the new folder being created.
+   * The bucket and name of the folder are specified in the parent and folder_id
+   * fields, respectively. Populating those fields in `folder` will result in an
+   * error.
+   * 
+ * + * .google.storage.control.v2.Folder folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.storage.control.v2.FolderOrBuilder getFolderOrBuilder(); + + /** + * + * + *
+   * Required. The full name of a folder, including all its parent folders.
+   * Folders use single '/' characters as a delimiter.
+   * The folder_id must end with a slash.
+   * For example, the folder_id of "books/biographies/" would create a new
+   * "biographies/" folder under the "books/" folder.
+   * 
+ * + * string folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The folderId. + */ + java.lang.String getFolderId(); + + /** + * + * + *
+   * Required. The full name of a folder, including all its parent folders.
+   * Folders use single '/' characters as a delimiter.
+   * The folder_id must end with a slash.
+   * For example, the folder_id of "books/biographies/" would create a new
+   * "biographies/" folder under the "books/" folder.
+   * 
+ * + * string folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for folderId. + */ + com.google.protobuf.ByteString getFolderIdBytes(); + + /** + * + * + *
+   * Optional. If true, parent folder doesn't have to be present and all missing
+   * ancestor folders will be created atomically.
+   * 
+ * + * bool recursive = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The recursive. + */ + boolean getRecursive(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateManagedFolderRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateManagedFolderRequest.java new file mode 100644 index 000000000000..f14b50e05eab --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateManagedFolderRequest.java @@ -0,0 +1,1371 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for CreateManagedFolder.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.CreateManagedFolderRequest} + */ +@com.google.protobuf.Generated +public final class CreateManagedFolderRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.CreateManagedFolderRequest) + CreateManagedFolderRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateManagedFolderRequest"); + } + + // Use CreateManagedFolderRequest.newBuilder() to construct. + private CreateManagedFolderRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateManagedFolderRequest() { + parent_ = ""; + managedFolderId_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateManagedFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateManagedFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.CreateManagedFolderRequest.class, + com.google.storage.control.v2.CreateManagedFolderRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Name of the bucket this managed folder belongs to.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the bucket this managed folder belongs to.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int MANAGED_FOLDER_FIELD_NUMBER = 2; + private com.google.storage.control.v2.ManagedFolder managedFolder_; + + /** + * + * + *
+   * Required. Properties of the managed folder being created.
+   * The bucket and managed folder names are specified in the `parent` and
+   * `managed_folder_id` fields. Populating these fields in `managed_folder`
+   * will result in an error.
+   * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the managedFolder field is set. + */ + @java.lang.Override + public boolean hasManagedFolder() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. Properties of the managed folder being created.
+   * The bucket and managed folder names are specified in the `parent` and
+   * `managed_folder_id` fields. Populating these fields in `managed_folder`
+   * will result in an error.
+   * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The managedFolder. + */ + @java.lang.Override + public com.google.storage.control.v2.ManagedFolder getManagedFolder() { + return managedFolder_ == null + ? com.google.storage.control.v2.ManagedFolder.getDefaultInstance() + : managedFolder_; + } + + /** + * + * + *
+   * Required. Properties of the managed folder being created.
+   * The bucket and managed folder names are specified in the `parent` and
+   * `managed_folder_id` fields. Populating these fields in `managed_folder`
+   * will result in an error.
+   * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.storage.control.v2.ManagedFolderOrBuilder getManagedFolderOrBuilder() { + return managedFolder_ == null + ? com.google.storage.control.v2.ManagedFolder.getDefaultInstance() + : managedFolder_; + } + + public static final int MANAGED_FOLDER_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object managedFolderId_ = ""; + + /** + * + * + *
+   * Required. The name of the managed folder. It uses a single `/` as delimiter
+   * and leading and trailing `/` are allowed.
+   * 
+ * + * string managed_folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The managedFolderId. + */ + @java.lang.Override + public java.lang.String getManagedFolderId() { + java.lang.Object ref = managedFolderId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + managedFolderId_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the managed folder. It uses a single `/` as delimiter
+   * and leading and trailing `/` are allowed.
+   * 
+ * + * string managed_folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for managedFolderId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getManagedFolderIdBytes() { + java.lang.Object ref = managedFolderId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + managedFolderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getManagedFolder()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(managedFolderId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, managedFolderId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getManagedFolder()); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(managedFolderId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, managedFolderId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.CreateManagedFolderRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.CreateManagedFolderRequest other = + (com.google.storage.control.v2.CreateManagedFolderRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasManagedFolder() != other.hasManagedFolder()) return false; + if (hasManagedFolder()) { + if (!getManagedFolder().equals(other.getManagedFolder())) return false; + } + if (!getManagedFolderId().equals(other.getManagedFolderId())) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasManagedFolder()) { + hash = (37 * hash) + MANAGED_FOLDER_FIELD_NUMBER; + hash = (53 * hash) + getManagedFolder().hashCode(); + } + hash = (37 * hash) + MANAGED_FOLDER_ID_FIELD_NUMBER; + hash = (53 * hash) + getManagedFolderId().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.storage.control.v2.CreateManagedFolderRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CreateManagedFolderRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateManagedFolderRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CreateManagedFolderRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateManagedFolderRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.CreateManagedFolderRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateManagedFolderRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CreateManagedFolderRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( 
+ PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateManagedFolderRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CreateManagedFolderRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.CreateManagedFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.CreateManagedFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.CreateManagedFolderRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for CreateManagedFolder.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.CreateManagedFolderRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.CreateManagedFolderRequest) + com.google.storage.control.v2.CreateManagedFolderRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateManagedFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_CreateManagedFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.CreateManagedFolderRequest.class, + com.google.storage.control.v2.CreateManagedFolderRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.CreateManagedFolderRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetManagedFolderFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + managedFolder_ = null; + if (managedFolderBuilder_ != null) { + managedFolderBuilder_.dispose(); + managedFolderBuilder_ = null; + } + managedFolderId_ = ""; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + 
.internal_static_google_storage_control_v2_CreateManagedFolderRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.CreateManagedFolderRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.CreateManagedFolderRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.CreateManagedFolderRequest build() { + com.google.storage.control.v2.CreateManagedFolderRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.CreateManagedFolderRequest buildPartial() { + com.google.storage.control.v2.CreateManagedFolderRequest result = + new com.google.storage.control.v2.CreateManagedFolderRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.CreateManagedFolderRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.managedFolder_ = + managedFolderBuilder_ == null ? 
managedFolder_ : managedFolderBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.managedFolderId_ = managedFolderId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.CreateManagedFolderRequest) { + return mergeFrom((com.google.storage.control.v2.CreateManagedFolderRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.CreateManagedFolderRequest other) { + if (other == com.google.storage.control.v2.CreateManagedFolderRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasManagedFolder()) { + mergeManagedFolder(other.getManagedFolder()); + } + if (!other.getManagedFolderId().isEmpty()) { + managedFolderId_ = other.managedFolderId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + 
input.readMessage( + internalGetManagedFolderFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + managedFolderId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Name of the bucket this managed folder belongs to.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket this managed folder belongs to.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket this managed folder belongs to.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket this managed folder belongs to.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket this managed folder belongs to.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.storage.control.v2.ManagedFolder managedFolder_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.ManagedFolder, + com.google.storage.control.v2.ManagedFolder.Builder, + com.google.storage.control.v2.ManagedFolderOrBuilder> + managedFolderBuilder_; + + /** + * + * + *
+     * Required. Properties of the managed folder being created.
+     * The bucket and managed folder names are specified in the `parent` and
+     * `managed_folder_id` fields. Populating these fields in `managed_folder`
+     * will result in an error.
+     * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the managedFolder field is set. + */ + public boolean hasManagedFolder() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. Properties of the managed folder being created.
+     * The bucket and managed folder names are specified in the `parent` and
+     * `managed_folder_id` fields. Populating these fields in `managed_folder`
+     * will result in an error.
+     * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The managedFolder. + */ + public com.google.storage.control.v2.ManagedFolder getManagedFolder() { + if (managedFolderBuilder_ == null) { + return managedFolder_ == null + ? com.google.storage.control.v2.ManagedFolder.getDefaultInstance() + : managedFolder_; + } else { + return managedFolderBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. Properties of the managed folder being created.
+     * The bucket and managed folder names are specified in the `parent` and
+     * `managed_folder_id` fields. Populating these fields in `managed_folder`
+     * will result in an error.
+     * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setManagedFolder(com.google.storage.control.v2.ManagedFolder value) { + if (managedFolderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + managedFolder_ = value; + } else { + managedFolderBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Properties of the managed folder being created.
+     * The bucket and managed folder names are specified in the `parent` and
+     * `managed_folder_id` fields. Populating these fields in `managed_folder`
+     * will result in an error.
+     * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setManagedFolder( + com.google.storage.control.v2.ManagedFolder.Builder builderForValue) { + if (managedFolderBuilder_ == null) { + managedFolder_ = builderForValue.build(); + } else { + managedFolderBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Properties of the managed folder being created.
+     * The bucket and managed folder names are specified in the `parent` and
+     * `managed_folder_id` fields. Populating these fields in `managed_folder`
+     * will result in an error.
+     * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeManagedFolder(com.google.storage.control.v2.ManagedFolder value) { + if (managedFolderBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && managedFolder_ != null + && managedFolder_ != com.google.storage.control.v2.ManagedFolder.getDefaultInstance()) { + getManagedFolderBuilder().mergeFrom(value); + } else { + managedFolder_ = value; + } + } else { + managedFolderBuilder_.mergeFrom(value); + } + if (managedFolder_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. Properties of the managed folder being created.
+     * The bucket and managed folder names are specified in the `parent` and
+     * `managed_folder_id` fields. Populating these fields in `managed_folder`
+     * will result in an error.
+     * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearManagedFolder() { + bitField0_ = (bitField0_ & ~0x00000002); + managedFolder_ = null; + if (managedFolderBuilder_ != null) { + managedFolderBuilder_.dispose(); + managedFolderBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Properties of the managed folder being created.
+     * The bucket and managed folder names are specified in the `parent` and
+     * `managed_folder_id` fields. Populating these fields in `managed_folder`
+     * will result in an error.
+     * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.ManagedFolder.Builder getManagedFolderBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetManagedFolderFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. Properties of the managed folder being created.
+     * The bucket and managed folder names are specified in the `parent` and
+     * `managed_folder_id` fields. Populating these fields in `managed_folder`
+     * will result in an error.
+     * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.ManagedFolderOrBuilder getManagedFolderOrBuilder() { + if (managedFolderBuilder_ != null) { + return managedFolderBuilder_.getMessageOrBuilder(); + } else { + return managedFolder_ == null + ? com.google.storage.control.v2.ManagedFolder.getDefaultInstance() + : managedFolder_; + } + } + + /** + * + * + *
+     * Required. Properties of the managed folder being created.
+     * The bucket and managed folder names are specified in the `parent` and
+     * `managed_folder_id` fields. Populating these fields in `managed_folder`
+     * will result in an error.
+     * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.ManagedFolder, + com.google.storage.control.v2.ManagedFolder.Builder, + com.google.storage.control.v2.ManagedFolderOrBuilder> + internalGetManagedFolderFieldBuilder() { + if (managedFolderBuilder_ == null) { + managedFolderBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.ManagedFolder, + com.google.storage.control.v2.ManagedFolder.Builder, + com.google.storage.control.v2.ManagedFolderOrBuilder>( + getManagedFolder(), getParentForChildren(), isClean()); + managedFolder_ = null; + } + return managedFolderBuilder_; + } + + private java.lang.Object managedFolderId_ = ""; + + /** + * + * + *
+     * Required. The name of the managed folder. It uses a single `/` as delimiter
+     * and leading and trailing `/` are allowed.
+     * 
+ * + * string managed_folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The managedFolderId. + */ + public java.lang.String getManagedFolderId() { + java.lang.Object ref = managedFolderId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + managedFolderId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the managed folder. It uses a single `/` as delimiter
+     * and leading and trailing `/` are allowed.
+     * 
+ * + * string managed_folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for managedFolderId. + */ + public com.google.protobuf.ByteString getManagedFolderIdBytes() { + java.lang.Object ref = managedFolderId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + managedFolderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the managed folder. It uses a single `/` as delimiter
+     * and leading and trailing `/` are allowed.
+     * 
+ * + * string managed_folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The managedFolderId to set. + * @return This builder for chaining. + */ + public Builder setManagedFolderId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + managedFolderId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the managed folder. It uses a single `/` as delimiter
+     * and leading and trailing `/` are allowed.
+     * 
+ * + * string managed_folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearManagedFolderId() { + managedFolderId_ = getDefaultInstance().getManagedFolderId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the managed folder. It uses a single `/` as delimiter
+     * and leading and trailing `/` are allowed.
+     * 
+ * + * string managed_folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for managedFolderId to set. + * @return This builder for chaining. + */ + public Builder setManagedFolderIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + managedFolderId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.CreateManagedFolderRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.CreateManagedFolderRequest) + private static final com.google.storage.control.v2.CreateManagedFolderRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.CreateManagedFolderRequest(); + } + + public static com.google.storage.control.v2.CreateManagedFolderRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateManagedFolderRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.CreateManagedFolderRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateManagedFolderRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateManagedFolderRequestOrBuilder.java new file mode 100644 index 000000000000..b1c4a2597bc0 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/CreateManagedFolderRequestOrBuilder.java @@ -0,0 +1,170 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface CreateManagedFolderRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.CreateManagedFolderRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the bucket this managed folder belongs to.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Name of the bucket this managed folder belongs to.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Properties of the managed folder being created.
+   * The bucket and managed folder names are specified in the `parent` and
+   * `managed_folder_id` fields. Populating these fields in `managed_folder`
+   * will result in an error.
+   * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the managedFolder field is set. + */ + boolean hasManagedFolder(); + + /** + * + * + *
+   * Required. Properties of the managed folder being created.
+   * The bucket and managed folder names are specified in the `parent` and
+   * `managed_folder_id` fields. Populating these fields in `managed_folder`
+   * will result in an error.
+   * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The managedFolder. + */ + com.google.storage.control.v2.ManagedFolder getManagedFolder(); + + /** + * + * + *
+   * Required. Properties of the managed folder being created.
+   * The bucket and managed folder names are specified in the `parent` and
+   * `managed_folder_id` fields. Populating these fields in `managed_folder`
+   * will result in an error.
+   * 
+ * + * + * .google.storage.control.v2.ManagedFolder managed_folder = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.storage.control.v2.ManagedFolderOrBuilder getManagedFolderOrBuilder(); + + /** + * + * + *
+   * Required. The name of the managed folder. It uses a single `/` as delimiter
+   * and leading and trailing `/` are allowed.
+   * 
+ * + * string managed_folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The managedFolderId. + */ + java.lang.String getManagedFolderId(); + + /** + * + * + *
+   * Required. The name of the managed folder. It uses a single `/` as delimiter
+   * and leading and trailing `/` are allowed.
+   * 
+ * + * string managed_folder_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for managedFolderId. + */ + com.google.protobuf.ByteString getManagedFolderIdBytes(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRecursiveMetadata.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRecursiveMetadata.java new file mode 100644 index 000000000000..87699507d1a7 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRecursiveMetadata.java @@ -0,0 +1,909 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Message returned in the metadata field of the Operation resource for
+ * DeleteFolderRecursive operations.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.DeleteFolderRecursiveMetadata} + */ +@com.google.protobuf.Generated +public final class DeleteFolderRecursiveMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.DeleteFolderRecursiveMetadata) + DeleteFolderRecursiveMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteFolderRecursiveMetadata"); + } + + // Use DeleteFolderRecursiveMetadata.newBuilder() to construct. + private DeleteFolderRecursiveMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteFolderRecursiveMetadata() { + folderId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRecursiveMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRecursiveMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.DeleteFolderRecursiveMetadata.class, + com.google.storage.control.v2.DeleteFolderRecursiveMetadata.Builder.class); + } + + private int bitField0_; + public static final int COMMON_METADATA_FIELD_NUMBER = 1; + private com.google.storage.control.v2.CommonLongRunningOperationMetadata commonMetadata_; + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return Whether the commonMetadata field is set. + */ + @java.lang.Override + public boolean hasCommonMetadata() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return The commonMetadata. + */ + @java.lang.Override + public com.google.storage.control.v2.CommonLongRunningOperationMetadata getCommonMetadata() { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + */ + @java.lang.Override + public com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder + getCommonMetadataOrBuilder() { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } + + public static final int FOLDER_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object folderId_ = ""; + + /** + * + * + *
+   * The path of the folder recursively deleted.
+   * 
+ * + * string folder_id = 2; + * + * @return The folderId. + */ + @java.lang.Override + public java.lang.String getFolderId() { + java.lang.Object ref = folderId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + folderId_ = s; + return s; + } + } + + /** + * + * + *
+   * The path of the folder recursively deleted.
+   * 
+ * + * string folder_id = 2; + * + * @return The bytes for folderId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFolderIdBytes() { + java.lang.Object ref = folderId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + folderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getCommonMetadata()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(folderId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, folderId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCommonMetadata()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(folderId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, folderId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.DeleteFolderRecursiveMetadata)) { + return super.equals(obj); + } + com.google.storage.control.v2.DeleteFolderRecursiveMetadata other = + 
(com.google.storage.control.v2.DeleteFolderRecursiveMetadata) obj; + + if (hasCommonMetadata() != other.hasCommonMetadata()) return false; + if (hasCommonMetadata()) { + if (!getCommonMetadata().equals(other.getCommonMetadata())) return false; + } + if (!getFolderId().equals(other.getFolderId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasCommonMetadata()) { + hash = (37 * hash) + COMMON_METADATA_FIELD_NUMBER; + hash = (53 * hash) + getCommonMetadata().hashCode(); + } + hash = (37 * hash) + FOLDER_ID_FIELD_NUMBER; + hash = (53 * hash) + getFolderId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.storage.control.v2.DeleteFolderRecursiveMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.DeleteFolderRecursiveMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Message returned in the metadata field of the Operation resource for
+   * DeleteFolderRecursive operations.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.DeleteFolderRecursiveMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.DeleteFolderRecursiveMetadata) + com.google.storage.control.v2.DeleteFolderRecursiveMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRecursiveMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRecursiveMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.DeleteFolderRecursiveMetadata.class, + com.google.storage.control.v2.DeleteFolderRecursiveMetadata.Builder.class); + } + + // Construct using com.google.storage.control.v2.DeleteFolderRecursiveMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCommonMetadataFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + commonMetadata_ = null; + if (commonMetadataBuilder_ != null) { + commonMetadataBuilder_.dispose(); + commonMetadataBuilder_ = null; + } + folderId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + 
.internal_static_google_storage_control_v2_DeleteFolderRecursiveMetadata_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteFolderRecursiveMetadata getDefaultInstanceForType() { + return com.google.storage.control.v2.DeleteFolderRecursiveMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteFolderRecursiveMetadata build() { + com.google.storage.control.v2.DeleteFolderRecursiveMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteFolderRecursiveMetadata buildPartial() { + com.google.storage.control.v2.DeleteFolderRecursiveMetadata result = + new com.google.storage.control.v2.DeleteFolderRecursiveMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.DeleteFolderRecursiveMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.commonMetadata_ = + commonMetadataBuilder_ == null ? 
commonMetadata_ : commonMetadataBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.folderId_ = folderId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.DeleteFolderRecursiveMetadata) { + return mergeFrom((com.google.storage.control.v2.DeleteFolderRecursiveMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.DeleteFolderRecursiveMetadata other) { + if (other == com.google.storage.control.v2.DeleteFolderRecursiveMetadata.getDefaultInstance()) + return this; + if (other.hasCommonMetadata()) { + mergeCommonMetadata(other.getCommonMetadata()); + } + if (!other.getFolderId().isEmpty()) { + folderId_ = other.folderId_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetCommonMetadataFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + folderId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } 
catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.control.v2.CommonLongRunningOperationMetadata commonMetadata_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.CommonLongRunningOperationMetadata, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder, + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder> + commonMetadataBuilder_; + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * + * @return Whether the commonMetadata field is set. + */ + public boolean hasCommonMetadata() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * + * @return The commonMetadata. + */ + public com.google.storage.control.v2.CommonLongRunningOperationMetadata getCommonMetadata() { + if (commonMetadataBuilder_ == null) { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } else { + return commonMetadataBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder setCommonMetadata( + com.google.storage.control.v2.CommonLongRunningOperationMetadata value) { + if (commonMetadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonMetadata_ = value; + } else { + commonMetadataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder setCommonMetadata( + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder builderForValue) { + if (commonMetadataBuilder_ == null) { + commonMetadata_ = builderForValue.build(); + } else { + commonMetadataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder mergeCommonMetadata( + com.google.storage.control.v2.CommonLongRunningOperationMetadata value) { + if (commonMetadataBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && commonMetadata_ != null + && commonMetadata_ + != com.google.storage.control.v2.CommonLongRunningOperationMetadata + .getDefaultInstance()) { + getCommonMetadataBuilder().mergeFrom(value); + } else { + commonMetadata_ = value; + } + } else { + commonMetadataBuilder_.mergeFrom(value); + } + if (commonMetadata_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder clearCommonMetadata() { + bitField0_ = (bitField0_ & ~0x00000001); + commonMetadata_ = null; + if (commonMetadataBuilder_ != null) { + commonMetadataBuilder_.dispose(); + commonMetadataBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder + getCommonMetadataBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetCommonMetadataFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder + getCommonMetadataOrBuilder() { + if (commonMetadataBuilder_ != null) { + return commonMetadataBuilder_.getMessageOrBuilder(); + } else { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.CommonLongRunningOperationMetadata, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder, + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder> + internalGetCommonMetadataFieldBuilder() { + if (commonMetadataBuilder_ == null) { + commonMetadataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.CommonLongRunningOperationMetadata, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder, + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder>( + getCommonMetadata(), getParentForChildren(), isClean()); + commonMetadata_ = null; + } + return commonMetadataBuilder_; + } + + private java.lang.Object folderId_ = ""; + + /** + * + * + *
+     * The path of the folder recursively deleted.
+     * 
+ * + * string folder_id = 2; + * + * @return The folderId. + */ + public java.lang.String getFolderId() { + java.lang.Object ref = folderId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + folderId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The path of the folder recursively deleted.
+     * 
+ * + * string folder_id = 2; + * + * @return The bytes for folderId. + */ + public com.google.protobuf.ByteString getFolderIdBytes() { + java.lang.Object ref = folderId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + folderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The path of the folder recursively deleted.
+     * 
+ * + * string folder_id = 2; + * + * @param value The folderId to set. + * @return This builder for chaining. + */ + public Builder setFolderId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + folderId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The path of the folder recursively deleted.
+     * 
+ * + * string folder_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearFolderId() { + folderId_ = getDefaultInstance().getFolderId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * The path of the folder recursively deleted.
+     * 
+ * + * string folder_id = 2; + * + * @param value The bytes for folderId to set. + * @return This builder for chaining. + */ + public Builder setFolderIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + folderId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.DeleteFolderRecursiveMetadata) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.DeleteFolderRecursiveMetadata) + private static final com.google.storage.control.v2.DeleteFolderRecursiveMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.DeleteFolderRecursiveMetadata(); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteFolderRecursiveMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteFolderRecursiveMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRecursiveMetadataOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRecursiveMetadataOrBuilder.java new file mode 100644 index 000000000000..117d2df0e3a3 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRecursiveMetadataOrBuilder.java @@ -0,0 +1,92 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface DeleteFolderRecursiveMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.DeleteFolderRecursiveMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return Whether the commonMetadata field is set. + */ + boolean hasCommonMetadata(); + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return The commonMetadata. + */ + com.google.storage.control.v2.CommonLongRunningOperationMetadata getCommonMetadata(); + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + */ + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder + getCommonMetadataOrBuilder(); + + /** + * + * + *
+   * The path of the folder recursively deleted.
+   * 
+ * + * string folder_id = 2; + * + * @return The folderId. + */ + java.lang.String getFolderId(); + + /** + * + * + *
+   * The path of the folder recursively deleted.
+   * 
+ * + * string folder_id = 2; + * + * @return The bytes for folderId. + */ + com.google.protobuf.ByteString getFolderIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRecursiveRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRecursiveRequest.java new file mode 100644 index 000000000000..ac2a18d5648a --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRecursiveRequest.java @@ -0,0 +1,1136 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for DeleteFolderRecursive.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.DeleteFolderRecursiveRequest} + */ +@com.google.protobuf.Generated +public final class DeleteFolderRecursiveRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.DeleteFolderRecursiveRequest) + DeleteFolderRecursiveRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteFolderRecursiveRequest"); + } + + // Use DeleteFolderRecursiveRequest.newBuilder() to construct. + private DeleteFolderRecursiveRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteFolderRecursiveRequest() { + name_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRecursiveRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRecursiveRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.DeleteFolderRecursiveRequest.class, + com.google.storage.control.v2.DeleteFolderRecursiveRequest.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name of the folder to delete. All of the folder's contents
+   * will be deleted as well. Format:
+   * `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the folder to delete. All of the folder's contents
+   * will be deleted as well. Format:
+   * `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 2; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Optional. Makes the operation only succeed conditional on whether the root
+   * folder's current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. Makes the operation only succeed conditional on whether the root
+   * folder's current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 3; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Optional. Makes the operation only succeed conditional on whether the root
+   * folder's current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. Makes the operation only succeed conditional on whether the root
+   * folder's current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int REQUEST_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(2, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(3, ifMetagenerationNotMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, 
ifMetagenerationNotMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.DeleteFolderRecursiveRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.DeleteFolderRecursiveRequest other = + (com.google.storage.control.v2.DeleteFolderRecursiveRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + hash = (37 * hash) + 
REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.storage.control.v2.DeleteFolderRecursiveRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.DeleteFolderRecursiveRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for DeleteFolderRecursive.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.DeleteFolderRecursiveRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.DeleteFolderRecursiveRequest) + com.google.storage.control.v2.DeleteFolderRecursiveRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRecursiveRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRecursiveRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.DeleteFolderRecursiveRequest.class, + com.google.storage.control.v2.DeleteFolderRecursiveRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.DeleteFolderRecursiveRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRecursiveRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteFolderRecursiveRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.DeleteFolderRecursiveRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.storage.control.v2.DeleteFolderRecursiveRequest build() { + com.google.storage.control.v2.DeleteFolderRecursiveRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteFolderRecursiveRequest buildPartial() { + com.google.storage.control.v2.DeleteFolderRecursiveRequest result = + new com.google.storage.control.v2.DeleteFolderRecursiveRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.DeleteFolderRecursiveRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.DeleteFolderRecursiveRequest) { + return mergeFrom((com.google.storage.control.v2.DeleteFolderRecursiveRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.DeleteFolderRecursiveRequest other) { + if (other == com.google.storage.control.v2.DeleteFolderRecursiveRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if 
(other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name of the folder to delete. All of the folder's contents
+     * will be deleted as well. Format:
+     * `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the folder to delete. All of the folder's contents
+     * will be deleted as well. Format:
+     * `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the folder to delete. All of the folder's contents
+     * will be deleted as well. Format:
+     * `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the folder to delete. All of the folder's contents
+     * will be deleted as well. Format:
+     * `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the folder to delete. All of the folder's contents
+     * will be deleted as well. Format:
+     * `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Optional. Makes the operation only succeed conditional on whether the root
+     * folder's current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Optional. Makes the operation only succeed conditional on whether the root
+     * folder's current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Optional. Makes the operation only succeed conditional on whether the root
+     * folder's current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Makes the operation only succeed conditional on whether the root
+     * folder's current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000002); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * Optional. Makes the operation only succeed conditional on whether the root
+     * folder's current metageneration does not match the given value.
+     * 
+ * + * + * optional int64 if_metageneration_not_match = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Optional. Makes the operation only succeed conditional on whether the root
+     * folder's current metageneration does not match the given value.
+     * 
+ * + * + * optional int64 if_metageneration_not_match = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Optional. Makes the operation only succeed conditional on whether the root
+     * folder's current metageneration does not match the given value.
+     * 
+ * + * + * optional int64 if_metageneration_not_match = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Makes the operation only succeed conditional on whether the root
+     * folder's current metageneration does not match the given value.
+     * 
+ * + * + * optional int64 if_metageneration_not_match = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000004); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.DeleteFolderRecursiveRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.DeleteFolderRecursiveRequest) + private static final com.google.storage.control.v2.DeleteFolderRecursiveRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.DeleteFolderRecursiveRequest(); + } + + public static com.google.storage.control.v2.DeleteFolderRecursiveRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteFolderRecursiveRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return 
PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteFolderRecursiveRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRecursiveRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRecursiveRequestOrBuilder.java new file mode 100644 index 000000000000..4c06a0c04aa0 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRecursiveRequestOrBuilder.java @@ -0,0 +1,154 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface DeleteFolderRecursiveRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.DeleteFolderRecursiveRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the folder to delete. All of the folder's contents
+   * will be deleted as well. Format:
+   * `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of the folder to delete; all of its contents will be
+   * deleted as well. Format:
+   * `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Optional. Makes the operation only succeed conditional on whether the root
+   * folder's current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Optional. Makes the operation only succeed conditional on whether the root
+   * folder's current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Optional. Makes the operation only succeed conditional on whether the root
+   * folder's current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. Makes the operation only succeed conditional on whether the root
+   * folder's current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRequest.java new file mode 100644 index 000000000000..6d3f5dc67f7b --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRequest.java @@ -0,0 +1,1114 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for DeleteFolder. This operation is only applicable to a
+ * hierarchical namespace enabled bucket.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.DeleteFolderRequest} + */ +@com.google.protobuf.Generated +public final class DeleteFolderRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.DeleteFolderRequest) + DeleteFolderRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteFolderRequest"); + } + + // Use DeleteFolderRequest.newBuilder() to construct. + private DeleteFolderRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteFolderRequest() { + name_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.DeleteFolderRequest.class, + com.google.storage.control.v2.DeleteFolderRequest.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of the folder.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the folder.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 3; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 4; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int REQUEST_ID_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(3, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(4, ifMetagenerationNotMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, requestId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ifMetagenerationNotMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(5, requestId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.DeleteFolderRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.DeleteFolderRequest other = + (com.google.storage.control.v2.DeleteFolderRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + hash = (37 * hash) + 
REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.DeleteFolderRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DeleteFolderRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DeleteFolderRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DeleteFolderRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DeleteFolderRequest parseFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DeleteFolderRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DeleteFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.control.v2.DeleteFolderRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for DeleteFolder. This operation is only applicable to a
+   * hierarchical namespace enabled bucket.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.DeleteFolderRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.DeleteFolderRequest) + com.google.storage.control.v2.DeleteFolderRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.DeleteFolderRequest.class, + com.google.storage.control.v2.DeleteFolderRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.DeleteFolderRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteFolderRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteFolderRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.DeleteFolderRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteFolderRequest build() { + 
com.google.storage.control.v2.DeleteFolderRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteFolderRequest buildPartial() { + com.google.storage.control.v2.DeleteFolderRequest result = + new com.google.storage.control.v2.DeleteFolderRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.DeleteFolderRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.DeleteFolderRequest) { + return mergeFrom((com.google.storage.control.v2.DeleteFolderRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.DeleteFolderRequest other) { + if (other == com.google.storage.control.v2.DeleteFolderRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (!other.getRequestId().isEmpty()) { 
+ requestId_ = other.requestId_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 24: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 24 + case 32: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 32 + case 42: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 42 + case 50: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of the folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000002); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000004); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.DeleteFolderRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.DeleteFolderRequest) + private static final com.google.storage.control.v2.DeleteFolderRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.DeleteFolderRequest(); + } + + public static com.google.storage.control.v2.DeleteFolderRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteFolderRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteFolderRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRequestOrBuilder.java new file mode 100644 index 000000000000..79378018d996 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteFolderRequestOrBuilder.java @@ -0,0 +1,148 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface DeleteFolderRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.DeleteFolderRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the folder.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of the folder.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteManagedFolderRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteManagedFolderRequest.java new file mode 100644 index 000000000000..8f9dbb82842f --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteManagedFolderRequest.java @@ -0,0 +1,1229 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * DeleteManagedFolder RPC request message.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.DeleteManagedFolderRequest} + */ +@com.google.protobuf.Generated +public final class DeleteManagedFolderRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.DeleteManagedFolderRequest) + DeleteManagedFolderRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteManagedFolderRequest"); + } + + // Use DeleteManagedFolderRequest.newBuilder() to construct. + private DeleteManagedFolderRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteManagedFolderRequest() { + name_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteManagedFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteManagedFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.DeleteManagedFolderRequest.class, + com.google.storage.control.v2.DeleteManagedFolderRequest.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of the managed folder.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+   * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the managed folder.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+   * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 3; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 4; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration NOT matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration NOT matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int ALLOW_NON_EMPTY_FIELD_NUMBER = 5; + private boolean allowNonEmpty_ = false; + + /** + * + * + *
+   * Allows deletion of a managed folder even if it is not empty.
+   * A managed folder is empty if it manages no child managed folders or
+   * objects. Caller must have permission for
+   * storage.managedFolders.setIamPolicy.
+   * 
+ * + * bool allow_non_empty = 5; + * + * @return The allowNonEmpty. + */ + @java.lang.Override + public boolean getAllowNonEmpty() { + return allowNonEmpty_; + } + + public static final int REQUEST_ID_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(3, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(4, ifMetagenerationNotMatch_); + } + if (allowNonEmpty_ != false) { + output.writeBool(5, allowNonEmpty_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, requestId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 7, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ifMetagenerationNotMatch_); + } + if (allowNonEmpty_ != false) { + size += 
com.google.protobuf.CodedOutputStream.computeBoolSize(5, allowNonEmpty_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, requestId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(7, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.DeleteManagedFolderRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.DeleteManagedFolderRequest other = + (com.google.storage.control.v2.DeleteManagedFolderRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (getAllowNonEmpty() != other.getAllowNonEmpty()) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + 
com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + hash = (37 * hash) + ALLOW_NON_EMPTY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getAllowNonEmpty()); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.DeleteManagedFolderRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DeleteManagedFolderRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteManagedFolderRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DeleteManagedFolderRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteManagedFolderRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DeleteManagedFolderRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteManagedFolderRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DeleteManagedFolderRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteManagedFolderRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DeleteManagedFolderRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.DeleteManagedFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DeleteManagedFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return 
DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.DeleteManagedFolderRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * DeleteManagedFolder RPC request message.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.DeleteManagedFolderRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.DeleteManagedFolderRequest) + com.google.storage.control.v2.DeleteManagedFolderRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteManagedFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteManagedFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.DeleteManagedFolderRequest.class, + com.google.storage.control.v2.DeleteManagedFolderRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.DeleteManagedFolderRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + allowNonEmpty_ = false; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DeleteManagedFolderRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteManagedFolderRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.DeleteManagedFolderRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.storage.control.v2.DeleteManagedFolderRequest build() { + com.google.storage.control.v2.DeleteManagedFolderRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteManagedFolderRequest buildPartial() { + com.google.storage.control.v2.DeleteManagedFolderRequest result = + new com.google.storage.control.v2.DeleteManagedFolderRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.DeleteManagedFolderRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.allowNonEmpty_ = allowNonEmpty_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.DeleteManagedFolderRequest) { + return mergeFrom((com.google.storage.control.v2.DeleteManagedFolderRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.DeleteManagedFolderRequest other) { + if (other == com.google.storage.control.v2.DeleteManagedFolderRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasIfMetagenerationMatch()) 
{ + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (other.getAllowNonEmpty() != false) { + setAllowNonEmpty(other.getAllowNonEmpty()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000010; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 24: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 24 + case 32: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 32 + case 40: + { + allowNonEmpty_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 40 + case 50: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 50 + case 58: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 58 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of the managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000002); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration NOT matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration NOT matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration NOT matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration NOT matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000004); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private boolean allowNonEmpty_; + + /** + * + * + *
+     * Allows deletion of a managed folder even if it is not empty.
+     * A managed folder is empty if it manages no child managed folders or
+     * objects. Caller must have permission for
+     * storage.managedFolders.setIamPolicy.
+     * 
+ * + * bool allow_non_empty = 5; + * + * @return The allowNonEmpty. + */ + @java.lang.Override + public boolean getAllowNonEmpty() { + return allowNonEmpty_; + } + + /** + * + * + *
+     * Allows deletion of a managed folder even if it is not empty.
+     * A managed folder is empty if it manages no child managed folders or
+     * objects. Caller must have permission for
+     * storage.managedFolders.setIamPolicy.
+     * 
+ * + * bool allow_non_empty = 5; + * + * @param value The allowNonEmpty to set. + * @return This builder for chaining. + */ + public Builder setAllowNonEmpty(boolean value) { + + allowNonEmpty_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Allows deletion of a managed folder even if it is not empty.
+     * A managed folder is empty if it manages no child managed folders or
+     * objects. Caller must have permission for
+     * storage.managedFolders.setIamPolicy.
+     * 
+ * + * bool allow_non_empty = 5; + * + * @return This builder for chaining. + */ + public Builder clearAllowNonEmpty() { + bitField0_ = (bitField0_ & ~0x00000008); + allowNonEmpty_ = false; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.DeleteManagedFolderRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.DeleteManagedFolderRequest) + private static final com.google.storage.control.v2.DeleteManagedFolderRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.DeleteManagedFolderRequest(); + } + + public static com.google.storage.control.v2.DeleteManagedFolderRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteManagedFolderRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.DeleteManagedFolderRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteManagedFolderRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteManagedFolderRequestOrBuilder.java new file mode 100644 index 000000000000..b8d2ac7c60ad --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DeleteManagedFolderRequestOrBuilder.java @@ -0,0 +1,166 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface DeleteManagedFolderRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.DeleteManagedFolderRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the managed folder.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+   * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of the managed folder.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+   * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration NOT matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration NOT matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Allows deletion of a managed folder even if it is not empty.
+   * A managed folder is empty if it manages no child managed folders or
+   * objects. Caller must have permission for
+   * storage.managedFolders.setIamPolicy.
+   * 
+ * + * bool allow_non_empty = 5; + * + * @return The allowNonEmpty. + */ + boolean getAllowNonEmpty(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DisableAnywhereCacheRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DisableAnywhereCacheRequest.java new file mode 100644 index 000000000000..e6bdbe290db4 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DisableAnywhereCacheRequest.java @@ -0,0 +1,835 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for DisableAnywhereCache.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.DisableAnywhereCacheRequest} + */ +@com.google.protobuf.Generated +public final class DisableAnywhereCacheRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.DisableAnywhereCacheRequest) + DisableAnywhereCacheRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DisableAnywhereCacheRequest"); + } + + // Use DisableAnywhereCacheRequest.newBuilder() to construct. + private DisableAnywhereCacheRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DisableAnywhereCacheRequest() { + name_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DisableAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DisableAnywhereCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.DisableAnywhereCacheRequest.class, + com.google.storage.control.v2.DisableAnywhereCacheRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.DisableAnywhereCacheRequest)) 
{ + return super.equals(obj); + } + com.google.storage.control.v2.DisableAnywhereCacheRequest other = + (com.google.storage.control.v2.DisableAnywhereCacheRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.DisableAnywhereCacheRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DisableAnywhereCacheRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DisableAnywhereCacheRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DisableAnywhereCacheRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DisableAnywhereCacheRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.DisableAnywhereCacheRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.DisableAnywhereCacheRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DisableAnywhereCacheRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.DisableAnywhereCacheRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DisableAnywhereCacheRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.DisableAnywhereCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.DisableAnywhereCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.DisableAnywhereCacheRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for DisableAnywhereCache.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.DisableAnywhereCacheRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.DisableAnywhereCacheRequest) + com.google.storage.control.v2.DisableAnywhereCacheRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DisableAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DisableAnywhereCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.DisableAnywhereCacheRequest.class, + com.google.storage.control.v2.DisableAnywhereCacheRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.DisableAnywhereCacheRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_DisableAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.DisableAnywhereCacheRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.DisableAnywhereCacheRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.DisableAnywhereCacheRequest build() { + 
com.google.storage.control.v2.DisableAnywhereCacheRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.DisableAnywhereCacheRequest buildPartial() { + com.google.storage.control.v2.DisableAnywhereCacheRequest result = + new com.google.storage.control.v2.DisableAnywhereCacheRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.DisableAnywhereCacheRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.requestId_ = requestId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.DisableAnywhereCacheRequest) { + return mergeFrom((com.google.storage.control.v2.DisableAnywhereCacheRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.DisableAnywhereCacheRequest other) { + if (other == com.google.storage.control.v2.DisableAnywhereCacheRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new 
java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.DisableAnywhereCacheRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.DisableAnywhereCacheRequest) + private static final com.google.storage.control.v2.DisableAnywhereCacheRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.DisableAnywhereCacheRequest(); + } + + public static com.google.storage.control.v2.DisableAnywhereCacheRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DisableAnywhereCacheRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } 
+ + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.DisableAnywhereCacheRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DisableAnywhereCacheRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DisableAnywhereCacheRequestOrBuilder.java new file mode 100644 index 000000000000..5f1b15bec581 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/DisableAnywhereCacheRequestOrBuilder.java @@ -0,0 +1,94 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface DisableAnywhereCacheRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.DisableAnywhereCacheRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/Folder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/Folder.java new file mode 100644 index 000000000000..ece7371b142b --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/Folder.java @@ -0,0 +1,1668 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * A folder resource. This resource can only exist in a hierarchical namespace
+ * enabled bucket.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.Folder} + */ +@com.google.protobuf.Generated +public final class Folder extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.Folder) + FolderOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Folder"); + } + + // Use Folder.newBuilder() to construct. + private Folder(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Folder() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_Folder_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_Folder_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.Folder.class, + com.google.storage.control.v2.Folder.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Identifier. The name of this folder.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Identifier. The name of this folder.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int METAGENERATION_FIELD_NUMBER = 3; + private long metageneration_ = 0L; + + /** + * + * + *
+   * Output only. The version of the metadata for this folder. Used for
+   * preconditions and for detecting changes in metadata.
+   * 
+ * + * int64 metageneration = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The metageneration. + */ + @java.lang.Override + public long getMetageneration() { + return metageneration_; + } + + public static final int CREATE_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
+   * Output only. The creation time of the folder.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Output only. The creation time of the folder.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
+   * Output only. The creation time of the folder.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 5; + private com.google.protobuf.Timestamp updateTime_; + + /** + * + * + *
+   * Output only. The modification time of the folder.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + @java.lang.Override + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Output only. The modification time of the folder.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + /** + * + * + *
+   * Output only. The modification time of the folder.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + public static final int PENDING_RENAME_INFO_FIELD_NUMBER = 7; + private com.google.storage.control.v2.PendingRenameInfo pendingRenameInfo_; + + /** + * + * + *
+   * Output only. Only present if the folder is part of an ongoing RenameFolder
+   * operation. Contains information which can be used to query the operation
+   * status. The presence of this field also indicates all write operations are
+   * blocked for this folder, including folder, managed folder, and object
+   * operations.
+   * 
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the pendingRenameInfo field is set. + */ + @java.lang.Override + public boolean hasPendingRenameInfo() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Output only. Only present if the folder is part of an ongoing RenameFolder
+   * operation. Contains information which can be used to query the operation
+   * status. The presence of this field also indicates all write operations are
+   * blocked for this folder, including folder, managed folder, and object
+   * operations.
+   * 
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The pendingRenameInfo. + */ + @java.lang.Override + public com.google.storage.control.v2.PendingRenameInfo getPendingRenameInfo() { + return pendingRenameInfo_ == null + ? com.google.storage.control.v2.PendingRenameInfo.getDefaultInstance() + : pendingRenameInfo_; + } + + /** + * + * + *
+   * Output only. Only present if the folder is part of an ongoing RenameFolder
+   * operation. Contains information which can be used to query the operation
+   * status. The presence of this field also indicates all write operations are
+   * blocked for this folder, including folder, managed folder, and object
+   * operations.
+   * 
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.storage.control.v2.PendingRenameInfoOrBuilder getPendingRenameInfoOrBuilder() { + return pendingRenameInfo_ == null + ? com.google.storage.control.v2.PendingRenameInfo.getDefaultInstance() + : pendingRenameInfo_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (metageneration_ != 0L) { + output.writeInt64(3, metageneration_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(5, getUpdateTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(7, getPendingRenameInfo()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (metageneration_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, metageneration_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getUpdateTime()); 
+ } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getPendingRenameInfo()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.Folder)) { + return super.equals(obj); + } + com.google.storage.control.v2.Folder other = (com.google.storage.control.v2.Folder) obj; + + if (!getName().equals(other.getName())) return false; + if (getMetageneration() != other.getMetageneration()) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasUpdateTime() != other.hasUpdateTime()) return false; + if (hasUpdateTime()) { + if (!getUpdateTime().equals(other.getUpdateTime())) return false; + } + if (hasPendingRenameInfo() != other.hasPendingRenameInfo()) return false; + if (hasPendingRenameInfo()) { + if (!getPendingRenameInfo().equals(other.getPendingRenameInfo())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + METAGENERATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getMetageneration()); + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasUpdateTime()) { + hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getUpdateTime().hashCode(); + } + if (hasPendingRenameInfo()) { + hash = (37 * hash) + 
PENDING_RENAME_INFO_FIELD_NUMBER; + hash = (53 * hash) + getPendingRenameInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.Folder parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.Folder parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.Folder parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.Folder parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.Folder parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.Folder parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.Folder parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.Folder parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.Folder parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.Folder parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.Folder parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.Folder parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.control.v2.Folder prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * A folder resource. This resource can only exist in a hierarchical namespace
+   * enabled bucket.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.Folder} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.Folder) + com.google.storage.control.v2.FolderOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_Folder_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_Folder_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.Folder.class, + com.google.storage.control.v2.Folder.Builder.class); + } + + // Construct using com.google.storage.control.v2.Folder.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCreateTimeFieldBuilder(); + internalGetUpdateTimeFieldBuilder(); + internalGetPendingRenameInfoFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + metageneration_ = 0L; + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + pendingRenameInfo_ = null; + if (pendingRenameInfoBuilder_ != null) { + pendingRenameInfoBuilder_.dispose(); + pendingRenameInfoBuilder_ = null; + } + return this; + } + + 
@java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_Folder_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.Folder getDefaultInstanceForType() { + return com.google.storage.control.v2.Folder.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.Folder build() { + com.google.storage.control.v2.Folder result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.Folder buildPartial() { + com.google.storage.control.v2.Folder result = new com.google.storage.control.v2.Folder(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.Folder result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.metageneration_ = metageneration_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.updateTime_ = updateTimeBuilder_ == null ? updateTime_ : updateTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.pendingRenameInfo_ = + pendingRenameInfoBuilder_ == null + ? 
pendingRenameInfo_ + : pendingRenameInfoBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.Folder) { + return mergeFrom((com.google.storage.control.v2.Folder) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.Folder other) { + if (other == com.google.storage.control.v2.Folder.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getMetageneration() != 0L) { + setMetageneration(other.getMetageneration()); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasUpdateTime()) { + mergeUpdateTime(other.getUpdateTime()); + } + if (other.hasPendingRenameInfo()) { + mergePendingRenameInfo(other.getPendingRenameInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 24: + { + metageneration_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 24 + case 34: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 34 + case 42: + { + 
input.readMessage( + internalGetUpdateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 42 + case 58: + { + input.readMessage( + internalGetPendingRenameInfoFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 58 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Identifier. The name of this folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Identifier. The name of this folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Identifier. The name of this folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Identifier. The name of this folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Identifier. The name of this folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long metageneration_; + + /** + * + * + *
+     * Output only. The version of the metadata for this folder. Used for
+     * preconditions and for detecting changes in metadata.
+     * 
+ * + * int64 metageneration = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The metageneration. + */ + @java.lang.Override + public long getMetageneration() { + return metageneration_; + } + + /** + * + * + *
+     * Output only. The version of the metadata for this folder. Used for
+     * preconditions and for detecting changes in metadata.
+     * 
+ * + * int64 metageneration = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The metageneration to set. + * @return This builder for chaining. + */ + public Builder setMetageneration(long value) { + + metageneration_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The version of the metadata for this folder. Used for
+     * preconditions and for detecting changes in metadata.
+     * 
+ * + * int64 metageneration = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearMetageneration() { + bitField0_ = (bitField0_ & ~0x00000002); + metageneration_ = 0L; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
+     * Output only. The creation time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Output only. The creation time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The creation time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000004); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The creation time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
+     * Output only. The creation time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp updateTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateTimeBuilder_; + + /** + * + * + *
+     * Output only. The modification time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Output only. The modification time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The modification time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + } else { + updateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && updateTime_ != null + && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateTimeBuilder().mergeFrom(value); + } else { + updateTime_ = value; + } + } else { + updateTimeBuilder_.mergeFrom(value); + } + if (updateTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateTime() { + bitField0_ = (bitField0_ & ~0x00000008); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetUpdateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The modification time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } + } + + /** + * + * + *
+     * Output only. The modification time of the folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetUpdateTimeFieldBuilder() { + if (updateTimeBuilder_ == null) { + updateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpdateTime(), getParentForChildren(), isClean()); + updateTime_ = null; + } + return updateTimeBuilder_; + } + + private com.google.storage.control.v2.PendingRenameInfo pendingRenameInfo_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.PendingRenameInfo, + com.google.storage.control.v2.PendingRenameInfo.Builder, + com.google.storage.control.v2.PendingRenameInfoOrBuilder> + pendingRenameInfoBuilder_; + + /** + * + * + *
+     * Output only. Only present if the folder is part of an ongoing RenameFolder
+     * operation. Contains information which can be used to query the operation
+     * status. The presence of this field also indicates all write operations are
+     * blocked for this folder, including folder, managed folder, and object
+     * operations.
+     * 
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the pendingRenameInfo field is set. + */ + public boolean hasPendingRenameInfo() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Output only. Only present if the folder is part of an ongoing RenameFolder
+     * operation. Contains information which can be used to query the operation
+     * status. The presence of this field also indicates all write operations are
+     * blocked for this folder, including folder, managed folder, and object
+     * operations.
+     * 
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The pendingRenameInfo. + */ + public com.google.storage.control.v2.PendingRenameInfo getPendingRenameInfo() { + if (pendingRenameInfoBuilder_ == null) { + return pendingRenameInfo_ == null + ? com.google.storage.control.v2.PendingRenameInfo.getDefaultInstance() + : pendingRenameInfo_; + } else { + return pendingRenameInfoBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. Only present if the folder is part of an ongoing RenameFolder
+     * operation. Contains information which can be used to query the operation
+     * status. The presence of this field also indicates all write operations are
+     * blocked for this folder, including folder, managed folder, and object
+     * operations.
+     * 
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setPendingRenameInfo(com.google.storage.control.v2.PendingRenameInfo value) { + if (pendingRenameInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + pendingRenameInfo_ = value; + } else { + pendingRenameInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Only present if the folder is part of an ongoing RenameFolder
+     * operation. Contains information which can be used to query the operation
+     * status. The presence of this field also indicates all write operations are
+     * blocked for this folder, including folder, managed folder, and object
+     * operations.
+     * 
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setPendingRenameInfo( + com.google.storage.control.v2.PendingRenameInfo.Builder builderForValue) { + if (pendingRenameInfoBuilder_ == null) { + pendingRenameInfo_ = builderForValue.build(); + } else { + pendingRenameInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Only present if the folder is part of an ongoing RenameFolder
+     * operation. Contains information which can be used to query the operation
+     * status. The presence of this field also indicates all write operations are
+     * blocked for this folder, including folder, managed folder, and object
+     * operations.
+     * 
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergePendingRenameInfo(com.google.storage.control.v2.PendingRenameInfo value) { + if (pendingRenameInfoBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && pendingRenameInfo_ != null + && pendingRenameInfo_ + != com.google.storage.control.v2.PendingRenameInfo.getDefaultInstance()) { + getPendingRenameInfoBuilder().mergeFrom(value); + } else { + pendingRenameInfo_ = value; + } + } else { + pendingRenameInfoBuilder_.mergeFrom(value); + } + if (pendingRenameInfo_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. Only present if the folder is part of an ongoing RenameFolder
+     * operation. Contains information which can be used to query the operation
+     * status. The presence of this field also indicates all write operations are
+     * blocked for this folder, including folder, managed folder, and object
+     * operations.
+     * 
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearPendingRenameInfo() { + bitField0_ = (bitField0_ & ~0x00000010); + pendingRenameInfo_ = null; + if (pendingRenameInfoBuilder_ != null) { + pendingRenameInfoBuilder_.dispose(); + pendingRenameInfoBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Only present if the folder is part of an ongoing RenameFolder
+     * operation. Contains information which can be used to query the operation
+     * status. The presence of this field also indicates all write operations are
+     * blocked for this folder, including folder, managed folder, and object
+     * operations.
+     * 
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.control.v2.PendingRenameInfo.Builder getPendingRenameInfoBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetPendingRenameInfoFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Only present if the folder is part of an ongoing RenameFolder
+     * operation. Contains information which can be used to query the operation
+     * status. The presence of this field also indicates all write operations are
+     * blocked for this folder, including folder, managed folder, and object
+     * operations.
+     * 
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.control.v2.PendingRenameInfoOrBuilder + getPendingRenameInfoOrBuilder() { + if (pendingRenameInfoBuilder_ != null) { + return pendingRenameInfoBuilder_.getMessageOrBuilder(); + } else { + return pendingRenameInfo_ == null + ? com.google.storage.control.v2.PendingRenameInfo.getDefaultInstance() + : pendingRenameInfo_; + } + } + + /** + * + * + *
+     * Output only. Only present if the folder is part of an ongoing RenameFolder
+     * operation. Contains information which can be used to query the operation
+     * status. The presence of this field also indicates all write operations are
+     * blocked for this folder, including folder, managed folder, and object
+     * operations.
+     * 
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.PendingRenameInfo, + com.google.storage.control.v2.PendingRenameInfo.Builder, + com.google.storage.control.v2.PendingRenameInfoOrBuilder> + internalGetPendingRenameInfoFieldBuilder() { + if (pendingRenameInfoBuilder_ == null) { + pendingRenameInfoBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.PendingRenameInfo, + com.google.storage.control.v2.PendingRenameInfo.Builder, + com.google.storage.control.v2.PendingRenameInfoOrBuilder>( + getPendingRenameInfo(), getParentForChildren(), isClean()); + pendingRenameInfo_ = null; + } + return pendingRenameInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.Folder) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.Folder) + private static final com.google.storage.control.v2.Folder DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.Folder(); + } + + public static com.google.storage.control.v2.Folder getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Folder parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + 
throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.Folder getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/FolderName.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/FolderName.java new file mode 100644 index 000000000000..239b5fe061d5 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/FolderName.java @@ -0,0 +1,218 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.control.v2; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class FolderName implements ResourceName { + private static final PathTemplate PROJECT_BUCKET_FOLDER = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/buckets/{bucket}/folders/{folder=**}"); + private volatile Map fieldValuesMap; + private final String project; + private final String bucket; + private final String folder; + + @Deprecated + protected FolderName() { + project = null; + bucket = null; + folder = null; + } + + private FolderName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + bucket = Preconditions.checkNotNull(builder.getBucket()); + folder = Preconditions.checkNotNull(builder.getFolder()); + } + + public String getProject() { + return project; + } + + public String getBucket() { + return bucket; + } + + public String getFolder() { + return folder; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static FolderName of(String project, String bucket, String folder) { + return newBuilder().setProject(project).setBucket(bucket).setFolder(folder).build(); + } + + public static String format(String project, String bucket, String folder) { + return newBuilder().setProject(project).setBucket(bucket).setFolder(folder).build().toString(); + } + + public static FolderName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_BUCKET_FOLDER.validatedMatch( + formattedString, "FolderName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("bucket"), matchMap.get("folder")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new 
ArrayList<>(values.size()); + for (FolderName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_BUCKET_FOLDER.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (bucket != null) { + fieldMapBuilder.put("bucket", bucket); + } + if (folder != null) { + fieldMapBuilder.put("folder", folder); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_BUCKET_FOLDER.instantiate( + "project", project, "bucket", bucket, "folder", folder); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + FolderName that = ((FolderName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.bucket, that.bucket) + && Objects.equals(this.folder, that.folder); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(bucket); + h *= 1000003; + h ^= Objects.hashCode(folder); + return h; + } + + /** Builder for projects/{project}/buckets/{bucket}/folders/{folder=**}. 
*/ + public static class Builder { + private String project; + private String bucket; + private String folder; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getBucket() { + return bucket; + } + + public String getFolder() { + return folder; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setBucket(String bucket) { + this.bucket = bucket; + return this; + } + + public Builder setFolder(String folder) { + this.folder = folder; + return this; + } + + private Builder(FolderName folderName) { + this.project = folderName.project; + this.bucket = folderName.bucket; + this.folder = folderName.folder; + } + + public FolderName build() { + return new FolderName(this); + } + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/FolderOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/FolderOrBuilder.java new file mode 100644 index 000000000000..6b5ebe7943c8 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/FolderOrBuilder.java @@ -0,0 +1,205 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface FolderOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.Folder) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Identifier. The name of this folder.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * </pre>
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Identifier. The name of this folder.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * </pre>
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Output only. The version of the metadata for this folder. Used for
+   * preconditions and for detecting changes in metadata.
+   * </pre>
+ * + * int64 metageneration = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The metageneration. + */ + long getMetageneration(); + + /** + * + * + *
+   * Output only. The creation time of the folder.
+   * </pre>
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the folder.
+   * </pre>
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the folder.
+   * </pre>
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. The modification time of the folder.
+   * </pre>
+ * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + boolean hasUpdateTime(); + + /** + * + * + *
+   * Output only. The modification time of the folder.
+   * </pre>
+ * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + com.google.protobuf.Timestamp getUpdateTime(); + + /** + * + * + *
+   * Output only. The modification time of the folder.
+   * </pre>
+ * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. Only present if the folder is part of an ongoing RenameFolder
+   * operation. Contains information which can be used to query the operation
+   * status. The presence of this field also indicates all write operations are
+   * blocked for this folder, including folder, managed folder, and object
+   * operations.
+   * </pre>
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the pendingRenameInfo field is set. + */ + boolean hasPendingRenameInfo(); + + /** + * + * + *
+   * Output only. Only present if the folder is part of an ongoing RenameFolder
+   * operation. Contains information which can be used to query the operation
+   * status. The presence of this field also indicates all write operations are
+   * blocked for this folder, including folder, managed folder, and object
+   * operations.
+   * </pre>
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The pendingRenameInfo. + */ + com.google.storage.control.v2.PendingRenameInfo getPendingRenameInfo(); + + /** + * + * + *
+   * Output only. Only present if the folder is part of an ongoing RenameFolder
+   * operation. Contains information which can be used to query the operation
+   * status. The presence of this field also indicates all write operations are
+   * blocked for this folder, including folder, managed folder, and object
+   * operations.
+   * </pre>
+ * + * + * .google.storage.control.v2.PendingRenameInfo pending_rename_info = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.storage.control.v2.PendingRenameInfoOrBuilder getPendingRenameInfoOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetAnywhereCacheRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetAnywhereCacheRequest.java new file mode 100644 index 000000000000..ada45abf7bf2 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetAnywhereCacheRequest.java @@ -0,0 +1,828 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for GetAnywhereCache.
+ * </pre>
+ * + * Protobuf type {@code google.storage.control.v2.GetAnywhereCacheRequest} + */ +@com.google.protobuf.Generated +public final class GetAnywhereCacheRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.GetAnywhereCacheRequest) + GetAnywhereCacheRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetAnywhereCacheRequest"); + } + + // Use GetAnywhereCacheRequest.newBuilder() to construct. + private GetAnywhereCacheRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetAnywhereCacheRequest() { + name_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetAnywhereCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetAnywhereCacheRequest.class, + com.google.storage.control.v2.GetAnywhereCacheRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * </pre>
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * </pre>
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.GetAnywhereCacheRequest)) { + 
return super.equals(obj); + } + com.google.storage.control.v2.GetAnywhereCacheRequest other = + (com.google.storage.control.v2.GetAnywhereCacheRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.GetAnywhereCacheRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetAnywhereCacheRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetAnywhereCacheRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetAnywhereCacheRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetAnywhereCacheRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetAnywhereCacheRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetAnywhereCacheRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetAnywhereCacheRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetAnywhereCacheRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetAnywhereCacheRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetAnywhereCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetAnywhereCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.GetAnywhereCacheRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for GetAnywhereCache.
+   * </pre>
+ * + * Protobuf type {@code google.storage.control.v2.GetAnywhereCacheRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.GetAnywhereCacheRequest) + com.google.storage.control.v2.GetAnywhereCacheRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetAnywhereCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetAnywhereCacheRequest.class, + com.google.storage.control.v2.GetAnywhereCacheRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.GetAnywhereCacheRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.GetAnywhereCacheRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.GetAnywhereCacheRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.GetAnywhereCacheRequest build() { + com.google.storage.control.v2.GetAnywhereCacheRequest result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.GetAnywhereCacheRequest buildPartial() { + com.google.storage.control.v2.GetAnywhereCacheRequest result = + new com.google.storage.control.v2.GetAnywhereCacheRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.GetAnywhereCacheRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.requestId_ = requestId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.GetAnywhereCacheRequest) { + return mergeFrom((com.google.storage.control.v2.GetAnywhereCacheRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.GetAnywhereCacheRequest other) { + if (other == com.google.storage.control.v2.GetAnywhereCacheRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = 
input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * </pre>
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * </pre>
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * </pre>
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * </pre>
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * </pre>
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.GetAnywhereCacheRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.GetAnywhereCacheRequest) + private static final com.google.storage.control.v2.GetAnywhereCacheRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.GetAnywhereCacheRequest(); + } + + public static com.google.storage.control.v2.GetAnywhereCacheRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetAnywhereCacheRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override 
+ public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.GetAnywhereCacheRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetAnywhereCacheRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetAnywhereCacheRequestOrBuilder.java new file mode 100644 index 000000000000..3bf82626be51 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetAnywhereCacheRequestOrBuilder.java @@ -0,0 +1,92 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface GetAnywhereCacheRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.GetAnywhereCacheRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetFolderIntelligenceConfigRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetFolderIntelligenceConfigRequest.java new file mode 100644 index 000000000000..4a89c575b2ed --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetFolderIntelligenceConfigRequest.java @@ -0,0 +1,654 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message to get the `IntelligenceConfig` resource associated with your
+ * folder.
+ *
+ * **IAM Permissions**
+ *
+ * Requires `storage.intelligenceConfigs.get`
+ * [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on
+ * the folder.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.GetFolderIntelligenceConfigRequest} + */ +@com.google.protobuf.Generated +public final class GetFolderIntelligenceConfigRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.GetFolderIntelligenceConfigRequest) + GetFolderIntelligenceConfigRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetFolderIntelligenceConfigRequest"); + } + + // Use GetFolderIntelligenceConfigRequest.newBuilder() to construct. + private GetFolderIntelligenceConfigRequest( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetFolderIntelligenceConfigRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetFolderIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetFolderIntelligenceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest.class, + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name of the `IntelligenceConfig` resource associated with
+   * your folder.
+   *
+   * Format: `folders/{id}/locations/global/intelligenceConfig`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the `IntelligenceConfig` resource associated with
+   * your folder.
+   *
+   * Format: `folders/{id}/locations/global/intelligenceConfig`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.GetFolderIntelligenceConfigRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest other = + (com.google.storage.control.v2.GetFolderIntelligenceConfigRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + 
return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.GetFolderIntelligenceConfigRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetFolderIntelligenceConfigRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetFolderIntelligenceConfigRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetFolderIntelligenceConfigRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetFolderIntelligenceConfigRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetFolderIntelligenceConfigRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.storage.control.v2.GetFolderIntelligenceConfigRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetFolderIntelligenceConfigRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetFolderIntelligenceConfigRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetFolderIntelligenceConfigRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetFolderIntelligenceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetFolderIntelligenceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.storage.control.v2.GetFolderIntelligenceConfigRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message to get the `IntelligenceConfig` resource associated with your
+   * folder.
+   *
+   * **IAM Permissions**
+   *
+   * Requires `storage.intelligenceConfigs.get`
+   * [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on
+   * the folder.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.GetFolderIntelligenceConfigRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.GetFolderIntelligenceConfigRequest) + com.google.storage.control.v2.GetFolderIntelligenceConfigRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetFolderIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetFolderIntelligenceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest.class, + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.GetFolderIntelligenceConfigRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetFolderIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.GetFolderIntelligenceConfigRequest + getDefaultInstanceForType() { + return com.google.storage.control.v2.GetFolderIntelligenceConfigRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.storage.control.v2.GetFolderIntelligenceConfigRequest build() { + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.GetFolderIntelligenceConfigRequest buildPartial() { + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest result = + new com.google.storage.control.v2.GetFolderIntelligenceConfigRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.GetFolderIntelligenceConfigRequest) { + return mergeFrom((com.google.storage.control.v2.GetFolderIntelligenceConfigRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.control.v2.GetFolderIntelligenceConfigRequest other) { + if (other + == com.google.storage.control.v2.GetFolderIntelligenceConfigRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; 
+ while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your folder.
+     *
+     * Format: `folders/{id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your folder.
+     *
+     * Format: `folders/{id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your folder.
+     *
+     * Format: `folders/{id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your folder.
+     *
+     * Format: `folders/{id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your folder.
+     *
+     * Format: `folders/{id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.GetFolderIntelligenceConfigRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.GetFolderIntelligenceConfigRequest) + private static final com.google.storage.control.v2.GetFolderIntelligenceConfigRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.GetFolderIntelligenceConfigRequest(); + } + + public static com.google.storage.control.v2.GetFolderIntelligenceConfigRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetFolderIntelligenceConfigRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser 
parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.GetFolderIntelligenceConfigRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetFolderIntelligenceConfigRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetFolderIntelligenceConfigRequestOrBuilder.java new file mode 100644 index 000000000000..51138a3750c4 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetFolderIntelligenceConfigRequestOrBuilder.java @@ -0,0 +1,64 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface GetFolderIntelligenceConfigRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.GetFolderIntelligenceConfigRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the `IntelligenceConfig` resource associated with
+   * your folder.
+   *
+   * Format: `folders/{id}/locations/global/intelligenceConfig`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name of the `IntelligenceConfig` resource associated with
+   * your folder.
+   *
+   * Format: `folders/{id}/locations/global/intelligenceConfig`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetFolderRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetFolderRequest.java new file mode 100644 index 000000000000..e4f6111ba3d9 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetFolderRequest.java @@ -0,0 +1,1113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for GetFolder. This operation is only applicable to a
+ * hierarchical namespace enabled bucket.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.GetFolderRequest} + */ +@com.google.protobuf.Generated +public final class GetFolderRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.GetFolderRequest) + GetFolderRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetFolderRequest"); + } + + // Use GetFolderRequest.newBuilder() to construct. + private GetFolderRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetFolderRequest() { + name_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetFolderRequest.class, + com.google.storage.control.v2.GetFolderRequest.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of the folder.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the folder.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 3; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 4; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int REQUEST_ID_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(3, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(4, ifMetagenerationNotMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, requestId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ifMetagenerationNotMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(5, requestId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.GetFolderRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.GetFolderRequest other = + (com.google.storage.control.v2.GetFolderRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + hash = (37 * hash) + 
REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.GetFolderRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetFolderRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetFolderRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetFolderRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetFolderRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetFolderRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetFolderRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetFolderRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetFolderRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetFolderRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.control.v2.GetFolderRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for GetFolder. This operation is only applicable to a
+   * hierarchical namespace enabled bucket.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.GetFolderRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.GetFolderRequest) + com.google.storage.control.v2.GetFolderRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetFolderRequest.class, + com.google.storage.control.v2.GetFolderRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.GetFolderRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetFolderRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.GetFolderRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.GetFolderRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.GetFolderRequest build() { + com.google.storage.control.v2.GetFolderRequest result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.GetFolderRequest buildPartial() { + com.google.storage.control.v2.GetFolderRequest result = + new com.google.storage.control.v2.GetFolderRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.GetFolderRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.GetFolderRequest) { + return mergeFrom((com.google.storage.control.v2.GetFolderRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.GetFolderRequest other) { + if (other == com.google.storage.control.v2.GetFolderRequest.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000008; + onChanged(); + } + 
this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 24: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 24 + case 32: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 32 + case 42: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 42 + case 50: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of the folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the folder.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000002); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the folder's
+     * current metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000004); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.GetFolderRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.GetFolderRequest) + private static final com.google.storage.control.v2.GetFolderRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.GetFolderRequest(); + } + + public static com.google.storage.control.v2.GetFolderRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetFolderRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.GetFolderRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetFolderRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetFolderRequestOrBuilder.java new file mode 100644 index 000000000000..d202c522aef2 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetFolderRequestOrBuilder.java @@ -0,0 +1,148 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface GetFolderRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.GetFolderRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the folder.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of the folder.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the folder's
+   * current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetManagedFolderRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetManagedFolderRequest.java new file mode 100644 index 000000000000..081e57e1b9e6 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetManagedFolderRequest.java @@ -0,0 +1,1120 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for GetManagedFolder.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.GetManagedFolderRequest} + */ +@com.google.protobuf.Generated +public final class GetManagedFolderRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.GetManagedFolderRequest) + GetManagedFolderRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetManagedFolderRequest"); + } + + // Use GetManagedFolderRequest.newBuilder() to construct. + private GetManagedFolderRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetManagedFolderRequest() { + name_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetManagedFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetManagedFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetManagedFolderRequest.class, + com.google.storage.control.v2.GetManagedFolderRequest.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of the managed folder.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+   * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the managed folder.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+   * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 3; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 4; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration NOT matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration NOT matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int REQUEST_ID_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(3, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(4, ifMetagenerationNotMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, requestId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ifMetagenerationNotMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(5, requestId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.GetManagedFolderRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.GetManagedFolderRequest other = + (com.google.storage.control.v2.GetManagedFolderRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + hash = (37 * 
hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.GetManagedFolderRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetManagedFolderRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetManagedFolderRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetManagedFolderRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetManagedFolderRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetManagedFolderRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetManagedFolderRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetManagedFolderRequest parseFrom( + 
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetManagedFolderRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetManagedFolderRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetManagedFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetManagedFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.GetManagedFolderRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for GetManagedFolder.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.GetManagedFolderRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.GetManagedFolderRequest) + com.google.storage.control.v2.GetManagedFolderRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetManagedFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetManagedFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetManagedFolderRequest.class, + com.google.storage.control.v2.GetManagedFolderRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.GetManagedFolderRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetManagedFolderRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.GetManagedFolderRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.GetManagedFolderRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.GetManagedFolderRequest build() { + 
com.google.storage.control.v2.GetManagedFolderRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.GetManagedFolderRequest buildPartial() { + com.google.storage.control.v2.GetManagedFolderRequest result = + new com.google.storage.control.v2.GetManagedFolderRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.GetManagedFolderRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.GetManagedFolderRequest) { + return mergeFrom((com.google.storage.control.v2.GetManagedFolderRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.GetManagedFolderRequest other) { + if (other == com.google.storage.control.v2.GetManagedFolderRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if 
(!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 24: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 24 + case 32: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 32 + case 42: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 42 + case 50: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of the managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000002); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration NOT matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration NOT matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration NOT matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * The operation succeeds conditional on the managed folder's current
+     * metageneration NOT matching the value here specified.
+     * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000004); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.GetManagedFolderRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.GetManagedFolderRequest) + private static final com.google.storage.control.v2.GetManagedFolderRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.GetManagedFolderRequest(); + } + + public static com.google.storage.control.v2.GetManagedFolderRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetManagedFolderRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override 
+ public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.GetManagedFolderRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetManagedFolderRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetManagedFolderRequestOrBuilder.java new file mode 100644 index 000000000000..b7ddcade5e3a --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetManagedFolderRequestOrBuilder.java @@ -0,0 +1,150 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface GetManagedFolderRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.GetManagedFolderRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the managed folder.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+   * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of the managed folder.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+   * 
+ * + * + * string name = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_match = 3; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration NOT matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * The operation succeeds conditional on the managed folder's current
+   * metageneration NOT matching the value here specified.
+   * 
+ * + * optional int64 if_metageneration_not_match = 4; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetOrganizationIntelligenceConfigRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetOrganizationIntelligenceConfigRequest.java new file mode 100644 index 000000000000..cb029883f400 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetOrganizationIntelligenceConfigRequest.java @@ -0,0 +1,660 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message to get the `IntelligenceConfig` resource associated with your
+ * organization.
+ *
+ * **IAM Permissions**
+ *
+ * Requires `storage.intelligenceConfigs.get`
+ * [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on
+ * the organization.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.GetOrganizationIntelligenceConfigRequest} + */ +@com.google.protobuf.Generated +public final class GetOrganizationIntelligenceConfigRequest + extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.GetOrganizationIntelligenceConfigRequest) + GetOrganizationIntelligenceConfigRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetOrganizationIntelligenceConfigRequest"); + } + + // Use GetOrganizationIntelligenceConfigRequest.newBuilder() to construct. + private GetOrganizationIntelligenceConfigRequest( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetOrganizationIntelligenceConfigRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetOrganizationIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetOrganizationIntelligenceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest.class, + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name of the `IntelligenceConfig` resource associated with
+   * your organization.
+   *
+   * Format: `organizations/{org_id}/locations/global/intelligenceConfig`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the `IntelligenceConfig` resource associated with
+   * your organization.
+   *
+   * Format: `organizations/{org_id}/locations/global/intelligenceConfig`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest other = + (com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) 
return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + 
public static com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder 
newBuilder( + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message to get the `IntelligenceConfig` resource associated with your
+   * organization.
+   *
+   * **IAM Permissions**
+   *
+   * Requires `storage.intelligenceConfigs.get`
+   * [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on
+   * the organization.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.GetOrganizationIntelligenceConfigRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.GetOrganizationIntelligenceConfigRequest) + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetOrganizationIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetOrganizationIntelligenceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest.class, + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest.Builder.class); + } + + // Construct using + // com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetOrganizationIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest + getDefaultInstanceForType() { + return com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest + 
.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest build() { + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest buildPartial() { + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest result = + new com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest) { + return mergeFrom( + (com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest other) { + if (other + == com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest + .getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your organization.
+     *
+     * Format: `organizations/{org_id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your organization.
+     *
+     * Format: `organizations/{org_id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your organization.
+     *
+     * Format: `organizations/{org_id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your organization.
+     *
+     * Format: `organizations/{org_id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your organization.
+     *
+     * Format: `organizations/{org_id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.GetOrganizationIntelligenceConfigRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.GetOrganizationIntelligenceConfigRequest) + private static final com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest(); + } + + public static com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetOrganizationIntelligenceConfigRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + 
public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.GetOrganizationIntelligenceConfigRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetOrganizationIntelligenceConfigRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetOrganizationIntelligenceConfigRequestOrBuilder.java new file mode 100644 index 000000000000..f34394bc4b4f --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetOrganizationIntelligenceConfigRequestOrBuilder.java @@ -0,0 +1,64 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface GetOrganizationIntelligenceConfigRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.GetOrganizationIntelligenceConfigRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the `IntelligenceConfig` resource associated with
+   * your organization.
+   *
+   * Format: `organizations/{org_id}/locations/global/intelligenceConfig`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name of the `IntelligenceConfig` resource associated with
+   * your organization.
+   *
+   * Format: `organizations/{org_id}/locations/global/intelligenceConfig`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetProjectIntelligenceConfigRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetProjectIntelligenceConfigRequest.java new file mode 100644 index 000000000000..02d09a3a7755 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetProjectIntelligenceConfigRequest.java @@ -0,0 +1,656 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message to get the `IntelligenceConfig` resource associated with your
+ * project.
+ *
+ * **IAM Permissions**:
+ *
+ * Requires `storage.intelligenceConfigs.get`
+ * [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission
+ * on the project.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.GetProjectIntelligenceConfigRequest} + */ +@com.google.protobuf.Generated +public final class GetProjectIntelligenceConfigRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.GetProjectIntelligenceConfigRequest) + GetProjectIntelligenceConfigRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetProjectIntelligenceConfigRequest"); + } + + // Use GetProjectIntelligenceConfigRequest.newBuilder() to construct. + private GetProjectIntelligenceConfigRequest( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetProjectIntelligenceConfigRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetProjectIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetProjectIntelligenceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest.class, + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name of the `IntelligenceConfig` resource associated with
+   * your project.
+   *
+   * Format: `projects/{id}/locations/global/intelligenceConfig`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the `IntelligenceConfig` resource associated with
+   * your project.
+   *
+   * Format: `projects/{id}/locations/global/intelligenceConfig`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.GetProjectIntelligenceConfigRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest other = + (com.google.storage.control.v2.GetProjectIntelligenceConfigRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + 
return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.GetProjectIntelligenceConfigRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetProjectIntelligenceConfigRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetProjectIntelligenceConfigRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetProjectIntelligenceConfigRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetProjectIntelligenceConfigRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetProjectIntelligenceConfigRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.storage.control.v2.GetProjectIntelligenceConfigRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetProjectIntelligenceConfigRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetProjectIntelligenceConfigRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetProjectIntelligenceConfigRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetProjectIntelligenceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetProjectIntelligenceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.storage.control.v2.GetProjectIntelligenceConfigRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message to get the `IntelligenceConfig` resource associated with your
+   * project.
+   *
+   * **IAM Permissions**:
+   *
+   * Requires `storage.intelligenceConfigs.get`
+   * [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission
+   * on the project.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.GetProjectIntelligenceConfigRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.GetProjectIntelligenceConfigRequest) + com.google.storage.control.v2.GetProjectIntelligenceConfigRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetProjectIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetProjectIntelligenceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest.class, + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest.Builder.class); + } + + // Construct using + // com.google.storage.control.v2.GetProjectIntelligenceConfigRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetProjectIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.GetProjectIntelligenceConfigRequest + getDefaultInstanceForType() { + return com.google.storage.control.v2.GetProjectIntelligenceConfigRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.storage.control.v2.GetProjectIntelligenceConfigRequest build() { + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.GetProjectIntelligenceConfigRequest buildPartial() { + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest result = + new com.google.storage.control.v2.GetProjectIntelligenceConfigRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.GetProjectIntelligenceConfigRequest) { + return mergeFrom((com.google.storage.control.v2.GetProjectIntelligenceConfigRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.control.v2.GetProjectIntelligenceConfigRequest other) { + if (other + == com.google.storage.control.v2.GetProjectIntelligenceConfigRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean 
done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your project.
+     *
+     * Format: `projects/{id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your project.
+     *
+     * Format: `projects/{id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your project.
+     *
+     * Format: `projects/{id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your project.
+     *
+     * Format: `projects/{id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the `IntelligenceConfig` resource associated with
+     * your project.
+     *
+     * Format: `projects/{id}/locations/global/intelligenceConfig`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.GetProjectIntelligenceConfigRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.GetProjectIntelligenceConfigRequest) + private static final com.google.storage.control.v2.GetProjectIntelligenceConfigRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.GetProjectIntelligenceConfigRequest(); + } + + public static com.google.storage.control.v2.GetProjectIntelligenceConfigRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetProjectIntelligenceConfigRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static 
com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.GetProjectIntelligenceConfigRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetProjectIntelligenceConfigRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetProjectIntelligenceConfigRequestOrBuilder.java new file mode 100644 index 000000000000..45bf8a2fe4c0 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetProjectIntelligenceConfigRequestOrBuilder.java @@ -0,0 +1,64 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface GetProjectIntelligenceConfigRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.GetProjectIntelligenceConfigRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the `IntelligenceConfig` resource associated with
+   * your project.
+   *
+   * Format: `projects/{id}/locations/global/intelligenceConfig`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name of the `IntelligenceConfig` resource associated with
+   * your project.
+   *
+   * Format: `projects/{id}/locations/global/intelligenceConfig`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetStorageLayoutRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetStorageLayoutRequest.java new file mode 100644 index 000000000000..0171cc697c89 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetStorageLayoutRequest.java @@ -0,0 +1,1024 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for GetStorageLayout.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.GetStorageLayoutRequest} + */ +@com.google.protobuf.Generated +public final class GetStorageLayoutRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.GetStorageLayoutRequest) + GetStorageLayoutRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetStorageLayoutRequest"); + } + + // Use GetStorageLayoutRequest.newBuilder() to construct. + private GetStorageLayoutRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetStorageLayoutRequest() { + name_ = ""; + prefix_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetStorageLayoutRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetStorageLayoutRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetStorageLayoutRequest.class, + com.google.storage.control.v2.GetStorageLayoutRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name of the StorageLayout resource.
+   * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the StorageLayout resource.
+   * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PREFIX_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object prefix_ = ""; + + /** + * + * + *
+   * An optional prefix used for permission check. It is useful when the caller
+   * only has limited permissions under a specific prefix.
+   * 
+ * + * string prefix = 2; + * + * @return The prefix. + */ + @java.lang.Override + public java.lang.String getPrefix() { + java.lang.Object ref = prefix_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + prefix_ = s; + return s; + } + } + + /** + * + * + *
+   * An optional prefix used for permission check. It is useful when the caller
+   * only has limited permissions under a specific prefix.
+   * 
+ * + * string prefix = 2; + * + * @return The bytes for prefix. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPrefixBytes() { + java.lang.Object ref = prefix_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + prefix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(prefix_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, prefix_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(prefix_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, prefix_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, requestId_); 
+ } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.GetStorageLayoutRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.GetStorageLayoutRequest other = + (com.google.storage.control.v2.GetStorageLayoutRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getPrefix().equals(other.getPrefix())) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + PREFIX_FIELD_NUMBER; + hash = (53 * hash) + getPrefix().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.GetStorageLayoutRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetStorageLayoutRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetStorageLayoutRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetStorageLayoutRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetStorageLayoutRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.GetStorageLayoutRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.GetStorageLayoutRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetStorageLayoutRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetStorageLayoutRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetStorageLayoutRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.GetStorageLayoutRequest 
parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.GetStorageLayoutRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.GetStorageLayoutRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for GetStorageLayout.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.GetStorageLayoutRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.GetStorageLayoutRequest) + com.google.storage.control.v2.GetStorageLayoutRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetStorageLayoutRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetStorageLayoutRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.GetStorageLayoutRequest.class, + com.google.storage.control.v2.GetStorageLayoutRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.GetStorageLayoutRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + prefix_ = ""; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_GetStorageLayoutRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.GetStorageLayoutRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.GetStorageLayoutRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.GetStorageLayoutRequest build() { + 
com.google.storage.control.v2.GetStorageLayoutRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.GetStorageLayoutRequest buildPartial() { + com.google.storage.control.v2.GetStorageLayoutRequest result = + new com.google.storage.control.v2.GetStorageLayoutRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.GetStorageLayoutRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.prefix_ = prefix_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.requestId_ = requestId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.GetStorageLayoutRequest) { + return mergeFrom((com.google.storage.control.v2.GetStorageLayoutRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.GetStorageLayoutRequest other) { + if (other == com.google.storage.control.v2.GetStorageLayoutRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getPrefix().isEmpty()) { + prefix_ = other.prefix_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream 
input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + prefix_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name of the StorageLayout resource.
+     * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the StorageLayout resource.
+     * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the StorageLayout resource.
+     * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the StorageLayout resource.
+     * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the StorageLayout resource.
+     * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object prefix_ = ""; + + /** + * + * + *
+     * An optional prefix used for permission check. It is useful when the caller
+     * only has limited permissions under a specific prefix.
+     * 
+ * + * string prefix = 2; + * + * @return The prefix. + */ + public java.lang.String getPrefix() { + java.lang.Object ref = prefix_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + prefix_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * An optional prefix used for permission check. It is useful when the caller
+     * only has limited permissions under a specific prefix.
+     * 
+ * + * string prefix = 2; + * + * @return The bytes for prefix. + */ + public com.google.protobuf.ByteString getPrefixBytes() { + java.lang.Object ref = prefix_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + prefix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * An optional prefix used for permission check. It is useful when the caller
+     * only has limited permissions under a specific prefix.
+     * 
+ * + * string prefix = 2; + * + * @param value The prefix to set. + * @return This builder for chaining. + */ + public Builder setPrefix(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + prefix_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * An optional prefix used for permission check. It is useful when the caller
+     * only has limited permissions under a specific prefix.
+     * 
+ * + * string prefix = 2; + * + * @return This builder for chaining. + */ + public Builder clearPrefix() { + prefix_ = getDefaultInstance().getPrefix(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * An optional prefix used for permission check. It is useful when the caller
+     * only has limited permissions under a specific prefix.
+     * 
+ * + * string prefix = 2; + * + * @param value The bytes for prefix to set. + * @return This builder for chaining. + */ + public Builder setPrefixBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + prefix_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.GetStorageLayoutRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.GetStorageLayoutRequest) + private static final com.google.storage.control.v2.GetStorageLayoutRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.GetStorageLayoutRequest(); + } + + public static com.google.storage.control.v2.GetStorageLayoutRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetStorageLayoutRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override 
+ public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.GetStorageLayoutRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetStorageLayoutRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetStorageLayoutRequestOrBuilder.java new file mode 100644 index 000000000000..b8b2b4c27bd3 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/GetStorageLayoutRequestOrBuilder.java @@ -0,0 +1,120 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface GetStorageLayoutRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.GetStorageLayoutRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the StorageLayout resource.
+   * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name of the StorageLayout resource.
+   * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * An optional prefix used for permission check. It is useful when the caller
+   * only has limited permissions under a specific prefix.
+   * 
+ * + * string prefix = 2; + * + * @return The prefix. + */ + java.lang.String getPrefix(); + + /** + * + * + *
+   * An optional prefix used for permission check. It is useful when the caller
+   * only has limited permissions under a specific prefix.
+   * 
+ * + * string prefix = 2; + * + * @return The bytes for prefix. + */ + com.google.protobuf.ByteString getPrefixBytes(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/IntelligenceConfig.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/IntelligenceConfig.java new file mode 100644 index 000000000000..3689ed5d342a --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/IntelligenceConfig.java @@ -0,0 +1,8068 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * The `IntelligenceConfig` resource associated with your organization, folder,
+ * or project.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.IntelligenceConfig} + */ +@com.google.protobuf.Generated +public final class IntelligenceConfig extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.IntelligenceConfig) + IntelligenceConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "IntelligenceConfig"); + } + + // Use IntelligenceConfig.newBuilder() to construct. + private IntelligenceConfig(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private IntelligenceConfig() { + name_ = ""; + editionConfig_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.IntelligenceConfig.class, + com.google.storage.control.v2.IntelligenceConfig.Builder.class); + } + + /** + * + * + *
+   * The edition configuration of the `IntelligenceConfig` resource. This
+   * signifies the edition used for configuring the `IntelligenceConfig`
+   * resource and can only take the following values:
+   * `EDITION_CONFIG_UNSPECIFIED`, `INHERIT`, `DISABLED`, `STANDARD` and
+   * `TRIAL`.
+   * 
+ * + * Protobuf enum {@code google.storage.control.v2.IntelligenceConfig.EditionConfig} + */ + public enum EditionConfig implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * This is an unknown edition of the resource.
+     * 
+ * + * EDITION_CONFIG_UNSPECIFIED = 0; + */ + EDITION_CONFIG_UNSPECIFIED(0), + /** + * + * + *
+     * The inherited edition from the parent and filters. This is the default
+     * edition when there is no `IntelligenceConfig` setup for a GCP resource.
+     * 
+ * + * INHERIT = 1; + */ + INHERIT(1), + /** + * + * + *
+     * The edition configuration is disabled for the `IntelligenceConfig`
+     * resource and its children. Filters are not applicable.
+     * 
+ * + * DISABLED = 2; + */ + DISABLED(2), + /** + * + * + *
+     * The `IntelligenceConfig` resource is of STANDARD edition.
+     * 
+ * + * STANDARD = 3; + */ + STANDARD(3), + /** + * + * + *
+     * The `IntelligenceConfig` resource is available in `TRIAL` edition. During
+     * the trial period, Cloud Storage does not charge for Storage Intelligence
+     * usage. You can specify the buckets to include in the trial period by
+     * using filters. At the end of the trial period, the `IntelligenceConfig`
+     * resource is upgraded to `STANDARD` edition.
+     * 
+ * + * TRIAL = 5; + */ + TRIAL(5), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "EditionConfig"); + } + + /** + * + * + *
+     * This is an unknown edition of the resource.
+     * 
+ * + * EDITION_CONFIG_UNSPECIFIED = 0; + */ + public static final int EDITION_CONFIG_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * The inherited edition from the parent and filters. This is the default
+     * edition when there is no `IntelligenceConfig` setup for a GCP resource.
+     * 
+ * + * INHERIT = 1; + */ + public static final int INHERIT_VALUE = 1; + + /** + * + * + *
+     * The edition configuration is disabled for the `IntelligenceConfig`
+     * resource and its children. Filters are not applicable.
+     * 
+ * + * DISABLED = 2; + */ + public static final int DISABLED_VALUE = 2; + + /** + * + * + *
+     * The `IntelligenceConfig` resource is of STANDARD edition.
+     * 
+ * + * STANDARD = 3; + */ + public static final int STANDARD_VALUE = 3; + + /** + * + * + *
+     * The `IntelligenceConfig` resource is available in `TRIAL` edition. During
+     * the trial period, Cloud Storage does not charge for Storage Intelligence
+     * usage. You can specify the buckets to include in the trial period by
+     * using filters. At the end of the trial period, the `IntelligenceConfig`
+     * resource is upgraded to `STANDARD` edition.
+     * 
+ * + * TRIAL = 5; + */ + public static final int TRIAL_VALUE = 5; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static EditionConfig valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static EditionConfig forNumber(int value) { + switch (value) { + case 0: + return EDITION_CONFIG_UNSPECIFIED; + case 1: + return INHERIT; + case 2: + return DISABLED; + case 3: + return STANDARD; + case 5: + return TRIAL; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public EditionConfig findValueByNumber(int number) { + return EditionConfig.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.storage.control.v2.IntelligenceConfig.getDescriptor().getEnumTypes().get(0); + } + + private static final EditionConfig[] VALUES = values(); + + 
public static EditionConfig valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private EditionConfig(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.storage.control.v2.IntelligenceConfig.EditionConfig) + } + + public interface FilterOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.IntelligenceConfig.Filter) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Bucket locations to include.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + * + * @return Whether the includedCloudStorageLocations field is set. + */ + boolean hasIncludedCloudStorageLocations(); + + /** + * + * + *
+     * Bucket locations to include.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + * + * @return The includedCloudStorageLocations. + */ + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + getIncludedCloudStorageLocations(); + + /** + * + * + *
+     * Bucket locations to include.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + */ + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocationsOrBuilder + getIncludedCloudStorageLocationsOrBuilder(); + + /** + * + * + *
+     * Bucket locations to exclude.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + * + * @return Whether the excludedCloudStorageLocations field is set. + */ + boolean hasExcludedCloudStorageLocations(); + + /** + * + * + *
+     * Bucket locations to exclude.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + * + * @return The excludedCloudStorageLocations. + */ + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + getExcludedCloudStorageLocations(); + + /** + * + * + *
+     * Bucket locations to exclude.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + */ + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocationsOrBuilder + getExcludedCloudStorageLocationsOrBuilder(); + + /** + * + * + *
+     * Buckets to include.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + * + * @return Whether the includedCloudStorageBuckets field is set. + */ + boolean hasIncludedCloudStorageBuckets(); + + /** + * + * + *
+     * Buckets to include.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + * + * @return The includedCloudStorageBuckets. + */ + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + getIncludedCloudStorageBuckets(); + + /** + * + * + *
+     * Buckets to include.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + */ + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsOrBuilder + getIncludedCloudStorageBucketsOrBuilder(); + + /** + * + * + *
+     * Buckets to exclude.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + * + * @return Whether the excludedCloudStorageBuckets field is set. + */ + boolean hasExcludedCloudStorageBuckets(); + + /** + * + * + *
+     * Buckets to exclude.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + * + * @return The excludedCloudStorageBuckets. + */ + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + getExcludedCloudStorageBuckets(); + + /** + * + * + *
+     * Buckets to exclude.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + */ + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsOrBuilder + getExcludedCloudStorageBucketsOrBuilder(); + + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocationsCase + getCloudStorageLocationsCase(); + + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsCase + getCloudStorageBucketsCase(); + } + + /** + * + * + *
+   * Filter over location and bucket using include or exclude semantics.
+   * Resources that match the include or exclude filter are exclusively included
+   * or excluded from the Storage Intelligence plan.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.IntelligenceConfig.Filter} + */ + public static final class Filter extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.IntelligenceConfig.Filter) + FilterOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Filter"); + } + + // Use Filter.newBuilder() to construct. + private Filter(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Filter() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.IntelligenceConfig.Filter.class, + com.google.storage.control.v2.IntelligenceConfig.Filter.Builder.class); + } + + public interface CloudStorageLocationsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * Optional. Bucket locations. Location can be any of the Cloud Storage
+       * regions specified in lower case format. For example, `us-east1`,
+       * `us-west1`.
+       * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the locations. + */ + java.util.List getLocationsList(); + + /** + * + * + *
+       * Optional. Bucket locations. Location can be any of the Cloud Storage
+       * regions specified in lower case format. For example, `us-east1`,
+       * `us-west1`.
+       * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of locations. + */ + int getLocationsCount(); + + /** + * + * + *
+       * Optional. Bucket locations. Location can be any of the Cloud Storage
+       * regions specified in lower case format. For example, `us-east1`,
+       * `us-west1`.
+       * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The locations at the given index. + */ + java.lang.String getLocations(int index); + + /** + * + * + *
+       * Optional. Bucket locations. Location can be any of the Cloud Storage
+       * regions specified in lower case format. For example, `us-east1`,
+       * `us-west1`.
+       * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the locations at the given index. + */ + com.google.protobuf.ByteString getLocationsBytes(int index); + } + + /** + * + * + *
+     * Collection of bucket locations.
+     * 
+ * + * Protobuf type {@code + * google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations} + */ + public static final class CloudStorageLocations extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + CloudStorageLocationsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CloudStorageLocations"); + } + + // Use CloudStorageLocations.newBuilder() to construct. + private CloudStorageLocations(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CloudStorageLocations() { + locations_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageLocations_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageLocations_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations.class, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .Builder.class); + } + + public static final int LOCATIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList locations_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+       * Optional. Bucket locations. Location can be any of the Cloud Storage
+       * regions specified in lower case format. For example, `us-east1`,
+       * `us-west1`.
+       * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the locations. + */ + public com.google.protobuf.ProtocolStringList getLocationsList() { + return locations_; + } + + /** + * + * + *
+       * Optional. Bucket locations. Location can be any of the Cloud Storage
+       * regions specified in lower case format. For example, `us-east1`,
+       * `us-west1`.
+       * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of locations. + */ + public int getLocationsCount() { + return locations_.size(); + } + + /** + * + * + *
+       * Optional. Bucket locations. Location can be any of the Cloud Storage
+       * regions specified in lower case format. For example, `us-east1`,
+       * `us-west1`.
+       * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The locations at the given index. + */ + public java.lang.String getLocations(int index) { + return locations_.get(index); + } + + /** + * + * + *
+       * Optional. Bucket locations. Location can be any of the Cloud Storage
+       * regions specified in lower case format. For example, `us-east1`,
+       * `us-west1`.
+       * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the locations at the given index. + */ + public com.google.protobuf.ByteString getLocationsBytes(int index) { + return locations_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < locations_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, locations_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < locations_.size(); i++) { + dataSize += computeStringSizeNoTag(locations_.getRaw(i)); + } + size += dataSize; + size += 1 * getLocationsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations)) { + return super.equals(obj); + } + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations other = + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) obj; + + if (!getLocationsList().equals(other.getLocationsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if 
(memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getLocationsCount() > 0) { + hash = (37 * hash) + LOCATIONS_FIELD_NUMBER; + hash = (53 * hash) + getLocationsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); 
+ } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return 
DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+       * Collection of bucket locations.
+       * 
+ * + * Protobuf type {@code + * google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocationsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageLocations_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageLocations_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .class, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .Builder.class); + } + + // Construct using + // com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + locations_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageLocations_descriptor; + } + + @java.lang.Override + public 
com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + getDefaultInstanceForType() { + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + build() { + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + buildPartial() { + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations result = + new com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations( + this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + locations_.makeImmutable(); + result.locations_ = locations_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) { + return mergeFrom( + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations other) { + if (other + == com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance()) return this; + if (!other.locations_.isEmpty()) { + if (locations_.isEmpty()) { + locations_ = other.locations_; + bitField0_ |= 0x00000001; + } else 
{ + ensureLocationsIsMutable(); + locations_.addAll(other.locations_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureLocationsIsMutable(); + locations_.add(s); + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList locations_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureLocationsIsMutable() { + if (!locations_.isModifiable()) { + locations_ = new com.google.protobuf.LazyStringArrayList(locations_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+         * Optional. Bucket locations. Location can be any of the Cloud Storage
+         * regions specified in lower case format. For example, `us-east1`,
+         * `us-west1`.
+         * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the locations. + */ + public com.google.protobuf.ProtocolStringList getLocationsList() { + locations_.makeImmutable(); + return locations_; + } + + /** + * + * + *
+         * Optional. Bucket locations. Location can be any of the Cloud Storage
+         * regions specified in lower case format. For example, `us-east1`,
+         * `us-west1`.
+         * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of locations. + */ + public int getLocationsCount() { + return locations_.size(); + } + + /** + * + * + *
+         * Optional. Bucket locations. Location can be any of the Cloud Storage
+         * regions specified in lower case format. For example, `us-east1`,
+         * `us-west1`.
+         * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The locations at the given index. + */ + public java.lang.String getLocations(int index) { + return locations_.get(index); + } + + /** + * + * + *
+         * Optional. Bucket locations. Location can be any of the Cloud Storage
+         * regions specified in lower case format. For example, `us-east1`,
+         * `us-west1`.
+         * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the locations at the given index. + */ + public com.google.protobuf.ByteString getLocationsBytes(int index) { + return locations_.getByteString(index); + } + + /** + * + * + *
+         * Optional. Bucket locations. Location can be any of the Cloud Storage
+         * regions specified in lower case format. For example, `us-east1`,
+         * `us-west1`.
+         * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The locations to set. + * @return This builder for chaining. + */ + public Builder setLocations(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureLocationsIsMutable(); + locations_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. Bucket locations. Location can be any of the Cloud Storage
+         * regions specified in lower case format. For example, `us-east1`,
+         * `us-west1`.
+         * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The locations to add. + * @return This builder for chaining. + */ + public Builder addLocations(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureLocationsIsMutable(); + locations_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. Bucket locations. Location can be any of the Cloud Storage
+         * regions specified in lower case format. For example, `us-east1`,
+         * `us-west1`.
+         * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The locations to add. + * @return This builder for chaining. + */ + public Builder addAllLocations(java.lang.Iterable values) { + ensureLocationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, locations_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. Bucket locations. Location can be any of the Cloud Storage
+         * regions specified in lower case format. For example, `us-east1`,
+         * `us-west1`.
+         * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLocations() { + locations_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. Bucket locations. Location can be any of the Cloud Storage
+         * regions specified in lower case format. For example, `us-east1`,
+         * `us-west1`.
+         * 
+ * + * repeated string locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the locations to add. + * @return This builder for chaining. + */ + public Builder addLocationsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureLocationsIsMutable(); + locations_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + private static final com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageLocations + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations(); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CloudStorageLocations parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface CloudStorageBucketsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * Optional. A regex pattern for matching bucket names. Regex should
+       * follow the syntax specified in
+       * [google/re2](https://github.com/google/re2). For example,
+       * `^sample_.*` matches all buckets of the form
+       * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+       * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+       * If you want to match a single bucket, say `gs://sample_bucket`,
+       * use `sample_bucket`.
+       * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the bucketIdRegexes. + */ + java.util.List getBucketIdRegexesList(); + + /** + * + * + *
+       * Optional. A regex pattern for matching bucket names. Regex should
+       * follow the syntax specified in
+       * [google/re2](https://github.com/google/re2). For example,
+       * `^sample_.*` matches all buckets of the form
+       * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+       * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+       * If you want to match a single bucket, say `gs://sample_bucket`,
+       * use `sample_bucket`.
+       * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of bucketIdRegexes. + */ + int getBucketIdRegexesCount(); + + /** + * + * + *
+       * Optional. A regex pattern for matching bucket names. Regex should
+       * follow the syntax specified in
+       * [google/re2](https://github.com/google/re2). For example,
+       * `^sample_.*` matches all buckets of the form
+       * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+       * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+       * If you want to match a single bucket, say `gs://sample_bucket`,
+       * use `sample_bucket`.
+       * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The bucketIdRegexes at the given index. + */ + java.lang.String getBucketIdRegexes(int index); + + /** + * + * + *
+       * Optional. A regex pattern for matching bucket names. Regex should
+       * follow the syntax specified in
+       * [google/re2](https://github.com/google/re2). For example,
+       * `^sample_.*` matches all buckets of the form
+       * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+       * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+       * If you want to match a single bucket, say `gs://sample_bucket`,
+       * use `sample_bucket`.
+       * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the bucketIdRegexes at the given index. + */ + com.google.protobuf.ByteString getBucketIdRegexesBytes(int index); + } + + /** + * + * + *
+     * Collection of buckets.
+     * 
+ * + * Protobuf type {@code google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets} + */ + public static final class CloudStorageBuckets extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + CloudStorageBucketsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CloudStorageBuckets"); + } + + // Use CloudStorageBuckets.newBuilder() to construct. + private CloudStorageBuckets(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CloudStorageBuckets() { + bucketIdRegexes_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageBuckets_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageBuckets_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets.class, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets.Builder + .class); + } + + public static final int BUCKET_ID_REGEXES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList bucketIdRegexes_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+       * Optional. A regex pattern for matching bucket names. Regex should
+       * follow the syntax specified in
+       * [google/re2](https://github.com/google/re2). For example,
+       * `^sample_.*` matches all buckets of the form
+       * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+       * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+       * If you want to match a single bucket, say `gs://sample_bucket`,
+       * use `sample_bucket`.
+       * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the bucketIdRegexes. + */ + public com.google.protobuf.ProtocolStringList getBucketIdRegexesList() { + return bucketIdRegexes_; + } + + /** + * + * + *
+       * Optional. A regex pattern for matching bucket names. Regex should
+       * follow the syntax specified in
+       * [google/re2](https://github.com/google/re2). For example,
+       * `^sample_.*` matches all buckets of the form
+       * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+       * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+       * If you want to match a single bucket, say `gs://sample_bucket`,
+       * use `sample_bucket`.
+       * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of bucketIdRegexes. + */ + public int getBucketIdRegexesCount() { + return bucketIdRegexes_.size(); + } + + /** + * + * + *
+       * Optional. A regex pattern for matching bucket names. Regex should
+       * follow the syntax specified in
+       * [google/re2](https://github.com/google/re2). For example,
+       * `^sample_.*` matches all buckets of the form
+       * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+       * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+       * If you want to match a single bucket, say `gs://sample_bucket`,
+       * use `sample_bucket`.
+       * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The bucketIdRegexes at the given index. + */ + public java.lang.String getBucketIdRegexes(int index) { + return bucketIdRegexes_.get(index); + } + + /** + * + * + *
+       * Optional. A regex pattern for matching bucket names. Regex should
+       * follow the syntax specified in
+       * [google/re2](https://github.com/google/re2). For example,
+       * `^sample_.*` matches all buckets of the form
+       * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+       * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+       * If you want to match a single bucket, say `gs://sample_bucket`,
+       * use `sample_bucket`.
+       * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the bucketIdRegexes at the given index. + */ + public com.google.protobuf.ByteString getBucketIdRegexesBytes(int index) { + return bucketIdRegexes_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < bucketIdRegexes_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, bucketIdRegexes_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < bucketIdRegexes_.size(); i++) { + dataSize += computeStringSizeNoTag(bucketIdRegexes_.getRaw(i)); + } + size += dataSize; + size += 1 * getBucketIdRegexesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets)) { + return super.equals(obj); + } + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets other = + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) obj; + + if (!getBucketIdRegexesList().equals(other.getBucketIdRegexesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getBucketIdRegexesCount() > 0) { + hash = (37 * hash) + BUCKET_ID_REGEXES_FIELD_NUMBER; + hash = (53 * hash) + getBucketIdRegexesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder 
newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+       * Collection of buckets.
+       * 
+ * + * Protobuf type {@code + * google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageBuckets_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageBuckets_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets.class, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .Builder.class); + } + + // Construct using + // com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + bucketIdRegexes_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageBuckets_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + 
getDefaultInstanceForType() { + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets build() { + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + buildPartial() { + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets result = + new com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + bucketIdRegexes_.makeImmutable(); + result.bucketIdRegexes_ = bucketIdRegexes_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) { + return mergeFrom( + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets other) { + if (other + == com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance()) return this; + if (!other.bucketIdRegexes_.isEmpty()) { + if (bucketIdRegexes_.isEmpty()) { + bucketIdRegexes_ = other.bucketIdRegexes_; + bitField0_ |= 0x00000001; + } else { + ensureBucketIdRegexesIsMutable(); + 
bucketIdRegexes_.addAll(other.bucketIdRegexes_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureBucketIdRegexesIsMutable(); + bucketIdRegexes_.add(s); + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList bucketIdRegexes_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureBucketIdRegexesIsMutable() { + if (!bucketIdRegexes_.isModifiable()) { + bucketIdRegexes_ = new com.google.protobuf.LazyStringArrayList(bucketIdRegexes_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+         * Optional. A regex pattern for matching bucket names. Regex should
+         * follow the syntax specified in
+         * [google/re2](https://github.com/google/re2). For example,
+         * `^sample_.*` matches all buckets of the form
+         * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+         * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+         * If you want to match a single bucket, say `gs://sample_bucket`,
+         * use `sample_bucket`.
+         * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the bucketIdRegexes. + */ + public com.google.protobuf.ProtocolStringList getBucketIdRegexesList() { + bucketIdRegexes_.makeImmutable(); + return bucketIdRegexes_; + } + + /** + * + * + *
+         * Optional. A regex pattern for matching bucket names. Regex should
+         * follow the syntax specified in
+         * [google/re2](https://github.com/google/re2). For example,
+         * `^sample_.*` matches all buckets of the form
+         * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+         * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+         * If you want to match a single bucket, say `gs://sample_bucket`,
+         * use `sample_bucket`.
+         * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of bucketIdRegexes. + */ + public int getBucketIdRegexesCount() { + return bucketIdRegexes_.size(); + } + + /** + * + * + *
+         * Optional. A regex pattern for matching bucket names. Regex should
+         * follow the syntax specified in
+         * [google/re2](https://github.com/google/re2). For example,
+         * `^sample_.*` matches all buckets of the form
+         * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+         * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+         * If you want to match a single bucket, say `gs://sample_bucket`,
+         * use `sample_bucket`.
+         * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The bucketIdRegexes at the given index. + */ + public java.lang.String getBucketIdRegexes(int index) { + return bucketIdRegexes_.get(index); + } + + /** + * + * + *
+         * Optional. A regex pattern for matching bucket names. Regex should
+         * follow the syntax specified in
+         * [google/re2](https://github.com/google/re2). For example,
+         * `^sample_.*` matches all buckets of the form
+         * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+         * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+         * If you want to match a single bucket, say `gs://sample_bucket`,
+         * use `sample_bucket`.
+         * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the bucketIdRegexes at the given index. + */ + public com.google.protobuf.ByteString getBucketIdRegexesBytes(int index) { + return bucketIdRegexes_.getByteString(index); + } + + /** + * + * + *
+         * Optional. A regex pattern for matching bucket names. Regex should
+         * follow the syntax specified in
+         * [google/re2](https://github.com/google/re2). For example,
+         * `^sample_.*` matches all buckets of the form
+         * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+         * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+         * If you want to match a single bucket, say `gs://sample_bucket`,
+         * use `sample_bucket`.
+         * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index to set the value at. + * @param value The bucketIdRegexes to set. + * @return This builder for chaining. + */ + public Builder setBucketIdRegexes(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketIdRegexesIsMutable(); + bucketIdRegexes_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. A regex pattern for matching bucket names. Regex should
+         * follow the syntax specified in
+         * [google/re2](https://github.com/google/re2). For example,
+         * `^sample_.*` matches all buckets of the form
+         * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+         * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+         * If you want to match a single bucket, say `gs://sample_bucket`,
+         * use `sample_bucket`.
+         * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The bucketIdRegexes to add. + * @return This builder for chaining. + */ + public Builder addBucketIdRegexes(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketIdRegexesIsMutable(); + bucketIdRegexes_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. A regex pattern for matching bucket names. Regex should
+         * follow the syntax specified in
+         * [google/re2](https://github.com/google/re2). For example,
+         * `^sample_.*` matches all buckets of the form
+         * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+         * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+         * If you want to match a single bucket, say `gs://sample_bucket`,
+         * use `sample_bucket`.
+         * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param values The bucketIdRegexes to add. + * @return This builder for chaining. + */ + public Builder addAllBucketIdRegexes(java.lang.Iterable values) { + ensureBucketIdRegexesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, bucketIdRegexes_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. A regex pattern for matching bucket names. Regex should
+         * follow the syntax specified in
+         * [google/re2](https://github.com/google/re2). For example,
+         * `^sample_.*` matches all buckets of the form
+         * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+         * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+         * If you want to match a single bucket, say `gs://sample_bucket`,
+         * use `sample_bucket`.
+         * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearBucketIdRegexes() { + bucketIdRegexes_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. A regex pattern for matching bucket names. Regex should
+         * follow the syntax specified in
+         * [google/re2](https://github.com/google/re2). For example,
+         * `^sample_.*` matches all buckets of the form
+         * `gs://sample_bucket-1`, `gs://sample_bucket-2`,
+         * `gs://sample_bucket-n` but not `gs://test_sample_bucket`.
+         * If you want to match a single bucket, say `gs://sample_bucket`,
+         * use `sample_bucket`.
+         * 
+ * + * repeated string bucket_id_regexes = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The bytes of the bucketIdRegexes to add. + * @return This builder for chaining. + */ + public Builder addBucketIdRegexesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureBucketIdRegexesIsMutable(); + bucketIdRegexes_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + private static final com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageBuckets + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets(); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CloudStorageBuckets parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int cloudStorageLocationsCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object cloudStorageLocations_; + + public enum CloudStorageLocationsCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + INCLUDED_CLOUD_STORAGE_LOCATIONS(1), + EXCLUDED_CLOUD_STORAGE_LOCATIONS(2), + CLOUDSTORAGELOCATIONS_NOT_SET(0); + private final int value; + + private CloudStorageLocationsCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static CloudStorageLocationsCase valueOf(int value) { + return forNumber(value); + } + + public static CloudStorageLocationsCase forNumber(int value) { + switch (value) { + case 1: + return INCLUDED_CLOUD_STORAGE_LOCATIONS; + case 2: + return EXCLUDED_CLOUD_STORAGE_LOCATIONS; + case 0: + return CLOUDSTORAGELOCATIONS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public CloudStorageLocationsCase getCloudStorageLocationsCase() { + return CloudStorageLocationsCase.forNumber(cloudStorageLocationsCase_); + } + + private int cloudStorageBucketsCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object cloudStorageBuckets_; + + public enum CloudStorageBucketsCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + INCLUDED_CLOUD_STORAGE_BUCKETS(3), + EXCLUDED_CLOUD_STORAGE_BUCKETS(4), + CLOUDSTORAGEBUCKETS_NOT_SET(0); + private final int value; + + private CloudStorageBucketsCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static CloudStorageBucketsCase valueOf(int value) { + return forNumber(value); + } + + public static CloudStorageBucketsCase forNumber(int value) { + switch (value) { + case 3: + return INCLUDED_CLOUD_STORAGE_BUCKETS; + case 4: + return EXCLUDED_CLOUD_STORAGE_BUCKETS; + case 0: + return CLOUDSTORAGEBUCKETS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public CloudStorageBucketsCase getCloudStorageBucketsCase() { + return CloudStorageBucketsCase.forNumber(cloudStorageBucketsCase_); + } + + public static final int INCLUDED_CLOUD_STORAGE_LOCATIONS_FIELD_NUMBER = 1; + + /** + * + * + *
+     * Bucket locations to include.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + * + * @return Whether the includedCloudStorageLocations field is set. + */ + @java.lang.Override + public boolean hasIncludedCloudStorageLocations() { + return cloudStorageLocationsCase_ == 1; + } + + /** + * + * + *
+     * Bucket locations to include.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + * + * @return The includedCloudStorageLocations. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + getIncludedCloudStorageLocations() { + if (cloudStorageLocationsCase_ == 1) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance(); + } + + /** + * + * + *
+     * Bucket locations to include.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocationsOrBuilder + getIncludedCloudStorageLocationsOrBuilder() { + if (cloudStorageLocationsCase_ == 1) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance(); + } + + public static final int EXCLUDED_CLOUD_STORAGE_LOCATIONS_FIELD_NUMBER = 2; + + /** + * + * + *
+     * Bucket locations to exclude.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + * + * @return Whether the excludedCloudStorageLocations field is set. + */ + @java.lang.Override + public boolean hasExcludedCloudStorageLocations() { + return cloudStorageLocationsCase_ == 2; + } + + /** + * + * + *
+     * Bucket locations to exclude.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + * + * @return The excludedCloudStorageLocations. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + getExcludedCloudStorageLocations() { + if (cloudStorageLocationsCase_ == 2) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance(); + } + + /** + * + * + *
+     * Bucket locations to exclude.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocationsOrBuilder + getExcludedCloudStorageLocationsOrBuilder() { + if (cloudStorageLocationsCase_ == 2) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance(); + } + + public static final int INCLUDED_CLOUD_STORAGE_BUCKETS_FIELD_NUMBER = 3; + + /** + * + * + *
+     * Buckets to include.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + * + * @return Whether the includedCloudStorageBuckets field is set. + */ + @java.lang.Override + public boolean hasIncludedCloudStorageBuckets() { + return cloudStorageBucketsCase_ == 3; + } + + /** + * + * + *
+     * Buckets to include.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + * + * @return The includedCloudStorageBuckets. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + getIncludedCloudStorageBuckets() { + if (cloudStorageBucketsCase_ == 3) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance(); + } + + /** + * + * + *
+     * Buckets to include.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsOrBuilder + getIncludedCloudStorageBucketsOrBuilder() { + if (cloudStorageBucketsCase_ == 3) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance(); + } + + public static final int EXCLUDED_CLOUD_STORAGE_BUCKETS_FIELD_NUMBER = 4; + + /** + * + * + *
+     * Buckets to exclude.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + * + * @return Whether the excludedCloudStorageBuckets field is set. + */ + @java.lang.Override + public boolean hasExcludedCloudStorageBuckets() { + return cloudStorageBucketsCase_ == 4; + } + + /** + * + * + *
+     * Buckets to exclude.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + * + * @return The excludedCloudStorageBuckets. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + getExcludedCloudStorageBuckets() { + if (cloudStorageBucketsCase_ == 4) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance(); + } + + /** + * + * + *
+     * Buckets to exclude.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsOrBuilder + getExcludedCloudStorageBucketsOrBuilder() { + if (cloudStorageBucketsCase_ == 4) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (cloudStorageLocationsCase_ == 1) { + output.writeMessage( + 1, + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_); + } + if (cloudStorageLocationsCase_ == 2) { + output.writeMessage( + 2, + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_); + } + if (cloudStorageBucketsCase_ == 3) { + output.writeMessage( + 3, + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_); + } + if (cloudStorageBucketsCase_ == 4) { + output.writeMessage( + 4, + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (cloudStorageLocationsCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, + 
(com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_); + } + if (cloudStorageLocationsCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_); + } + if (cloudStorageBucketsCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_); + } + if (cloudStorageBucketsCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.IntelligenceConfig.Filter)) { + return super.equals(obj); + } + com.google.storage.control.v2.IntelligenceConfig.Filter other = + (com.google.storage.control.v2.IntelligenceConfig.Filter) obj; + + if (!getCloudStorageLocationsCase().equals(other.getCloudStorageLocationsCase())) + return false; + switch (cloudStorageLocationsCase_) { + case 1: + if (!getIncludedCloudStorageLocations().equals(other.getIncludedCloudStorageLocations())) + return false; + break; + case 2: + if (!getExcludedCloudStorageLocations().equals(other.getExcludedCloudStorageLocations())) + return false; + break; + case 0: + default: + } + if (!getCloudStorageBucketsCase().equals(other.getCloudStorageBucketsCase())) return false; + switch (cloudStorageBucketsCase_) { + case 3: + if (!getIncludedCloudStorageBuckets().equals(other.getIncludedCloudStorageBuckets())) + return false; + break; + case 4: + if 
(!getExcludedCloudStorageBuckets().equals(other.getExcludedCloudStorageBuckets())) + return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (cloudStorageLocationsCase_) { + case 1: + hash = (37 * hash) + INCLUDED_CLOUD_STORAGE_LOCATIONS_FIELD_NUMBER; + hash = (53 * hash) + getIncludedCloudStorageLocations().hashCode(); + break; + case 2: + hash = (37 * hash) + EXCLUDED_CLOUD_STORAGE_LOCATIONS_FIELD_NUMBER; + hash = (53 * hash) + getExcludedCloudStorageLocations().hashCode(); + break; + case 0: + default: + } + switch (cloudStorageBucketsCase_) { + case 3: + hash = (37 * hash) + INCLUDED_CLOUD_STORAGE_BUCKETS_FIELD_NUMBER; + hash = (53 * hash) + getIncludedCloudStorageBuckets().hashCode(); + break; + case 4: + hash = (37 * hash) + EXCLUDED_CLOUD_STORAGE_BUCKETS_FIELD_NUMBER; + hash = (53 * hash) + getExcludedCloudStorageBuckets().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public 
static com.google.storage.control.v2.IntelligenceConfig.Filter parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter parseFrom( + 
com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.IntelligenceConfig.Filter prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Filter over location and bucket using include or exclude semantics.
+     * Resources that match the include or exclude filter are exclusively included
+     * or excluded from the Storage Intelligence plan.
+     * 
+ * + * Protobuf type {@code google.storage.control.v2.IntelligenceConfig.Filter} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.IntelligenceConfig.Filter) + com.google.storage.control.v2.IntelligenceConfig.FilterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.IntelligenceConfig.Filter.class, + com.google.storage.control.v2.IntelligenceConfig.Filter.Builder.class); + } + + // Construct using com.google.storage.control.v2.IntelligenceConfig.Filter.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (includedCloudStorageLocationsBuilder_ != null) { + includedCloudStorageLocationsBuilder_.clear(); + } + if (excludedCloudStorageLocationsBuilder_ != null) { + excludedCloudStorageLocationsBuilder_.clear(); + } + if (includedCloudStorageBucketsBuilder_ != null) { + includedCloudStorageBucketsBuilder_.clear(); + } + if (excludedCloudStorageBucketsBuilder_ != null) { + excludedCloudStorageBucketsBuilder_.clear(); + } + cloudStorageLocationsCase_ = 0; + cloudStorageLocations_ = null; + cloudStorageBucketsCase_ = 0; + cloudStorageBuckets_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor 
getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_Filter_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter getDefaultInstanceForType() { + return com.google.storage.control.v2.IntelligenceConfig.Filter.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter build() { + com.google.storage.control.v2.IntelligenceConfig.Filter result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter buildPartial() { + com.google.storage.control.v2.IntelligenceConfig.Filter result = + new com.google.storage.control.v2.IntelligenceConfig.Filter(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.IntelligenceConfig.Filter result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs( + com.google.storage.control.v2.IntelligenceConfig.Filter result) { + result.cloudStorageLocationsCase_ = cloudStorageLocationsCase_; + result.cloudStorageLocations_ = this.cloudStorageLocations_; + if (cloudStorageLocationsCase_ == 1 && includedCloudStorageLocationsBuilder_ != null) { + result.cloudStorageLocations_ = includedCloudStorageLocationsBuilder_.build(); + } + if (cloudStorageLocationsCase_ == 2 && excludedCloudStorageLocationsBuilder_ != null) { + result.cloudStorageLocations_ = excludedCloudStorageLocationsBuilder_.build(); + } + result.cloudStorageBucketsCase_ = cloudStorageBucketsCase_; + result.cloudStorageBuckets_ = this.cloudStorageBuckets_; + if (cloudStorageBucketsCase_ == 3 && includedCloudStorageBucketsBuilder_ != null) { + result.cloudStorageBuckets_ = 
includedCloudStorageBucketsBuilder_.build(); + } + if (cloudStorageBucketsCase_ == 4 && excludedCloudStorageBucketsBuilder_ != null) { + result.cloudStorageBuckets_ = excludedCloudStorageBucketsBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.IntelligenceConfig.Filter) { + return mergeFrom((com.google.storage.control.v2.IntelligenceConfig.Filter) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.IntelligenceConfig.Filter other) { + if (other == com.google.storage.control.v2.IntelligenceConfig.Filter.getDefaultInstance()) + return this; + switch (other.getCloudStorageLocationsCase()) { + case INCLUDED_CLOUD_STORAGE_LOCATIONS: + { + mergeIncludedCloudStorageLocations(other.getIncludedCloudStorageLocations()); + break; + } + case EXCLUDED_CLOUD_STORAGE_LOCATIONS: + { + mergeExcludedCloudStorageLocations(other.getExcludedCloudStorageLocations()); + break; + } + case CLOUDSTORAGELOCATIONS_NOT_SET: + { + break; + } + } + switch (other.getCloudStorageBucketsCase()) { + case INCLUDED_CLOUD_STORAGE_BUCKETS: + { + mergeIncludedCloudStorageBuckets(other.getIncludedCloudStorageBuckets()); + break; + } + case EXCLUDED_CLOUD_STORAGE_BUCKETS: + { + mergeExcludedCloudStorageBuckets(other.getExcludedCloudStorageBuckets()); + break; + } + case CLOUDSTORAGEBUCKETS_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag 
= input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetIncludedCloudStorageLocationsFieldBuilder().getBuilder(), + extensionRegistry); + cloudStorageLocationsCase_ = 1; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetExcludedCloudStorageLocationsFieldBuilder().getBuilder(), + extensionRegistry); + cloudStorageLocationsCase_ = 2; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetIncludedCloudStorageBucketsFieldBuilder().getBuilder(), + extensionRegistry); + cloudStorageBucketsCase_ = 3; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetExcludedCloudStorageBucketsFieldBuilder().getBuilder(), + extensionRegistry); + cloudStorageBucketsCase_ = 4; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int cloudStorageLocationsCase_ = 0; + private java.lang.Object cloudStorageLocations_; + + public CloudStorageLocationsCase getCloudStorageLocationsCase() { + return CloudStorageLocationsCase.forNumber(cloudStorageLocationsCase_); + } + + public Builder clearCloudStorageLocations() { + cloudStorageLocationsCase_ = 0; + cloudStorageLocations_ = null; + onChanged(); + return this; + } + + private int cloudStorageBucketsCase_ = 0; + private java.lang.Object cloudStorageBuckets_; + + public CloudStorageBucketsCase getCloudStorageBucketsCase() { + return CloudStorageBucketsCase.forNumber(cloudStorageBucketsCase_); + } + + public Builder clearCloudStorageBuckets() { + cloudStorageBucketsCase_ = 0; + cloudStorageBuckets_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private 
com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations.Builder, + com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageLocationsOrBuilder> + includedCloudStorageLocationsBuilder_; + + /** + * + * + *
+       * Bucket locations to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + * + * @return Whether the includedCloudStorageLocations field is set. + */ + @java.lang.Override + public boolean hasIncludedCloudStorageLocations() { + return cloudStorageLocationsCase_ == 1; + } + + /** + * + * + *
+       * Bucket locations to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + * + * @return The includedCloudStorageLocations. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + getIncludedCloudStorageLocations() { + if (includedCloudStorageLocationsBuilder_ == null) { + if (cloudStorageLocationsCase_ == 1) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance(); + } else { + if (cloudStorageLocationsCase_ == 1) { + return includedCloudStorageLocationsBuilder_.getMessage(); + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Bucket locations to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + */ + public Builder setIncludedCloudStorageLocations( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations value) { + if (includedCloudStorageLocationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cloudStorageLocations_ = value; + onChanged(); + } else { + includedCloudStorageLocationsBuilder_.setMessage(value); + } + cloudStorageLocationsCase_ = 1; + return this; + } + + /** + * + * + *
+       * Bucket locations to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + */ + public Builder setIncludedCloudStorageLocations( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations.Builder + builderForValue) { + if (includedCloudStorageLocationsBuilder_ == null) { + cloudStorageLocations_ = builderForValue.build(); + onChanged(); + } else { + includedCloudStorageLocationsBuilder_.setMessage(builderForValue.build()); + } + cloudStorageLocationsCase_ = 1; + return this; + } + + /** + * + * + *
+       * Bucket locations to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + */ + public Builder mergeIncludedCloudStorageLocations( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations value) { + if (includedCloudStorageLocationsBuilder_ == null) { + if (cloudStorageLocationsCase_ == 1 + && cloudStorageLocations_ + != com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance()) { + cloudStorageLocations_ = + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .newBuilder( + (com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageLocations) + cloudStorageLocations_) + .mergeFrom(value) + .buildPartial(); + } else { + cloudStorageLocations_ = value; + } + onChanged(); + } else { + if (cloudStorageLocationsCase_ == 1) { + includedCloudStorageLocationsBuilder_.mergeFrom(value); + } else { + includedCloudStorageLocationsBuilder_.setMessage(value); + } + } + cloudStorageLocationsCase_ = 1; + return this; + } + + /** + * + * + *
+       * Bucket locations to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + */ + public Builder clearIncludedCloudStorageLocations() { + if (includedCloudStorageLocationsBuilder_ == null) { + if (cloudStorageLocationsCase_ == 1) { + cloudStorageLocationsCase_ = 0; + cloudStorageLocations_ = null; + onChanged(); + } + } else { + if (cloudStorageLocationsCase_ == 1) { + cloudStorageLocationsCase_ = 0; + cloudStorageLocations_ = null; + } + includedCloudStorageLocationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Bucket locations to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + */ + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations.Builder + getIncludedCloudStorageLocationsBuilder() { + return internalGetIncludedCloudStorageLocationsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Bucket locations to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocationsOrBuilder + getIncludedCloudStorageLocationsOrBuilder() { + if ((cloudStorageLocationsCase_ == 1) && (includedCloudStorageLocationsBuilder_ != null)) { + return includedCloudStorageLocationsBuilder_.getMessageOrBuilder(); + } else { + if (cloudStorageLocationsCase_ == 1) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Bucket locations to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations included_cloud_storage_locations = 1; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations.Builder, + com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageLocationsOrBuilder> + internalGetIncludedCloudStorageLocationsFieldBuilder() { + if (includedCloudStorageLocationsBuilder_ == null) { + if (!(cloudStorageLocationsCase_ == 1)) { + cloudStorageLocations_ = + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance(); + } + includedCloudStorageLocationsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .Builder, + com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageLocationsOrBuilder>( + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_, + getParentForChildren(), + isClean()); + cloudStorageLocations_ = null; + } + cloudStorageLocationsCase_ = 1; + onChanged(); + return includedCloudStorageLocationsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations.Builder, + com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageLocationsOrBuilder> + excludedCloudStorageLocationsBuilder_; + + /** + * + * + *
+       * Bucket locations to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + * + * @return Whether the excludedCloudStorageLocations field is set. + */ + @java.lang.Override + public boolean hasExcludedCloudStorageLocations() { + return cloudStorageLocationsCase_ == 2; + } + + /** + * + * + *
+       * Bucket locations to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + * + * @return The excludedCloudStorageLocations. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + getExcludedCloudStorageLocations() { + if (excludedCloudStorageLocationsBuilder_ == null) { + if (cloudStorageLocationsCase_ == 2) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance(); + } else { + if (cloudStorageLocationsCase_ == 2) { + return excludedCloudStorageLocationsBuilder_.getMessage(); + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Bucket locations to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + */ + public Builder setExcludedCloudStorageLocations( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations value) { + if (excludedCloudStorageLocationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cloudStorageLocations_ = value; + onChanged(); + } else { + excludedCloudStorageLocationsBuilder_.setMessage(value); + } + cloudStorageLocationsCase_ = 2; + return this; + } + + /** + * + * + *
+       * Bucket locations to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + */ + public Builder setExcludedCloudStorageLocations( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations.Builder + builderForValue) { + if (excludedCloudStorageLocationsBuilder_ == null) { + cloudStorageLocations_ = builderForValue.build(); + onChanged(); + } else { + excludedCloudStorageLocationsBuilder_.setMessage(builderForValue.build()); + } + cloudStorageLocationsCase_ = 2; + return this; + } + + /** + * + * + *
+       * Bucket locations to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + */ + public Builder mergeExcludedCloudStorageLocations( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations value) { + if (excludedCloudStorageLocationsBuilder_ == null) { + if (cloudStorageLocationsCase_ == 2 + && cloudStorageLocations_ + != com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance()) { + cloudStorageLocations_ = + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .newBuilder( + (com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageLocations) + cloudStorageLocations_) + .mergeFrom(value) + .buildPartial(); + } else { + cloudStorageLocations_ = value; + } + onChanged(); + } else { + if (cloudStorageLocationsCase_ == 2) { + excludedCloudStorageLocationsBuilder_.mergeFrom(value); + } else { + excludedCloudStorageLocationsBuilder_.setMessage(value); + } + } + cloudStorageLocationsCase_ = 2; + return this; + } + + /** + * + * + *
+       * Bucket locations to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + */ + public Builder clearExcludedCloudStorageLocations() { + if (excludedCloudStorageLocationsBuilder_ == null) { + if (cloudStorageLocationsCase_ == 2) { + cloudStorageLocationsCase_ = 0; + cloudStorageLocations_ = null; + onChanged(); + } + } else { + if (cloudStorageLocationsCase_ == 2) { + cloudStorageLocationsCase_ = 0; + cloudStorageLocations_ = null; + } + excludedCloudStorageLocationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Bucket locations to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + */ + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations.Builder + getExcludedCloudStorageLocationsBuilder() { + return internalGetExcludedCloudStorageLocationsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Bucket locations to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocationsOrBuilder + getExcludedCloudStorageLocationsOrBuilder() { + if ((cloudStorageLocationsCase_ == 2) && (excludedCloudStorageLocationsBuilder_ != null)) { + return excludedCloudStorageLocationsBuilder_.getMessageOrBuilder(); + } else { + if (cloudStorageLocationsCase_ == 2) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Bucket locations to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations excluded_cloud_storage_locations = 2; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations.Builder, + com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageLocationsOrBuilder> + internalGetExcludedCloudStorageLocationsFieldBuilder() { + if (excludedCloudStorageLocationsBuilder_ == null) { + if (!(cloudStorageLocationsCase_ == 2)) { + cloudStorageLocations_ = + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .getDefaultInstance(); + } + excludedCloudStorageLocationsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations + .Builder, + com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageLocationsOrBuilder>( + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageLocations) + cloudStorageLocations_, + getParentForChildren(), + isClean()); + cloudStorageLocations_ = null; + } + cloudStorageLocationsCase_ = 2; + onChanged(); + return excludedCloudStorageLocationsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets.Builder, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsOrBuilder> + includedCloudStorageBucketsBuilder_; + + /** + * + * + *
+       * Buckets to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + * + * @return Whether the includedCloudStorageBuckets field is set. + */ + @java.lang.Override + public boolean hasIncludedCloudStorageBuckets() { + return cloudStorageBucketsCase_ == 3; + } + + /** + * + * + *
+       * Buckets to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + * + * @return The includedCloudStorageBuckets. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + getIncludedCloudStorageBuckets() { + if (includedCloudStorageBucketsBuilder_ == null) { + if (cloudStorageBucketsCase_ == 3) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance(); + } else { + if (cloudStorageBucketsCase_ == 3) { + return includedCloudStorageBucketsBuilder_.getMessage(); + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Buckets to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + */ + public Builder setIncludedCloudStorageBuckets( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets value) { + if (includedCloudStorageBucketsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cloudStorageBuckets_ = value; + onChanged(); + } else { + includedCloudStorageBucketsBuilder_.setMessage(value); + } + cloudStorageBucketsCase_ = 3; + return this; + } + + /** + * + * + *
+       * Buckets to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + */ + public Builder setIncludedCloudStorageBuckets( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets.Builder + builderForValue) { + if (includedCloudStorageBucketsBuilder_ == null) { + cloudStorageBuckets_ = builderForValue.build(); + onChanged(); + } else { + includedCloudStorageBucketsBuilder_.setMessage(builderForValue.build()); + } + cloudStorageBucketsCase_ = 3; + return this; + } + + /** + * + * + *
+       * Buckets to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + */ + public Builder mergeIncludedCloudStorageBuckets( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets value) { + if (includedCloudStorageBucketsBuilder_ == null) { + if (cloudStorageBucketsCase_ == 3 + && cloudStorageBuckets_ + != com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance()) { + cloudStorageBuckets_ = + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .newBuilder( + (com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageBuckets) + cloudStorageBuckets_) + .mergeFrom(value) + .buildPartial(); + } else { + cloudStorageBuckets_ = value; + } + onChanged(); + } else { + if (cloudStorageBucketsCase_ == 3) { + includedCloudStorageBucketsBuilder_.mergeFrom(value); + } else { + includedCloudStorageBucketsBuilder_.setMessage(value); + } + } + cloudStorageBucketsCase_ = 3; + return this; + } + + /** + * + * + *
+       * Buckets to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + */ + public Builder clearIncludedCloudStorageBuckets() { + if (includedCloudStorageBucketsBuilder_ == null) { + if (cloudStorageBucketsCase_ == 3) { + cloudStorageBucketsCase_ = 0; + cloudStorageBuckets_ = null; + onChanged(); + } + } else { + if (cloudStorageBucketsCase_ == 3) { + cloudStorageBucketsCase_ = 0; + cloudStorageBuckets_ = null; + } + includedCloudStorageBucketsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Buckets to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + */ + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets.Builder + getIncludedCloudStorageBucketsBuilder() { + return internalGetIncludedCloudStorageBucketsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Buckets to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsOrBuilder + getIncludedCloudStorageBucketsOrBuilder() { + if ((cloudStorageBucketsCase_ == 3) && (includedCloudStorageBucketsBuilder_ != null)) { + return includedCloudStorageBucketsBuilder_.getMessageOrBuilder(); + } else { + if (cloudStorageBucketsCase_ == 3) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Buckets to include.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets included_cloud_storage_buckets = 3; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets.Builder, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsOrBuilder> + internalGetIncludedCloudStorageBucketsFieldBuilder() { + if (includedCloudStorageBucketsBuilder_ == null) { + if (!(cloudStorageBucketsCase_ == 3)) { + cloudStorageBuckets_ = + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance(); + } + includedCloudStorageBucketsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .Builder, + com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageBucketsOrBuilder>( + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_, + getParentForChildren(), + isClean()); + cloudStorageBuckets_ = null; + } + cloudStorageBucketsCase_ = 3; + onChanged(); + return includedCloudStorageBucketsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets.Builder, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsOrBuilder> + excludedCloudStorageBucketsBuilder_; + + /** + * + * + *
+       * Buckets to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + * + * @return Whether the excludedCloudStorageBuckets field is set. + */ + @java.lang.Override + public boolean hasExcludedCloudStorageBuckets() { + return cloudStorageBucketsCase_ == 4; + } + + /** + * + * + *
+       * Buckets to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + * + * @return The excludedCloudStorageBuckets. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + getExcludedCloudStorageBuckets() { + if (excludedCloudStorageBucketsBuilder_ == null) { + if (cloudStorageBucketsCase_ == 4) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance(); + } else { + if (cloudStorageBucketsCase_ == 4) { + return excludedCloudStorageBucketsBuilder_.getMessage(); + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Buckets to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + */ + public Builder setExcludedCloudStorageBuckets( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets value) { + if (excludedCloudStorageBucketsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cloudStorageBuckets_ = value; + onChanged(); + } else { + excludedCloudStorageBucketsBuilder_.setMessage(value); + } + cloudStorageBucketsCase_ = 4; + return this; + } + + /** + * + * + *
+       * Buckets to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + */ + public Builder setExcludedCloudStorageBuckets( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets.Builder + builderForValue) { + if (excludedCloudStorageBucketsBuilder_ == null) { + cloudStorageBuckets_ = builderForValue.build(); + onChanged(); + } else { + excludedCloudStorageBucketsBuilder_.setMessage(builderForValue.build()); + } + cloudStorageBucketsCase_ = 4; + return this; + } + + /** + * + * + *
+       * Buckets to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + */ + public Builder mergeExcludedCloudStorageBuckets( + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets value) { + if (excludedCloudStorageBucketsBuilder_ == null) { + if (cloudStorageBucketsCase_ == 4 + && cloudStorageBuckets_ + != com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance()) { + cloudStorageBuckets_ = + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .newBuilder( + (com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageBuckets) + cloudStorageBuckets_) + .mergeFrom(value) + .buildPartial(); + } else { + cloudStorageBuckets_ = value; + } + onChanged(); + } else { + if (cloudStorageBucketsCase_ == 4) { + excludedCloudStorageBucketsBuilder_.mergeFrom(value); + } else { + excludedCloudStorageBucketsBuilder_.setMessage(value); + } + } + cloudStorageBucketsCase_ = 4; + return this; + } + + /** + * + * + *
+       * Buckets to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + */ + public Builder clearExcludedCloudStorageBuckets() { + if (excludedCloudStorageBucketsBuilder_ == null) { + if (cloudStorageBucketsCase_ == 4) { + cloudStorageBucketsCase_ = 0; + cloudStorageBuckets_ = null; + onChanged(); + } + } else { + if (cloudStorageBucketsCase_ == 4) { + cloudStorageBucketsCase_ = 0; + cloudStorageBuckets_ = null; + } + excludedCloudStorageBucketsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Buckets to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + */ + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets.Builder + getExcludedCloudStorageBucketsBuilder() { + return internalGetExcludedCloudStorageBucketsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Buckets to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsOrBuilder + getExcludedCloudStorageBucketsOrBuilder() { + if ((cloudStorageBucketsCase_ == 4) && (excludedCloudStorageBucketsBuilder_ != null)) { + return excludedCloudStorageBucketsBuilder_.getMessageOrBuilder(); + } else { + if (cloudStorageBucketsCase_ == 4) { + return (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_; + } + return com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Buckets to exclude.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets excluded_cloud_storage_buckets = 4; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets.Builder, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsOrBuilder> + internalGetExcludedCloudStorageBucketsFieldBuilder() { + if (excludedCloudStorageBucketsBuilder_ == null) { + if (!(cloudStorageBucketsCase_ == 4)) { + cloudStorageBuckets_ = + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .getDefaultInstance(); + } + excludedCloudStorageBucketsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets, + com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets + .Builder, + com.google.storage.control.v2.IntelligenceConfig.Filter + .CloudStorageBucketsOrBuilder>( + (com.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBuckets) + cloudStorageBuckets_, + getParentForChildren(), + isClean()); + cloudStorageBuckets_ = null; + } + cloudStorageBucketsCase_ = 4; + onChanged(); + return excludedCloudStorageBucketsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.IntelligenceConfig.Filter) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.IntelligenceConfig.Filter) + private static final com.google.storage.control.v2.IntelligenceConfig.Filter DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.IntelligenceConfig.Filter(); + } + + public static com.google.storage.control.v2.IntelligenceConfig.Filter getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + 
public Filter parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface EffectiveIntelligenceConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` edition that is applicable for the
+     * resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.EffectiveEdition effective_edition = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for effectiveEdition. + */ + int getEffectiveEditionValue(); + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` edition that is applicable for the
+     * resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.EffectiveEdition effective_edition = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The effectiveEdition. + */ + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.EffectiveEdition + getEffectiveEdition(); + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` resource that is applied for the
+     * target resource. Format:
+     * `{organizations|folders|projects}/{id}/locations/{location}/intelligenceConfig`
+     * 
+ * + * string intelligence_config = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The intelligenceConfig. + */ + java.lang.String getIntelligenceConfig(); + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` resource that is applied for the
+     * target resource. Format:
+     * `{organizations|folders|projects}/{id}/locations/{location}/intelligenceConfig`
+     * 
+ * + * string intelligence_config = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for intelligenceConfig. + */ + com.google.protobuf.ByteString getIntelligenceConfigBytes(); + } + + /** + * + * + *
+   * The effective `IntelligenceConfig` for the resource.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig} + */ + public static final class EffectiveIntelligenceConfig extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig) + EffectiveIntelligenceConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "EffectiveIntelligenceConfig"); + } + + // Use EffectiveIntelligenceConfig.newBuilder() to construct. + private EffectiveIntelligenceConfig(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private EffectiveIntelligenceConfig() { + effectiveEdition_ = 0; + intelligenceConfig_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_EffectiveIntelligenceConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_EffectiveIntelligenceConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.class, + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.Builder + .class); + } + + /** + * + * + *
+     * The effective edition of the `IntelligenceConfig` resource.
+     * 
+ * + * Protobuf enum {@code + * google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.EffectiveEdition} + */ + public enum EffectiveEdition implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+       * This is an unknown edition of the resource.
+       * 
+ * + * EFFECTIVE_EDITION_UNSPECIFIED = 0; + */ + EFFECTIVE_EDITION_UNSPECIFIED(0), + /** + * + * + *
+       * No edition.
+       * 
+ * + * NONE = 1; + */ + NONE(1), + /** + * + * + *
+       * The `IntelligenceConfig` resource is of STANDARD edition.
+       * 
+ * + * STANDARD = 2; + */ + STANDARD(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "EffectiveEdition"); + } + + /** + * + * + *
+       * This is an unknown edition of the resource.
+       * 
+ * + * EFFECTIVE_EDITION_UNSPECIFIED = 0; + */ + public static final int EFFECTIVE_EDITION_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+       * No edition.
+       * 
+ * + * NONE = 1; + */ + public static final int NONE_VALUE = 1; + + /** + * + * + *
+       * The `IntelligenceConfig` resource is of STANDARD edition.
+       * 
+ * + * STANDARD = 2; + */ + public static final int STANDARD_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static EffectiveEdition valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static EffectiveEdition forNumber(int value) { + switch (value) { + case 0: + return EFFECTIVE_EDITION_UNSPECIFIED; + case 1: + return NONE; + case 2: + return STANDARD; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public EffectiveEdition findValueByNumber(int number) { + return EffectiveEdition.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final EffectiveEdition[] VALUES = 
values(); + + public static EffectiveEdition valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private EffectiveEdition(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.EffectiveEdition) + } + + public static final int EFFECTIVE_EDITION_FIELD_NUMBER = 1; + private int effectiveEdition_ = 0; + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` edition that is applicable for the
+     * resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.EffectiveEdition effective_edition = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for effectiveEdition. + */ + @java.lang.Override + public int getEffectiveEditionValue() { + return effectiveEdition_; + } + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` edition that is applicable for the
+     * resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.EffectiveEdition effective_edition = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The effectiveEdition. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .EffectiveEdition + getEffectiveEdition() { + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.EffectiveEdition + result = + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .EffectiveEdition.forNumber(effectiveEdition_); + return result == null + ? com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .EffectiveEdition.UNRECOGNIZED + : result; + } + + public static final int INTELLIGENCE_CONFIG_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object intelligenceConfig_ = ""; + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` resource that is applied for the
+     * target resource. Format:
+     * `{organizations|folders|projects}/{id}/locations/{location}/intelligenceConfig`
+     * 
+ * + * string intelligence_config = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The intelligenceConfig. + */ + @java.lang.Override + public java.lang.String getIntelligenceConfig() { + java.lang.Object ref = intelligenceConfig_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + intelligenceConfig_ = s; + return s; + } + } + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` resource that is applied for the
+     * target resource. Format:
+     * `{organizations|folders|projects}/{id}/locations/{location}/intelligenceConfig`
+     * 
+ * + * string intelligence_config = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for intelligenceConfig. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIntelligenceConfigBytes() { + java.lang.Object ref = intelligenceConfig_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + intelligenceConfig_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (effectiveEdition_ + != com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .EffectiveEdition.EFFECTIVE_EDITION_UNSPECIFIED + .getNumber()) { + output.writeEnum(1, effectiveEdition_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(intelligenceConfig_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, intelligenceConfig_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (effectiveEdition_ + != com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .EffectiveEdition.EFFECTIVE_EDITION_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, effectiveEdition_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(intelligenceConfig_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, intelligenceConfig_); + } + size += getUnknownFields().getSerializedSize(); + 
memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig)) { + return super.equals(obj); + } + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig other = + (com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig) obj; + + if (effectiveEdition_ != other.effectiveEdition_) return false; + if (!getIntelligenceConfig().equals(other.getIntelligenceConfig())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + EFFECTIVE_EDITION_FIELD_NUMBER; + hash = (53 * hash) + effectiveEdition_; + hash = (37 * hash) + INTELLIGENCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getIntelligenceConfig().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } 
+ + public static com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * The effective `IntelligenceConfig` for the resource.
+     * 
+ * + * Protobuf type {@code + * google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig) + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_EffectiveIntelligenceConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_EffectiveIntelligenceConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.class, + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.Builder + .class); + } + + // Construct using + // com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + effectiveEdition_ = 0; + intelligenceConfig_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_EffectiveIntelligenceConfig_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + 
getDefaultInstanceForType() { + return com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig build() { + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + buildPartial() { + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig result = + new com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.effectiveEdition_ = effectiveEdition_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.intelligenceConfig_ = intelligenceConfig_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig) { + return mergeFrom( + (com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig other) { + if (other + == com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .getDefaultInstance()) return this; + if (other.effectiveEdition_ != 0) { + setEffectiveEditionValue(other.getEffectiveEditionValue()); + } + if 
(!other.getIntelligenceConfig().isEmpty()) { + intelligenceConfig_ = other.intelligenceConfig_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + effectiveEdition_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + intelligenceConfig_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int effectiveEdition_ = 0; + + /** + * + * + *
+       * Output only. The `IntelligenceConfig` edition that is applicable for the
+       * resource.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.EffectiveEdition effective_edition = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for effectiveEdition. + */ + @java.lang.Override + public int getEffectiveEditionValue() { + return effectiveEdition_; + } + + /** + * + * + *
+       * Output only. The `IntelligenceConfig` edition that is applicable for the
+       * resource.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.EffectiveEdition effective_edition = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for effectiveEdition to set. + * @return This builder for chaining. + */ + public Builder setEffectiveEditionValue(int value) { + effectiveEdition_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. The `IntelligenceConfig` edition that is applicable for the
+       * resource.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.EffectiveEdition effective_edition = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The effectiveEdition. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .EffectiveEdition + getEffectiveEdition() { + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .EffectiveEdition + result = + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .EffectiveEdition.forNumber(effectiveEdition_); + return result == null + ? com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .EffectiveEdition.UNRECOGNIZED + : result; + } + + /** + * + * + *
+       * Output only. The `IntelligenceConfig` edition that is applicable for the
+       * resource.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.EffectiveEdition effective_edition = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The effectiveEdition to set. + * @return This builder for chaining. + */ + public Builder setEffectiveEdition( + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .EffectiveEdition + value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + effectiveEdition_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. The `IntelligenceConfig` edition that is applicable for the
+       * resource.
+       * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.EffectiveEdition effective_edition = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearEffectiveEdition() { + bitField0_ = (bitField0_ & ~0x00000001); + effectiveEdition_ = 0; + onChanged(); + return this; + } + + private java.lang.Object intelligenceConfig_ = ""; + + /** + * + * + *
+       * Output only. The `IntelligenceConfig` resource that is applied for the
+       * target resource. Format:
+       * `{organizations|folders|projects}/{id}/locations/{location}/intelligenceConfig`
+       * 
+ * + * string intelligence_config = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The intelligenceConfig. + */ + public java.lang.String getIntelligenceConfig() { + java.lang.Object ref = intelligenceConfig_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + intelligenceConfig_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Output only. The `IntelligenceConfig` resource that is applied for the
+       * target resource. Format:
+       * `{organizations|folders|projects}/{id}/locations/{location}/intelligenceConfig`
+       * 
+ * + * string intelligence_config = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for intelligenceConfig. + */ + public com.google.protobuf.ByteString getIntelligenceConfigBytes() { + java.lang.Object ref = intelligenceConfig_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + intelligenceConfig_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Output only. The `IntelligenceConfig` resource that is applied for the
+       * target resource. Format:
+       * `{organizations|folders|projects}/{id}/locations/{location}/intelligenceConfig`
+       * 
+ * + * string intelligence_config = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The intelligenceConfig to set. + * @return This builder for chaining. + */ + public Builder setIntelligenceConfig(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + intelligenceConfig_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. The `IntelligenceConfig` resource that is applied for the
+       * target resource. Format:
+       * `{organizations|folders|projects}/{id}/locations/{location}/intelligenceConfig`
+       * 
+ * + * string intelligence_config = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearIntelligenceConfig() { + intelligenceConfig_ = getDefaultInstance().getIntelligenceConfig(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. The `IntelligenceConfig` resource that is applied for the
+       * target resource. Format:
+       * `{organizations|folders|projects}/{id}/locations/{location}/intelligenceConfig`
+       * 
+ * + * string intelligence_config = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for intelligenceConfig to set. + * @return This builder for chaining. + */ + public Builder setIntelligenceConfigBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + intelligenceConfig_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig) + private static final com.google.storage.control.v2.IntelligenceConfig + .EffectiveIntelligenceConfig + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig(); + } + + public static com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public EffectiveIntelligenceConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface TrialConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.IntelligenceConfig.TrialConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Output only. The time at which the trial expires.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + boolean hasExpireTime(); + + /** + * + * + *
+     * Output only. The time at which the trial expires.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + com.google.protobuf.Timestamp getExpireTime(); + + /** + * + * + *
+     * Output only. The time at which the trial expires.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder(); + } + + /** + * + * + *
+   * The trial configuration of the `IntelligenceConfig` resource.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.IntelligenceConfig.TrialConfig} + */ + public static final class TrialConfig extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.IntelligenceConfig.TrialConfig) + TrialConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "TrialConfig"); + } + + // Use TrialConfig.newBuilder() to construct. + private TrialConfig(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private TrialConfig() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_TrialConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_TrialConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.IntelligenceConfig.TrialConfig.class, + com.google.storage.control.v2.IntelligenceConfig.TrialConfig.Builder.class); + } + + private int bitField0_; + public static final int EXPIRE_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp expireTime_; + + /** + * + * + *
+     * Output only. The time at which the trial expires.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + @java.lang.Override + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Output only. The time at which the trial expires.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getExpireTime() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + /** + * + * + *
+     * Output only. The time at which the trial expires.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getExpireTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getExpireTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.IntelligenceConfig.TrialConfig)) { + return super.equals(obj); + } + com.google.storage.control.v2.IntelligenceConfig.TrialConfig other = + (com.google.storage.control.v2.IntelligenceConfig.TrialConfig) obj; + + if (hasExpireTime() != other.hasExpireTime()) return false; + if (hasExpireTime()) { + if (!getExpireTime().equals(other.getExpireTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + 
getDescriptor().hashCode(); + if (hasExpireTime()) { + hash = (37 * hash) + EXPIRE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getExpireTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.IntelligenceConfig.TrialConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig.TrialConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.TrialConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig.TrialConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.TrialConfig parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig.TrialConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.TrialConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.TrialConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.TrialConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.TrialConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig.TrialConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig.TrialConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.IntelligenceConfig.TrialConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return 
this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * The trial configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * Protobuf type {@code google.storage.control.v2.IntelligenceConfig.TrialConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.IntelligenceConfig.TrialConfig) + com.google.storage.control.v2.IntelligenceConfig.TrialConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_TrialConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_TrialConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.IntelligenceConfig.TrialConfig.class, + com.google.storage.control.v2.IntelligenceConfig.TrialConfig.Builder.class); + } + + // Construct using com.google.storage.control.v2.IntelligenceConfig.TrialConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetExpireTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + 
.internal_static_google_storage_control_v2_IntelligenceConfig_TrialConfig_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.TrialConfig + getDefaultInstanceForType() { + return com.google.storage.control.v2.IntelligenceConfig.TrialConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.TrialConfig build() { + com.google.storage.control.v2.IntelligenceConfig.TrialConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.TrialConfig buildPartial() { + com.google.storage.control.v2.IntelligenceConfig.TrialConfig result = + new com.google.storage.control.v2.IntelligenceConfig.TrialConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.control.v2.IntelligenceConfig.TrialConfig result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.expireTime_ = + expireTimeBuilder_ == null ? 
expireTime_ : expireTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.IntelligenceConfig.TrialConfig) { + return mergeFrom((com.google.storage.control.v2.IntelligenceConfig.TrialConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.IntelligenceConfig.TrialConfig other) { + if (other + == com.google.storage.control.v2.IntelligenceConfig.TrialConfig.getDefaultInstance()) + return this; + if (other.hasExpireTime()) { + mergeExpireTime(other.getExpireTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 26: + { + input.readMessage( + internalGetExpireTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp expireTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + 
com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + expireTimeBuilder_; + + /** + * + * + *
+       * Output only. The time at which the trial expires.
+       * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Output only. The time at which the trial expires.
+       * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + public com.google.protobuf.Timestamp getExpireTime() { + if (expireTimeBuilder_ == null) { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } else { + return expireTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Output only. The time at which the trial expires.
+       * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + expireTime_ = value; + } else { + expireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. The time at which the trial expires.
+       * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (expireTimeBuilder_ == null) { + expireTime_ = builderForValue.build(); + } else { + expireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. The time at which the trial expires.
+       * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && expireTime_ != null + && expireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getExpireTimeBuilder().mergeFrom(value); + } else { + expireTime_ = value; + } + } else { + expireTimeBuilder_.mergeFrom(value); + } + if (expireTime_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Output only. The time at which the trial expires.
+       * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearExpireTime() { + bitField0_ = (bitField0_ & ~0x00000001); + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. The time at which the trial expires.
+       * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetExpireTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Output only. The time at which the trial expires.
+       * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + if (expireTimeBuilder_ != null) { + return expireTimeBuilder_.getMessageOrBuilder(); + } else { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } + } + + /** + * + * + *
+       * Output only. The time at which the trial expires.
+       * 
+ * + * + * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetExpireTimeFieldBuilder() { + if (expireTimeBuilder_ == null) { + expireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getExpireTime(), getParentForChildren(), isClean()); + expireTime_ = null; + } + return expireTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.IntelligenceConfig.TrialConfig) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.IntelligenceConfig.TrialConfig) + private static final com.google.storage.control.v2.IntelligenceConfig.TrialConfig + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.IntelligenceConfig.TrialConfig(); + } + + public static com.google.storage.control.v2.IntelligenceConfig.TrialConfig + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TrialConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.TrialConfig + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Identifier. The name of the `IntelligenceConfig` resource associated with
+   * your organization, folder, or project.
+   *
+   * The name format varies based on the GCP resource hierarchy as follows:
+   *
+   * * For project:
+   * `projects/{project_number}/locations/global/intelligenceConfig`
+   * * For organization:
+   * `organizations/{org_id}/locations/global/intelligenceConfig`
+   * * For folder: `folders/{folder_id}/locations/global/intelligenceConfig`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Identifier. The name of the `IntelligenceConfig` resource associated with
+   * your organization, folder, or project.
+   *
+   * The name format varies based on the GCP resource hierarchy as follows:
+   *
+   * * For project:
+   * `projects/{project_number}/locations/global/intelligenceConfig`
+   * * For organization:
+   * `organizations/{org_id}/locations/global/intelligenceConfig`
+   * * For folder: `folders/{folder_id}/locations/global/intelligenceConfig`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EDITION_CONFIG_FIELD_NUMBER = 2; + private int editionConfig_ = 0; + + /** + * + * + *
+   * Optional. The edition configuration of the `IntelligenceConfig` resource.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EditionConfig edition_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for editionConfig. + */ + @java.lang.Override + public int getEditionConfigValue() { + return editionConfig_; + } + + /** + * + * + *
+   * Optional. The edition configuration of the `IntelligenceConfig` resource.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EditionConfig edition_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The editionConfig. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.EditionConfig getEditionConfig() { + com.google.storage.control.v2.IntelligenceConfig.EditionConfig result = + com.google.storage.control.v2.IntelligenceConfig.EditionConfig.forNumber(editionConfig_); + return result == null + ? com.google.storage.control.v2.IntelligenceConfig.EditionConfig.UNRECOGNIZED + : result; + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp updateTime_; + + /** + * + * + *
+   * Output only. The time at which the `IntelligenceConfig` resource was
+   * last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + @java.lang.Override + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Output only. The time at which the `IntelligenceConfig` resource was
+   * last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + /** + * + * + *
+   * Output only. The time at which the `IntelligenceConfig` resource was
+   * last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + public static final int FILTER_FIELD_NUMBER = 4; + private com.google.storage.control.v2.IntelligenceConfig.Filter filter_; + + /** + * + * + *
+   * Optional. Filter over location and bucket.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the filter field is set. + */ + @java.lang.Override + public boolean hasFilter() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. Filter over location and bucket.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The filter. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.Filter getFilter() { + return filter_ == null + ? com.google.storage.control.v2.IntelligenceConfig.Filter.getDefaultInstance() + : filter_; + } + + /** + * + * + *
+   * Optional. Filter over location and bucket.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.FilterOrBuilder getFilterOrBuilder() { + return filter_ == null + ? com.google.storage.control.v2.IntelligenceConfig.Filter.getDefaultInstance() + : filter_; + } + + public static final int EFFECTIVE_INTELLIGENCE_CONFIG_FIELD_NUMBER = 5; + private com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + effectiveIntelligenceConfig_; + + /** + * + * + *
+   * Output only. The `IntelligenceConfig` resource that is applicable to the
+   * resource.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the effectiveIntelligenceConfig field is set. + */ + @java.lang.Override + public boolean hasEffectiveIntelligenceConfig() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Output only. The `IntelligenceConfig` resource that is applicable to the
+   * resource.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The effectiveIntelligenceConfig. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + getEffectiveIntelligenceConfig() { + return effectiveIntelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .getDefaultInstance() + : effectiveIntelligenceConfig_; + } + + /** + * + * + *
+   * Output only. The `IntelligenceConfig` resource that is applicable to the
+   * resource.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfigOrBuilder + getEffectiveIntelligenceConfigOrBuilder() { + return effectiveIntelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .getDefaultInstance() + : effectiveIntelligenceConfig_; + } + + public static final int TRIAL_CONFIG_FIELD_NUMBER = 7; + private com.google.storage.control.v2.IntelligenceConfig.TrialConfig trialConfig_; + + /** + * + * + *
+   * The trial configuration of the `IntelligenceConfig` resource.
+   * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + * + * @return Whether the trialConfig field is set. + */ + @java.lang.Override + public boolean hasTrialConfig() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * The trial configuration of the `IntelligenceConfig` resource.
+   * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + * + * @return The trialConfig. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.TrialConfig getTrialConfig() { + return trialConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.TrialConfig.getDefaultInstance() + : trialConfig_; + } + + /** + * + * + *
+   * The trial configuration of the `IntelligenceConfig` resource.
+   * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.TrialConfigOrBuilder + getTrialConfigOrBuilder() { + return trialConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.TrialConfig.getDefaultInstance() + : trialConfig_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (editionConfig_ + != com.google.storage.control.v2.IntelligenceConfig.EditionConfig.EDITION_CONFIG_UNSPECIFIED + .getNumber()) { + output.writeEnum(2, editionConfig_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getUpdateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getFilter()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(5, getEffectiveIntelligenceConfig()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(7, getTrialConfig()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (editionConfig_ + != com.google.storage.control.v2.IntelligenceConfig.EditionConfig.EDITION_CONFIG_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, editionConfig_); + } + 
if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getUpdateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getFilter()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 5, getEffectiveIntelligenceConfig()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getTrialConfig()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.IntelligenceConfig)) { + return super.equals(obj); + } + com.google.storage.control.v2.IntelligenceConfig other = + (com.google.storage.control.v2.IntelligenceConfig) obj; + + if (!getName().equals(other.getName())) return false; + if (editionConfig_ != other.editionConfig_) return false; + if (hasUpdateTime() != other.hasUpdateTime()) return false; + if (hasUpdateTime()) { + if (!getUpdateTime().equals(other.getUpdateTime())) return false; + } + if (hasFilter() != other.hasFilter()) return false; + if (hasFilter()) { + if (!getFilter().equals(other.getFilter())) return false; + } + if (hasEffectiveIntelligenceConfig() != other.hasEffectiveIntelligenceConfig()) return false; + if (hasEffectiveIntelligenceConfig()) { + if (!getEffectiveIntelligenceConfig().equals(other.getEffectiveIntelligenceConfig())) + return false; + } + if (hasTrialConfig() != other.hasTrialConfig()) return false; + if (hasTrialConfig()) { + if (!getTrialConfig().equals(other.getTrialConfig())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return 
memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + EDITION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + editionConfig_; + if (hasUpdateTime()) { + hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getUpdateTime().hashCode(); + } + if (hasFilter()) { + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + } + if (hasEffectiveIntelligenceConfig()) { + hash = (37 * hash) + EFFECTIVE_INTELLIGENCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getEffectiveIntelligenceConfig().hashCode(); + } + if (hasTrialConfig()) { + hash = (37 * hash) + TRIAL_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getTrialConfig().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.IntelligenceConfig parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.IntelligenceConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.IntelligenceConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.IntelligenceConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.control.v2.IntelligenceConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * The `IntelligenceConfig` resource associated with your organization, folder,
+   * or project.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.IntelligenceConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.IntelligenceConfig) + com.google.storage.control.v2.IntelligenceConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.IntelligenceConfig.class, + com.google.storage.control.v2.IntelligenceConfig.Builder.class); + } + + // Construct using com.google.storage.control.v2.IntelligenceConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetUpdateTimeFieldBuilder(); + internalGetFilterFieldBuilder(); + internalGetEffectiveIntelligenceConfigFieldBuilder(); + internalGetTrialConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + editionConfig_ = 0; + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + filter_ = null; + if (filterBuilder_ != null) { + filterBuilder_.dispose(); + filterBuilder_ = null; + } + effectiveIntelligenceConfig_ = null; + if 
(effectiveIntelligenceConfigBuilder_ != null) { + effectiveIntelligenceConfigBuilder_.dispose(); + effectiveIntelligenceConfigBuilder_ = null; + } + trialConfig_ = null; + if (trialConfigBuilder_ != null) { + trialConfigBuilder_.dispose(); + trialConfigBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_IntelligenceConfig_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig getDefaultInstanceForType() { + return com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig build() { + com.google.storage.control.v2.IntelligenceConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig buildPartial() { + com.google.storage.control.v2.IntelligenceConfig result = + new com.google.storage.control.v2.IntelligenceConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.IntelligenceConfig result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.editionConfig_ = editionConfig_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.updateTime_ = updateTimeBuilder_ == null ? updateTime_ : updateTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.filter_ = filterBuilder_ == null ? 
filter_ : filterBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.effectiveIntelligenceConfig_ = + effectiveIntelligenceConfigBuilder_ == null + ? effectiveIntelligenceConfig_ + : effectiveIntelligenceConfigBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.trialConfig_ = + trialConfigBuilder_ == null ? trialConfig_ : trialConfigBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.IntelligenceConfig) { + return mergeFrom((com.google.storage.control.v2.IntelligenceConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.IntelligenceConfig other) { + if (other == com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.editionConfig_ != 0) { + setEditionConfigValue(other.getEditionConfigValue()); + } + if (other.hasUpdateTime()) { + mergeUpdateTime(other.getUpdateTime()); + } + if (other.hasFilter()) { + mergeFilter(other.getFilter()); + } + if (other.hasEffectiveIntelligenceConfig()) { + mergeEffectiveIntelligenceConfig(other.getEffectiveIntelligenceConfig()); + } + if (other.hasTrialConfig()) { + mergeTrialConfig(other.getTrialConfig()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new 
java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + editionConfig_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + input.readMessage( + internalGetUpdateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage(internalGetFilterFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetEffectiveIntelligenceConfigFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 58: + { + input.readMessage( + internalGetTrialConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 58 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Identifier. The name of the `IntelligenceConfig` resource associated with
+     * your organization, folder, or project.
+     *
+     * The name format varies based on the GCP resource hierarchy as follows:
+     *
+     * * For project:
+     * `projects/{project_number}/locations/global/intelligenceConfig`
+     * * For organization:
+     * `organizations/{org_id}/locations/global/intelligenceConfig`
+     * * For folder: `folders/{folder_id}/locations/global/intelligenceConfig`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Identifier. The name of the `IntelligenceConfig` resource associated with
+     * your organization, folder, or project.
+     *
+     * The name format varies based on the GCP resource hierarchy as follows:
+     *
+     * * For project:
+     * `projects/{project_number}/locations/global/intelligenceConfig`
+     * * For organization:
+     * `organizations/{org_id}/locations/global/intelligenceConfig`
+     * * For folder: `folders/{folder_id}/locations/global/intelligenceConfig`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Identifier. The name of the `IntelligenceConfig` resource associated with
+     * your organization, folder, or project.
+     *
+     * The name format varies based on the GCP resource hierarchy as follows:
+     *
+     * * For project:
+     * `projects/{project_number}/locations/global/intelligenceConfig`
+     * * For organization:
+     * `organizations/{org_id}/locations/global/intelligenceConfig`
+     * * For folder: `folders/{folder_id}/locations/global/intelligenceConfig`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Identifier. The name of the `IntelligenceConfig` resource associated with
+     * your organization, folder, or project.
+     *
+     * The name format varies based on the GCP resource hierarchy as follows:
+     *
+     * * For project:
+     * `projects/{project_number}/locations/global/intelligenceConfig`
+     * * For organization:
+     * `organizations/{org_id}/locations/global/intelligenceConfig`
+     * * For folder: `folders/{folder_id}/locations/global/intelligenceConfig`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Identifier. The name of the `IntelligenceConfig` resource associated with
+     * your organization, folder, or project.
+     *
+     * The name format varies based on the GCP resource hierarchy as follows:
+     *
+     * * For project:
+     * `projects/{project_number}/locations/global/intelligenceConfig`
+     * * For organization:
+     * `organizations/{org_id}/locations/global/intelligenceConfig`
+     * * For folder: `folders/{folder_id}/locations/global/intelligenceConfig`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int editionConfig_ = 0; + + /** + * + * + *
+     * Optional. The edition configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EditionConfig edition_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for editionConfig. + */ + @java.lang.Override + public int getEditionConfigValue() { + return editionConfig_; + } + + /** + * + * + *
+     * Optional. The edition configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EditionConfig edition_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for editionConfig to set. + * @return This builder for chaining. + */ + public Builder setEditionConfigValue(int value) { + editionConfig_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The edition configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EditionConfig edition_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The editionConfig. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig.EditionConfig getEditionConfig() { + com.google.storage.control.v2.IntelligenceConfig.EditionConfig result = + com.google.storage.control.v2.IntelligenceConfig.EditionConfig.forNumber(editionConfig_); + return result == null + ? com.google.storage.control.v2.IntelligenceConfig.EditionConfig.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Optional. The edition configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EditionConfig edition_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The editionConfig to set. + * @return This builder for chaining. + */ + public Builder setEditionConfig( + com.google.storage.control.v2.IntelligenceConfig.EditionConfig value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + editionConfig_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The edition configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EditionConfig edition_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearEditionConfig() { + bitField0_ = (bitField0_ & ~0x00000002); + editionConfig_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp updateTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateTimeBuilder_; + + /** + * + * + *
+     * Output only. The time at which the `IntelligenceConfig` resource is last
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Output only. The time at which the `IntelligenceConfig` resource is last
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The time at which the `IntelligenceConfig` resource is last
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + } else { + updateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time at which the `IntelligenceConfig` resource is last
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time at which the `IntelligenceConfig` resource is last
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && updateTime_ != null + && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateTimeBuilder().mergeFrom(value); + } else { + updateTime_ = value; + } + } else { + updateTimeBuilder_.mergeFrom(value); + } + if (updateTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The time at which the `IntelligenceConfig` resource is last
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateTime() { + bitField0_ = (bitField0_ & ~0x00000004); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time at which the `IntelligenceConfig` resource is last
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetUpdateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The time at which the `IntelligenceConfig` resource is last
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } + } + + /** + * + * + *
+     * Output only. The time at which the `IntelligenceConfig` resource is last
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetUpdateTimeFieldBuilder() { + if (updateTimeBuilder_ == null) { + updateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpdateTime(), getParentForChildren(), isClean()); + updateTime_ = null; + } + return updateTimeBuilder_; + } + + private com.google.storage.control.v2.IntelligenceConfig.Filter filter_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter, + com.google.storage.control.v2.IntelligenceConfig.Filter.Builder, + com.google.storage.control.v2.IntelligenceConfig.FilterOrBuilder> + filterBuilder_; + + /** + * + * + *
+     * Optional. Filter over location and bucket.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the filter field is set. + */ + public boolean hasFilter() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Optional. Filter over location and bucket.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The filter. + */ + public com.google.storage.control.v2.IntelligenceConfig.Filter getFilter() { + if (filterBuilder_ == null) { + return filter_ == null + ? com.google.storage.control.v2.IntelligenceConfig.Filter.getDefaultInstance() + : filter_; + } else { + return filterBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Filter over location and bucket.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFilter(com.google.storage.control.v2.IntelligenceConfig.Filter value) { + if (filterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + } else { + filterBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter over location and bucket.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFilter( + com.google.storage.control.v2.IntelligenceConfig.Filter.Builder builderForValue) { + if (filterBuilder_ == null) { + filter_ = builderForValue.build(); + } else { + filterBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter over location and bucket.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeFilter(com.google.storage.control.v2.IntelligenceConfig.Filter value) { + if (filterBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && filter_ != null + && filter_ + != com.google.storage.control.v2.IntelligenceConfig.Filter.getDefaultInstance()) { + getFilterBuilder().mergeFrom(value); + } else { + filter_ = value; + } + } else { + filterBuilder_.mergeFrom(value); + } + if (filter_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Filter over location and bucket.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearFilter() { + bitField0_ = (bitField0_ & ~0x00000008); + filter_ = null; + if (filterBuilder_ != null) { + filterBuilder_.dispose(); + filterBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter over location and bucket.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.control.v2.IntelligenceConfig.Filter.Builder getFilterBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetFilterFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Filter over location and bucket.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.control.v2.IntelligenceConfig.FilterOrBuilder getFilterOrBuilder() { + if (filterBuilder_ != null) { + return filterBuilder_.getMessageOrBuilder(); + } else { + return filter_ == null + ? com.google.storage.control.v2.IntelligenceConfig.Filter.getDefaultInstance() + : filter_; + } + } + + /** + * + * + *
+     * Optional. Filter over location and bucket.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter, + com.google.storage.control.v2.IntelligenceConfig.Filter.Builder, + com.google.storage.control.v2.IntelligenceConfig.FilterOrBuilder> + internalGetFilterFieldBuilder() { + if (filterBuilder_ == null) { + filterBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.Filter, + com.google.storage.control.v2.IntelligenceConfig.Filter.Builder, + com.google.storage.control.v2.IntelligenceConfig.FilterOrBuilder>( + getFilter(), getParentForChildren(), isClean()); + filter_ = null; + } + return filterBuilder_; + } + + private com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + effectiveIntelligenceConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig, + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.Builder, + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfigOrBuilder> + effectiveIntelligenceConfigBuilder_; + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` resource that is applicable for the
+     * resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the effectiveIntelligenceConfig field is set. + */ + public boolean hasEffectiveIntelligenceConfig() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` resource that is applicable for the
+     * resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The effectiveIntelligenceConfig. + */ + public com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + getEffectiveIntelligenceConfig() { + if (effectiveIntelligenceConfigBuilder_ == null) { + return effectiveIntelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .getDefaultInstance() + : effectiveIntelligenceConfig_; + } else { + return effectiveIntelligenceConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` resource that is applicable for the
+     * resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEffectiveIntelligenceConfig( + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig value) { + if (effectiveIntelligenceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + effectiveIntelligenceConfig_ = value; + } else { + effectiveIntelligenceConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` resource that is applicable for the
+     * resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEffectiveIntelligenceConfig( + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.Builder + builderForValue) { + if (effectiveIntelligenceConfigBuilder_ == null) { + effectiveIntelligenceConfig_ = builderForValue.build(); + } else { + effectiveIntelligenceConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` resource that is applicable for the
+     * resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeEffectiveIntelligenceConfig( + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig value) { + if (effectiveIntelligenceConfigBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && effectiveIntelligenceConfig_ != null + && effectiveIntelligenceConfig_ + != com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .getDefaultInstance()) { + getEffectiveIntelligenceConfigBuilder().mergeFrom(value); + } else { + effectiveIntelligenceConfig_ = value; + } + } else { + effectiveIntelligenceConfigBuilder_.mergeFrom(value); + } + if (effectiveIntelligenceConfig_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` resource that is applicable for the
+     * resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearEffectiveIntelligenceConfig() { + bitField0_ = (bitField0_ & ~0x00000010); + effectiveIntelligenceConfig_ = null; + if (effectiveIntelligenceConfigBuilder_ != null) { + effectiveIntelligenceConfigBuilder_.dispose(); + effectiveIntelligenceConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` resource that is applicable for the
+     * resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.Builder + getEffectiveIntelligenceConfigBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetEffectiveIntelligenceConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` resource that is applicable for the
+     * resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfigOrBuilder + getEffectiveIntelligenceConfigOrBuilder() { + if (effectiveIntelligenceConfigBuilder_ != null) { + return effectiveIntelligenceConfigBuilder_.getMessageOrBuilder(); + } else { + return effectiveIntelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .getDefaultInstance() + : effectiveIntelligenceConfig_; + } + } + + /** + * + * + *
+     * Output only. The `IntelligenceConfig` resource that is applicable for the
+     * resource.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig, + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig.Builder, + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfigOrBuilder> + internalGetEffectiveIntelligenceConfigFieldBuilder() { + if (effectiveIntelligenceConfigBuilder_ == null) { + effectiveIntelligenceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig, + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + .Builder, + com.google.storage.control.v2.IntelligenceConfig + .EffectiveIntelligenceConfigOrBuilder>( + getEffectiveIntelligenceConfig(), getParentForChildren(), isClean()); + effectiveIntelligenceConfig_ = null; + } + return effectiveIntelligenceConfigBuilder_; + } + + private com.google.storage.control.v2.IntelligenceConfig.TrialConfig trialConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.TrialConfig, + com.google.storage.control.v2.IntelligenceConfig.TrialConfig.Builder, + com.google.storage.control.v2.IntelligenceConfig.TrialConfigOrBuilder> + trialConfigBuilder_; + + /** + * + * + *
+     * The trial configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + * + * @return Whether the trialConfig field is set. + */ + public boolean hasTrialConfig() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+     * The trial configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + * + * @return The trialConfig. + */ + public com.google.storage.control.v2.IntelligenceConfig.TrialConfig getTrialConfig() { + if (trialConfigBuilder_ == null) { + return trialConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.TrialConfig.getDefaultInstance() + : trialConfig_; + } else { + return trialConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * The trial configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + */ + public Builder setTrialConfig( + com.google.storage.control.v2.IntelligenceConfig.TrialConfig value) { + if (trialConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + trialConfig_ = value; + } else { + trialConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * The trial configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + */ + public Builder setTrialConfig( + com.google.storage.control.v2.IntelligenceConfig.TrialConfig.Builder builderForValue) { + if (trialConfigBuilder_ == null) { + trialConfig_ = builderForValue.build(); + } else { + trialConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * The trial configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + */ + public Builder mergeTrialConfig( + com.google.storage.control.v2.IntelligenceConfig.TrialConfig value) { + if (trialConfigBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && trialConfig_ != null + && trialConfig_ + != com.google.storage.control.v2.IntelligenceConfig.TrialConfig + .getDefaultInstance()) { + getTrialConfigBuilder().mergeFrom(value); + } else { + trialConfig_ = value; + } + } else { + trialConfigBuilder_.mergeFrom(value); + } + if (trialConfig_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * The trial configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + */ + public Builder clearTrialConfig() { + bitField0_ = (bitField0_ & ~0x00000020); + trialConfig_ = null; + if (trialConfigBuilder_ != null) { + trialConfigBuilder_.dispose(); + trialConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * The trial configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + */ + public com.google.storage.control.v2.IntelligenceConfig.TrialConfig.Builder + getTrialConfigBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetTrialConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The trial configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + */ + public com.google.storage.control.v2.IntelligenceConfig.TrialConfigOrBuilder + getTrialConfigOrBuilder() { + if (trialConfigBuilder_ != null) { + return trialConfigBuilder_.getMessageOrBuilder(); + } else { + return trialConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.TrialConfig.getDefaultInstance() + : trialConfig_; + } + } + + /** + * + * + *
+     * The trial configuration of the `IntelligenceConfig` resource.
+     * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.TrialConfig, + com.google.storage.control.v2.IntelligenceConfig.TrialConfig.Builder, + com.google.storage.control.v2.IntelligenceConfig.TrialConfigOrBuilder> + internalGetTrialConfigFieldBuilder() { + if (trialConfigBuilder_ == null) { + trialConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig.TrialConfig, + com.google.storage.control.v2.IntelligenceConfig.TrialConfig.Builder, + com.google.storage.control.v2.IntelligenceConfig.TrialConfigOrBuilder>( + getTrialConfig(), getParentForChildren(), isClean()); + trialConfig_ = null; + } + return trialConfigBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.IntelligenceConfig) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.IntelligenceConfig) + private static final com.google.storage.control.v2.IntelligenceConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.IntelligenceConfig(); + } + + public static com.google.storage.control.v2.IntelligenceConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public IntelligenceConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/IntelligenceConfigName.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/IntelligenceConfigName.java new file mode 100644 index 000000000000..24c1cef52d66 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/IntelligenceConfigName.java @@ -0,0 +1,357 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.control.v2; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.pathtemplate.ValidationException; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class IntelligenceConfigName implements ResourceName { + private static final PathTemplate FOLDER_LOCATION = + PathTemplate.createWithoutUrlEncoding( + "folders/{folder}/locations/{location}/intelligenceConfig"); + private static final PathTemplate ORG_LOCATION = + PathTemplate.createWithoutUrlEncoding( + "organizations/{org}/locations/{location}/intelligenceConfig"); + private static final PathTemplate PROJECT_LOCATION = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/locations/{location}/intelligenceConfig"); + private volatile Map fieldValuesMap; + private PathTemplate pathTemplate; + private String fixedValue; + private final String folder; + private final String location; + private final String org; + private final String project; + + @Deprecated + protected IntelligenceConfigName() { + folder = null; + location = null; + org = null; + project = null; + } + + private IntelligenceConfigName(Builder builder) { + folder = Preconditions.checkNotNull(builder.getFolder()); + location = Preconditions.checkNotNull(builder.getLocation()); + org = null; + project = null; + pathTemplate = FOLDER_LOCATION; + } + + private IntelligenceConfigName(OrgLocationBuilder builder) { + org = Preconditions.checkNotNull(builder.getOrg()); + location = Preconditions.checkNotNull(builder.getLocation()); + folder = null; + project = null; + pathTemplate = ORG_LOCATION; + } + + private IntelligenceConfigName(ProjectLocationBuilder builder) { + 
project = Preconditions.checkNotNull(builder.getProject()); + location = Preconditions.checkNotNull(builder.getLocation()); + folder = null; + org = null; + pathTemplate = PROJECT_LOCATION; + } + + public String getFolder() { + return folder; + } + + public String getLocation() { + return location; + } + + public String getOrg() { + return org; + } + + public String getProject() { + return project; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public static Builder newFolderLocationBuilder() { + return new Builder(); + } + + public static OrgLocationBuilder newOrgLocationBuilder() { + return new OrgLocationBuilder(); + } + + public static ProjectLocationBuilder newProjectLocationBuilder() { + return new ProjectLocationBuilder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static IntelligenceConfigName of(String folder, String location) { + return newBuilder().setFolder(folder).setLocation(location).build(); + } + + public static IntelligenceConfigName ofFolderLocationName(String folder, String location) { + return newBuilder().setFolder(folder).setLocation(location).build(); + } + + public static IntelligenceConfigName ofOrgLocationName(String org, String location) { + return newOrgLocationBuilder().setOrg(org).setLocation(location).build(); + } + + public static IntelligenceConfigName ofProjectLocationName(String project, String location) { + return newProjectLocationBuilder().setProject(project).setLocation(location).build(); + } + + public static String format(String folder, String location) { + return newBuilder().setFolder(folder).setLocation(location).build().toString(); + } + + public static String formatFolderLocationName(String folder, String location) { + return newBuilder().setFolder(folder).setLocation(location).build().toString(); + } + + public static String formatOrgLocationName(String org, String location) { + return 
newOrgLocationBuilder().setOrg(org).setLocation(location).build().toString(); + } + + public static String formatProjectLocationName(String project, String location) { + return newProjectLocationBuilder().setProject(project).setLocation(location).build().toString(); + } + + public static IntelligenceConfigName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + if (FOLDER_LOCATION.matches(formattedString)) { + Map matchMap = FOLDER_LOCATION.match(formattedString); + return ofFolderLocationName(matchMap.get("folder"), matchMap.get("location")); + } else if (ORG_LOCATION.matches(formattedString)) { + Map matchMap = ORG_LOCATION.match(formattedString); + return ofOrgLocationName(matchMap.get("org"), matchMap.get("location")); + } else if (PROJECT_LOCATION.matches(formattedString)) { + Map matchMap = PROJECT_LOCATION.match(formattedString); + return ofProjectLocationName(matchMap.get("project"), matchMap.get("location")); + } + throw new ValidationException( + "IntelligenceConfigName.parse: formattedString not in valid format"); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (IntelligenceConfigName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return FOLDER_LOCATION.matches(formattedString) + || ORG_LOCATION.matches(formattedString) + || PROJECT_LOCATION.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (folder != null) { + 
fieldMapBuilder.put("folder", folder); + } + if (location != null) { + fieldMapBuilder.put("location", location); + } + if (org != null) { + fieldMapBuilder.put("org", org); + } + if (project != null) { + fieldMapBuilder.put("project", project); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return fixedValue != null ? fixedValue : pathTemplate.instantiate(getFieldValuesMap()); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + IntelligenceConfigName that = ((IntelligenceConfigName) o); + return Objects.equals(this.folder, that.folder) + && Objects.equals(this.location, that.location) + && Objects.equals(this.org, that.org) + && Objects.equals(this.project, that.project); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(fixedValue); + h *= 1000003; + h ^= Objects.hashCode(folder); + h *= 1000003; + h ^= Objects.hashCode(location); + h *= 1000003; + h ^= Objects.hashCode(org); + h *= 1000003; + h ^= Objects.hashCode(project); + return h; + } + + /** Builder for folders/{folder}/locations/{location}/intelligenceConfig. 
*/ + public static class Builder { + private String folder; + private String location; + + protected Builder() {} + + public String getFolder() { + return folder; + } + + public String getLocation() { + return location; + } + + public Builder setFolder(String folder) { + this.folder = folder; + return this; + } + + public Builder setLocation(String location) { + this.location = location; + return this; + } + + private Builder(IntelligenceConfigName intelligenceConfigName) { + Preconditions.checkArgument( + Objects.equals(intelligenceConfigName.pathTemplate, FOLDER_LOCATION), + "toBuilder is only supported when IntelligenceConfigName has the pattern of" + + " folders/{folder}/locations/{location}/intelligenceConfig"); + this.folder = intelligenceConfigName.folder; + this.location = intelligenceConfigName.location; + } + + public IntelligenceConfigName build() { + return new IntelligenceConfigName(this); + } + } + + /** Builder for organizations/{org}/locations/{location}/intelligenceConfig. */ + public static class OrgLocationBuilder { + private String org; + private String location; + + protected OrgLocationBuilder() {} + + public String getOrg() { + return org; + } + + public String getLocation() { + return location; + } + + public OrgLocationBuilder setOrg(String org) { + this.org = org; + return this; + } + + public OrgLocationBuilder setLocation(String location) { + this.location = location; + return this; + } + + public IntelligenceConfigName build() { + return new IntelligenceConfigName(this); + } + } + + /** Builder for projects/{project}/locations/{location}/intelligenceConfig. 
*/ + public static class ProjectLocationBuilder { + private String project; + private String location; + + protected ProjectLocationBuilder() {} + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public ProjectLocationBuilder setProject(String project) { + this.project = project; + return this; + } + + public ProjectLocationBuilder setLocation(String location) { + this.location = location; + return this; + } + + public IntelligenceConfigName build() { + return new IntelligenceConfigName(this); + } + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/IntelligenceConfigOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/IntelligenceConfigOrBuilder.java new file mode 100644 index 000000000000..495184916351 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/IntelligenceConfigOrBuilder.java @@ -0,0 +1,273 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface IntelligenceConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.IntelligenceConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Identifier. The name of the `IntelligenceConfig` resource associated with
+   * your organization, folder, or project.
+   *
+   * The name format varies based on the GCP resource hierarchy as follows:
+   *
+   * * For project:
+   * `projects/{project_number}/locations/global/intelligenceConfig`
+   * * For organization:
+   * `organizations/{org_id}/locations/global/intelligenceConfig`
+   * * For folder: `folders/{folder_id}/locations/global/intelligenceConfig`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Identifier. The name of the `IntelligenceConfig` resource associated with
+   * your organization, folder, or project.
+   *
+   * The name format varies based on the GCP resource hierarchy as follows:
+   *
+   * * For project:
+   * `projects/{project_number}/locations/global/intelligenceConfig`
+   * * For organization:
+   * `organizations/{org_id}/locations/global/intelligenceConfig`
+   * * For folder: `folders/{folder_id}/locations/global/intelligenceConfig`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Optional. The edition configuration of the `IntelligenceConfig` resource.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EditionConfig edition_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for editionConfig. + */ + int getEditionConfigValue(); + + /** + * + * + *
+   * Optional. The edition configuration of the `IntelligenceConfig` resource.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EditionConfig edition_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The editionConfig. + */ + com.google.storage.control.v2.IntelligenceConfig.EditionConfig getEditionConfig(); + + /** + * + * + *
+   * Output only. The time at which the `IntelligenceConfig` resource is last
+   * updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + boolean hasUpdateTime(); + + /** + * + * + *
+   * Output only. The time at which the `IntelligenceConfig` resource is last
+   * updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + com.google.protobuf.Timestamp getUpdateTime(); + + /** + * + * + *
+   * Output only. The time at which the `IntelligenceConfig` resource is last
+   * updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); + + /** + * + * + *
+   * Optional. Filter over location and bucket.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the filter field is set. + */ + boolean hasFilter(); + + /** + * + * + *
+   * Optional. Filter over location and bucket.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The filter. + */ + com.google.storage.control.v2.IntelligenceConfig.Filter getFilter(); + + /** + * + * + *
+   * Optional. Filter over location and bucket.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.Filter filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.control.v2.IntelligenceConfig.FilterOrBuilder getFilterOrBuilder(); + + /** + * + * + *
+   * Output only. The `IntelligenceConfig` resource that is applicable for the
+   * resource.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the effectiveIntelligenceConfig field is set. + */ + boolean hasEffectiveIntelligenceConfig(); + + /** + * + * + *
+   * Output only. The `IntelligenceConfig` resource that is applicable for the
+   * resource.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The effectiveIntelligenceConfig. + */ + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig + getEffectiveIntelligenceConfig(); + + /** + * + * + *
+   * Output only. The `IntelligenceConfig` resource that is applicable for the
+   * resource.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfig effective_intelligence_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.storage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfigOrBuilder + getEffectiveIntelligenceConfigOrBuilder(); + + /** + * + * + *
+   * The trial configuration of the `IntelligenceConfig` resource.
+   * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + * + * @return Whether the trialConfig field is set. + */ + boolean hasTrialConfig(); + + /** + * + * + *
+   * The trial configuration of the `IntelligenceConfig` resource.
+   * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + * + * @return The trialConfig. + */ + com.google.storage.control.v2.IntelligenceConfig.TrialConfig getTrialConfig(); + + /** + * + * + *
+   * The trial configuration of the `IntelligenceConfig` resource.
+   * 
+ * + * .google.storage.control.v2.IntelligenceConfig.TrialConfig trial_config = 7; + */ + com.google.storage.control.v2.IntelligenceConfig.TrialConfigOrBuilder getTrialConfigOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListAnywhereCachesRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListAnywhereCachesRequest.java new file mode 100644 index 000000000000..8c4dfa173984 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListAnywhereCachesRequest.java @@ -0,0 +1,1118 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for ListAnywhereCaches.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.ListAnywhereCachesRequest} + */ +@com.google.protobuf.Generated +public final class ListAnywhereCachesRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.ListAnywhereCachesRequest) + ListAnywhereCachesRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListAnywhereCachesRequest"); + } + + // Use ListAnywhereCachesRequest.newBuilder() to construct. + private ListAnywhereCachesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListAnywhereCachesRequest() { + parent_ = ""; + pageToken_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListAnywhereCachesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListAnywhereCachesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ListAnywhereCachesRequest.class, + com.google.storage.control.v2.ListAnywhereCachesRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. The bucket to which this cache belongs.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The bucket to which this cache belongs.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
+   * Maximum number of caches to return in a single response.
+   * The service will use this parameter or 1,000 items, whichever is smaller.
+   * 
+ * + * int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
+   * A previously-returned page token representing part of the larger set of
+   * results to view.
+   * 
+ * + * string page_token = 3; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * A previously-returned page token representing part of the larger set of
+   * results to view.
+   * 
+ * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.ListAnywhereCachesRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.ListAnywhereCachesRequest other = + (com.google.storage.control.v2.ListAnywhereCachesRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.ListAnywhereCachesRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListAnywhereCachesRequest parseFrom( + 
java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListAnywhereCachesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListAnywhereCachesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListAnywhereCachesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListAnywhereCachesRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListAnywhereCachesRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListAnywhereCachesRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ListAnywhereCachesRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + 
} + + public static com.google.storage.control.v2.ListAnywhereCachesRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ListAnywhereCachesRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListAnywhereCachesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.ListAnywhereCachesRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for ListAnywhereCaches.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.ListAnywhereCachesRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.ListAnywhereCachesRequest) + com.google.storage.control.v2.ListAnywhereCachesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListAnywhereCachesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListAnywhereCachesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ListAnywhereCachesRequest.class, + com.google.storage.control.v2.ListAnywhereCachesRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.ListAnywhereCachesRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListAnywhereCachesRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.ListAnywhereCachesRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.ListAnywhereCachesRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.ListAnywhereCachesRequest build() { + 
com.google.storage.control.v2.ListAnywhereCachesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.ListAnywhereCachesRequest buildPartial() { + com.google.storage.control.v2.ListAnywhereCachesRequest result = + new com.google.storage.control.v2.ListAnywhereCachesRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.ListAnywhereCachesRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.requestId_ = requestId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.ListAnywhereCachesRequest) { + return mergeFrom((com.google.storage.control.v2.ListAnywhereCachesRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.ListAnywhereCachesRequest other) { + if (other == com.google.storage.control.v2.ListAnywhereCachesRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + 
onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. The bucket to which this cache belongs.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The bucket to which this cache belongs.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The bucket to which this cache belongs.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The bucket to which this cache belongs.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The bucket to which this cache belongs.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
+     * Maximum number of caches to return in a single response.
+     * The service will use this parameter or 1,000 items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
+     * Maximum number of caches to return in a single response.
+     * The service will use this parameter or 1,000 items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Maximum number of caches to return in a single response.
+     * The service will use this parameter or 1,000 items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
+     * A previously-returned page token representing part of the larger set of
+     * results to view.
+     * 
+ * + * string page_token = 3; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * A previously-returned page token representing part of the larger set of
+     * results to view.
+     * 
+ * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * A previously-returned page token representing part of the larger set of
+     * results to view.
+     * 
+ * + * string page_token = 3; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * A previously-returned page token representing part of the larger set of
+     * results to view.
+     * 
+ * + * string page_token = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * A previously-returned page token representing part of the larger set of
+     * results to view.
+     * 
+ * + * string page_token = 3; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.ListAnywhereCachesRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.ListAnywhereCachesRequest) + private static final com.google.storage.control.v2.ListAnywhereCachesRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.ListAnywhereCachesRequest(); + } + + public static com.google.storage.control.v2.ListAnywhereCachesRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListAnywhereCachesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.ListAnywhereCachesRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListAnywhereCachesRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListAnywhereCachesRequestOrBuilder.java new file mode 100644 index 000000000000..7fd2c82f330f --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListAnywhereCachesRequestOrBuilder.java @@ -0,0 +1,132 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface ListAnywhereCachesRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.ListAnywhereCachesRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The bucket to which this cache belongs.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. The bucket to which this cache belongs.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Maximum number of caches to return in a single response.
+   * The service will use this parameter or 1,000 items, whichever is smaller.
+   * 
+ * + * int32 page_size = 2; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
+   * A previously-returned page token representing part of the larger set of
+   * results to view.
+   * 
+ * + * string page_token = 3; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
+   * A previously-returned page token representing part of the larger set of
+   * results to view.
+   * 
+ * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListAnywhereCachesResponse.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListAnywhereCachesResponse.java new file mode 100644 index 000000000000..846d35e16f0b --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListAnywhereCachesResponse.java @@ -0,0 +1,1126 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Response message for ListAnywhereCaches.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.ListAnywhereCachesResponse} + */ +@com.google.protobuf.Generated +public final class ListAnywhereCachesResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.ListAnywhereCachesResponse) + ListAnywhereCachesResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListAnywhereCachesResponse"); + } + + // Use ListAnywhereCachesResponse.newBuilder() to construct. + private ListAnywhereCachesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListAnywhereCachesResponse() { + anywhereCaches_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListAnywhereCachesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListAnywhereCachesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ListAnywhereCachesResponse.class, + com.google.storage.control.v2.ListAnywhereCachesResponse.Builder.class); + } + + public static final int ANYWHERE_CACHES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List anywhereCaches_; + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + @java.lang.Override + public java.util.List getAnywhereCachesList() { + return anywhereCaches_; + } + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + @java.lang.Override + public java.util.List + getAnywhereCachesOrBuilderList() { + return anywhereCaches_; + } + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + @java.lang.Override + public int getAnywhereCachesCount() { + return anywhereCaches_.size(); + } + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + @java.lang.Override + public com.google.storage.control.v2.AnywhereCache getAnywhereCaches(int index) { + return anywhereCaches_.get(index); + } + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + @java.lang.Override + public com.google.storage.control.v2.AnywhereCacheOrBuilder getAnywhereCachesOrBuilder( + int index) { + return anywhereCaches_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+   * A token, which can be sent as `page_token` to retrieve the next page.
+   * If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * A token, which can be sent as `page_token` to retrieve the next page.
+   * If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < anywhereCaches_.size(); i++) { + output.writeMessage(1, anywhereCaches_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < anywhereCaches_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, anywhereCaches_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.ListAnywhereCachesResponse)) { + return super.equals(obj); + } + 
com.google.storage.control.v2.ListAnywhereCachesResponse other = + (com.google.storage.control.v2.ListAnywhereCachesResponse) obj; + + if (!getAnywhereCachesList().equals(other.getAnywhereCachesList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getAnywhereCachesCount() > 0) { + hash = (37 * hash) + ANYWHERE_CACHES_FIELD_NUMBER; + hash = (53 * hash) + getAnywhereCachesList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.ListAnywhereCachesResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListAnywhereCachesResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListAnywhereCachesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListAnywhereCachesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.storage.control.v2.ListAnywhereCachesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListAnywhereCachesResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListAnywhereCachesResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListAnywhereCachesResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ListAnywhereCachesResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListAnywhereCachesResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ListAnywhereCachesResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListAnywhereCachesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.ListAnywhereCachesResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for ListAnywhereCaches.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.ListAnywhereCachesResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.ListAnywhereCachesResponse) + com.google.storage.control.v2.ListAnywhereCachesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListAnywhereCachesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListAnywhereCachesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ListAnywhereCachesResponse.class, + com.google.storage.control.v2.ListAnywhereCachesResponse.Builder.class); + } + + // Construct using com.google.storage.control.v2.ListAnywhereCachesResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (anywhereCachesBuilder_ == null) { + anywhereCaches_ = java.util.Collections.emptyList(); + } else { + anywhereCaches_ = null; + anywhereCachesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListAnywhereCachesResponse_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.ListAnywhereCachesResponse getDefaultInstanceForType() { + return 
com.google.storage.control.v2.ListAnywhereCachesResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.ListAnywhereCachesResponse build() { + com.google.storage.control.v2.ListAnywhereCachesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.ListAnywhereCachesResponse buildPartial() { + com.google.storage.control.v2.ListAnywhereCachesResponse result = + new com.google.storage.control.v2.ListAnywhereCachesResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.storage.control.v2.ListAnywhereCachesResponse result) { + if (anywhereCachesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + anywhereCaches_ = java.util.Collections.unmodifiableList(anywhereCaches_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.anywhereCaches_ = anywhereCaches_; + } else { + result.anywhereCaches_ = anywhereCachesBuilder_.build(); + } + } + + private void buildPartial0(com.google.storage.control.v2.ListAnywhereCachesResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.ListAnywhereCachesResponse) { + return mergeFrom((com.google.storage.control.v2.ListAnywhereCachesResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.ListAnywhereCachesResponse other) { + if (other == com.google.storage.control.v2.ListAnywhereCachesResponse.getDefaultInstance()) + return this; + if (anywhereCachesBuilder_ == null) { + if 
(!other.anywhereCaches_.isEmpty()) { + if (anywhereCaches_.isEmpty()) { + anywhereCaches_ = other.anywhereCaches_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureAnywhereCachesIsMutable(); + anywhereCaches_.addAll(other.anywhereCaches_); + } + onChanged(); + } + } else { + if (!other.anywhereCaches_.isEmpty()) { + if (anywhereCachesBuilder_.isEmpty()) { + anywhereCachesBuilder_.dispose(); + anywhereCachesBuilder_ = null; + anywhereCaches_ = other.anywhereCaches_; + bitField0_ = (bitField0_ & ~0x00000001); + anywhereCachesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetAnywhereCachesFieldBuilder() + : null; + } else { + anywhereCachesBuilder_.addAllMessages(other.anywhereCaches_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.storage.control.v2.AnywhereCache m = + input.readMessage( + com.google.storage.control.v2.AnywhereCache.parser(), extensionRegistry); + if (anywhereCachesBuilder_ == null) { + ensureAnywhereCachesIsMutable(); + anywhereCaches_.add(m); + } else { + anywhereCachesBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, 
tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List anywhereCaches_ = + java.util.Collections.emptyList(); + + private void ensureAnywhereCachesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + anywhereCaches_ = + new java.util.ArrayList(anywhereCaches_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.control.v2.AnywhereCache, + com.google.storage.control.v2.AnywhereCache.Builder, + com.google.storage.control.v2.AnywhereCacheOrBuilder> + anywhereCachesBuilder_; + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public java.util.List getAnywhereCachesList() { + if (anywhereCachesBuilder_ == null) { + return java.util.Collections.unmodifiableList(anywhereCaches_); + } else { + return anywhereCachesBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public int getAnywhereCachesCount() { + if (anywhereCachesBuilder_ == null) { + return anywhereCaches_.size(); + } else { + return anywhereCachesBuilder_.getCount(); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public com.google.storage.control.v2.AnywhereCache getAnywhereCaches(int index) { + if (anywhereCachesBuilder_ == null) { + return anywhereCaches_.get(index); + } else { + return anywhereCachesBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public Builder setAnywhereCaches(int index, com.google.storage.control.v2.AnywhereCache value) { + if (anywhereCachesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAnywhereCachesIsMutable(); + anywhereCaches_.set(index, value); + onChanged(); + } else { + anywhereCachesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public Builder setAnywhereCaches( + int index, com.google.storage.control.v2.AnywhereCache.Builder builderForValue) { + if (anywhereCachesBuilder_ == null) { + ensureAnywhereCachesIsMutable(); + anywhereCaches_.set(index, builderForValue.build()); + onChanged(); + } else { + anywhereCachesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public Builder addAnywhereCaches(com.google.storage.control.v2.AnywhereCache value) { + if (anywhereCachesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAnywhereCachesIsMutable(); + anywhereCaches_.add(value); + onChanged(); + } else { + anywhereCachesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public Builder addAnywhereCaches(int index, com.google.storage.control.v2.AnywhereCache value) { + if (anywhereCachesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAnywhereCachesIsMutable(); + anywhereCaches_.add(index, value); + onChanged(); + } else { + anywhereCachesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public Builder addAnywhereCaches( + com.google.storage.control.v2.AnywhereCache.Builder builderForValue) { + if (anywhereCachesBuilder_ == null) { + ensureAnywhereCachesIsMutable(); + anywhereCaches_.add(builderForValue.build()); + onChanged(); + } else { + anywhereCachesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public Builder addAnywhereCaches( + int index, com.google.storage.control.v2.AnywhereCache.Builder builderForValue) { + if (anywhereCachesBuilder_ == null) { + ensureAnywhereCachesIsMutable(); + anywhereCaches_.add(index, builderForValue.build()); + onChanged(); + } else { + anywhereCachesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public Builder addAllAnywhereCaches( + java.lang.Iterable values) { + if (anywhereCachesBuilder_ == null) { + ensureAnywhereCachesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, anywhereCaches_); + onChanged(); + } else { + anywhereCachesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public Builder clearAnywhereCaches() { + if (anywhereCachesBuilder_ == null) { + anywhereCaches_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + anywhereCachesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public Builder removeAnywhereCaches(int index) { + if (anywhereCachesBuilder_ == null) { + ensureAnywhereCachesIsMutable(); + anywhereCaches_.remove(index); + onChanged(); + } else { + anywhereCachesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public com.google.storage.control.v2.AnywhereCache.Builder getAnywhereCachesBuilder(int index) { + return internalGetAnywhereCachesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public com.google.storage.control.v2.AnywhereCacheOrBuilder getAnywhereCachesOrBuilder( + int index) { + if (anywhereCachesBuilder_ == null) { + return anywhereCaches_.get(index); + } else { + return anywhereCachesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public java.util.List + getAnywhereCachesOrBuilderList() { + if (anywhereCachesBuilder_ != null) { + return anywhereCachesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(anywhereCaches_); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public com.google.storage.control.v2.AnywhereCache.Builder addAnywhereCachesBuilder() { + return internalGetAnywhereCachesFieldBuilder() + .addBuilder(com.google.storage.control.v2.AnywhereCache.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public com.google.storage.control.v2.AnywhereCache.Builder addAnywhereCachesBuilder(int index) { + return internalGetAnywhereCachesFieldBuilder() + .addBuilder(index, com.google.storage.control.v2.AnywhereCache.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + public java.util.List + getAnywhereCachesBuilderList() { + return internalGetAnywhereCachesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.control.v2.AnywhereCache, + com.google.storage.control.v2.AnywhereCache.Builder, + com.google.storage.control.v2.AnywhereCacheOrBuilder> + internalGetAnywhereCachesFieldBuilder() { + if (anywhereCachesBuilder_ == null) { + anywhereCachesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.control.v2.AnywhereCache, + com.google.storage.control.v2.AnywhereCache.Builder, + com.google.storage.control.v2.AnywhereCacheOrBuilder>( + anywhereCaches_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + anywhereCaches_ = null; + } + return anywhereCachesBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+     * A token, which can be sent as `page_token` to retrieve the next page.
+     * If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * A token, which can be sent as `page_token` to retrieve the next page.
+     * If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * A token, which can be sent as `page_token` to retrieve the next page.
+     * If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * A token, which can be sent as `page_token` to retrieve the next page.
+     * If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * A token, which can be sent as `page_token` to retrieve the next page.
+     * If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.ListAnywhereCachesResponse) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.ListAnywhereCachesResponse) + private static final com.google.storage.control.v2.ListAnywhereCachesResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.ListAnywhereCachesResponse(); + } + + public static com.google.storage.control.v2.ListAnywhereCachesResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListAnywhereCachesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.ListAnywhereCachesResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListAnywhereCachesResponseOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListAnywhereCachesResponseOrBuilder.java new file mode 100644 index 000000000000..e1267c530256 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListAnywhereCachesResponseOrBuilder.java @@ -0,0 +1,112 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface ListAnywhereCachesResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.ListAnywhereCachesResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + java.util.List getAnywhereCachesList(); + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + com.google.storage.control.v2.AnywhereCache getAnywhereCaches(int index); + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + int getAnywhereCachesCount(); + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + java.util.List + getAnywhereCachesOrBuilderList(); + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.control.v2.AnywhereCache anywhere_caches = 1; + */ + com.google.storage.control.v2.AnywhereCacheOrBuilder getAnywhereCachesOrBuilder(int index); + + /** + * + * + *
+   * A token, which can be sent as `page_token` to retrieve the next page.
+   * If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
+   * A token, which can be sent as `page_token` to retrieve the next page.
+   * If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListFoldersRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListFoldersRequest.java new file mode 100644 index 000000000000..e9aec24043f1 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListFoldersRequest.java @@ -0,0 +1,1945 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for ListFolders. This operation is only applicable to a
+ * hierarchical namespace enabled bucket.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.ListFoldersRequest} + */ +@com.google.protobuf.Generated +public final class ListFoldersRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.ListFoldersRequest) + ListFoldersRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListFoldersRequest"); + } + + // Use ListFoldersRequest.newBuilder() to construct. + private ListFoldersRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListFoldersRequest() { + parent_ = ""; + pageToken_ = ""; + prefix_ = ""; + delimiter_ = ""; + lexicographicStart_ = ""; + lexicographicEnd_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListFoldersRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListFoldersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ListFoldersRequest.class, + com.google.storage.control.v2.ListFoldersRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Name of the bucket in which to look for folders. The bucket must
+   * be a hierarchical namespace enabled bucket.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the bucket in which to look for folders. The bucket must
+   * be a hierarchical namespace enabled bucket.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
+   * Optional. Maximum number of folders to return in a single response. The
+   * service will use this parameter or 1,000 items, whichever is smaller.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PREFIX_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object prefix_ = ""; + + /** + * + * + *
+   * Optional. Filter results to folders whose names begin with this prefix.
+   * If set, the value must either be an empty string or end with a '/'.
+   * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The prefix. + */ + @java.lang.Override + public java.lang.String getPrefix() { + java.lang.Object ref = prefix_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + prefix_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Filter results to folders whose names begin with this prefix.
+   * If set, the value must either be an empty string or end with a '/'.
+   * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for prefix. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPrefixBytes() { + java.lang.Object ref = prefix_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + prefix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DELIMITER_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private volatile java.lang.Object delimiter_ = ""; + + /** + * + * + *
+   * Optional. If set, returns results in a directory-like mode. The results
+   * will only include folders that either exactly match the above prefix, or
+   * are one level below the prefix. The only supported value is '/'.
+   * 
+ * + * string delimiter = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The delimiter. + */ + @java.lang.Override + public java.lang.String getDelimiter() { + java.lang.Object ref = delimiter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + delimiter_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. If set, returns results in a directory-like mode. The results
+   * will only include folders that either exactly match the above prefix, or
+   * are one level below the prefix. The only supported value is '/'.
+   * 
+ * + * string delimiter = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for delimiter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDelimiterBytes() { + java.lang.Object ref = delimiter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + delimiter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LEXICOGRAPHIC_START_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object lexicographicStart_ = ""; + + /** + * + * + *
+   * Optional. Filter results to folders whose names are lexicographically equal
+   * to or after lexicographic_start. If lexicographic_end is also set, the
+   * folders listed have names between lexicographic_start (inclusive) and
+   * lexicographic_end (exclusive).
+   * 
+ * + * string lexicographic_start = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lexicographicStart. + */ + @java.lang.Override + public java.lang.String getLexicographicStart() { + java.lang.Object ref = lexicographicStart_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + lexicographicStart_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Filter results to folders whose names are lexicographically equal
+   * to or after lexicographic_start. If lexicographic_end is also set, the
+   * folders listed have names between lexicographic_start (inclusive) and
+   * lexicographic_end (exclusive).
+   * 
+ * + * string lexicographic_start = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for lexicographicStart. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLexicographicStartBytes() { + java.lang.Object ref = lexicographicStart_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + lexicographicStart_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LEXICOGRAPHIC_END_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private volatile java.lang.Object lexicographicEnd_ = ""; + + /** + * + * + *
+   * Optional. Filter results to folders whose names are lexicographically
+   * before lexicographic_end. If lexicographic_start is also set, the folders
+   * listed have names between lexicographic_start (inclusive) and
+   * lexicographic_end (exclusive).
+   * 
+ * + * string lexicographic_end = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lexicographicEnd. + */ + @java.lang.Override + public java.lang.String getLexicographicEnd() { + java.lang.Object ref = lexicographicEnd_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + lexicographicEnd_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Filter results to folders whose names are lexicographically
+   * before lexicographic_end. If lexicographic_start is also set, the folders
+   * listed have names between lexicographic_start (inclusive) and
+   * lexicographic_end (exclusive).
+   * 
+ * + * string lexicographic_end = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for lexicographicEnd. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLexicographicEndBytes() { + java.lang.Object ref = lexicographicEnd_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + lexicographicEnd_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 9 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 9 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(prefix_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, prefix_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(lexicographicStart_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, lexicographicStart_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(lexicographicEnd_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 7, lexicographicEnd_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(delimiter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 8, delimiter_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) 
{ + com.google.protobuf.GeneratedMessage.writeString(output, 9, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(prefix_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, prefix_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(lexicographicStart_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, lexicographicStart_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(lexicographicEnd_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(7, lexicographicEnd_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(delimiter_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(8, delimiter_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(9, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.ListFoldersRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.ListFoldersRequest other = + (com.google.storage.control.v2.ListFoldersRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if 
(getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getPrefix().equals(other.getPrefix())) return false; + if (!getDelimiter().equals(other.getDelimiter())) return false; + if (!getLexicographicStart().equals(other.getLexicographicStart())) return false; + if (!getLexicographicEnd().equals(other.getLexicographicEnd())) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (37 * hash) + PREFIX_FIELD_NUMBER; + hash = (53 * hash) + getPrefix().hashCode(); + hash = (37 * hash) + DELIMITER_FIELD_NUMBER; + hash = (53 * hash) + getDelimiter().hashCode(); + hash = (37 * hash) + LEXICOGRAPHIC_START_FIELD_NUMBER; + hash = (53 * hash) + getLexicographicStart().hashCode(); + hash = (37 * hash) + LEXICOGRAPHIC_END_FIELD_NUMBER; + hash = (53 * hash) + getLexicographicEnd().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.ListFoldersRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListFoldersRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListFoldersRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListFoldersRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListFoldersRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListFoldersRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListFoldersRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListFoldersRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ListFoldersRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListFoldersRequest parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ListFoldersRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListFoldersRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.control.v2.ListFoldersRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for ListFolders. This operation is only applicable to a
+   * hierarchical namespace enabled bucket.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.ListFoldersRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.ListFoldersRequest) + com.google.storage.control.v2.ListFoldersRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListFoldersRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListFoldersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ListFoldersRequest.class, + com.google.storage.control.v2.ListFoldersRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.ListFoldersRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + prefix_ = ""; + delimiter_ = ""; + lexicographicStart_ = ""; + lexicographicEnd_ = ""; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListFoldersRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.ListFoldersRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.ListFoldersRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.ListFoldersRequest build() { 
+ com.google.storage.control.v2.ListFoldersRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.ListFoldersRequest buildPartial() { + com.google.storage.control.v2.ListFoldersRequest result = + new com.google.storage.control.v2.ListFoldersRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.ListFoldersRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.prefix_ = prefix_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.delimiter_ = delimiter_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.lexicographicStart_ = lexicographicStart_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.lexicographicEnd_ = lexicographicEnd_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.requestId_ = requestId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.ListFoldersRequest) { + return mergeFrom((com.google.storage.control.v2.ListFoldersRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.ListFoldersRequest other) { + if (other == com.google.storage.control.v2.ListFoldersRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + 
if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getPrefix().isEmpty()) { + prefix_ = other.prefix_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (!other.getDelimiter().isEmpty()) { + delimiter_ = other.delimiter_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (!other.getLexicographicStart().isEmpty()) { + lexicographicStart_ = other.lexicographicStart_; + bitField0_ |= 0x00000020; + onChanged(); + } + if (!other.getLexicographicEnd().isEmpty()) { + lexicographicEnd_ = other.lexicographicEnd_; + bitField0_ |= 0x00000040; + onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000080; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + prefix_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 50: + { + lexicographicStart_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 58: + { + lexicographicEnd_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000040; + 
break; + } // case 58 + case 66: + { + delimiter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 66 + case 74: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000080; + break; + } // case 74 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Name of the bucket in which to look for folders. The bucket must
+     * be a hierarchical namespace enabled bucket.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which to look for folders. The bucket must
+     * be a hierarchical namespace enabled bucket.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which to look for folders. The bucket must
+     * be a hierarchical namespace enabled bucket.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which to look for folders. The bucket must
+     * be a hierarchical namespace enabled bucket.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which to look for folders. The bucket must
+     * be a hierarchical namespace enabled bucket.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
+     * Optional. Maximum number of folders to return in a single response. The
+     * service will use this parameter or 1,000 items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
+     * Optional. Maximum number of folders to return in a single response. The
+     * service will use this parameter or 1,000 items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Maximum number of folders to return in a single response. The
+     * service will use this parameter or 1,000 items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object prefix_ = ""; + + /** + * + * + *
+     * Optional. Filter results to folders whose names begin with this prefix.
+     * If set, the value must either be an empty string or end with a '/'.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The prefix. + */ + public java.lang.String getPrefix() { + java.lang.Object ref = prefix_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + prefix_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to folders whose names begin with this prefix.
+     * If set, the value must either be an empty string or end with a '/'.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for prefix. + */ + public com.google.protobuf.ByteString getPrefixBytes() { + java.lang.Object ref = prefix_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + prefix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to folders whose names begin with this prefix.
+     * If set, the value must either be an empty string or end with a '/'.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The prefix to set. + * @return This builder for chaining. + */ + public Builder setPrefix(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + prefix_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to folders whose names begin with this prefix.
+     * If set, the value must either be an empty string or end with a '/'.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPrefix() { + prefix_ = getDefaultInstance().getPrefix(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to folders whose names begin with this prefix.
+     * If set, the value must either be an empty string or end with a '/'.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for prefix to set. + * @return This builder for chaining. + */ + public Builder setPrefixBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + prefix_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private java.lang.Object delimiter_ = ""; + + /** + * + * + *
+     * Optional. If set, returns results in a directory-like mode. The results
+     * will only include folders that either exactly match the above prefix, or
+     * are one level below the prefix. The only supported value is '/'.
+     * 
+ * + * string delimiter = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The delimiter. + */ + public java.lang.String getDelimiter() { + java.lang.Object ref = delimiter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + delimiter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. If set, returns results in a directory-like mode. The results
+     * will only include folders that either exactly match the above prefix, or
+     * are one level below the prefix. The only supported value is '/'.
+     * 
+ * + * string delimiter = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for delimiter. + */ + public com.google.protobuf.ByteString getDelimiterBytes() { + java.lang.Object ref = delimiter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + delimiter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. If set, returns results in a directory-like mode. The results
+     * will only include folders that either exactly match the above prefix, or
+     * are one level below the prefix. The only supported value is '/'.
+     * 
+ * + * string delimiter = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The delimiter to set. + * @return This builder for chaining. + */ + public Builder setDelimiter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + delimiter_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If set, returns results in a directory-like mode. The results
+     * will only include folders that either exactly match the above prefix, or
+     * are one level below the prefix. The only supported value is '/'.
+     * 
+ * + * string delimiter = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDelimiter() { + delimiter_ = getDefaultInstance().getDelimiter(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If set, returns results in a directory-like mode. The results
+     * will only include folders that either exactly match the above prefix, or
+     * are one level below the prefix. The only supported value is '/'.
+     * 
+ * + * string delimiter = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for delimiter to set. + * @return This builder for chaining. + */ + public Builder setDelimiterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + delimiter_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private java.lang.Object lexicographicStart_ = ""; + + /** + * + * + *
+     * Optional. Filter results to folders whose names are lexicographically equal
+     * to or after lexicographic_start. If lexicographic_end is also set, the
+     * folders listed have names between lexicographic_start (inclusive) and
+     * lexicographic_end (exclusive).
+     * 
+ * + * string lexicographic_start = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lexicographicStart. + */ + public java.lang.String getLexicographicStart() { + java.lang.Object ref = lexicographicStart_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + lexicographicStart_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to folders whose names are lexicographically equal
+     * to or after lexicographic_start. If lexicographic_end is also set, the
+     * folders listed have names between lexicographic_start (inclusive) and
+     * lexicographic_end (exclusive).
+     * 
+ * + * string lexicographic_start = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for lexicographicStart. + */ + public com.google.protobuf.ByteString getLexicographicStartBytes() { + java.lang.Object ref = lexicographicStart_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + lexicographicStart_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to folders whose names are lexicographically equal
+     * to or after lexicographic_start. If lexicographic_end is also set, the
+     * folders listed have names between lexicographic_start (inclusive) and
+     * lexicographic_end (exclusive).
+     * 
+ * + * string lexicographic_start = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The lexicographicStart to set. + * @return This builder for chaining. + */ + public Builder setLexicographicStart(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + lexicographicStart_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to folders whose names are lexicographically equal
+     * to or after lexicographic_start. If lexicographic_end is also set, the
+     * folders listed have names between lexicographic_start (inclusive) and
+     * lexicographic_end (exclusive).
+     * 
+ * + * string lexicographic_start = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLexicographicStart() { + lexicographicStart_ = getDefaultInstance().getLexicographicStart(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to folders whose names are lexicographically equal
+     * to or after lexicographic_start. If lexicographic_end is also set, the
+     * folders listed have names between lexicographic_start (inclusive) and
+     * lexicographic_end (exclusive).
+     * 
+ * + * string lexicographic_start = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for lexicographicStart to set. + * @return This builder for chaining. + */ + public Builder setLexicographicStartBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + lexicographicStart_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private java.lang.Object lexicographicEnd_ = ""; + + /** + * + * + *
+     * Optional. Filter results to folders whose names are lexicographically
+     * before lexicographic_end. If lexicographic_start is also set, the folders
+     * listed have names between lexicographic_start (inclusive) and
+     * lexicographic_end (exclusive).
+     * 
+ * + * string lexicographic_end = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lexicographicEnd. + */ + public java.lang.String getLexicographicEnd() { + java.lang.Object ref = lexicographicEnd_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + lexicographicEnd_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to folders whose names are lexicographically
+     * before lexicographic_end. If lexicographic_start is also set, the folders
+     * listed have names between lexicographic_start (inclusive) and
+     * lexicographic_end (exclusive).
+     * 
+ * + * string lexicographic_end = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for lexicographicEnd. + */ + public com.google.protobuf.ByteString getLexicographicEndBytes() { + java.lang.Object ref = lexicographicEnd_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + lexicographicEnd_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to folders whose names are lexicographically
+     * before lexicographic_end. If lexicographic_start is also set, the folders
+     * listed have names between lexicographic_start (inclusive) and
+     * lexicographic_end (exclusive).
+     * 
+ * + * string lexicographic_end = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The lexicographicEnd to set. + * @return This builder for chaining. + */ + public Builder setLexicographicEnd(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + lexicographicEnd_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to folders whose names are lexicographically
+     * before lexicographic_end. If lexicographic_start is also set, the folders
+     * listed have names between lexicographic_start (inclusive) and
+     * lexicographic_end (exclusive).
+     * 
+ * + * string lexicographic_end = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLexicographicEnd() { + lexicographicEnd_ = getDefaultInstance().getLexicographicEnd(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to folders whose names are lexicographically
+     * before lexicographic_end. If lexicographic_start is also set, the folders
+     * listed have names between lexicographic_start (inclusive) and
+     * lexicographic_end (exclusive).
+     * 
+ * + * string lexicographic_end = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for lexicographicEnd to set. + * @return This builder for chaining. + */ + public Builder setLexicographicEndBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + lexicographicEnd_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 9 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 9 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 9 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 9 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 9 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.ListFoldersRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.ListFoldersRequest) + private static final com.google.storage.control.v2.ListFoldersRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.ListFoldersRequest(); + } + + public static com.google.storage.control.v2.ListFoldersRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListFoldersRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.ListFoldersRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListFoldersRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListFoldersRequestOrBuilder.java new file mode 100644 index 000000000000..19e8525a59a6 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListFoldersRequestOrBuilder.java @@ -0,0 +1,256 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface ListFoldersRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.ListFoldersRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the bucket in which to look for folders. The bucket must
+   * be a hierarchical namespace enabled bucket.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Name of the bucket in which to look for folders. The bucket must
+   * be a hierarchical namespace enabled bucket.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. Maximum number of folders to return in a single response. The
+   * service will use this parameter or 1,000 items, whichever is smaller.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); + + /** + * + * + *
+   * Optional. Filter results to folders whose names begin with this prefix.
+   * If set, the value must either be an empty string or end with a '/'.
+   * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The prefix. + */ + java.lang.String getPrefix(); + + /** + * + * + *
+   * Optional. Filter results to folders whose names begin with this prefix.
+   * If set, the value must either be an empty string or end with a '/'.
+   * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for prefix. + */ + com.google.protobuf.ByteString getPrefixBytes(); + + /** + * + * + *
+   * Optional. If set, returns results in a directory-like mode. The results
+   * will only include folders that either exactly match the above prefix, or
+   * are one level below the prefix. The only supported value is '/'.
+   * 
+ * + * string delimiter = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The delimiter. + */ + java.lang.String getDelimiter(); + + /** + * + * + *
+   * Optional. If set, returns results in a directory-like mode. The results
+   * will only include folders that either exactly match the above prefix, or
+   * are one level below the prefix. The only supported value is '/'.
+   * 
+ * + * string delimiter = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for delimiter. + */ + com.google.protobuf.ByteString getDelimiterBytes(); + + /** + * + * + *
+   * Optional. Filter results to folders whose names are lexicographically equal
+   * to or after lexicographic_start. If lexicographic_end is also set, the
+   * folders listed have names between lexicographic_start (inclusive) and
+   * lexicographic_end (exclusive).
+   * 
+ * + * string lexicographic_start = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lexicographicStart. + */ + java.lang.String getLexicographicStart(); + + /** + * + * + *
+   * Optional. Filter results to folders whose names are lexicographically equal
+   * to or after lexicographic_start. If lexicographic_end is also set, the
+   * folders listed have names between lexicographic_start (inclusive) and
+   * lexicographic_end (exclusive).
+   * 
+ * + * string lexicographic_start = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for lexicographicStart. + */ + com.google.protobuf.ByteString getLexicographicStartBytes(); + + /** + * + * + *
+   * Optional. Filter results to folders whose names are lexicographically
+   * before lexicographic_end. If lexicographic_start is also set, the folders
+   * listed have names between lexicographic_start (inclusive) and
+   * lexicographic_end (exclusive).
+   * 
+ * + * string lexicographic_end = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lexicographicEnd. + */ + java.lang.String getLexicographicEnd(); + + /** + * + * + *
+   * Optional. Filter results to folders whose names are lexicographically
+   * before lexicographic_end. If lexicographic_start is also set, the folders
+   * listed have names between lexicographic_start (inclusive) and
+   * lexicographic_end (exclusive).
+   * 
+ * + * string lexicographic_end = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for lexicographicEnd. + */ + com.google.protobuf.ByteString getLexicographicEndBytes(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 9 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 9 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListFoldersResponse.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListFoldersResponse.java new file mode 100644 index 000000000000..eb0121d7d1e7 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListFoldersResponse.java @@ -0,0 +1,1117 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Response message for ListFolders.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.ListFoldersResponse} + */ +@com.google.protobuf.Generated +public final class ListFoldersResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.ListFoldersResponse) + ListFoldersResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListFoldersResponse"); + } + + // Use ListFoldersResponse.newBuilder() to construct. + private ListFoldersResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListFoldersResponse() { + folders_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListFoldersResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListFoldersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ListFoldersResponse.class, + com.google.storage.control.v2.ListFoldersResponse.Builder.class); + } + + public static final int FOLDERS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List folders_; + + /** + * + * + *
+   * The list of child folders
+   * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + @java.lang.Override + public java.util.List getFoldersList() { + return folders_; + } + + /** + * + * + *
+   * The list of child folders
+   * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + @java.lang.Override + public java.util.List + getFoldersOrBuilderList() { + return folders_; + } + + /** + * + * + *
+   * The list of child folders
+   * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + @java.lang.Override + public int getFoldersCount() { + return folders_.size(); + } + + /** + * + * + *
+   * The list of child folders
+   * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + @java.lang.Override + public com.google.storage.control.v2.Folder getFolders(int index) { + return folders_.get(index); + } + + /** + * + * + *
+   * The list of child folders
+   * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + @java.lang.Override + public com.google.storage.control.v2.FolderOrBuilder getFoldersOrBuilder(int index) { + return folders_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < folders_.size(); i++) { + output.writeMessage(1, folders_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < folders_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, folders_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.ListFoldersResponse)) { + return super.equals(obj); + } + com.google.storage.control.v2.ListFoldersResponse other = + 
(com.google.storage.control.v2.ListFoldersResponse) obj; + + if (!getFoldersList().equals(other.getFoldersList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getFoldersCount() > 0) { + hash = (37 * hash) + FOLDERS_FIELD_NUMBER; + hash = (53 * hash) + getFoldersList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.ListFoldersResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListFoldersResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListFoldersResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListFoldersResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListFoldersResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListFoldersResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListFoldersResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListFoldersResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ListFoldersResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListFoldersResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ListFoldersResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListFoldersResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + 
public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.control.v2.ListFoldersResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for ListFolders.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.ListFoldersResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.ListFoldersResponse) + com.google.storage.control.v2.ListFoldersResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListFoldersResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListFoldersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ListFoldersResponse.class, + com.google.storage.control.v2.ListFoldersResponse.Builder.class); + } + + // Construct using com.google.storage.control.v2.ListFoldersResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (foldersBuilder_ == null) { + folders_ = java.util.Collections.emptyList(); + } else { + folders_ = null; + foldersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListFoldersResponse_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.ListFoldersResponse getDefaultInstanceForType() { + return com.google.storage.control.v2.ListFoldersResponse.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.storage.control.v2.ListFoldersResponse build() { + com.google.storage.control.v2.ListFoldersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.ListFoldersResponse buildPartial() { + com.google.storage.control.v2.ListFoldersResponse result = + new com.google.storage.control.v2.ListFoldersResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.storage.control.v2.ListFoldersResponse result) { + if (foldersBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + folders_ = java.util.Collections.unmodifiableList(folders_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.folders_ = folders_; + } else { + result.folders_ = foldersBuilder_.build(); + } + } + + private void buildPartial0(com.google.storage.control.v2.ListFoldersResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.ListFoldersResponse) { + return mergeFrom((com.google.storage.control.v2.ListFoldersResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.ListFoldersResponse other) { + if (other == com.google.storage.control.v2.ListFoldersResponse.getDefaultInstance()) + return this; + if (foldersBuilder_ == null) { + if (!other.folders_.isEmpty()) { + if (folders_.isEmpty()) { + folders_ = other.folders_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureFoldersIsMutable(); + folders_.addAll(other.folders_); + } + onChanged(); + } + } else { + if 
(!other.folders_.isEmpty()) { + if (foldersBuilder_.isEmpty()) { + foldersBuilder_.dispose(); + foldersBuilder_ = null; + folders_ = other.folders_; + bitField0_ = (bitField0_ & ~0x00000001); + foldersBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetFoldersFieldBuilder() + : null; + } else { + foldersBuilder_.addAllMessages(other.folders_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.storage.control.v2.Folder m = + input.readMessage( + com.google.storage.control.v2.Folder.parser(), extensionRegistry); + if (foldersBuilder_ == null) { + ensureFoldersIsMutable(); + folders_.add(m); + } else { + foldersBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List folders_ = + java.util.Collections.emptyList(); + + private void 
ensureFoldersIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + folders_ = new java.util.ArrayList(folders_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.control.v2.Folder, + com.google.storage.control.v2.Folder.Builder, + com.google.storage.control.v2.FolderOrBuilder> + foldersBuilder_; + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public java.util.List getFoldersList() { + if (foldersBuilder_ == null) { + return java.util.Collections.unmodifiableList(folders_); + } else { + return foldersBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public int getFoldersCount() { + if (foldersBuilder_ == null) { + return folders_.size(); + } else { + return foldersBuilder_.getCount(); + } + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public com.google.storage.control.v2.Folder getFolders(int index) { + if (foldersBuilder_ == null) { + return folders_.get(index); + } else { + return foldersBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public Builder setFolders(int index, com.google.storage.control.v2.Folder value) { + if (foldersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFoldersIsMutable(); + folders_.set(index, value); + onChanged(); + } else { + foldersBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public Builder setFolders( + int index, com.google.storage.control.v2.Folder.Builder builderForValue) { + if (foldersBuilder_ == null) { + ensureFoldersIsMutable(); + folders_.set(index, builderForValue.build()); + onChanged(); + } else { + foldersBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public Builder addFolders(com.google.storage.control.v2.Folder value) { + if (foldersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFoldersIsMutable(); + folders_.add(value); + onChanged(); + } else { + foldersBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public Builder addFolders(int index, com.google.storage.control.v2.Folder value) { + if (foldersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFoldersIsMutable(); + folders_.add(index, value); + onChanged(); + } else { + foldersBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public Builder addFolders(com.google.storage.control.v2.Folder.Builder builderForValue) { + if (foldersBuilder_ == null) { + ensureFoldersIsMutable(); + folders_.add(builderForValue.build()); + onChanged(); + } else { + foldersBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public Builder addFolders( + int index, com.google.storage.control.v2.Folder.Builder builderForValue) { + if (foldersBuilder_ == null) { + ensureFoldersIsMutable(); + folders_.add(index, builderForValue.build()); + onChanged(); + } else { + foldersBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public Builder addAllFolders( + java.lang.Iterable values) { + if (foldersBuilder_ == null) { + ensureFoldersIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, folders_); + onChanged(); + } else { + foldersBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public Builder clearFolders() { + if (foldersBuilder_ == null) { + folders_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + foldersBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public Builder removeFolders(int index) { + if (foldersBuilder_ == null) { + ensureFoldersIsMutable(); + folders_.remove(index); + onChanged(); + } else { + foldersBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public com.google.storage.control.v2.Folder.Builder getFoldersBuilder(int index) { + return internalGetFoldersFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public com.google.storage.control.v2.FolderOrBuilder getFoldersOrBuilder(int index) { + if (foldersBuilder_ == null) { + return folders_.get(index); + } else { + return foldersBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public java.util.List + getFoldersOrBuilderList() { + if (foldersBuilder_ != null) { + return foldersBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(folders_); + } + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public com.google.storage.control.v2.Folder.Builder addFoldersBuilder() { + return internalGetFoldersFieldBuilder() + .addBuilder(com.google.storage.control.v2.Folder.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public com.google.storage.control.v2.Folder.Builder addFoldersBuilder(int index) { + return internalGetFoldersFieldBuilder() + .addBuilder(index, com.google.storage.control.v2.Folder.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of child folders
+     * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + public java.util.List getFoldersBuilderList() { + return internalGetFoldersFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.control.v2.Folder, + com.google.storage.control.v2.Folder.Builder, + com.google.storage.control.v2.FolderOrBuilder> + internalGetFoldersFieldBuilder() { + if (foldersBuilder_ == null) { + foldersBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.control.v2.Folder, + com.google.storage.control.v2.Folder.Builder, + com.google.storage.control.v2.FolderOrBuilder>( + folders_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + folders_ = null; + } + return foldersBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.ListFoldersResponse) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.ListFoldersResponse) + private static final com.google.storage.control.v2.ListFoldersResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.ListFoldersResponse(); + } + + public static com.google.storage.control.v2.ListFoldersResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListFoldersResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + 
@java.lang.Override + public com.google.storage.control.v2.ListFoldersResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListFoldersResponseOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListFoldersResponseOrBuilder.java new file mode 100644 index 000000000000..7b9f0964db71 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListFoldersResponseOrBuilder.java @@ -0,0 +1,111 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface ListFoldersResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.ListFoldersResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of child folders
+   * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + java.util.List getFoldersList(); + + /** + * + * + *
+   * The list of child folders
+   * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + com.google.storage.control.v2.Folder getFolders(int index); + + /** + * + * + *
+   * The list of child folders
+   * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + int getFoldersCount(); + + /** + * + * + *
+   * The list of child folders
+   * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + java.util.List getFoldersOrBuilderList(); + + /** + * + * + *
+   * The list of child folders
+   * 
+ * + * repeated .google.storage.control.v2.Folder folders = 1; + */ + com.google.storage.control.v2.FolderOrBuilder getFoldersOrBuilder(int index); + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListManagedFoldersRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListManagedFoldersRequest.java new file mode 100644 index 000000000000..6d0fe1414ae5 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListManagedFoldersRequest.java @@ -0,0 +1,1314 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for ListManagedFolders.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.ListManagedFoldersRequest} + */ +@com.google.protobuf.Generated +public final class ListManagedFoldersRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.ListManagedFoldersRequest) + ListManagedFoldersRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListManagedFoldersRequest"); + } + + // Use ListManagedFoldersRequest.newBuilder() to construct. + private ListManagedFoldersRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListManagedFoldersRequest() { + parent_ = ""; + pageToken_ = ""; + prefix_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListManagedFoldersRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListManagedFoldersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ListManagedFoldersRequest.class, + com.google.storage.control.v2.ListManagedFoldersRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Name of the bucket this managed folder belongs to.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the bucket this managed folder belongs to.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
+   * Optional. Maximum number of managed folders to return in a single response.
+   * The service will use this parameter or 1,000 items, whichever is smaller.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PREFIX_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object prefix_ = ""; + + /** + * + * + *
+   * Optional. Filter results to match managed folders with name starting with
+   * this prefix.
+   * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The prefix. + */ + @java.lang.Override + public java.lang.String getPrefix() { + java.lang.Object ref = prefix_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + prefix_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Filter results to match managed folders with name starting with
+   * this prefix.
+   * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for prefix. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPrefixBytes() { + java.lang.Object ref = prefix_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + prefix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(prefix_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, prefix_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += 
com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(prefix_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, prefix_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.ListManagedFoldersRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.ListManagedFoldersRequest other = + (com.google.storage.control.v2.ListManagedFoldersRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getPrefix().equals(other.getPrefix())) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (37 * hash) + PREFIX_FIELD_NUMBER; + hash = (53 * hash) + getPrefix().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + 
hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.ListManagedFoldersRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListManagedFoldersRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListManagedFoldersRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListManagedFoldersRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListManagedFoldersRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListManagedFoldersRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListManagedFoldersRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListManagedFoldersRequest parseFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ListManagedFoldersRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListManagedFoldersRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ListManagedFoldersRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListManagedFoldersRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.ListManagedFoldersRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for ListManagedFolders.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.ListManagedFoldersRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.ListManagedFoldersRequest) + com.google.storage.control.v2.ListManagedFoldersRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListManagedFoldersRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListManagedFoldersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ListManagedFoldersRequest.class, + com.google.storage.control.v2.ListManagedFoldersRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.ListManagedFoldersRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + prefix_ = ""; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListManagedFoldersRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.ListManagedFoldersRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.ListManagedFoldersRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.ListManagedFoldersRequest 
build() { + com.google.storage.control.v2.ListManagedFoldersRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.ListManagedFoldersRequest buildPartial() { + com.google.storage.control.v2.ListManagedFoldersRequest result = + new com.google.storage.control.v2.ListManagedFoldersRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.ListManagedFoldersRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.prefix_ = prefix_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.requestId_ = requestId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.ListManagedFoldersRequest) { + return mergeFrom((com.google.storage.control.v2.ListManagedFoldersRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.ListManagedFoldersRequest other) { + if (other == com.google.storage.control.v2.ListManagedFoldersRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getPrefix().isEmpty()) { + prefix_ = other.prefix_; + bitField0_ |= 0x00000008; + 
onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000010; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + prefix_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Name of the bucket this managed folder belongs to.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket this managed folder belongs to.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket this managed folder belongs to.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket this managed folder belongs to.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket this managed folder belongs to.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
+     * Optional. Maximum number of managed folders to return in a single response.
+     * The service will use this parameter or 1,000 items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
+     * Optional. Maximum number of managed folders to return in a single response.
+     * The service will use this parameter or 1,000 items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Maximum number of managed folders to return in a single response.
+     * The service will use this parameter or 1,000 items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object prefix_ = ""; + + /** + * + * + *
+     * Optional. Filter results to match managed folders with name starting with
+     * this prefix.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The prefix. + */ + public java.lang.String getPrefix() { + java.lang.Object ref = prefix_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + prefix_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to match managed folders with name starting with
+     * this prefix.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for prefix. + */ + public com.google.protobuf.ByteString getPrefixBytes() { + java.lang.Object ref = prefix_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + prefix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to match managed folders with name starting with
+     * this prefix.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The prefix to set. + * @return This builder for chaining. + */ + public Builder setPrefix(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + prefix_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to match managed folders with name starting with
+     * this prefix.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPrefix() { + prefix_ = getDefaultInstance().getPrefix(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to match managed folders with name starting with
+     * this prefix.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for prefix to set. + * @return This builder for chaining. + */ + public Builder setPrefixBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + prefix_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted.
+     * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.ListManagedFoldersRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.ListManagedFoldersRequest) + private static final com.google.storage.control.v2.ListManagedFoldersRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.ListManagedFoldersRequest(); + } + + public static com.google.storage.control.v2.ListManagedFoldersRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListManagedFoldersRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.ListManagedFoldersRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListManagedFoldersRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListManagedFoldersRequestOrBuilder.java new file mode 100644 index 000000000000..6b0955160fd4 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListManagedFoldersRequestOrBuilder.java @@ -0,0 +1,160 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface ListManagedFoldersRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.ListManagedFoldersRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the bucket this managed folder belongs to.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Name of the bucket this managed folder belongs to.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. Maximum number of managed folders to return in a single response.
+   * The service will use this parameter or 1,000 items, whichever is smaller.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); + + /** + * + * + *
+   * Optional. Filter results to match managed folders with name starting with
+   * this prefix.
+   * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The prefix. + */ + java.lang.String getPrefix(); + + /** + * + * + *
+   * Optional. Filter results to match managed folders with name starting with
+   * this prefix.
+   * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for prefix. + */ + com.google.protobuf.ByteString getPrefixBytes(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted.
+   * 
+ * + * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListManagedFoldersResponse.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListManagedFoldersResponse.java new file mode 100644 index 000000000000..bc25038d8a09 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListManagedFoldersResponse.java @@ -0,0 +1,1126 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Response message for ListManagedFolders.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.ListManagedFoldersResponse} + */ +@com.google.protobuf.Generated +public final class ListManagedFoldersResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.ListManagedFoldersResponse) + ListManagedFoldersResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListManagedFoldersResponse"); + } + + // Use ListManagedFoldersResponse.newBuilder() to construct. + private ListManagedFoldersResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListManagedFoldersResponse() { + managedFolders_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListManagedFoldersResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListManagedFoldersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ListManagedFoldersResponse.class, + com.google.storage.control.v2.ListManagedFoldersResponse.Builder.class); + } + + public static final int MANAGED_FOLDERS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List managedFolders_; + + /** + * + * + *
+   * The list of matching managed folders
+   * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + @java.lang.Override + public java.util.List getManagedFoldersList() { + return managedFolders_; + } + + /** + * + * + *
+   * The list of matching managed folders
+   * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + @java.lang.Override + public java.util.List + getManagedFoldersOrBuilderList() { + return managedFolders_; + } + + /** + * + * + *
+   * The list of matching managed folders
+   * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + @java.lang.Override + public int getManagedFoldersCount() { + return managedFolders_.size(); + } + + /** + * + * + *
+   * The list of matching managed folders
+   * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + @java.lang.Override + public com.google.storage.control.v2.ManagedFolder getManagedFolders(int index) { + return managedFolders_.get(index); + } + + /** + * + * + *
+   * The list of matching managed folders
+   * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + @java.lang.Override + public com.google.storage.control.v2.ManagedFolderOrBuilder getManagedFoldersOrBuilder( + int index) { + return managedFolders_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < managedFolders_.size(); i++) { + output.writeMessage(1, managedFolders_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < managedFolders_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, managedFolders_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.ListManagedFoldersResponse)) { + return super.equals(obj); + } + 
com.google.storage.control.v2.ListManagedFoldersResponse other = + (com.google.storage.control.v2.ListManagedFoldersResponse) obj; + + if (!getManagedFoldersList().equals(other.getManagedFoldersList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getManagedFoldersCount() > 0) { + hash = (37 * hash) + MANAGED_FOLDERS_FIELD_NUMBER; + hash = (53 * hash) + getManagedFoldersList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.ListManagedFoldersResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListManagedFoldersResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListManagedFoldersResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListManagedFoldersResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.storage.control.v2.ListManagedFoldersResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ListManagedFoldersResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ListManagedFoldersResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListManagedFoldersResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ListManagedFoldersResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListManagedFoldersResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ListManagedFoldersResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ListManagedFoldersResponse parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.ListManagedFoldersResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for ListManagedFolders.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.ListManagedFoldersResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.ListManagedFoldersResponse) + com.google.storage.control.v2.ListManagedFoldersResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListManagedFoldersResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListManagedFoldersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ListManagedFoldersResponse.class, + com.google.storage.control.v2.ListManagedFoldersResponse.Builder.class); + } + + // Construct using com.google.storage.control.v2.ListManagedFoldersResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (managedFoldersBuilder_ == null) { + managedFolders_ = java.util.Collections.emptyList(); + } else { + managedFolders_ = null; + managedFoldersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ListManagedFoldersResponse_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.ListManagedFoldersResponse getDefaultInstanceForType() { + return 
com.google.storage.control.v2.ListManagedFoldersResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.ListManagedFoldersResponse build() { + com.google.storage.control.v2.ListManagedFoldersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.ListManagedFoldersResponse buildPartial() { + com.google.storage.control.v2.ListManagedFoldersResponse result = + new com.google.storage.control.v2.ListManagedFoldersResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.storage.control.v2.ListManagedFoldersResponse result) { + if (managedFoldersBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + managedFolders_ = java.util.Collections.unmodifiableList(managedFolders_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.managedFolders_ = managedFolders_; + } else { + result.managedFolders_ = managedFoldersBuilder_.build(); + } + } + + private void buildPartial0(com.google.storage.control.v2.ListManagedFoldersResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.ListManagedFoldersResponse) { + return mergeFrom((com.google.storage.control.v2.ListManagedFoldersResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.ListManagedFoldersResponse other) { + if (other == com.google.storage.control.v2.ListManagedFoldersResponse.getDefaultInstance()) + return this; + if (managedFoldersBuilder_ == null) { + if 
(!other.managedFolders_.isEmpty()) { + if (managedFolders_.isEmpty()) { + managedFolders_ = other.managedFolders_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureManagedFoldersIsMutable(); + managedFolders_.addAll(other.managedFolders_); + } + onChanged(); + } + } else { + if (!other.managedFolders_.isEmpty()) { + if (managedFoldersBuilder_.isEmpty()) { + managedFoldersBuilder_.dispose(); + managedFoldersBuilder_ = null; + managedFolders_ = other.managedFolders_; + bitField0_ = (bitField0_ & ~0x00000001); + managedFoldersBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetManagedFoldersFieldBuilder() + : null; + } else { + managedFoldersBuilder_.addAllMessages(other.managedFolders_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.storage.control.v2.ManagedFolder m = + input.readMessage( + com.google.storage.control.v2.ManagedFolder.parser(), extensionRegistry); + if (managedFoldersBuilder_ == null) { + ensureManagedFoldersIsMutable(); + managedFolders_.add(m); + } else { + managedFoldersBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, 
tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List managedFolders_ = + java.util.Collections.emptyList(); + + private void ensureManagedFoldersIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + managedFolders_ = + new java.util.ArrayList(managedFolders_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.control.v2.ManagedFolder, + com.google.storage.control.v2.ManagedFolder.Builder, + com.google.storage.control.v2.ManagedFolderOrBuilder> + managedFoldersBuilder_; + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public java.util.List getManagedFoldersList() { + if (managedFoldersBuilder_ == null) { + return java.util.Collections.unmodifiableList(managedFolders_); + } else { + return managedFoldersBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public int getManagedFoldersCount() { + if (managedFoldersBuilder_ == null) { + return managedFolders_.size(); + } else { + return managedFoldersBuilder_.getCount(); + } + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public com.google.storage.control.v2.ManagedFolder getManagedFolders(int index) { + if (managedFoldersBuilder_ == null) { + return managedFolders_.get(index); + } else { + return managedFoldersBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public Builder setManagedFolders(int index, com.google.storage.control.v2.ManagedFolder value) { + if (managedFoldersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureManagedFoldersIsMutable(); + managedFolders_.set(index, value); + onChanged(); + } else { + managedFoldersBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public Builder setManagedFolders( + int index, com.google.storage.control.v2.ManagedFolder.Builder builderForValue) { + if (managedFoldersBuilder_ == null) { + ensureManagedFoldersIsMutable(); + managedFolders_.set(index, builderForValue.build()); + onChanged(); + } else { + managedFoldersBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public Builder addManagedFolders(com.google.storage.control.v2.ManagedFolder value) { + if (managedFoldersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureManagedFoldersIsMutable(); + managedFolders_.add(value); + onChanged(); + } else { + managedFoldersBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public Builder addManagedFolders(int index, com.google.storage.control.v2.ManagedFolder value) { + if (managedFoldersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureManagedFoldersIsMutable(); + managedFolders_.add(index, value); + onChanged(); + } else { + managedFoldersBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public Builder addManagedFolders( + com.google.storage.control.v2.ManagedFolder.Builder builderForValue) { + if (managedFoldersBuilder_ == null) { + ensureManagedFoldersIsMutable(); + managedFolders_.add(builderForValue.build()); + onChanged(); + } else { + managedFoldersBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public Builder addManagedFolders( + int index, com.google.storage.control.v2.ManagedFolder.Builder builderForValue) { + if (managedFoldersBuilder_ == null) { + ensureManagedFoldersIsMutable(); + managedFolders_.add(index, builderForValue.build()); + onChanged(); + } else { + managedFoldersBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public Builder addAllManagedFolders( + java.lang.Iterable values) { + if (managedFoldersBuilder_ == null) { + ensureManagedFoldersIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, managedFolders_); + onChanged(); + } else { + managedFoldersBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public Builder clearManagedFolders() { + if (managedFoldersBuilder_ == null) { + managedFolders_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + managedFoldersBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public Builder removeManagedFolders(int index) { + if (managedFoldersBuilder_ == null) { + ensureManagedFoldersIsMutable(); + managedFolders_.remove(index); + onChanged(); + } else { + managedFoldersBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public com.google.storage.control.v2.ManagedFolder.Builder getManagedFoldersBuilder(int index) { + return internalGetManagedFoldersFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public com.google.storage.control.v2.ManagedFolderOrBuilder getManagedFoldersOrBuilder( + int index) { + if (managedFoldersBuilder_ == null) { + return managedFolders_.get(index); + } else { + return managedFoldersBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public java.util.List + getManagedFoldersOrBuilderList() { + if (managedFoldersBuilder_ != null) { + return managedFoldersBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(managedFolders_); + } + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public com.google.storage.control.v2.ManagedFolder.Builder addManagedFoldersBuilder() { + return internalGetManagedFoldersFieldBuilder() + .addBuilder(com.google.storage.control.v2.ManagedFolder.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public com.google.storage.control.v2.ManagedFolder.Builder addManagedFoldersBuilder(int index) { + return internalGetManagedFoldersFieldBuilder() + .addBuilder(index, com.google.storage.control.v2.ManagedFolder.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of matching managed folders
+     * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + public java.util.List + getManagedFoldersBuilderList() { + return internalGetManagedFoldersFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.control.v2.ManagedFolder, + com.google.storage.control.v2.ManagedFolder.Builder, + com.google.storage.control.v2.ManagedFolderOrBuilder> + internalGetManagedFoldersFieldBuilder() { + if (managedFoldersBuilder_ == null) { + managedFoldersBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.control.v2.ManagedFolder, + com.google.storage.control.v2.ManagedFolder.Builder, + com.google.storage.control.v2.ManagedFolderOrBuilder>( + managedFolders_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + managedFolders_ = null; + } + return managedFoldersBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.ListManagedFoldersResponse) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.ListManagedFoldersResponse) + private static final com.google.storage.control.v2.ListManagedFoldersResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.ListManagedFoldersResponse(); + } + + public static com.google.storage.control.v2.ListManagedFoldersResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListManagedFoldersResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.ListManagedFoldersResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListManagedFoldersResponseOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListManagedFoldersResponseOrBuilder.java new file mode 100644 index 000000000000..f717391d55cf --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ListManagedFoldersResponseOrBuilder.java @@ -0,0 +1,112 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface ListManagedFoldersResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.ListManagedFoldersResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of matching managed folders
+   * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + java.util.List getManagedFoldersList(); + + /** + * + * + *
+   * The list of matching managed folders
+   * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + com.google.storage.control.v2.ManagedFolder getManagedFolders(int index); + + /** + * + * + *
+   * The list of matching managed folders
+   * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + int getManagedFoldersCount(); + + /** + * + * + *
+   * The list of matching managed folders
+   * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + java.util.List + getManagedFoldersOrBuilderList(); + + /** + * + * + *
+   * The list of matching managed folders
+   * 
+ * + * repeated .google.storage.control.v2.ManagedFolder managed_folders = 1; + */ + com.google.storage.control.v2.ManagedFolderOrBuilder getManagedFoldersOrBuilder(int index); + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ManagedFolder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ManagedFolder.java new file mode 100644 index 000000000000..0fb933e7cacb --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ManagedFolder.java @@ -0,0 +1,1321 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * A managed folder.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.ManagedFolder} + */ +@com.google.protobuf.Generated +public final class ManagedFolder extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.ManagedFolder) + ManagedFolderOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ManagedFolder"); + } + + // Use ManagedFolder.newBuilder() to construct. + private ManagedFolder(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ManagedFolder() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ManagedFolder_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ManagedFolder_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ManagedFolder.class, + com.google.storage.control.v2.ManagedFolder.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Identifier. The name of this managed folder.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Identifier. The name of this managed folder.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int METAGENERATION_FIELD_NUMBER = 3; + private long metageneration_ = 0L; + + /** + * + * + *
+   * Output only. The metadata version of this managed folder. It increases
+   * whenever the metadata is updated. Used for preconditions and for detecting
+   * changes in metadata. Managed folders don't have a generation number.
+   * 
+ * + * int64 metageneration = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The metageneration. + */ + @java.lang.Override + public long getMetageneration() { + return metageneration_; + } + + public static final int CREATE_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
+   * Output only. The creation time of the managed folder.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Output only. The creation time of the managed folder.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
+   * Output only. The creation time of the managed folder.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 5; + private com.google.protobuf.Timestamp updateTime_; + + /** + * + * + *
+   * Output only. The modification time of the managed folder.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + @java.lang.Override + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Output only. The modification time of the managed folder.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + /** + * + * + *
+   * Output only. The modification time of the managed folder.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (metageneration_ != 0L) { + output.writeInt64(3, metageneration_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(5, getUpdateTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (metageneration_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, metageneration_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getUpdateTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == 
this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.ManagedFolder)) { + return super.equals(obj); + } + com.google.storage.control.v2.ManagedFolder other = + (com.google.storage.control.v2.ManagedFolder) obj; + + if (!getName().equals(other.getName())) return false; + if (getMetageneration() != other.getMetageneration()) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasUpdateTime() != other.hasUpdateTime()) return false; + if (hasUpdateTime()) { + if (!getUpdateTime().equals(other.getUpdateTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + METAGENERATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getMetageneration()); + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasUpdateTime()) { + hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getUpdateTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.ManagedFolder parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ManagedFolder parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.storage.control.v2.ManagedFolder parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ManagedFolder parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ManagedFolder parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ManagedFolder parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ManagedFolder parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ManagedFolder parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ManagedFolder parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ManagedFolder parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ManagedFolder parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ManagedFolder parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.control.v2.ManagedFolder prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * A managed folder.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.ManagedFolder} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.ManagedFolder) + com.google.storage.control.v2.ManagedFolderOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ManagedFolder_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ManagedFolder_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ManagedFolder.class, + com.google.storage.control.v2.ManagedFolder.Builder.class); + } + + // Construct using com.google.storage.control.v2.ManagedFolder.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCreateTimeFieldBuilder(); + internalGetUpdateTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + metageneration_ = 0L; + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto 
+ .internal_static_google_storage_control_v2_ManagedFolder_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.ManagedFolder getDefaultInstanceForType() { + return com.google.storage.control.v2.ManagedFolder.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.ManagedFolder build() { + com.google.storage.control.v2.ManagedFolder result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.ManagedFolder buildPartial() { + com.google.storage.control.v2.ManagedFolder result = + new com.google.storage.control.v2.ManagedFolder(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.ManagedFolder result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.metageneration_ = metageneration_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.updateTime_ = updateTimeBuilder_ == null ? 
updateTime_ : updateTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.ManagedFolder) { + return mergeFrom((com.google.storage.control.v2.ManagedFolder) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.ManagedFolder other) { + if (other == com.google.storage.control.v2.ManagedFolder.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getMetageneration() != 0L) { + setMetageneration(other.getMetageneration()); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasUpdateTime()) { + mergeUpdateTime(other.getUpdateTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 24: + { + metageneration_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 24 + case 34: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetUpdateTimeFieldBuilder().getBuilder(), extensionRegistry); 
+ bitField0_ |= 0x00000008; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Identifier. The name of this managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Identifier. The name of this managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Identifier. The name of this managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Identifier. The name of this managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Identifier. The name of this managed folder.
+     * Format:
+     * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long metageneration_; + + /** + * + * + *
+     * Output only. The metadata version of this managed folder. It increases
+     * whenever the metadata is updated. Used for preconditions and for detecting
+     * changes in metadata. Managed folders don't have a generation number.
+     * 
+ * + * int64 metageneration = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The metageneration. + */ + @java.lang.Override + public long getMetageneration() { + return metageneration_; + } + + /** + * + * + *
+     * Output only. The metadata version of this managed folder. It increases
+     * whenever the metadata is updated. Used for preconditions and for detecting
+     * changes in metadata. Managed folders don't have a generation number.
+     * 
+ * + * int64 metageneration = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The metageneration to set. + * @return This builder for chaining. + */ + public Builder setMetageneration(long value) { + + metageneration_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The metadata version of this managed folder. It increases
+     * whenever the metadata is updated. Used for preconditions and for detecting
+     * changes in metadata. Managed folders don't have a generation number.
+     * 
+ * + * int64 metageneration = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearMetageneration() { + bitField0_ = (bitField0_ & ~0x00000002); + metageneration_ = 0L; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
+     * Output only. The creation time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Output only. The creation time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The creation time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000004); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The creation time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
+     * Output only. The creation time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp updateTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateTimeBuilder_; + + /** + * + * + *
+     * Output only. The modification time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Output only. The modification time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The modification time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + } else { + updateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && updateTime_ != null + && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateTimeBuilder().mergeFrom(value); + } else { + updateTime_ = value; + } + } else { + updateTimeBuilder_.mergeFrom(value); + } + if (updateTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateTime() { + bitField0_ = (bitField0_ & ~0x00000008); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetUpdateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The modification time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } + } + + /** + * + * + *
+     * Output only. The modification time of the managed folder.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetUpdateTimeFieldBuilder() { + if (updateTimeBuilder_ == null) { + updateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpdateTime(), getParentForChildren(), isClean()); + updateTime_ = null; + } + return updateTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.ManagedFolder) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.ManagedFolder) + private static final com.google.storage.control.v2.ManagedFolder DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.ManagedFolder(); + } + + public static com.google.storage.control.v2.ManagedFolder getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ManagedFolder parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.ManagedFolder getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ManagedFolderName.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ManagedFolderName.java new file mode 100644 index 000000000000..f5778809092b --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ManagedFolderName.java @@ -0,0 +1,227 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.control.v2; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class ManagedFolderName implements ResourceName { + private static final PathTemplate PROJECT_BUCKET_MANAGED_FOLDER = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/buckets/{bucket}/managedFolders/{managed_folder=**}"); + private volatile Map fieldValuesMap; + private final String project; + private final String bucket; + private final String managedFolder; + + @Deprecated + protected ManagedFolderName() { + project = null; + bucket = null; + managedFolder = null; + } + + private ManagedFolderName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + bucket = Preconditions.checkNotNull(builder.getBucket()); + managedFolder = Preconditions.checkNotNull(builder.getManagedFolder()); + } + + public String getProject() { + return project; + } + + public String getBucket() { + return bucket; + } + + public String getManagedFolder() { + return managedFolder; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static ManagedFolderName of(String project, String bucket, String managedFolder) { + return newBuilder() + .setProject(project) + .setBucket(bucket) + .setManagedFolder(managedFolder) + .build(); + } + + public static String format(String project, String bucket, String managedFolder) { + return newBuilder() + .setProject(project) + .setBucket(bucket) + .setManagedFolder(managedFolder) + .build() + .toString(); + } + + public static ManagedFolderName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_BUCKET_MANAGED_FOLDER.validatedMatch( + formattedString, "ManagedFolderName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("bucket"), matchMap.get("managed_folder")); + } + + public static List parseList(List formattedStrings) { + List list = new 
ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (ManagedFolderName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_BUCKET_MANAGED_FOLDER.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (bucket != null) { + fieldMapBuilder.put("bucket", bucket); + } + if (managedFolder != null) { + fieldMapBuilder.put("managed_folder", managedFolder); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_BUCKET_MANAGED_FOLDER.instantiate( + "project", project, "bucket", bucket, "managed_folder", managedFolder); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + ManagedFolderName that = ((ManagedFolderName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.bucket, that.bucket) + && Objects.equals(this.managedFolder, that.managedFolder); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(bucket); + h *= 1000003; + h ^= Objects.hashCode(managedFolder); + return h; + } + + /** Builder for 
projects/{project}/buckets/{bucket}/managedFolders/{managed_folder=**}. */ + public static class Builder { + private String project; + private String bucket; + private String managedFolder; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getBucket() { + return bucket; + } + + public String getManagedFolder() { + return managedFolder; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setBucket(String bucket) { + this.bucket = bucket; + return this; + } + + public Builder setManagedFolder(String managedFolder) { + this.managedFolder = managedFolder; + return this; + } + + private Builder(ManagedFolderName managedFolderName) { + this.project = managedFolderName.project; + this.bucket = managedFolderName.bucket; + this.managedFolder = managedFolderName.managedFolder; + } + + public ManagedFolderName build() { + return new ManagedFolderName(this); + } + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ManagedFolderOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ManagedFolderOrBuilder.java new file mode 100644 index 000000000000..7f7afe97149b --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ManagedFolderOrBuilder.java @@ -0,0 +1,153 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface ManagedFolderOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.ManagedFolder) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Identifier. The name of this managed folder.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Identifier. The name of this managed folder.
+   * Format:
+   * `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Output only. The metadata version of this managed folder. It increases
+   * whenever the metadata is updated. Used for preconditions and for detecting
+   * changes in metadata. Managed folders don't have a generation number.
+   * 
+ * + * int64 metageneration = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The metageneration. + */ + long getMetageneration(); + + /** + * + * + *
+   * Output only. The creation time of the managed folder.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the managed folder.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the managed folder.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. The modification time of the managed folder.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + boolean hasUpdateTime(); + + /** + * + * + *
+   * Output only. The modification time of the managed folder.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + com.google.protobuf.Timestamp getUpdateTime(); + + /** + * + * + *
+   * Output only. The modification time of the managed folder.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/PauseAnywhereCacheRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/PauseAnywhereCacheRequest.java new file mode 100644 index 000000000000..948fad7f193f --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/PauseAnywhereCacheRequest.java @@ -0,0 +1,835 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for PauseAnywhereCache.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.PauseAnywhereCacheRequest} + */ +@com.google.protobuf.Generated +public final class PauseAnywhereCacheRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.PauseAnywhereCacheRequest) + PauseAnywhereCacheRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PauseAnywhereCacheRequest"); + } + + // Use PauseAnywhereCacheRequest.newBuilder() to construct. + private PauseAnywhereCacheRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PauseAnywhereCacheRequest() { + name_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_PauseAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_PauseAnywhereCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.PauseAnywhereCacheRequest.class, + com.google.storage.control.v2.PauseAnywhereCacheRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.PauseAnywhereCacheRequest)) { 
+ return super.equals(obj); + } + com.google.storage.control.v2.PauseAnywhereCacheRequest other = + (com.google.storage.control.v2.PauseAnywhereCacheRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.PauseAnywhereCacheRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.PauseAnywhereCacheRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.PauseAnywhereCacheRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.PauseAnywhereCacheRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.PauseAnywhereCacheRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.PauseAnywhereCacheRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.PauseAnywhereCacheRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.PauseAnywhereCacheRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.PauseAnywhereCacheRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.PauseAnywhereCacheRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.PauseAnywhereCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.PauseAnywhereCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.PauseAnywhereCacheRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for PauseAnywhereCache.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.PauseAnywhereCacheRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.PauseAnywhereCacheRequest) + com.google.storage.control.v2.PauseAnywhereCacheRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_PauseAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_PauseAnywhereCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.PauseAnywhereCacheRequest.class, + com.google.storage.control.v2.PauseAnywhereCacheRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.PauseAnywhereCacheRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_PauseAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.PauseAnywhereCacheRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.PauseAnywhereCacheRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.PauseAnywhereCacheRequest build() { + 
com.google.storage.control.v2.PauseAnywhereCacheRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.PauseAnywhereCacheRequest buildPartial() { + com.google.storage.control.v2.PauseAnywhereCacheRequest result = + new com.google.storage.control.v2.PauseAnywhereCacheRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.PauseAnywhereCacheRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.requestId_ = requestId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.PauseAnywhereCacheRequest) { + return mergeFrom((com.google.storage.control.v2.PauseAnywhereCacheRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.PauseAnywhereCacheRequest other) { + if (other == com.google.storage.control.v2.PauseAnywhereCacheRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new 
java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.PauseAnywhereCacheRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.PauseAnywhereCacheRequest) + private static final com.google.storage.control.v2.PauseAnywhereCacheRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.PauseAnywhereCacheRequest(); + } + + public static com.google.storage.control.v2.PauseAnywhereCacheRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PauseAnywhereCacheRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.PauseAnywhereCacheRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/PauseAnywhereCacheRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/PauseAnywhereCacheRequestOrBuilder.java new file mode 100644 index 000000000000..73c768e92d4d --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/PauseAnywhereCacheRequestOrBuilder.java @@ -0,0 +1,94 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface PauseAnywhereCacheRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.PauseAnywhereCacheRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/PendingRenameInfo.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/PendingRenameInfo.java new file mode 100644 index 000000000000..1495292de563 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/PendingRenameInfo.java @@ -0,0 +1,596 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Contains information about a pending rename operation.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.PendingRenameInfo} + */ +@com.google.protobuf.Generated +public final class PendingRenameInfo extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.PendingRenameInfo) + PendingRenameInfoOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PendingRenameInfo"); + } + + // Use PendingRenameInfo.newBuilder() to construct. + private PendingRenameInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PendingRenameInfo() { + operation_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_PendingRenameInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_PendingRenameInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.PendingRenameInfo.class, + com.google.storage.control.v2.PendingRenameInfo.Builder.class); + } + + public static final int OPERATION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object operation_ = ""; + + /** + * + * + *
+   * Output only. The name of the rename operation.
+   * 
+ * + * string operation = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The operation. + */ + @java.lang.Override + public java.lang.String getOperation() { + java.lang.Object ref = operation_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operation_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The name of the rename operation.
+   * 
+ * + * string operation = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for operation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOperationBytes() { + java.lang.Object ref = operation_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operation_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(operation_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, operation_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(operation_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, operation_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.PendingRenameInfo)) { + return super.equals(obj); + } + com.google.storage.control.v2.PendingRenameInfo other = + (com.google.storage.control.v2.PendingRenameInfo) obj; + + if (!getOperation().equals(other.getOperation())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + 
public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + OPERATION_FIELD_NUMBER; + hash = (53 * hash) + getOperation().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.PendingRenameInfo parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.PendingRenameInfo parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.PendingRenameInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.PendingRenameInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.PendingRenameInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.PendingRenameInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.PendingRenameInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.PendingRenameInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.PendingRenameInfo parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.PendingRenameInfo parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.PendingRenameInfo parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.PendingRenameInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.control.v2.PendingRenameInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Contains information about a pending rename operation.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.PendingRenameInfo} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.PendingRenameInfo) + com.google.storage.control.v2.PendingRenameInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_PendingRenameInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_PendingRenameInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.PendingRenameInfo.class, + com.google.storage.control.v2.PendingRenameInfo.Builder.class); + } + + // Construct using com.google.storage.control.v2.PendingRenameInfo.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + operation_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_PendingRenameInfo_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.PendingRenameInfo getDefaultInstanceForType() { + return com.google.storage.control.v2.PendingRenameInfo.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.PendingRenameInfo build() { + com.google.storage.control.v2.PendingRenameInfo result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.PendingRenameInfo buildPartial() { + com.google.storage.control.v2.PendingRenameInfo result = + new com.google.storage.control.v2.PendingRenameInfo(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.PendingRenameInfo result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.operation_ = operation_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.PendingRenameInfo) { + return mergeFrom((com.google.storage.control.v2.PendingRenameInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.PendingRenameInfo other) { + if (other == com.google.storage.control.v2.PendingRenameInfo.getDefaultInstance()) + return this; + if (!other.getOperation().isEmpty()) { + operation_ = other.operation_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + operation_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup 
tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object operation_ = ""; + + /** + * + * + *
+     * Output only. The name of the rename operation.
+     * 
+ * + * string operation = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The operation. + */ + public java.lang.String getOperation() { + java.lang.Object ref = operation_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operation_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. The name of the rename operation.
+     * 
+ * + * string operation = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for operation. + */ + public com.google.protobuf.ByteString getOperationBytes() { + java.lang.Object ref = operation_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operation_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. The name of the rename operation.
+     * 
+ * + * string operation = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The operation to set. + * @return This builder for chaining. + */ + public Builder setOperation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + operation_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The name of the rename operation.
+     * 
+ * + * string operation = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearOperation() { + operation_ = getDefaultInstance().getOperation(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The name of the rename operation.
+     * 
+ * + * string operation = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for operation to set. + * @return This builder for chaining. + */ + public Builder setOperationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + operation_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.PendingRenameInfo) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.PendingRenameInfo) + private static final com.google.storage.control.v2.PendingRenameInfo DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.PendingRenameInfo(); + } + + public static com.google.storage.control.v2.PendingRenameInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PendingRenameInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + 
} + + @java.lang.Override + public com.google.storage.control.v2.PendingRenameInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/PendingRenameInfoOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/PendingRenameInfoOrBuilder.java new file mode 100644 index 000000000000..1eaeacd0ce8a --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/PendingRenameInfoOrBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface PendingRenameInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.PendingRenameInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. The name of the rename operation.
+   * 
+ * + * string operation = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The operation. + */ + java.lang.String getOperation(); + + /** + * + * + *
+   * Output only. The name of the rename operation.
+   * 
+ * + * string operation = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for operation. + */ + com.google.protobuf.ByteString getOperationBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/RenameFolderMetadata.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/RenameFolderMetadata.java new file mode 100644 index 000000000000..364c7f88ab7b --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/RenameFolderMetadata.java @@ -0,0 +1,1097 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Message returned in the metadata field of the Operation resource for
+ * RenameFolder operations.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.RenameFolderMetadata} + */ +@com.google.protobuf.Generated +public final class RenameFolderMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.RenameFolderMetadata) + RenameFolderMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RenameFolderMetadata"); + } + + // Use RenameFolderMetadata.newBuilder() to construct. + private RenameFolderMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RenameFolderMetadata() { + sourceFolderId_ = ""; + destinationFolderId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_RenameFolderMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_RenameFolderMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.RenameFolderMetadata.class, + com.google.storage.control.v2.RenameFolderMetadata.Builder.class); + } + + private int bitField0_; + public static final int COMMON_METADATA_FIELD_NUMBER = 1; + private com.google.storage.control.v2.CommonLongRunningOperationMetadata commonMetadata_; + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return Whether the commonMetadata field is set. + */ + @java.lang.Override + public boolean hasCommonMetadata() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return The commonMetadata. + */ + @java.lang.Override + public com.google.storage.control.v2.CommonLongRunningOperationMetadata getCommonMetadata() { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + */ + @java.lang.Override + public com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder + getCommonMetadataOrBuilder() { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } + + public static final int SOURCE_FOLDER_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object sourceFolderId_ = ""; + + /** + * + * + *
+   * The path of the source folder.
+   * 
+ * + * string source_folder_id = 2; + * + * @return The sourceFolderId. + */ + @java.lang.Override + public java.lang.String getSourceFolderId() { + java.lang.Object ref = sourceFolderId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceFolderId_ = s; + return s; + } + } + + /** + * + * + *
+   * The path of the source folder.
+   * 
+ * + * string source_folder_id = 2; + * + * @return The bytes for sourceFolderId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSourceFolderIdBytes() { + java.lang.Object ref = sourceFolderId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceFolderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DESTINATION_FOLDER_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object destinationFolderId_ = ""; + + /** + * + * + *
+   * The path of the destination folder.
+   * 
+ * + * string destination_folder_id = 3; + * + * @return The destinationFolderId. + */ + @java.lang.Override + public java.lang.String getDestinationFolderId() { + java.lang.Object ref = destinationFolderId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationFolderId_ = s; + return s; + } + } + + /** + * + * + *
+   * The path of the destination folder.
+   * 
+ * + * string destination_folder_id = 3; + * + * @return The bytes for destinationFolderId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDestinationFolderIdBytes() { + java.lang.Object ref = destinationFolderId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationFolderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getCommonMetadata()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceFolderId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, sourceFolderId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationFolderId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, destinationFolderId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCommonMetadata()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceFolderId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, sourceFolderId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationFolderId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, destinationFolderId_); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.RenameFolderMetadata)) { + return super.equals(obj); + } + com.google.storage.control.v2.RenameFolderMetadata other = + (com.google.storage.control.v2.RenameFolderMetadata) obj; + + if (hasCommonMetadata() != other.hasCommonMetadata()) return false; + if (hasCommonMetadata()) { + if (!getCommonMetadata().equals(other.getCommonMetadata())) return false; + } + if (!getSourceFolderId().equals(other.getSourceFolderId())) return false; + if (!getDestinationFolderId().equals(other.getDestinationFolderId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasCommonMetadata()) { + hash = (37 * hash) + COMMON_METADATA_FIELD_NUMBER; + hash = (53 * hash) + getCommonMetadata().hashCode(); + } + hash = (37 * hash) + SOURCE_FOLDER_ID_FIELD_NUMBER; + hash = (53 * hash) + getSourceFolderId().hashCode(); + hash = (37 * hash) + DESTINATION_FOLDER_ID_FIELD_NUMBER; + hash = (53 * hash) + getDestinationFolderId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.RenameFolderMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.RenameFolderMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.storage.control.v2.RenameFolderMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.RenameFolderMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.RenameFolderMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.RenameFolderMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.RenameFolderMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.RenameFolderMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.RenameFolderMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.RenameFolderMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.RenameFolderMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.RenameFolderMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.control.v2.RenameFolderMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Message returned in the metadata field of the Operation resource for
+   * RenameFolder operations.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.RenameFolderMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.RenameFolderMetadata) + com.google.storage.control.v2.RenameFolderMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_RenameFolderMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_RenameFolderMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.RenameFolderMetadata.class, + com.google.storage.control.v2.RenameFolderMetadata.Builder.class); + } + + // Construct using com.google.storage.control.v2.RenameFolderMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCommonMetadataFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + commonMetadata_ = null; + if (commonMetadataBuilder_ != null) { + commonMetadataBuilder_.dispose(); + commonMetadataBuilder_ = null; + } + sourceFolderId_ = ""; + destinationFolderId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + 
.internal_static_google_storage_control_v2_RenameFolderMetadata_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.RenameFolderMetadata getDefaultInstanceForType() { + return com.google.storage.control.v2.RenameFolderMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.RenameFolderMetadata build() { + com.google.storage.control.v2.RenameFolderMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.RenameFolderMetadata buildPartial() { + com.google.storage.control.v2.RenameFolderMetadata result = + new com.google.storage.control.v2.RenameFolderMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.RenameFolderMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.commonMetadata_ = + commonMetadataBuilder_ == null ? 
commonMetadata_ : commonMetadataBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.sourceFolderId_ = sourceFolderId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.destinationFolderId_ = destinationFolderId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.RenameFolderMetadata) { + return mergeFrom((com.google.storage.control.v2.RenameFolderMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.RenameFolderMetadata other) { + if (other == com.google.storage.control.v2.RenameFolderMetadata.getDefaultInstance()) + return this; + if (other.hasCommonMetadata()) { + mergeCommonMetadata(other.getCommonMetadata()); + } + if (!other.getSourceFolderId().isEmpty()) { + sourceFolderId_ = other.sourceFolderId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getDestinationFolderId().isEmpty()) { + destinationFolderId_ = other.destinationFolderId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetCommonMetadataFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + sourceFolderId_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + destinationFolderId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.control.v2.CommonLongRunningOperationMetadata commonMetadata_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.CommonLongRunningOperationMetadata, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder, + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder> + commonMetadataBuilder_; + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * + * @return Whether the commonMetadata field is set. + */ + public boolean hasCommonMetadata() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * + * @return The commonMetadata. + */ + public com.google.storage.control.v2.CommonLongRunningOperationMetadata getCommonMetadata() { + if (commonMetadataBuilder_ == null) { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } else { + return commonMetadataBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder setCommonMetadata( + com.google.storage.control.v2.CommonLongRunningOperationMetadata value) { + if (commonMetadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonMetadata_ = value; + } else { + commonMetadataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder setCommonMetadata( + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder builderForValue) { + if (commonMetadataBuilder_ == null) { + commonMetadata_ = builderForValue.build(); + } else { + commonMetadataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder mergeCommonMetadata( + com.google.storage.control.v2.CommonLongRunningOperationMetadata value) { + if (commonMetadataBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && commonMetadata_ != null + && commonMetadata_ + != com.google.storage.control.v2.CommonLongRunningOperationMetadata + .getDefaultInstance()) { + getCommonMetadataBuilder().mergeFrom(value); + } else { + commonMetadata_ = value; + } + } else { + commonMetadataBuilder_.mergeFrom(value); + } + if (commonMetadata_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder clearCommonMetadata() { + bitField0_ = (bitField0_ & ~0x00000001); + commonMetadata_ = null; + if (commonMetadataBuilder_ != null) { + commonMetadataBuilder_.dispose(); + commonMetadataBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder + getCommonMetadataBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetCommonMetadataFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder + getCommonMetadataOrBuilder() { + if (commonMetadataBuilder_ != null) { + return commonMetadataBuilder_.getMessageOrBuilder(); + } else { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.CommonLongRunningOperationMetadata, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder, + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder> + internalGetCommonMetadataFieldBuilder() { + if (commonMetadataBuilder_ == null) { + commonMetadataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.CommonLongRunningOperationMetadata, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder, + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder>( + getCommonMetadata(), getParentForChildren(), isClean()); + commonMetadata_ = null; + } + return commonMetadataBuilder_; + } + + private java.lang.Object sourceFolderId_ = ""; + + /** + * + * + *
+     * The path of the source folder.
+     * 
+ * + * string source_folder_id = 2; + * + * @return The sourceFolderId. + */ + public java.lang.String getSourceFolderId() { + java.lang.Object ref = sourceFolderId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceFolderId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The path of the source folder.
+     * 
+ * + * string source_folder_id = 2; + * + * @return The bytes for sourceFolderId. + */ + public com.google.protobuf.ByteString getSourceFolderIdBytes() { + java.lang.Object ref = sourceFolderId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceFolderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The path of the source folder.
+     * 
+ * + * string source_folder_id = 2; + * + * @param value The sourceFolderId to set. + * @return This builder for chaining. + */ + public Builder setSourceFolderId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sourceFolderId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The path of the source folder.
+     * 
+ * + * string source_folder_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearSourceFolderId() { + sourceFolderId_ = getDefaultInstance().getSourceFolderId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * The path of the source folder.
+     * 
+ * + * string source_folder_id = 2; + * + * @param value The bytes for sourceFolderId to set. + * @return This builder for chaining. + */ + public Builder setSourceFolderIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sourceFolderId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object destinationFolderId_ = ""; + + /** + * + * + *
+     * The path of the destination folder.
+     * 
+ * + * string destination_folder_id = 3; + * + * @return The destinationFolderId. + */ + public java.lang.String getDestinationFolderId() { + java.lang.Object ref = destinationFolderId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationFolderId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The path of the destination folder.
+     * 
+ * + * string destination_folder_id = 3; + * + * @return The bytes for destinationFolderId. + */ + public com.google.protobuf.ByteString getDestinationFolderIdBytes() { + java.lang.Object ref = destinationFolderId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationFolderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The path of the destination folder.
+     * 
+ * + * string destination_folder_id = 3; + * + * @param value The destinationFolderId to set. + * @return This builder for chaining. + */ + public Builder setDestinationFolderId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + destinationFolderId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * The path of the destination folder.
+     * 
+ * + * string destination_folder_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearDestinationFolderId() { + destinationFolderId_ = getDefaultInstance().getDestinationFolderId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * The path of the destination folder.
+     * 
+ * + * string destination_folder_id = 3; + * + * @param value The bytes for destinationFolderId to set. + * @return This builder for chaining. + */ + public Builder setDestinationFolderIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + destinationFolderId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.RenameFolderMetadata) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.RenameFolderMetadata) + private static final com.google.storage.control.v2.RenameFolderMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.RenameFolderMetadata(); + } + + public static com.google.storage.control.v2.RenameFolderMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RenameFolderMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + 
return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.RenameFolderMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/RenameFolderMetadataOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/RenameFolderMetadataOrBuilder.java new file mode 100644 index 000000000000..2e2e656d9820 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/RenameFolderMetadataOrBuilder.java @@ -0,0 +1,118 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface RenameFolderMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.RenameFolderMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return Whether the commonMetadata field is set. + */ + boolean hasCommonMetadata(); + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return The commonMetadata. + */ + com.google.storage.control.v2.CommonLongRunningOperationMetadata getCommonMetadata(); + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + */ + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder + getCommonMetadataOrBuilder(); + + /** + * + * + *
+   * The path of the source folder.
+   * 
+ * + * string source_folder_id = 2; + * + * @return The sourceFolderId. + */ + java.lang.String getSourceFolderId(); + + /** + * + * + *
+   * The path of the source folder.
+   * 
+ * + * string source_folder_id = 2; + * + * @return The bytes for sourceFolderId. + */ + com.google.protobuf.ByteString getSourceFolderIdBytes(); + + /** + * + * + *
+   * The path of the destination folder.
+   * 
+ * + * string destination_folder_id = 3; + * + * @return The destinationFolderId. + */ + java.lang.String getDestinationFolderId(); + + /** + * + * + *
+   * The path of the destination folder.
+   * 
+ * + * string destination_folder_id = 3; + * + * @return The bytes for destinationFolderId. + */ + com.google.protobuf.ByteString getDestinationFolderIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/RenameFolderRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/RenameFolderRequest.java new file mode 100644 index 000000000000..91c367f406d6 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/RenameFolderRequest.java @@ -0,0 +1,1310 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for RenameFolder. This operation is only applicable to a
+ * hierarchical namespace enabled bucket.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.RenameFolderRequest} + */ +@com.google.protobuf.Generated +public final class RenameFolderRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.RenameFolderRequest) + RenameFolderRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RenameFolderRequest"); + } + + // Use RenameFolderRequest.newBuilder() to construct. + private RenameFolderRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RenameFolderRequest() { + name_ = ""; + destinationFolderId_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_RenameFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_RenameFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.RenameFolderRequest.class, + com.google.storage.control.v2.RenameFolderRequest.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of the source folder being renamed.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the source folder being renamed.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DESTINATION_FOLDER_ID_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private volatile java.lang.Object destinationFolderId_ = ""; + + /** + * + * + *
+   * Required. The destination folder ID, e.g. `foo/bar/`.
+   * 
+ * + * string destination_folder_id = 8 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The destinationFolderId. + */ + @java.lang.Override + public java.lang.String getDestinationFolderId() { + java.lang.Object ref = destinationFolderId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationFolderId_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The destination folder ID, e.g. `foo/bar/`.
+   * 
+ * + * string destination_folder_id = 8 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for destinationFolderId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDestinationFolderIdBytes() { + java.lang.Object ref = destinationFolderId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationFolderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 4; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the source
+   * folder's current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the source
+   * folder's current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 5; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the source
+   * folder's current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the source
+   * folder's current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int REQUEST_ID_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(4, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(5, ifMetagenerationNotMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, requestId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 7, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationFolderId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 8, destinationFolderId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += 
com.google.protobuf.CodedOutputStream.computeInt64Size(5, ifMetagenerationNotMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, requestId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(7, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationFolderId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(8, destinationFolderId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.RenameFolderRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.RenameFolderRequest other = + (com.google.storage.control.v2.RenameFolderRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getDestinationFolderId().equals(other.getDestinationFolderId())) return false; + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + 
getName().hashCode(); + hash = (37 * hash) + DESTINATION_FOLDER_ID_FIELD_NUMBER; + hash = (53 * hash) + getDestinationFolderId().hashCode(); + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.RenameFolderRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.RenameFolderRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.RenameFolderRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.RenameFolderRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.RenameFolderRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.RenameFolderRequest 
parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.RenameFolderRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.RenameFolderRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.RenameFolderRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.RenameFolderRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.RenameFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.RenameFolderRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder 
newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.control.v2.RenameFolderRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for RenameFolder. This operation is only applicable to a
+   * hierarchical namespace enabled bucket.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.RenameFolderRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.RenameFolderRequest) + com.google.storage.control.v2.RenameFolderRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_RenameFolderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_RenameFolderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.RenameFolderRequest.class, + com.google.storage.control.v2.RenameFolderRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.RenameFolderRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + destinationFolderId_ = ""; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_RenameFolderRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.RenameFolderRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.RenameFolderRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.RenameFolderRequest build() { + 
com.google.storage.control.v2.RenameFolderRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.RenameFolderRequest buildPartial() { + com.google.storage.control.v2.RenameFolderRequest result = + new com.google.storage.control.v2.RenameFolderRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.RenameFolderRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.destinationFolderId_ = destinationFolderId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.RenameFolderRequest) { + return mergeFrom((com.google.storage.control.v2.RenameFolderRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.RenameFolderRequest other) { + if (other == com.google.storage.control.v2.RenameFolderRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getDestinationFolderId().isEmpty()) { + destinationFolderId_ = other.destinationFolderId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if 
(other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000010; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 32: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 32 + case 40: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 40 + case 50: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 50 + case 58: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 58 + case 66: + { + destinationFolderId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 66 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of the source folder being renamed.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the source folder being renamed.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the source folder being renamed.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the source folder being renamed.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the source folder being renamed.
+     * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+     * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object destinationFolderId_ = ""; + + /** + * + * + *
+     * Required. The destination folder ID, e.g. `foo/bar/`.
+     * 
+ * + * string destination_folder_id = 8 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The destinationFolderId. + */ + public java.lang.String getDestinationFolderId() { + java.lang.Object ref = destinationFolderId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationFolderId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The destination folder ID, e.g. `foo/bar/`.
+     * 
+ * + * string destination_folder_id = 8 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for destinationFolderId. + */ + public com.google.protobuf.ByteString getDestinationFolderIdBytes() { + java.lang.Object ref = destinationFolderId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationFolderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The destination folder ID, e.g. `foo/bar/`.
+     * 
+ * + * string destination_folder_id = 8 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The destinationFolderId to set. + * @return This builder for chaining. + */ + public Builder setDestinationFolderId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + destinationFolderId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The destination folder ID, e.g. `foo/bar/`.
+     * 
+ * + * string destination_folder_id = 8 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearDestinationFolderId() { + destinationFolderId_ = getDefaultInstance().getDestinationFolderId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The destination folder ID, e.g. `foo/bar/`.
+     * 
+ * + * string destination_folder_id = 8 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for destinationFolderId to set. + * @return This builder for chaining. + */ + public Builder setDestinationFolderIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + destinationFolderId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the source
+     * folder's current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the source
+     * folder's current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the source
+     * folder's current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the source
+     * folder's current metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000004); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the source
+     * folder's current metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the source
+     * folder's current metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the source
+     * folder's current metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation only succeed conditional on whether the source
+     * folder's current metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000008); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.RenameFolderRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.RenameFolderRequest) + private static final com.google.storage.control.v2.RenameFolderRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.RenameFolderRequest(); + } + + public static com.google.storage.control.v2.RenameFolderRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RenameFolderRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.RenameFolderRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/RenameFolderRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/RenameFolderRequestOrBuilder.java new file mode 100644 index 000000000000..f091818d4855 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/RenameFolderRequestOrBuilder.java @@ -0,0 +1,176 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface RenameFolderRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.RenameFolderRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the source folder being renamed.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of the source folder being renamed.
+   * Format: `projects/{project}/buckets/{bucket}/folders/{folder}`
+   * 
+ * + * + * string name = 7 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Required. The destination folder ID, e.g. `foo/bar/`.
+   * 
+ * + * string destination_folder_id = 8 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The destinationFolderId. + */ + java.lang.String getDestinationFolderId(); + + /** + * + * + *
+   * Required. The destination folder ID, e.g. `foo/bar/`.
+   * 
+ * + * string destination_folder_id = 8 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for destinationFolderId. + */ + com.google.protobuf.ByteString getDestinationFolderIdBytes(); + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the source
+   * folder's current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the source
+   * folder's current metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the source
+   * folder's current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation only succeed conditional on whether the source
+   * folder's current metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ResumeAnywhereCacheRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ResumeAnywhereCacheRequest.java new file mode 100644 index 000000000000..d84a461e8d95 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ResumeAnywhereCacheRequest.java @@ -0,0 +1,835 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for ResumeAnywhereCache.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.ResumeAnywhereCacheRequest} + */ +@com.google.protobuf.Generated +public final class ResumeAnywhereCacheRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.ResumeAnywhereCacheRequest) + ResumeAnywhereCacheRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ResumeAnywhereCacheRequest"); + } + + // Use ResumeAnywhereCacheRequest.newBuilder() to construct. + private ResumeAnywhereCacheRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ResumeAnywhereCacheRequest() { + name_ = ""; + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ResumeAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ResumeAnywhereCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ResumeAnywhereCacheRequest.class, + com.google.storage.control.v2.ResumeAnywhereCacheRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.ResumeAnywhereCacheRequest)) { 
+ return super.equals(obj); + } + com.google.storage.control.v2.ResumeAnywhereCacheRequest other = + (com.google.storage.control.v2.ResumeAnywhereCacheRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.ResumeAnywhereCacheRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ResumeAnywhereCacheRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ResumeAnywhereCacheRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ResumeAnywhereCacheRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ResumeAnywhereCacheRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.ResumeAnywhereCacheRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.ResumeAnywhereCacheRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ResumeAnywhereCacheRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ResumeAnywhereCacheRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ResumeAnywhereCacheRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.ResumeAnywhereCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.ResumeAnywhereCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.ResumeAnywhereCacheRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for ResumeAnywhereCache.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.ResumeAnywhereCacheRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.ResumeAnywhereCacheRequest) + com.google.storage.control.v2.ResumeAnywhereCacheRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ResumeAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ResumeAnywhereCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.ResumeAnywhereCacheRequest.class, + com.google.storage.control.v2.ResumeAnywhereCacheRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.ResumeAnywhereCacheRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_ResumeAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.ResumeAnywhereCacheRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.ResumeAnywhereCacheRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.ResumeAnywhereCacheRequest build() { + 
com.google.storage.control.v2.ResumeAnywhereCacheRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.ResumeAnywhereCacheRequest buildPartial() { + com.google.storage.control.v2.ResumeAnywhereCacheRequest result = + new com.google.storage.control.v2.ResumeAnywhereCacheRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.ResumeAnywhereCacheRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.requestId_ = requestId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.ResumeAnywhereCacheRequest) { + return mergeFrom((com.google.storage.control.v2.ResumeAnywhereCacheRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.ResumeAnywhereCacheRequest other) { + if (other == com.google.storage.control.v2.ResumeAnywhereCacheRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new 
java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name field in the request should be:
+     * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.ResumeAnywhereCacheRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.ResumeAnywhereCacheRequest) + private static final com.google.storage.control.v2.ResumeAnywhereCacheRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.ResumeAnywhereCacheRequest(); + } + + public static com.google.storage.control.v2.ResumeAnywhereCacheRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ResumeAnywhereCacheRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.ResumeAnywhereCacheRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ResumeAnywhereCacheRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ResumeAnywhereCacheRequestOrBuilder.java new file mode 100644 index 000000000000..a8f8b408a211 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/ResumeAnywhereCacheRequestOrBuilder.java @@ -0,0 +1,94 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface ResumeAnywhereCacheRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.ResumeAnywhereCacheRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name field in the request should be:
+   * `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}`
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageControlProto.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageControlProto.java new file mode 100644 index 000000000000..91ed0a060091 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageControlProto.java @@ -0,0 +1,1113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public final class StorageControlProto extends com.google.protobuf.GeneratedFile { + private StorageControlProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "StorageControlProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_PendingRenameInfo_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_PendingRenameInfo_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_Folder_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_Folder_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_GetFolderRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_GetFolderRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_CreateFolderRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_CreateFolderRequest_fieldAccessorTable; + static final 
com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_DeleteFolderRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_DeleteFolderRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_ListFoldersRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_ListFoldersRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_ListFoldersResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_ListFoldersResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_RenameFolderRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_RenameFolderRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_DeleteFolderRecursiveRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_DeleteFolderRecursiveRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_CommonLongRunningOperationMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_CommonLongRunningOperationMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_RenameFolderMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_storage_control_v2_RenameFolderMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_DeleteFolderRecursiveMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_DeleteFolderRecursiveMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_StorageLayout_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_StorageLayout_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_StorageLayout_CustomPlacementConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_StorageLayout_CustomPlacementConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_StorageLayout_HierarchicalNamespace_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_StorageLayout_HierarchicalNamespace_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_GetStorageLayoutRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_GetStorageLayoutRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_ManagedFolder_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_ManagedFolder_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_storage_control_v2_GetManagedFolderRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_GetManagedFolderRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_CreateManagedFolderRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_CreateManagedFolderRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_DeleteManagedFolderRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_DeleteManagedFolderRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_ListManagedFoldersRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_ListManagedFoldersRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_ListManagedFoldersResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_ListManagedFoldersResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_CreateAnywhereCacheMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_CreateAnywhereCacheMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_UpdateAnywhereCacheMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_storage_control_v2_UpdateAnywhereCacheMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_AnywhereCache_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_AnywhereCache_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_CreateAnywhereCacheRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_CreateAnywhereCacheRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_UpdateAnywhereCacheRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_UpdateAnywhereCacheRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_DisableAnywhereCacheRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_DisableAnywhereCacheRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_PauseAnywhereCacheRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_PauseAnywhereCacheRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_ResumeAnywhereCacheRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_ResumeAnywhereCacheRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_storage_control_v2_GetAnywhereCacheRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_GetAnywhereCacheRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_ListAnywhereCachesRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_ListAnywhereCachesRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_ListAnywhereCachesResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_ListAnywhereCachesResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_IntelligenceConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_IntelligenceConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageLocations_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageLocations_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageBuckets_descriptor; + static final 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageBuckets_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_IntelligenceConfig_EffectiveIntelligenceConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_IntelligenceConfig_EffectiveIntelligenceConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_IntelligenceConfig_TrialConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_IntelligenceConfig_TrialConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_UpdateOrganizationIntelligenceConfigRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_UpdateOrganizationIntelligenceConfigRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_UpdateFolderIntelligenceConfigRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_UpdateFolderIntelligenceConfigRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_UpdateProjectIntelligenceConfigRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_UpdateProjectIntelligenceConfigRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_GetOrganizationIntelligenceConfigRequest_descriptor; + static final 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_GetOrganizationIntelligenceConfigRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_GetFolderIntelligenceConfigRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_GetFolderIntelligenceConfigRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_control_v2_GetProjectIntelligenceConfigRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_control_v2_GetProjectIntelligenceConfigRequest_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "/google/storage/control/v2/storage_cont" + + "rol.proto\022\031google.storage.control.v2\032\034go" + + "ogle/api/annotations.proto\032\027google/api/c" + + "lient.proto\032\037google/api/field_behavior.p" + + "roto\032\033google/api/field_info.proto\032\031googl" + + "e/api/resource.proto\032\030google/api/routing" + + ".proto\032\036google/iam/v1/iam_policy.proto\032\032" + + "google/iam/v1/policy.proto\032#google/longr" + + "unning/operations.proto\032\036google/protobuf" + + "/duration.proto\032\033google/protobuf/empty.proto\032" + + " google/protobuf/field_mask.proto\032\037google/protobuf/timestamp.proto\"+\n" + + "\021PendingRenameInfo\022\026\n" + + "\toperation\030\001 \001(\tB\003\340A\003\"\342\002\n" + + "\006Folder\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\010\022\033\n" + + "\016metageneration\030\003 \001(\003B\003\340A\003\0224\n" + + "\013create_time\030\004 
\001(\0132\032.google.protobuf.TimestampB\003\340A\003\0224\n" + + "\013update_time\030\005 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022N\n" + + "\023pending_rename_info\030\007 \001(\0132,.googl" + + "e.storage.control.v2.PendingRenameInfoB\003\340A\003:l\352Ai\n" + + "\035storage.googleapis.com/Folder\022" + + "7projects/{project}/buckets/{bucket}/folders/{folder=**}*\007folders2\006folder\"\364\001\n" + + "\020GetFolderRequest\0223\n" + + "\004name\030\006 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Folder\022$\n" + + "\027if_metageneration_match\030\003 \001(\003H\000\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\004 \001(\003H\001\210\001\001\022\037\n\n" + + "request_id\030\005 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001B\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_match\"\325\001\n" + + "\023CreateFolderRequest\0225\n" + + "\006parent\030\001 \001(\t" + + "B%\340A\002\372A\037\022\035storage.googleapis.com/Folder\0226\n" + + "\006folder\030\002" + + " \001(\0132!.google.storage.control.v2.FolderB\003\340A\002\022\026\n" + + "\tfolder_id\030\003 \001(\tB\003\340A\002\022\026\n" + + "\trecursive\030\004 \001(\010B\003\340A\001\022\037\n\n" + + "request_id\030\005 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"\367\001\n" + + "\023DeleteFolderRequest\0223\n" + + "\004name\030\006 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Folder\022$\n" + + "\027if_metageneration_match\030\003 \001(\003H\000\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\004 \001(\003H\001\210\001\001\022\037\n\n" + + "request_id\030\005 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001B\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_match\"\214\002\n" + + "\022ListFoldersRequest\0225\n" + + "\006parent\030\001 \001(" + + "\tB%\340A\002\372A\037\022\035storage.googleapis.com/Folder\022\026\n" + + "\tpage_size\030\002 
\001(\005B\003\340A\001\022\027\n\n" + + "page_token\030\003 \001(\tB\003\340A\001\022\023\n" + + "\006prefix\030\004 \001(\tB\003\340A\001\022\026\n" + + "\tdelimiter\030\010 \001(\tB\003\340A\001\022 \n" + + "\023lexicographic_start\030\006 \001(\tB\003\340A\001\022\036\n" + + "\021lexicographic_end\030\007 \001(\tB\003\340A\001\022\037\n\n" + + "request_id\030\t \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"b\n" + + "\023ListFoldersResponse\0222\n" + + "\007folders\030\001 \003(\0132!.google.storage.control.v2.Folder\022\027\n" + + "\017next_page_token\030\002 \001(\t\"\233\002\n" + + "\023RenameFolderRequest\0223\n" + + "\004name\030\007 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Folder\022\"\n" + + "\025destination_folder_id\030\010 \001(\tB\003\340A\002\022$\n" + + "\027if_metageneration_match\030\004 \001(\003H\000\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\005 \001(\003H\001\210\001\001\022\037\n\n" + + "request_id\030\006 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001B\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_match\"\212\002\n" + + "\034DeleteFolderRecursiveRequest\0223\n" + + "\004name\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Folder\022)\n" + + "\027if_metageneration_match\030\002 \001(\003B\003\340A\001H\000\210\001\001\022-\n" + + "\033if_metageneration_not_match\030\003" + + " \001(\003B\003\340A\001H\001\210\001\001\022\037\n\n" + + "request_id\030\004 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001B\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_match\"\232\002\n" + + "\"CommonLongRunningOperationMetadata\0224\n" + + "\013create_time\030\001 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0221\n" + + "\010end_time\030\002 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0224\n" + + "\013update_time\030\003" + + " 
\001(\0132\032.google.protobuf.TimestampB\003\340A\003\022\021\n" + + "\004type\030\004 \001(\tB\003\340A\003\022#\n" + + "\026requested_cancellation\030\005 \001(\010B\003\340A\003\022\035\n" + + "\020progress_percent\030\006 \001(\005B\003\340A\003\"\247\001\n" + + "\024RenameFolderMetadata\022V\n" + + "\017common_metadata\030\001" + + " \001(\0132=.google.storage.control.v2.CommonLongRunningOperationMetadata\022\030\n" + + "\020source_folder_id\030\002 \001(\t\022\035\n" + + "\025destination_folder_id\030\003 \001(\t\"\212\001\n" + + "\035DeleteFolderRecursiveMetadata\022V\n" + + "\017common_metadata\030\001 \001(\0132=.google.stor" + + "age.control.v2.CommonLongRunningOperationMetadata\022\021\n" + + "\tfolder_id\030\002 \001(\t\"\370\003\n\r" + + "StorageLayout\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\003\022\025\n" + + "\010location\030\002 \001(\tB\003\340A\003\022\032\n\r" + + "location_type\030\003 \001(\tB\003\340A\003\022d\n" + + "\027custom_placement_config\030\004 \001(\0132>.google.s" + + "torage.control.v2.StorageLayout.CustomPlacementConfigB\003\340A\003\022c\n" + + "\026hierarchical_namespace\030\005 \001(\0132>.google.storage.control.v2.S" + + "torageLayout.HierarchicalNamespaceB\003\340A\003\032/\n" + + "\025CustomPlacementConfig\022\026\n" + + "\016data_locations\030\001 \003(\t\032(\n" + + "\025HierarchicalNamespace\022\017\n" + + "\007enabled\030\001 \001(\010:{\352Ax\n" + + "$storage.googleapis.com/StorageLayout\0221projects/{project}/bucket" + + "s/{bucket}/storageLayout*\016storageLayouts2\r" + + "storageLayout\"\206\001\n" + + "\027GetStorageLayoutRequest\022:\n" + + "\004name\030\001 \001(\tB,\340A\002\372A&\n" + + "$storage.googleapis.com/StorageLayout\022\016\n" + + "\006prefix\030\002 \001(\t\022\037\n\n" + + "request_id\030\003 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"\277\002\n\r" + + "ManagedFolder\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\010\022\033\n" + + "\016metageneration\030\003 
\001(\003B\003\340A\003\0224\n" + + "\013create_time\030\004 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0224\n" + + "\013update_time\030\005" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003:\221\001\352A\215\001\n" + + "$storage.googleapis.com/ManagedFolder\022Fprojects/{project}/buckets/" + + "{bucket}/managedFolders/{managed_folder=**}*\016managedFolders2\r" + + "managedFolder\"\202\002\n" + + "\027GetManagedFolderRequest\022:\n" + + "\004name\030\006 \001(\tB,\340A\002\372A&\n" + + "$storage.googleapis.com/ManagedFolder\022$\n" + + "\027if_metageneration_match\030\003 \001(\003H\000\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\004 \001(\003H\001\210\001\001\022\037\n\n" + + "request_id\030\005 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001B\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_match\"\342\001\n" + + "\032CreateManagedFolderRequest\022<\n" + + "\006parent\030\001 \001(" + + "\tB,\340A\002\372A&\022$storage.googleapis.com/ManagedFolder\022E\n" + + "\016managed_folder\030\002" + + " \001(\0132(.google.storage.control.v2.ManagedFolderB\003\340A\002\022\036\n" + + "\021managed_folder_id\030\003 \001(\tB\003\340A\002\022\037\n\n" + + "request_id\030\004 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"\236\002\n" + + "\032DeleteManagedFolderRequest\022:\n" + + "\004name\030\007 \001(\tB,\340A\002\372A&\n" + + "$storage.googleapis.com/ManagedFolder\022$\n" + + "\027if_metageneration_match\030\003 \001(\003H\000\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\004 \001(\003H\001\210\001\001\022\027\n" + + "\017allow_non_empty\030\005 \001(\010\022\037\n\n" + + "request_id\030\006 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001B\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_match\"\300\001\n" + + "\031ListManagedFoldersRequest\022<\n" + + "\006parent\030\001 \001(" + + 
"\tB,\340A\002\372A&\022$storage.googleapis.com/ManagedFolder\022\026\n" + + "\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\n" + + "page_token\030\003 \001(\tB\003\340A\001\022\023\n" + + "\006prefix\030\004 \001(\tB\003\340A\001\022\037\n\n" + + "request_id\030\005 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"x\n" + + "\032ListManagedFoldersResponse\022A\n" + + "\017managed_folders\030\001" + + " \003(\0132(.google.storage.control.v2.ManagedFolder\022\027\n" + + "\017next_page_token\030\002 \001(\t\"\260\002\n" + + "\033CreateAnywhereCacheMetadata\022V\n" + + "\017common_metadata\030\001 \001(\0132=.google.sto" + + "rage.control.v2.CommonLongRunningOperationMetadata\022\036\n" + + "\021anywhere_cache_id\030\002 \001(\tH\000\210\001\001\022\021\n" + + "\004zone\030\006 \001(\tH\001\210\001\001\022+\n" + + "\003ttl\030\003 \001(\0132\031.google.protobuf.DurationH\002\210\001\001\022\035\n" + + "\020admission_policy\030\005 \001(\tH\003\210\001\001B\024\n" + + "\022_anywhere_cache_idB\007\n" + + "\005_zoneB\006\n" + + "\004_ttlB\023\n" + + "\021_admission_policy\"\260\002\n" + + "\033UpdateAnywhereCacheMetadata\022V\n" + + "\017common_metadata\030\001" + + " \001(\0132=.google.storage.control.v2.CommonLongRunningOperationMetadata\022\036\n" + + "\021anywhere_cache_id\030\002 \001(\tH\000\210\001\001\022\021\n" + + "\004zone\030\005 \001(\tH\001\210\001\001\022+\n" + + "\003ttl\030\003 \001(\0132\031.google.protobuf.DurationH\002\210\001\001\022\035\n" + + "\020admission_policy\030\004 \001(\tH\003\210\001\001B\024\n" + + "\022_anywhere_cache_idB\007\n" + + "\005_zoneB\006\n" + + "\004_ttlB\023\n" + + "\021_admission_policy\"\245\003\n\r" + + "AnywhereCache\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\005\022\021\n" + + "\004zone\030\n" + + " \001(\tB\003\340A\005\022&\n" + + "\003ttl\030\003 \001(\0132\031.google.protobuf.Duration\022\030\n" + + "\020admission_policy\030\t \001(\t\022\022\n" + + "\005state\030\005 \001(\tB\003\340A\003\0224\n" 
+ + "\013create_time\030\006 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0224\n" + + "\013update_time\030\007" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022\033\n" + + "\016pending_update\030\010 \001(\010B\003\340A\003:\216\001\352A\212\001\n" + + "$storage.googleapis.com/AnywhereCache\022Cprojects/{" + + "project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}*\016anywhereCaches2\r" + + "anywhereCache\"\302\001\n" + + "\032CreateAnywhereCacheRequest\022<\n" + + "\006parent\030\001 \001(" + + "\tB,\340A\002\372A&\022$storage.googleapis.com/AnywhereCache\022E\n" + + "\016anywhere_cache\030\003" + + " \001(\0132(.google.storage.control.v2.AnywhereCacheB\003\340A\002\022\037\n\n" + + "request_id\030\004 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"\272\001\n" + + "\032UpdateAnywhereCacheRequest\022E\n" + + "\016anywhere_cache\030\001" + + " \001(\0132(.google.storage.control.v2.AnywhereCacheB\003\340A\002\0224\n" + + "\013update_mask\030\002" + + " \001(\0132\032.google.protobuf.FieldMaskB\003\340A\002\022\037\n\n" + + "request_id\030\003 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"z\n" + + "\033DisableAnywhereCacheRequest\022:\n" + + "\004name\030\001 \001(\tB,\340A\002\372A&\n" + + "$storage.googleapis.com/AnywhereCache\022\037\n\n" + + "request_id\030\002 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"x\n" + + "\031PauseAnywhereCacheRequest\022:\n" + + "\004name\030\001 \001(\tB,\340A\002\372A&\n" + + "$storage.googleapis.com/AnywhereCache\022\037\n\n" + + "request_id\030\002 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"y\n" + + "\032ResumeAnywhereCacheRequest\022:\n" + + "\004name\030\001 \001(\tB,\340A\002\372A&\n" + + "$storage.googleapis.com/AnywhereCache\022\037\n\n" + + "request_id\030\002 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"v\n" + + "\027GetAnywhereCacheRequest\022:\n" + + "\004name\030\001 \001(\tB,\340A\002\372A&\n" + + 
"$storage.googleapis.com/AnywhereCache\022\037\n\n" + + "request_id\030\002 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"\241\001\n" + + "\031ListAnywhereCachesRequest\022<\n" + + "\006parent\030\001 \001(" + + "\tB,\340A\002\372A&\022$storage.googleapis.com/AnywhereCache\022\021\n" + + "\tpage_size\030\002 \001(\005\022\022\n\n" + + "page_token\030\003 \001(\t\022\037\n\n" + + "request_id\030\004 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"x\n" + + "\032ListAnywhereCachesResponse\022A\n" + + "\017anywhere_caches\030\001" + + " \003(\0132(.google.storage.control.v2.AnywhereCache\022\027\n" + + "\017next_page_token\030\002 \001(\t\"\216\016\n" + + "\022IntelligenceConfig\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\010\022X\n" + + "\016edition_config\030\002 \001(\0162;.go" + + "ogle.storage.control.v2.IntelligenceConfig.EditionConfigB\003\340A\001\0224\n" + + "\013update_time\030\003 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022I\n" + + "\006filter\030\004" + + " \001(\01324.google.storage.control.v2.IntelligenceConfig.FilterB\003\340A\001\022u\n" + + "\035effective_intelligence_config\030\005 \001(\0132I.google.s" + + "torage.control.v2.IntelligenceConfig.EffectiveIntelligenceConfigB\003\340A\003\022O\n" + + "\014trial_config\030\007" + + " \001(\01329.google.storage.control.v2.IntelligenceConfig.TrialConfig\032\374\004\n" + + "\006Filter\022v\n" + + " included_cloud_storage_locations\030\001 " + + "\001(\0132J.google.storage.control.v2.Intellig" + + "enceConfig.Filter.CloudStorageLocationsH\000\022v\n" + + " excluded_cloud_storage_locations\030\002 " + + "\001(\0132J.google.storage.control.v2.Intellig" + + "enceConfig.Filter.CloudStorageLocationsH\000\022r\n" + + "\036included_cloud_storage_buckets\030\003 \001(" + + "\0132H.google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsH\001\022r\n" + + "\036excluded_cloud_storage_buckets\030\004 \001(\0132H." 
+ + "google.storage.control.v2.IntelligenceConfig.Filter.CloudStorageBucketsH\001\032/\n" + + "\025CloudStorageLocations\022\026\n" + + "\tlocations\030\001 \003(\tB\003\340A\001\0325\n" + + "\023CloudStorageBuckets\022\036\n" + + "\021bucket_id_regexes\030\001 \003(\tB\003\340A\001B\031\n" + + "\027cloud_storage_locationsB\027\n" + + "\025cloud_storage_buckets\032\212\002\n" + + "\033EffectiveIntelligenceConfig\022z\n" + + "\021effective_edition\030\001 \001(\0162Z.google.storage.control.v2.Int" + + "elligenceConfig.EffectiveIntelligenceConfig.EffectiveEditionB\003\340A\003\022 \n" + + "\023intelligence_config\030\002 \001(\tB\003\340A\003\"M\n" + + "\020EffectiveEdition\022!\n" + + "\035EFFECTIVE_EDITION_UNSPECIFIED\020\000\022\010\n" + + "\004NONE\020\001\022\014\n" + + "\010STANDARD\020\002\032C\n" + + "\013TrialConfig\0224\n" + + "\013expire_time\030\003 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\"c\n\r" + + "EditionConfig\022\036\n" + + "\032EDITION_CONFIG_UNSPECIFIED\020\000\022\013\n" + + "\007INHERIT\020\001\022\014\n" + + "\010DISABLED\020\002\022\014\n" + + "\010STANDARD\020\003\022\t\n" + + "\005TRIAL\020\005:\213\002\352A\207\002\n" + + ")storage.googleapis.com/IntelligenceConfig\0228" + + "folders/{folder}/locations/{location}/intelligenceConfig\022;organizations/{org}/lo" + + "cations/{location}/intelligenceConfig\022:projects/{project}/locations/{location}/i" + + "ntelligenceConfig*\023intelligenceConfigs2\022intelligenceConfig\"\325\001\n" + + "+UpdateOrganizationIntelligenceConfigRequest\022O\n" + + "\023intelligence_config\030\001" + + " \001(\0132-.google.storage.control.v2.IntelligenceConfigB\003\340A\002\0224\n" + + "\013update_mask\030\002" + + " \001(\0132\032.google.protobuf.FieldMaskB\003\340A\002\022\037\n\n" + + "request_id\030\003 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"\317\001\n" + + "%UpdateFolderIntelligenceConfigRequest\022O\n" + + "\023intelligence_config\030\001" + + " 
\001(\0132-.google.storage.control.v2.IntelligenceConfigB\003\340A\002\0224\n" + + "\013update_mask\030\002" + + " \001(\0132\032.google.protobuf.FieldMaskB\003\340A\002\022\037\n\n" + + "request_id\030\003 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"\320\001\n" + + "&UpdateProjectIntelligenceConfigRequest\022O\n" + + "\023intelligence_config\030\001 \001(\0132-.g" + + "oogle.storage.control.v2.IntelligenceConfigB\003\340A\002\0224\n" + + "\013update_mask\030\002" + + " \001(\0132\032.google.protobuf.FieldMaskB\003\340A\002\022\037\n\n" + + "request_id\030\003 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"k\n" + + "(GetOrganizationIntelligenceConfigRequest\022?\n" + + "\004name\030\001 \001(\tB1\340A\002\372A+\n" + + ")storage.googleapis.com/IntelligenceConfig\"e\n" + + "\"GetFolderIntelligenceConfigRequest\022?\n" + + "\004name\030\001 \001(\tB1\340A\002\372A+\n" + + ")storage.googleapis.com/IntelligenceConfig\"f\n" + + "#GetProjectIntelligenceConfigRequest\022?\n" + + "\004name\030\001 \001(\tB1\340A\002\372A+\n" + + ")storage.googleapis.com/IntelligenceConfig2\322,\n" + + "\016StorageControl\022\232\001\n" + + "\014CreateFolder\022..google.storage.control.v2.Crea" + + "teFolderRequest\032!.google.storage.control" + + ".v2.Folder\"7\332A\027parent,folder,folder_id\212\323\344\223\002\027\022\025\n" + + "\006parent\022\013{bucket=**}\022\217\001\n" + + "\014DeleteFolder\022..google.storage.control.v2.DeleteF" + + "olderRequest\032\026.google.protobuf.Empty\"7\332A\004name\212\323\344\223\002*\022(\n" + + "\004name\022 {bucket=projects/*/buckets/*}/**\022\224\001\n" + + "\tGetFolder\022+.google.storage.control.v2.GetFolderRequest\032!.googl" + + "e.storage.control.v2.Folder\"7\332A\004name\212\323\344\223\002*\022(\n" + + "\004name\022 {bucket=projects/*/buckets/*}/**\022\224\001\n" + + "\013ListFolders\022-.google.storage.control.v2.ListFoldersRequest\032..google.sto" + + 
"rage.control.v2.ListFoldersResponse\"&\332A\006parent\212\323\344\223\002\027\022\025\n" + + "\006parent\022\013{bucket=**}\022\315\001\n" + + "\014RenameFolder\022..google.storage.control.v2" + + ".RenameFolderRequest\032\035.google.longrunning.Operation\"n\312A\036\n" + + "\006Folder\022\024RenameFolderMe" + + "tadata\332A\032name,destination_folder_id\212\323\344\223\002*\022(\n" + + "\004name\022 {bucket=projects/*/buckets/*}/**\022\341\001\n" + + "\025DeleteFolderRecursive\0227.google.storage.control.v2.DeleteFolderRecursiveR" + + "equest\032\035.google.longrunning.Operation\"p\312A6\n" + + "\025google.protobuf.Empty\022\035DeleteFolderRecursiveMetadata\332A\004name\212\323\344\223\002*\022(\n" + + "\004name\022 {bucket=projects/*/buckets/*}/**\022\251\001\n" + + "\020GetStorageLayout\0222.google.storage.control.v2" + + ".GetStorageLayoutRequest\032(.google.storag" + + "e.control.v2.StorageLayout\"7\332A\004name\212\323\344\223\002*\022(\n" + + "\004name\022 {bucket=projects/*/buckets/*}/**\022\277\001\n" + + "\023CreateManagedFolder\0225.google.storage.control.v2.CreateManagedFolderReque" + + "st\032(.google.storage.control.v2.ManagedFo" + + "lder\"G\332A\'parent,managed_folder,managed_folder_id\212\323\344\223\002\027\022\025\n" + + "\006parent\022\013{bucket=**}\022\235\001\n" + + "\023DeleteManagedFolder\0225.google.storage.c" + + "ontrol.v2.DeleteManagedFolderRequest\032\026.google.protobuf.Empty\"7\332A\004name\212\323\344\223\002*\022(\n" + + "\004name\022 {bucket=projects/*/buckets/*}/**\022\251\001\n" + + "\020GetManagedFolder\0222.google.storage.cont" + + "rol.v2.GetManagedFolderRequest\032(.google." 
+ + "storage.control.v2.ManagedFolder\"7\332A\004name\212\323\344\223\002*\022(\n" + + "\004name\022 {bucket=projects/*/buckets/*}/**\022\251\001\n" + + "\022ListManagedFolders\0224.google.storage.control.v2.ListManagedFoldersR" + + "equest\0325.google.storage.control.v2.ListM" + + "anagedFoldersResponse\"&\332A\006parent\212\323\344\223\002\027\022\025\n" + + "\006parent\022\013{bucket=**}\022\321\001\n" + + "\023CreateAnywhereCache\0225.google.storage.control.v2.Create" + + "AnywhereCacheRequest\032\035.google.longrunning.Operation\"d\312A,\n\r" + + "AnywhereCache\022\033CreateA" + + "nywhereCacheMetadata\332A\025parent,anywhere_cache\212\323\344\223\002\027\022\025\n" + + "\006parent\022\013{bucket=**}\022\371\001\n" + + "\023UpdateAnywhereCache\0225.google.storage.contr" + + "ol.v2.UpdateAnywhereCacheRequest\032\035.google.longrunning.Operation\"\213\001\312A,\n\r" + + "AnywhereC" + + "ache\022\033UpdateAnywhereCacheMetadata\332A\032anywhere_cache,update_mask\212\323\344\223\0029\0227\n" + + "\023anywhere_cache.name\022 {bucket=projects/*/buckets/*}/**\022\261\001\n" + + "\024DisableAnywhereCache\0226.google.storage.control.v2.DisableAnywhereCacheR" + + "equest\032(.google.storage.control.v2.AnywhereCache\"7\332A\004name\212\323\344\223\002*\022(\n" + + "\004name\022 {bucket=projects/*/buckets/*}/**\022\255\001\n" + + "\022PauseAnywhereCache\0224.google.storage.control.v2.Pau" + + "seAnywhereCacheRequest\032(.google.storage." 
+ + "control.v2.AnywhereCache\"7\332A\004name\212\323\344\223\002*\022(\n" + + "\004name\022 {bucket=projects/*/buckets/*}/**\022\257\001\n" + + "\023ResumeAnywhereCache\0225.google.storage.control.v2.ResumeAnywhereCacheRequest" + + "\032(.google.storage.control.v2.AnywhereCache\"7\332A\004name\212\323\344\223\002*\022(\n" + + "\004name\022 {bucket=projects/*/buckets/*}/**\022\251\001\n" + + "\020GetAnywhereCache\0222.google.storage.control.v2.GetAnywhere" + + "CacheRequest\032(.google.storage.control.v2.AnywhereCache\"7\332A\004name\212\323\344\223\002*\022(\n" + + "\004name\022 {bucket=projects/*/buckets/*}/**\022\251\001\n" + + "\022ListAnywhereCaches\0224.google.storage.control." + + "v2.ListAnywhereCachesRequest\0325.google.st" + + "orage.control.v2.ListAnywhereCachesResponse\"&\332A\006parent\212\323\344\223\002\027\022\025\n" + + "\006parent\022\013{bucket=**}\022\322\001\n" + + "\034GetProjectIntelligenceConfig\022>.google.storage.control.v2.GetProjectIntel" + + "ligenceConfigRequest\032-.google.storage.co" + + "ntrol.v2.IntelligenceConfig\"C\332A\004name\202\323\344\223" + + "\0026\0224/v2/{name=projects/*/locations/*/intelligenceConfig}\022\235\002\n" + + "\037UpdateProjectIntelligenceConfig\022A.google.storage.control.v2" + + ".UpdateProjectIntelligenceConfigRequest\032-.google.storage.control.v2.Intelligence" + + "Config\"\207\001\332A\037intelligence_config,update_m" + + "ask\202\323\344\223\002_2H/v2/{intelligence_config.name" + + "=projects/*/locations/*/intelligenceConfig}:\023intelligence_config\022\317\001\n" + + "\033GetFolderIntelligenceConfig\022=.google.storage.contro" + + "l.v2.GetFolderIntelligenceConfigRequest\032-.google.storage.control.v2.Intelligence" + + "Config\"B\332A\004name\202\323\344\223\0025\0223/v2/{name=folders/*/locations/*/intelligenceConfig}\022\232\002\n" + + "\036UpdateFolderIntelligenceConfig\022@.google.s" + + "torage.control.v2.UpdateFolderIntelligenceConfigRequest\032-.google.storage.control" + + 
".v2.IntelligenceConfig\"\206\001\332A\037intelligence" + + "_config,update_mask\202\323\344\223\002^2G/v2/{intellig" + + "ence_config.name=folders/*/locations/*/i" + + "ntelligenceConfig}:\023intelligence_config\022\341\001\n" + + "!GetOrganizationIntelligenceConfig\022C.google.storage.control.v2.GetOrganizatio" + + "nIntelligenceConfigRequest\032-.google.stor" + + "age.control.v2.IntelligenceConfig\"H\332A\004na" + + "me\202\323\344\223\002;\0229/v2/{name=organizations/*/locations/*/intelligenceConfig}\022\254\002\n" + + "$UpdateOrganizationIntelligenceConfig\022F.google.st" + + "orage.control.v2.UpdateOrganizationIntelligenceConfigRequest\032-.google.storage.co" + + "ntrol.v2.IntelligenceConfig\"\214\001\332A\037intelli" + + "gence_config,update_mask\202\323\344\223\002d2M/v2/{int" + + "elligence_config.name=organizations/*/lo" + + "cations/*/intelligenceConfig}:\023intelligence_config\022\243\001\n" + + "\014GetIamPolicy\022\".google.iam" + + ".v1.GetIamPolicyRequest\032\025.google.iam.v1.Policy\"X\332A\010resource\212\323\344\223\002G\022\027\n" + + "\010resource\022\013{bucket=**}\022,\n" + + "\010resource\022 {bucket=projects/*/buckets/*}/**\022\252\001\n" + + "\014SetIamPolicy\022\".google.iam.v1.SetIamPolicyRequest\032\025.google.i" + + "am.v1.Policy\"_\332A\017resource,policy\212\323\344\223\002G\022\027\n" + + "\010resource\022\013{bucket=**}\022,\n" + + "\010resource\022 {bucket=projects/*/buckets/*}/**\022\226\002\n" + + "\022TestIamPermissions\022(.google.iam.v1.TestIamPerm" + + "issionsRequest\032).google.iam.v1.TestIamPe" + + "rmissionsResponse\"\252\001\332A\024resource,permissions\212\323\344\223\002\214\001\022\027\n" + + "\010resource\022\013{bucket=**}\0224\n" + + "\010resource\022({bucket=projects/*/buckets/*}/objects/**\022;\n" + + "\010resource\022/{bucket=projects/" + + "*/buckets/*}/managedFolders/**\032\247\002\312A\026stor" + + "age.googleapis.com\322A\212\002https://www.google" + + 
"apis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read" + + "-only,https://www.googleapis.com/auth/devstorage.full_control,https://www.google" + + "apis.com/auth/devstorage.read_only,https" + + "://www.googleapis.com/auth/devstorage.read_writeB\246\002\n" + + "\035com.google.storage.control.v2B\023StorageControlProtoP\001Z=cloud.google." + + "com/go/storage/control/apiv2/controlpb;c" + + "ontrolpb\252\002\037Google.Cloud.Storage.Control.", + "V2\312\002\037Google\\Cloud\\Storage\\Control\\V2\352\002#G" + + "oogle::Cloud::Storage::Control::V2\352AD\n\035s" + + "torage.googleapis.com/Bucket\022#projects/{" + + "project}/buckets/{bucket}b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.FieldInfoProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.api.RoutingProto.getDescriptor(), + com.google.iam.v1.IamPolicyProto.getDescriptor(), + com.google.iam.v1.PolicyProto.getDescriptor(), + com.google.longrunning.OperationsProto.getDescriptor(), + com.google.protobuf.DurationProto.getDescriptor(), + com.google.protobuf.EmptyProto.getDescriptor(), + com.google.protobuf.FieldMaskProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }); + internal_static_google_storage_control_v2_PendingRenameInfo_descriptor = + getDescriptor().getMessageType(0); + internal_static_google_storage_control_v2_PendingRenameInfo_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_PendingRenameInfo_descriptor, + new java.lang.String[] { + "Operation", + }); + internal_static_google_storage_control_v2_Folder_descriptor = 
getDescriptor().getMessageType(1); + internal_static_google_storage_control_v2_Folder_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_Folder_descriptor, + new java.lang.String[] { + "Name", "Metageneration", "CreateTime", "UpdateTime", "PendingRenameInfo", + }); + internal_static_google_storage_control_v2_GetFolderRequest_descriptor = + getDescriptor().getMessageType(2); + internal_static_google_storage_control_v2_GetFolderRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_GetFolderRequest_descriptor, + new java.lang.String[] { + "Name", "IfMetagenerationMatch", "IfMetagenerationNotMatch", "RequestId", + }); + internal_static_google_storage_control_v2_CreateFolderRequest_descriptor = + getDescriptor().getMessageType(3); + internal_static_google_storage_control_v2_CreateFolderRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_CreateFolderRequest_descriptor, + new java.lang.String[] { + "Parent", "Folder", "FolderId", "Recursive", "RequestId", + }); + internal_static_google_storage_control_v2_DeleteFolderRequest_descriptor = + getDescriptor().getMessageType(4); + internal_static_google_storage_control_v2_DeleteFolderRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_DeleteFolderRequest_descriptor, + new java.lang.String[] { + "Name", "IfMetagenerationMatch", "IfMetagenerationNotMatch", "RequestId", + }); + internal_static_google_storage_control_v2_ListFoldersRequest_descriptor = + getDescriptor().getMessageType(5); + internal_static_google_storage_control_v2_ListFoldersRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_ListFoldersRequest_descriptor, + new 
java.lang.String[] { + "Parent", + "PageSize", + "PageToken", + "Prefix", + "Delimiter", + "LexicographicStart", + "LexicographicEnd", + "RequestId", + }); + internal_static_google_storage_control_v2_ListFoldersResponse_descriptor = + getDescriptor().getMessageType(6); + internal_static_google_storage_control_v2_ListFoldersResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_ListFoldersResponse_descriptor, + new java.lang.String[] { + "Folders", "NextPageToken", + }); + internal_static_google_storage_control_v2_RenameFolderRequest_descriptor = + getDescriptor().getMessageType(7); + internal_static_google_storage_control_v2_RenameFolderRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_RenameFolderRequest_descriptor, + new java.lang.String[] { + "Name", + "DestinationFolderId", + "IfMetagenerationMatch", + "IfMetagenerationNotMatch", + "RequestId", + }); + internal_static_google_storage_control_v2_DeleteFolderRecursiveRequest_descriptor = + getDescriptor().getMessageType(8); + internal_static_google_storage_control_v2_DeleteFolderRecursiveRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_DeleteFolderRecursiveRequest_descriptor, + new java.lang.String[] { + "Name", "IfMetagenerationMatch", "IfMetagenerationNotMatch", "RequestId", + }); + internal_static_google_storage_control_v2_CommonLongRunningOperationMetadata_descriptor = + getDescriptor().getMessageType(9); + internal_static_google_storage_control_v2_CommonLongRunningOperationMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_CommonLongRunningOperationMetadata_descriptor, + new java.lang.String[] { + "CreateTime", + "EndTime", + "UpdateTime", + "Type", + 
"RequestedCancellation", + "ProgressPercent", + }); + internal_static_google_storage_control_v2_RenameFolderMetadata_descriptor = + getDescriptor().getMessageType(10); + internal_static_google_storage_control_v2_RenameFolderMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_RenameFolderMetadata_descriptor, + new java.lang.String[] { + "CommonMetadata", "SourceFolderId", "DestinationFolderId", + }); + internal_static_google_storage_control_v2_DeleteFolderRecursiveMetadata_descriptor = + getDescriptor().getMessageType(11); + internal_static_google_storage_control_v2_DeleteFolderRecursiveMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_DeleteFolderRecursiveMetadata_descriptor, + new java.lang.String[] { + "CommonMetadata", "FolderId", + }); + internal_static_google_storage_control_v2_StorageLayout_descriptor = + getDescriptor().getMessageType(12); + internal_static_google_storage_control_v2_StorageLayout_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_StorageLayout_descriptor, + new java.lang.String[] { + "Name", "Location", "LocationType", "CustomPlacementConfig", "HierarchicalNamespace", + }); + internal_static_google_storage_control_v2_StorageLayout_CustomPlacementConfig_descriptor = + internal_static_google_storage_control_v2_StorageLayout_descriptor.getNestedType(0); + internal_static_google_storage_control_v2_StorageLayout_CustomPlacementConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_StorageLayout_CustomPlacementConfig_descriptor, + new java.lang.String[] { + "DataLocations", + }); + internal_static_google_storage_control_v2_StorageLayout_HierarchicalNamespace_descriptor = + 
internal_static_google_storage_control_v2_StorageLayout_descriptor.getNestedType(1); + internal_static_google_storage_control_v2_StorageLayout_HierarchicalNamespace_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_StorageLayout_HierarchicalNamespace_descriptor, + new java.lang.String[] { + "Enabled", + }); + internal_static_google_storage_control_v2_GetStorageLayoutRequest_descriptor = + getDescriptor().getMessageType(13); + internal_static_google_storage_control_v2_GetStorageLayoutRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_GetStorageLayoutRequest_descriptor, + new java.lang.String[] { + "Name", "Prefix", "RequestId", + }); + internal_static_google_storage_control_v2_ManagedFolder_descriptor = + getDescriptor().getMessageType(14); + internal_static_google_storage_control_v2_ManagedFolder_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_ManagedFolder_descriptor, + new java.lang.String[] { + "Name", "Metageneration", "CreateTime", "UpdateTime", + }); + internal_static_google_storage_control_v2_GetManagedFolderRequest_descriptor = + getDescriptor().getMessageType(15); + internal_static_google_storage_control_v2_GetManagedFolderRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_GetManagedFolderRequest_descriptor, + new java.lang.String[] { + "Name", "IfMetagenerationMatch", "IfMetagenerationNotMatch", "RequestId", + }); + internal_static_google_storage_control_v2_CreateManagedFolderRequest_descriptor = + getDescriptor().getMessageType(16); + internal_static_google_storage_control_v2_CreateManagedFolderRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_storage_control_v2_CreateManagedFolderRequest_descriptor, + new java.lang.String[] { + "Parent", "ManagedFolder", "ManagedFolderId", "RequestId", + }); + internal_static_google_storage_control_v2_DeleteManagedFolderRequest_descriptor = + getDescriptor().getMessageType(17); + internal_static_google_storage_control_v2_DeleteManagedFolderRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_DeleteManagedFolderRequest_descriptor, + new java.lang.String[] { + "Name", + "IfMetagenerationMatch", + "IfMetagenerationNotMatch", + "AllowNonEmpty", + "RequestId", + }); + internal_static_google_storage_control_v2_ListManagedFoldersRequest_descriptor = + getDescriptor().getMessageType(18); + internal_static_google_storage_control_v2_ListManagedFoldersRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_ListManagedFoldersRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", "Prefix", "RequestId", + }); + internal_static_google_storage_control_v2_ListManagedFoldersResponse_descriptor = + getDescriptor().getMessageType(19); + internal_static_google_storage_control_v2_ListManagedFoldersResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_ListManagedFoldersResponse_descriptor, + new java.lang.String[] { + "ManagedFolders", "NextPageToken", + }); + internal_static_google_storage_control_v2_CreateAnywhereCacheMetadata_descriptor = + getDescriptor().getMessageType(20); + internal_static_google_storage_control_v2_CreateAnywhereCacheMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_CreateAnywhereCacheMetadata_descriptor, + new java.lang.String[] { + "CommonMetadata", "AnywhereCacheId", "Zone", "Ttl", 
"AdmissionPolicy", + }); + internal_static_google_storage_control_v2_UpdateAnywhereCacheMetadata_descriptor = + getDescriptor().getMessageType(21); + internal_static_google_storage_control_v2_UpdateAnywhereCacheMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_UpdateAnywhereCacheMetadata_descriptor, + new java.lang.String[] { + "CommonMetadata", "AnywhereCacheId", "Zone", "Ttl", "AdmissionPolicy", + }); + internal_static_google_storage_control_v2_AnywhereCache_descriptor = + getDescriptor().getMessageType(22); + internal_static_google_storage_control_v2_AnywhereCache_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_AnywhereCache_descriptor, + new java.lang.String[] { + "Name", + "Zone", + "Ttl", + "AdmissionPolicy", + "State", + "CreateTime", + "UpdateTime", + "PendingUpdate", + }); + internal_static_google_storage_control_v2_CreateAnywhereCacheRequest_descriptor = + getDescriptor().getMessageType(23); + internal_static_google_storage_control_v2_CreateAnywhereCacheRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_CreateAnywhereCacheRequest_descriptor, + new java.lang.String[] { + "Parent", "AnywhereCache", "RequestId", + }); + internal_static_google_storage_control_v2_UpdateAnywhereCacheRequest_descriptor = + getDescriptor().getMessageType(24); + internal_static_google_storage_control_v2_UpdateAnywhereCacheRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_UpdateAnywhereCacheRequest_descriptor, + new java.lang.String[] { + "AnywhereCache", "UpdateMask", "RequestId", + }); + internal_static_google_storage_control_v2_DisableAnywhereCacheRequest_descriptor = + getDescriptor().getMessageType(25); + 
internal_static_google_storage_control_v2_DisableAnywhereCacheRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_DisableAnywhereCacheRequest_descriptor, + new java.lang.String[] { + "Name", "RequestId", + }); + internal_static_google_storage_control_v2_PauseAnywhereCacheRequest_descriptor = + getDescriptor().getMessageType(26); + internal_static_google_storage_control_v2_PauseAnywhereCacheRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_PauseAnywhereCacheRequest_descriptor, + new java.lang.String[] { + "Name", "RequestId", + }); + internal_static_google_storage_control_v2_ResumeAnywhereCacheRequest_descriptor = + getDescriptor().getMessageType(27); + internal_static_google_storage_control_v2_ResumeAnywhereCacheRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_ResumeAnywhereCacheRequest_descriptor, + new java.lang.String[] { + "Name", "RequestId", + }); + internal_static_google_storage_control_v2_GetAnywhereCacheRequest_descriptor = + getDescriptor().getMessageType(28); + internal_static_google_storage_control_v2_GetAnywhereCacheRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_GetAnywhereCacheRequest_descriptor, + new java.lang.String[] { + "Name", "RequestId", + }); + internal_static_google_storage_control_v2_ListAnywhereCachesRequest_descriptor = + getDescriptor().getMessageType(29); + internal_static_google_storage_control_v2_ListAnywhereCachesRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_ListAnywhereCachesRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", "RequestId", + }); + 
internal_static_google_storage_control_v2_ListAnywhereCachesResponse_descriptor = + getDescriptor().getMessageType(30); + internal_static_google_storage_control_v2_ListAnywhereCachesResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_ListAnywhereCachesResponse_descriptor, + new java.lang.String[] { + "AnywhereCaches", "NextPageToken", + }); + internal_static_google_storage_control_v2_IntelligenceConfig_descriptor = + getDescriptor().getMessageType(31); + internal_static_google_storage_control_v2_IntelligenceConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_IntelligenceConfig_descriptor, + new java.lang.String[] { + "Name", + "EditionConfig", + "UpdateTime", + "Filter", + "EffectiveIntelligenceConfig", + "TrialConfig", + }); + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_descriptor = + internal_static_google_storage_control_v2_IntelligenceConfig_descriptor.getNestedType(0); + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_descriptor, + new java.lang.String[] { + "IncludedCloudStorageLocations", + "ExcludedCloudStorageLocations", + "IncludedCloudStorageBuckets", + "ExcludedCloudStorageBuckets", + "CloudStorageLocations", + "CloudStorageBuckets", + }); + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageLocations_descriptor = + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_descriptor + .getNestedType(0); + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageLocations_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageLocations_descriptor, + new java.lang.String[] { + "Locations", + }); + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageBuckets_descriptor = + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_descriptor + .getNestedType(1); + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageBuckets_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_IntelligenceConfig_Filter_CloudStorageBuckets_descriptor, + new java.lang.String[] { + "BucketIdRegexes", + }); + internal_static_google_storage_control_v2_IntelligenceConfig_EffectiveIntelligenceConfig_descriptor = + internal_static_google_storage_control_v2_IntelligenceConfig_descriptor.getNestedType(1); + internal_static_google_storage_control_v2_IntelligenceConfig_EffectiveIntelligenceConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_IntelligenceConfig_EffectiveIntelligenceConfig_descriptor, + new java.lang.String[] { + "EffectiveEdition", "IntelligenceConfig", + }); + internal_static_google_storage_control_v2_IntelligenceConfig_TrialConfig_descriptor = + internal_static_google_storage_control_v2_IntelligenceConfig_descriptor.getNestedType(2); + internal_static_google_storage_control_v2_IntelligenceConfig_TrialConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_IntelligenceConfig_TrialConfig_descriptor, + new java.lang.String[] { + "ExpireTime", + }); + internal_static_google_storage_control_v2_UpdateOrganizationIntelligenceConfigRequest_descriptor = + getDescriptor().getMessageType(32); + internal_static_google_storage_control_v2_UpdateOrganizationIntelligenceConfigRequest_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_UpdateOrganizationIntelligenceConfigRequest_descriptor, + new java.lang.String[] { + "IntelligenceConfig", "UpdateMask", "RequestId", + }); + internal_static_google_storage_control_v2_UpdateFolderIntelligenceConfigRequest_descriptor = + getDescriptor().getMessageType(33); + internal_static_google_storage_control_v2_UpdateFolderIntelligenceConfigRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_UpdateFolderIntelligenceConfigRequest_descriptor, + new java.lang.String[] { + "IntelligenceConfig", "UpdateMask", "RequestId", + }); + internal_static_google_storage_control_v2_UpdateProjectIntelligenceConfigRequest_descriptor = + getDescriptor().getMessageType(34); + internal_static_google_storage_control_v2_UpdateProjectIntelligenceConfigRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_UpdateProjectIntelligenceConfigRequest_descriptor, + new java.lang.String[] { + "IntelligenceConfig", "UpdateMask", "RequestId", + }); + internal_static_google_storage_control_v2_GetOrganizationIntelligenceConfigRequest_descriptor = + getDescriptor().getMessageType(35); + internal_static_google_storage_control_v2_GetOrganizationIntelligenceConfigRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_GetOrganizationIntelligenceConfigRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_storage_control_v2_GetFolderIntelligenceConfigRequest_descriptor = + getDescriptor().getMessageType(36); + internal_static_google_storage_control_v2_GetFolderIntelligenceConfigRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_storage_control_v2_GetFolderIntelligenceConfigRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_storage_control_v2_GetProjectIntelligenceConfigRequest_descriptor = + getDescriptor().getMessageType(37); + internal_static_google_storage_control_v2_GetProjectIntelligenceConfigRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_control_v2_GetProjectIntelligenceConfigRequest_descriptor, + new java.lang.String[] { + "Name", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.FieldInfoProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.api.RoutingProto.getDescriptor(); + com.google.iam.v1.IamPolicyProto.getDescriptor(); + com.google.iam.v1.PolicyProto.getDescriptor(); + com.google.longrunning.OperationsProto.getDescriptor(); + com.google.protobuf.DurationProto.getDescriptor(); + com.google.protobuf.EmptyProto.getDescriptor(); + com.google.protobuf.FieldMaskProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.FieldInfoProto.fieldInfo); + registry.add(com.google.api.AnnotationsProto.http); + registry.add(com.google.api.ClientProto.methodSignature); + registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resource); + registry.add(com.google.api.ResourceProto.resourceDefinition); + registry.add(com.google.api.ResourceProto.resourceReference); + registry.add(com.google.api.RoutingProto.routing); + 
registry.add(com.google.longrunning.OperationsProto.operationInfo); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageLayout.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageLayout.java new file mode 100644 index 000000000000..dbd90edf88bb --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageLayout.java @@ -0,0 +1,2927 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * The storage layout configuration of a bucket.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.StorageLayout} + */ +@com.google.protobuf.Generated +public final class StorageLayout extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.StorageLayout) + StorageLayoutOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "StorageLayout"); + } + + // Use StorageLayout.newBuilder() to construct. + private StorageLayout(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private StorageLayout() { + name_ = ""; + location_ = ""; + locationType_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.StorageLayout.class, + com.google.storage.control.v2.StorageLayout.Builder.class); + } + + public interface CustomPlacementConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.StorageLayout.CustomPlacementConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1; + * + * @return A list containing the dataLocations. + */ + java.util.List getDataLocationsList(); + + /** + * + * + *
+     * List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1; + * + * @return The count of dataLocations. + */ + int getDataLocationsCount(); + + /** + * + * + *
+     * List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1; + * + * @param index The index of the element to return. + * @return The dataLocations at the given index. + */ + java.lang.String getDataLocations(int index); + + /** + * + * + *
+     * List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1; + * + * @param index The index of the value to return. + * @return The bytes of the dataLocations at the given index. + */ + com.google.protobuf.ByteString getDataLocationsBytes(int index); + } + + /** + * + * + *
+   * Configuration for Custom Dual Regions.  It should specify precisely two
+   * eligible regions within the same Multiregion. More information on regions
+   * may be found [here](https://cloud.google.com/storage/docs/locations).
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.StorageLayout.CustomPlacementConfig} + */ + public static final class CustomPlacementConfig extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.StorageLayout.CustomPlacementConfig) + CustomPlacementConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CustomPlacementConfig"); + } + + // Use CustomPlacementConfig.newBuilder() to construct. + private CustomPlacementConfig(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CustomPlacementConfig() { + dataLocations_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_CustomPlacementConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_CustomPlacementConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.class, + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.Builder.class); + } + + public static final int DATA_LOCATIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList dataLocations_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+     * List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1; + * + * @return A list containing the dataLocations. + */ + public com.google.protobuf.ProtocolStringList getDataLocationsList() { + return dataLocations_; + } + + /** + * + * + *
+     * List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1; + * + * @return The count of dataLocations. + */ + public int getDataLocationsCount() { + return dataLocations_.size(); + } + + /** + * + * + *
+     * List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1; + * + * @param index The index of the element to return. + * @return The dataLocations at the given index. + */ + public java.lang.String getDataLocations(int index) { + return dataLocations_.get(index); + } + + /** + * + * + *
+     * List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1; + * + * @param index The index of the value to return. + * @return The bytes of the dataLocations at the given index. + */ + public com.google.protobuf.ByteString getDataLocationsBytes(int index) { + return dataLocations_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < dataLocations_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, dataLocations_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < dataLocations_.size(); i++) { + dataSize += computeStringSizeNoTag(dataLocations_.getRaw(i)); + } + size += dataSize; + size += 1 * getDataLocationsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.StorageLayout.CustomPlacementConfig)) { + return super.equals(obj); + } + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig other = + (com.google.storage.control.v2.StorageLayout.CustomPlacementConfig) obj; + + if (!getDataLocationsList().equals(other.getDataLocationsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; 
+ } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getDataLocationsCount() > 0) { + hash = (37 * hash) + DATA_LOCATIONS_FIELD_NUMBER; + hash = (53 * hash) + getDataLocationsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.StorageLayout.CustomPlacementConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.StorageLayout.CustomPlacementConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout.CustomPlacementConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.StorageLayout.CustomPlacementConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout.CustomPlacementConfig parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.StorageLayout.CustomPlacementConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout.CustomPlacementConfig parseFrom( + 
java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.StorageLayout.CustomPlacementConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout.CustomPlacementConfig + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.StorageLayout.CustomPlacementConfig + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout.CustomPlacementConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.StorageLayout.CustomPlacementConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Configuration for Custom Dual Regions.  It should specify precisely two
+     * eligible regions within the same Multiregion. More information on regions
+     * may be found [here](https://cloud.google.com/storage/docs/locations).
+     * 
+ * + * Protobuf type {@code google.storage.control.v2.StorageLayout.CustomPlacementConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.StorageLayout.CustomPlacementConfig) + com.google.storage.control.v2.StorageLayout.CustomPlacementConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_CustomPlacementConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_CustomPlacementConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.class, + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.Builder.class); + } + + // Construct using + // com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + dataLocations_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_CustomPlacementConfig_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.StorageLayout.CustomPlacementConfig + getDefaultInstanceForType() { + return com.google.storage.control.v2.StorageLayout.CustomPlacementConfig + 
.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.StorageLayout.CustomPlacementConfig build() { + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.StorageLayout.CustomPlacementConfig buildPartial() { + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig result = + new com.google.storage.control.v2.StorageLayout.CustomPlacementConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + dataLocations_.makeImmutable(); + result.dataLocations_ = dataLocations_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.StorageLayout.CustomPlacementConfig) { + return mergeFrom( + (com.google.storage.control.v2.StorageLayout.CustomPlacementConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig other) { + if (other + == com.google.storage.control.v2.StorageLayout.CustomPlacementConfig + .getDefaultInstance()) return this; + if (!other.dataLocations_.isEmpty()) { + if (dataLocations_.isEmpty()) { + dataLocations_ = other.dataLocations_; + bitField0_ |= 0x00000001; + } else { + ensureDataLocationsIsMutable(); + dataLocations_.addAll(other.dataLocations_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + 
public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureDataLocationsIsMutable(); + dataLocations_.add(s); + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList dataLocations_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureDataLocationsIsMutable() { + if (!dataLocations_.isModifiable()) { + dataLocations_ = new com.google.protobuf.LazyStringArrayList(dataLocations_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+       * List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1; + * + * @return A list containing the dataLocations. + */ + public com.google.protobuf.ProtocolStringList getDataLocationsList() { + dataLocations_.makeImmutable(); + return dataLocations_; + } + + /** + * + * + *
+       * List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1; + * + * @return The count of dataLocations. + */ + public int getDataLocationsCount() { + return dataLocations_.size(); + } + + /** + * + * + *
+       * List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1; + * + * @param index The index of the element to return. + * @return The dataLocations at the given index. + */ + public java.lang.String getDataLocations(int index) { + return dataLocations_.get(index); + } + + /** + * + * + *
+       * List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1; + * + * @param index The index of the value to return. + * @return The bytes of the dataLocations at the given index. + */ + public com.google.protobuf.ByteString getDataLocationsBytes(int index) { + return dataLocations_.getByteString(index); + } + + /** + * + * + *
+       * List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1; + * + * @param index The index to set the value at. + * @param value The dataLocations to set. + * @return This builder for chaining. + */ + public Builder setDataLocations(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureDataLocationsIsMutable(); + dataLocations_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1; + * + * @param value The dataLocations to add. + * @return This builder for chaining. + */ + public Builder addDataLocations(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureDataLocationsIsMutable(); + dataLocations_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1; + * + * @param values The dataLocations to add. + * @return This builder for chaining. + */ + public Builder addAllDataLocations(java.lang.Iterable values) { + ensureDataLocationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, dataLocations_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1; + * + * @return This builder for chaining. + */ + public Builder clearDataLocations() { + dataLocations_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+       * List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1; + * + * @param value The bytes of the dataLocations to add. + * @return This builder for chaining. + */ + public Builder addDataLocationsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureDataLocationsIsMutable(); + dataLocations_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.StorageLayout.CustomPlacementConfig) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.StorageLayout.CustomPlacementConfig) + private static final com.google.storage.control.v2.StorageLayout.CustomPlacementConfig + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.StorageLayout.CustomPlacementConfig(); + } + + public static com.google.storage.control.v2.StorageLayout.CustomPlacementConfig + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CustomPlacementConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { 
+ return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.StorageLayout.CustomPlacementConfig + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface HierarchicalNamespaceOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.StorageLayout.HierarchicalNamespace) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Enables the hierarchical namespace feature.
+     * 
+ * + * bool enabled = 1; + * + * @return The enabled. + */ + boolean getEnabled(); + } + + /** + * + * + *
+   * Configuration for a bucket's hierarchical namespace feature.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.StorageLayout.HierarchicalNamespace} + */ + public static final class HierarchicalNamespace extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.StorageLayout.HierarchicalNamespace) + HierarchicalNamespaceOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "HierarchicalNamespace"); + } + + // Use HierarchicalNamespace.newBuilder() to construct. + private HierarchicalNamespace(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private HierarchicalNamespace() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_HierarchicalNamespace_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_HierarchicalNamespace_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.class, + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.Builder.class); + } + + public static final int ENABLED_FIELD_NUMBER = 1; + private boolean enabled_ = false; + + /** + * + * + *
+     * Enables the hierarchical namespace feature.
+     * 
+ * + * bool enabled = 1; + * + * @return The enabled. + */ + @java.lang.Override + public boolean getEnabled() { + return enabled_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (enabled_ != false) { + output.writeBool(1, enabled_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (enabled_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, enabled_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.StorageLayout.HierarchicalNamespace)) { + return super.equals(obj); + } + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace other = + (com.google.storage.control.v2.StorageLayout.HierarchicalNamespace) obj; + + if (getEnabled() != other.getEnabled()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENABLED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnabled()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.storage.control.v2.StorageLayout.HierarchicalNamespace parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.StorageLayout.HierarchicalNamespace parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout.HierarchicalNamespace parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.StorageLayout.HierarchicalNamespace parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout.HierarchicalNamespace parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.StorageLayout.HierarchicalNamespace parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout.HierarchicalNamespace parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.StorageLayout.HierarchicalNamespace parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout.HierarchicalNamespace + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.StorageLayout.HierarchicalNamespace + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout.HierarchicalNamespace parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.StorageLayout.HierarchicalNamespace parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Configuration for a bucket's hierarchical namespace feature.
+     * 
+ * + * Protobuf type {@code google.storage.control.v2.StorageLayout.HierarchicalNamespace} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.StorageLayout.HierarchicalNamespace) + com.google.storage.control.v2.StorageLayout.HierarchicalNamespaceOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_HierarchicalNamespace_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_HierarchicalNamespace_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.class, + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.Builder.class); + } + + // Construct using + // com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + enabled_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_HierarchicalNamespace_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.StorageLayout.HierarchicalNamespace + getDefaultInstanceForType() { + return com.google.storage.control.v2.StorageLayout.HierarchicalNamespace + .getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.storage.control.v2.StorageLayout.HierarchicalNamespace build() { + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.StorageLayout.HierarchicalNamespace buildPartial() { + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace result = + new com.google.storage.control.v2.StorageLayout.HierarchicalNamespace(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.enabled_ = enabled_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.StorageLayout.HierarchicalNamespace) { + return mergeFrom( + (com.google.storage.control.v2.StorageLayout.HierarchicalNamespace) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace other) { + if (other + == com.google.storage.control.v2.StorageLayout.HierarchicalNamespace + .getDefaultInstance()) return this; + if (other.getEnabled() != false) { + setEnabled(other.getEnabled()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while 
(!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + enabled_ = input.readBool(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private boolean enabled_; + + /** + * + * + *
+       * Enables the hierarchical namespace feature.
+       * 
+ * + * bool enabled = 1; + * + * @return The enabled. + */ + @java.lang.Override + public boolean getEnabled() { + return enabled_; + } + + /** + * + * + *
+       * Enables the hierarchical namespace feature.
+       * 
+ * + * bool enabled = 1; + * + * @param value The enabled to set. + * @return This builder for chaining. + */ + public Builder setEnabled(boolean value) { + + enabled_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Enables the hierarchical namespace feature.
+       * 
+ * + * bool enabled = 1; + * + * @return This builder for chaining. + */ + public Builder clearEnabled() { + bitField0_ = (bitField0_ & ~0x00000001); + enabled_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.StorageLayout.HierarchicalNamespace) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.StorageLayout.HierarchicalNamespace) + private static final com.google.storage.control.v2.StorageLayout.HierarchicalNamespace + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.StorageLayout.HierarchicalNamespace(); + } + + public static com.google.storage.control.v2.StorageLayout.HierarchicalNamespace + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public HierarchicalNamespace parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.StorageLayout.HierarchicalNamespace + 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Output only. The name of the StorageLayout resource.
+   * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The name of the StorageLayout resource.
+   * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LOCATION_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object location_ = ""; + + /** + * + * + *
+   * Output only. The location of the bucket.
+   * 
+ * + * string location = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The location. + */ + @java.lang.Override + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The location of the bucket.
+   * 
+ * + * string location = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for location. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LOCATION_TYPE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object locationType_ = ""; + + /** + * + * + *
+   * Output only. The location type of the bucket (region, dual-region,
+   * multi-region, etc).
+   * 
+ * + * string location_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The locationType. + */ + @java.lang.Override + public java.lang.String getLocationType() { + java.lang.Object ref = locationType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationType_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The location type of the bucket (region, dual-region,
+   * multi-region, etc).
+   * 
+ * + * string location_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for locationType. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationTypeBytes() { + java.lang.Object ref = locationType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CUSTOM_PLACEMENT_CONFIG_FIELD_NUMBER = 4; + private com.google.storage.control.v2.StorageLayout.CustomPlacementConfig customPlacementConfig_; + + /** + * + * + *
+   * Output only. The data placement configuration for custom dual region. If
+   * there is no configuration, this is not a custom dual region bucket.
+   * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the customPlacementConfig field is set. + */ + @java.lang.Override + public boolean hasCustomPlacementConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Output only. The data placement configuration for custom dual region. If
+   * there is no configuration, this is not a custom dual region bucket.
+   * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The customPlacementConfig. + */ + @java.lang.Override + public com.google.storage.control.v2.StorageLayout.CustomPlacementConfig + getCustomPlacementConfig() { + return customPlacementConfig_ == null + ? com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.getDefaultInstance() + : customPlacementConfig_; + } + + /** + * + * + *
+   * Output only. The data placement configuration for custom dual region. If
+   * there is no configuration, this is not a custom dual region bucket.
+   * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.storage.control.v2.StorageLayout.CustomPlacementConfigOrBuilder + getCustomPlacementConfigOrBuilder() { + return customPlacementConfig_ == null + ? com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.getDefaultInstance() + : customPlacementConfig_; + } + + public static final int HIERARCHICAL_NAMESPACE_FIELD_NUMBER = 5; + private com.google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchicalNamespace_; + + /** + * + * + *
+   * Output only. The bucket's hierarchical namespace configuration. If there is
+   * no configuration, the hierarchical namespace is disabled.
+   * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the hierarchicalNamespace field is set. + */ + @java.lang.Override + public boolean hasHierarchicalNamespace() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Output only. The bucket's hierarchical namespace configuration. If there is
+   * no configuration, the hierarchical namespace is disabled.
+   * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The hierarchicalNamespace. + */ + @java.lang.Override + public com.google.storage.control.v2.StorageLayout.HierarchicalNamespace + getHierarchicalNamespace() { + return hierarchicalNamespace_ == null + ? com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.getDefaultInstance() + : hierarchicalNamespace_; + } + + /** + * + * + *
+   * Output only. The bucket's hierarchical namespace configuration. If there is
+   * no configuration, the hierarchical namespace is disabled.
+   * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.storage.control.v2.StorageLayout.HierarchicalNamespaceOrBuilder + getHierarchicalNamespaceOrBuilder() { + return hierarchicalNamespace_ == null + ? com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.getDefaultInstance() + : hierarchicalNamespace_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, location_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(locationType_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, locationType_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getCustomPlacementConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(5, getHierarchicalNamespace()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, location_); + } 
+ if (!com.google.protobuf.GeneratedMessage.isStringEmpty(locationType_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, locationType_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCustomPlacementConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(5, getHierarchicalNamespace()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.StorageLayout)) { + return super.equals(obj); + } + com.google.storage.control.v2.StorageLayout other = + (com.google.storage.control.v2.StorageLayout) obj; + + if (!getName().equals(other.getName())) return false; + if (!getLocation().equals(other.getLocation())) return false; + if (!getLocationType().equals(other.getLocationType())) return false; + if (hasCustomPlacementConfig() != other.hasCustomPlacementConfig()) return false; + if (hasCustomPlacementConfig()) { + if (!getCustomPlacementConfig().equals(other.getCustomPlacementConfig())) return false; + } + if (hasHierarchicalNamespace() != other.hasHierarchicalNamespace()) return false; + if (hasHierarchicalNamespace()) { + if (!getHierarchicalNamespace().equals(other.getHierarchicalNamespace())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + LOCATION_FIELD_NUMBER; + hash = (53 * hash) + getLocation().hashCode(); + hash = (37 * hash) + 
LOCATION_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getLocationType().hashCode(); + if (hasCustomPlacementConfig()) { + hash = (37 * hash) + CUSTOM_PLACEMENT_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getCustomPlacementConfig().hashCode(); + } + if (hasHierarchicalNamespace()) { + hash = (37 * hash) + HIERARCHICAL_NAMESPACE_FIELD_NUMBER; + hash = (53 * hash) + getHierarchicalNamespace().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.StorageLayout parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.StorageLayout parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.StorageLayout parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.StorageLayout parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout 
parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.StorageLayout parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.StorageLayout parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.StorageLayout parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.StorageLayout parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.control.v2.StorageLayout prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * The storage layout configuration of a bucket.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.StorageLayout} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.StorageLayout) + com.google.storage.control.v2.StorageLayoutOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.StorageLayout.class, + com.google.storage.control.v2.StorageLayout.Builder.class); + } + + // Construct using com.google.storage.control.v2.StorageLayout.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCustomPlacementConfigFieldBuilder(); + internalGetHierarchicalNamespaceFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + location_ = ""; + locationType_ = ""; + customPlacementConfig_ = null; + if (customPlacementConfigBuilder_ != null) { + customPlacementConfigBuilder_.dispose(); + customPlacementConfigBuilder_ = null; + } + hierarchicalNamespace_ = null; + if (hierarchicalNamespaceBuilder_ != null) { + hierarchicalNamespaceBuilder_.dispose(); + hierarchicalNamespaceBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_StorageLayout_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.StorageLayout getDefaultInstanceForType() { + return com.google.storage.control.v2.StorageLayout.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.StorageLayout build() { + com.google.storage.control.v2.StorageLayout result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.StorageLayout buildPartial() { + com.google.storage.control.v2.StorageLayout result = + new com.google.storage.control.v2.StorageLayout(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.StorageLayout result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.location_ = location_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.locationType_ = locationType_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.customPlacementConfig_ = + customPlacementConfigBuilder_ == null + ? customPlacementConfig_ + : customPlacementConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.hierarchicalNamespace_ = + hierarchicalNamespaceBuilder_ == null + ? 
hierarchicalNamespace_ + : hierarchicalNamespaceBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.StorageLayout) { + return mergeFrom((com.google.storage.control.v2.StorageLayout) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.StorageLayout other) { + if (other == com.google.storage.control.v2.StorageLayout.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getLocation().isEmpty()) { + location_ = other.location_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getLocationType().isEmpty()) { + locationType_ = other.locationType_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasCustomPlacementConfig()) { + mergeCustomPlacementConfig(other.getCustomPlacementConfig()); + } + if (other.hasHierarchicalNamespace()) { + mergeHierarchicalNamespace(other.getHierarchicalNamespace()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + location_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + 
locationType_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetCustomPlacementConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetHierarchicalNamespaceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Output only. The name of the StorageLayout resource.
+     * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. The name of the StorageLayout resource.
+     * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. The name of the StorageLayout resource.
+     * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The name of the StorageLayout resource.
+     * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The name of the StorageLayout resource.
+     * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object location_ = ""; + + /** + * + * + *
+     * Output only. The location of the bucket.
+     * 
+ * + * string location = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The location. + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. The location of the bucket.
+     * 
+ * + * string location = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for location. + */ + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. The location of the bucket.
+     * 
+ * + * string location = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The location to set. + * @return This builder for chaining. + */ + public Builder setLocation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + location_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The location of the bucket.
+     * 
+ * + * string location = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearLocation() { + location_ = getDefaultInstance().getLocation(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The location of the bucket.
+     * 
+ * + * string location = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for location to set. + * @return This builder for chaining. + */ + public Builder setLocationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + location_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object locationType_ = ""; + + /** + * + * + *
+     * Output only. The location type of the bucket (region, dual-region,
+     * multi-region, etc).
+     * 
+ * + * string location_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The locationType. + */ + public java.lang.String getLocationType() { + java.lang.Object ref = locationType_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. The location type of the bucket (region, dual-region,
+     * multi-region, etc).
+     * 
+ * + * string location_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for locationType. + */ + public com.google.protobuf.ByteString getLocationTypeBytes() { + java.lang.Object ref = locationType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. The location type of the bucket (region, dual-region,
+     * multi-region, etc).
+     * 
+ * + * string location_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The locationType to set. + * @return This builder for chaining. + */ + public Builder setLocationType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + locationType_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The location type of the bucket (region, dual-region,
+     * multi-region, etc).
+     * 
+ * + * string location_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearLocationType() { + locationType_ = getDefaultInstance().getLocationType(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The location type of the bucket (region, dual-region,
+     * multi-region, etc).
+     * 
+ * + * string location_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for locationType to set. + * @return This builder for chaining. + */ + public Builder setLocationTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + locationType_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.storage.control.v2.StorageLayout.CustomPlacementConfig + customPlacementConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig, + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.Builder, + com.google.storage.control.v2.StorageLayout.CustomPlacementConfigOrBuilder> + customPlacementConfigBuilder_; + + /** + * + * + *
+     * Output only. The data placement configuration for custom dual region. If
+     * there is no configuration, this is not a custom dual region bucket.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the customPlacementConfig field is set. + */ + public boolean hasCustomPlacementConfig() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Output only. The data placement configuration for custom dual region. If
+     * there is no configuration, this is not a custom dual region bucket.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The customPlacementConfig. + */ + public com.google.storage.control.v2.StorageLayout.CustomPlacementConfig + getCustomPlacementConfig() { + if (customPlacementConfigBuilder_ == null) { + return customPlacementConfig_ == null + ? com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.getDefaultInstance() + : customPlacementConfig_; + } else { + return customPlacementConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The data placement configuration for custom dual region. If
+     * there is no configuration, this is not a custom dual region bucket.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCustomPlacementConfig( + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig value) { + if (customPlacementConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + customPlacementConfig_ = value; + } else { + customPlacementConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The data placement configuration for custom dual region. If
+     * there is no configuration, this is not a custom dual region bucket.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCustomPlacementConfig( + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.Builder builderForValue) { + if (customPlacementConfigBuilder_ == null) { + customPlacementConfig_ = builderForValue.build(); + } else { + customPlacementConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The data placement configuration for custom dual region. If
+     * there is no configuration, this is not a custom dual region bucket.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCustomPlacementConfig( + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig value) { + if (customPlacementConfigBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && customPlacementConfig_ != null + && customPlacementConfig_ + != com.google.storage.control.v2.StorageLayout.CustomPlacementConfig + .getDefaultInstance()) { + getCustomPlacementConfigBuilder().mergeFrom(value); + } else { + customPlacementConfig_ = value; + } + } else { + customPlacementConfigBuilder_.mergeFrom(value); + } + if (customPlacementConfig_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The data placement configuration for custom dual region. If
+     * there is no configuration, this is not a custom dual region bucket.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCustomPlacementConfig() { + bitField0_ = (bitField0_ & ~0x00000008); + customPlacementConfig_ = null; + if (customPlacementConfigBuilder_ != null) { + customPlacementConfigBuilder_.dispose(); + customPlacementConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The data placement configuration for custom dual region. If
+     * there is no configuration, this is not a custom dual region bucket.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.Builder + getCustomPlacementConfigBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetCustomPlacementConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The data placement configuration for custom dual region. If
+     * there is no configuration, this is not a custom dual region bucket.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.control.v2.StorageLayout.CustomPlacementConfigOrBuilder + getCustomPlacementConfigOrBuilder() { + if (customPlacementConfigBuilder_ != null) { + return customPlacementConfigBuilder_.getMessageOrBuilder(); + } else { + return customPlacementConfig_ == null + ? com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.getDefaultInstance() + : customPlacementConfig_; + } + } + + /** + * + * + *
+     * Output only. The data placement configuration for custom dual region. If
+     * there is no configuration, this is not a custom dual region bucket.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig, + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.Builder, + com.google.storage.control.v2.StorageLayout.CustomPlacementConfigOrBuilder> + internalGetCustomPlacementConfigFieldBuilder() { + if (customPlacementConfigBuilder_ == null) { + customPlacementConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig, + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig.Builder, + com.google.storage.control.v2.StorageLayout.CustomPlacementConfigOrBuilder>( + getCustomPlacementConfig(), getParentForChildren(), isClean()); + customPlacementConfig_ = null; + } + return customPlacementConfigBuilder_; + } + + private com.google.storage.control.v2.StorageLayout.HierarchicalNamespace + hierarchicalNamespace_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace, + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.Builder, + com.google.storage.control.v2.StorageLayout.HierarchicalNamespaceOrBuilder> + hierarchicalNamespaceBuilder_; + + /** + * + * + *
+     * Output only. The bucket's hierarchical namespace configuration. If there is
+     * no configuration, the hierarchical namespace is disabled.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the hierarchicalNamespace field is set. + */ + public boolean hasHierarchicalNamespace() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Output only. The bucket's hierarchical namespace configuration. If there is
+     * no configuration, the hierarchical namespace is disabled.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The hierarchicalNamespace. + */ + public com.google.storage.control.v2.StorageLayout.HierarchicalNamespace + getHierarchicalNamespace() { + if (hierarchicalNamespaceBuilder_ == null) { + return hierarchicalNamespace_ == null + ? com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.getDefaultInstance() + : hierarchicalNamespace_; + } else { + return hierarchicalNamespaceBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The bucket's hierarchical namespace configuration. If there is
+     * no configuration, the hierarchical namespace is disabled.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setHierarchicalNamespace( + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace value) { + if (hierarchicalNamespaceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hierarchicalNamespace_ = value; + } else { + hierarchicalNamespaceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The bucket's hierarchical namespace configuration. If there is
+     * no configuration, the hierarchical namespace is disabled.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setHierarchicalNamespace( + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.Builder builderForValue) { + if (hierarchicalNamespaceBuilder_ == null) { + hierarchicalNamespace_ = builderForValue.build(); + } else { + hierarchicalNamespaceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The bucket's hierarchical namespace configuration. If there is
+     * no configuration, the hierarchical namespace is disabled.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeHierarchicalNamespace( + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace value) { + if (hierarchicalNamespaceBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && hierarchicalNamespace_ != null + && hierarchicalNamespace_ + != com.google.storage.control.v2.StorageLayout.HierarchicalNamespace + .getDefaultInstance()) { + getHierarchicalNamespaceBuilder().mergeFrom(value); + } else { + hierarchicalNamespace_ = value; + } + } else { + hierarchicalNamespaceBuilder_.mergeFrom(value); + } + if (hierarchicalNamespace_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The bucket's hierarchical namespace configuration. If there is
+     * no configuration, the hierarchical namespace is disabled.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearHierarchicalNamespace() { + bitField0_ = (bitField0_ & ~0x00000010); + hierarchicalNamespace_ = null; + if (hierarchicalNamespaceBuilder_ != null) { + hierarchicalNamespaceBuilder_.dispose(); + hierarchicalNamespaceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The bucket's hierarchical namespace configuration. If there is
+     * no configuration, the hierarchical namespace is disabled.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.Builder + getHierarchicalNamespaceBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetHierarchicalNamespaceFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The bucket's hierarchical namespace configuration. If there is
+     * no configuration, the hierarchical namespace is disabled.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.control.v2.StorageLayout.HierarchicalNamespaceOrBuilder + getHierarchicalNamespaceOrBuilder() { + if (hierarchicalNamespaceBuilder_ != null) { + return hierarchicalNamespaceBuilder_.getMessageOrBuilder(); + } else { + return hierarchicalNamespace_ == null + ? com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.getDefaultInstance() + : hierarchicalNamespace_; + } + } + + /** + * + * + *
+     * Output only. The bucket's hierarchical namespace configuration. If there is
+     * no configuration, the hierarchical namespace is disabled.
+     * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace, + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.Builder, + com.google.storage.control.v2.StorageLayout.HierarchicalNamespaceOrBuilder> + internalGetHierarchicalNamespaceFieldBuilder() { + if (hierarchicalNamespaceBuilder_ == null) { + hierarchicalNamespaceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace, + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace.Builder, + com.google.storage.control.v2.StorageLayout.HierarchicalNamespaceOrBuilder>( + getHierarchicalNamespace(), getParentForChildren(), isClean()); + hierarchicalNamespace_ = null; + } + return hierarchicalNamespaceBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.StorageLayout) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.StorageLayout) + private static final com.google.storage.control.v2.StorageLayout DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.StorageLayout(); + } + + public static com.google.storage.control.v2.StorageLayout getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StorageLayout parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); 
+ } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.StorageLayout getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageLayoutName.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageLayoutName.java new file mode 100644 index 000000000000..5039f7ad4dce --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageLayoutName.java @@ -0,0 +1,191 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.storage.control.v2; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class StorageLayoutName implements ResourceName { + private static final PathTemplate PROJECT_BUCKET = + PathTemplate.createWithoutUrlEncoding("projects/{project}/buckets/{bucket}/storageLayout"); + private volatile Map fieldValuesMap; + private final String project; + private final String bucket; + + @Deprecated + protected StorageLayoutName() { + project = null; + bucket = null; + } + + private StorageLayoutName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + bucket = Preconditions.checkNotNull(builder.getBucket()); + } + + public String getProject() { + return project; + } + + public String getBucket() { + return bucket; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static StorageLayoutName of(String project, String bucket) { + return newBuilder().setProject(project).setBucket(bucket).build(); + } + + public static String format(String project, String bucket) { + return newBuilder().setProject(project).setBucket(bucket).build().toString(); + } + + public static StorageLayoutName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_BUCKET.validatedMatch( + formattedString, "StorageLayoutName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("bucket")); + } + + public static List parseList(List formattedStrings) { + List list = new 
ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (StorageLayoutName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_BUCKET.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (bucket != null) { + fieldMapBuilder.put("bucket", bucket); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_BUCKET.instantiate("project", project, "bucket", bucket); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + StorageLayoutName that = ((StorageLayoutName) o); + return Objects.equals(this.project, that.project) && Objects.equals(this.bucket, that.bucket); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(bucket); + return h; + } + + /** Builder for projects/{project}/buckets/{bucket}/storageLayout. 
*/ + public static class Builder { + private String project; + private String bucket; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getBucket() { + return bucket; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setBucket(String bucket) { + this.bucket = bucket; + return this; + } + + private Builder(StorageLayoutName storageLayoutName) { + this.project = storageLayoutName.project; + this.bucket = storageLayoutName.bucket; + } + + public StorageLayoutName build() { + return new StorageLayoutName(this); + } + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageLayoutOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageLayoutOrBuilder.java new file mode 100644 index 000000000000..a5c90592ca26 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/StorageLayoutOrBuilder.java @@ -0,0 +1,204 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface StorageLayoutOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.StorageLayout) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. The name of the StorageLayout resource.
+   * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Output only. The name of the StorageLayout resource.
+   * Format: `projects/{project}/buckets/{bucket}/storageLayout`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Output only. The location of the bucket.
+   * 
+ * + * string location = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The location. + */ + java.lang.String getLocation(); + + /** + * + * + *
+   * Output only. The location of the bucket.
+   * 
+ * + * string location = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for location. + */ + com.google.protobuf.ByteString getLocationBytes(); + + /** + * + * + *
+   * Output only. The location type of the bucket (region, dual-region,
+   * multi-region, etc).
+   * 
+ * + * string location_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The locationType. + */ + java.lang.String getLocationType(); + + /** + * + * + *
+   * Output only. The location type of the bucket (region, dual-region,
+   * multi-region, etc).
+   * 
+ * + * string location_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for locationType. + */ + com.google.protobuf.ByteString getLocationTypeBytes(); + + /** + * + * + *
+   * Output only. The data placement configuration for custom dual region. If
+   * there is no configuration, this is not a custom dual region bucket.
+   * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the customPlacementConfig field is set. + */ + boolean hasCustomPlacementConfig(); + + /** + * + * + *
+   * Output only. The data placement configuration for custom dual region. If
+   * there is no configuration, this is not a custom dual region bucket.
+   * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The customPlacementConfig. + */ + com.google.storage.control.v2.StorageLayout.CustomPlacementConfig getCustomPlacementConfig(); + + /** + * + * + *
+   * Output only. The data placement configuration for custom dual region. If
+   * there is no configuration, this is not a custom dual region bucket.
+   * 
+ * + * + * .google.storage.control.v2.StorageLayout.CustomPlacementConfig custom_placement_config = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.storage.control.v2.StorageLayout.CustomPlacementConfigOrBuilder + getCustomPlacementConfigOrBuilder(); + + /** + * + * + *
+   * Output only. The bucket's hierarchical namespace configuration. If there is
+   * no configuration, the hierarchical namespace is disabled.
+   * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the hierarchicalNamespace field is set. + */ + boolean hasHierarchicalNamespace(); + + /** + * + * + *
+   * Output only. The bucket's hierarchical namespace configuration. If there is
+   * no configuration, the hierarchical namespace is disabled.
+   * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The hierarchicalNamespace. + */ + com.google.storage.control.v2.StorageLayout.HierarchicalNamespace getHierarchicalNamespace(); + + /** + * + * + *
+   * Output only. The bucket's hierarchical namespace configuration. If there is
+   * no configuration, the hierarchical namespace is disabled.
+   * 
+ * + * + * .google.storage.control.v2.StorageLayout.HierarchicalNamespace hierarchical_namespace = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.storage.control.v2.StorageLayout.HierarchicalNamespaceOrBuilder + getHierarchicalNamespaceOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateAnywhereCacheMetadata.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateAnywhereCacheMetadata.java new file mode 100644 index 000000000000..2c427b85e93d --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateAnywhereCacheMetadata.java @@ -0,0 +1,1720 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Message returned in the metadata field of the Operation resource for
+ * UpdateAnywhereCache operation.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.UpdateAnywhereCacheMetadata} + */ +@com.google.protobuf.Generated +public final class UpdateAnywhereCacheMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.UpdateAnywhereCacheMetadata) + UpdateAnywhereCacheMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateAnywhereCacheMetadata"); + } + + // Use UpdateAnywhereCacheMetadata.newBuilder() to construct. + private UpdateAnywhereCacheMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateAnywhereCacheMetadata() { + anywhereCacheId_ = ""; + zone_ = ""; + admissionPolicy_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateAnywhereCacheMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateAnywhereCacheMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.UpdateAnywhereCacheMetadata.class, + com.google.storage.control.v2.UpdateAnywhereCacheMetadata.Builder.class); + } + + private int bitField0_; + public static final int COMMON_METADATA_FIELD_NUMBER = 1; + private com.google.storage.control.v2.CommonLongRunningOperationMetadata commonMetadata_; + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return Whether the commonMetadata field is set. + */ + @java.lang.Override + public boolean hasCommonMetadata() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return The commonMetadata. + */ + @java.lang.Override + public com.google.storage.control.v2.CommonLongRunningOperationMetadata getCommonMetadata() { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + */ + @java.lang.Override + public com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder + getCommonMetadataOrBuilder() { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } + + public static final int ANYWHERE_CACHE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object anywhereCacheId_ = ""; + + /** + * + * + *
+   * Anywhere Cache ID.
+   * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return Whether the anywhereCacheId field is set. + */ + @java.lang.Override + public boolean hasAnywhereCacheId() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Anywhere Cache ID.
+   * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return The anywhereCacheId. + */ + @java.lang.Override + public java.lang.String getAnywhereCacheId() { + java.lang.Object ref = anywhereCacheId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + anywhereCacheId_ = s; + return s; + } + } + + /** + * + * + *
+   * Anywhere Cache ID.
+   * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return The bytes for anywhereCacheId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getAnywhereCacheIdBytes() { + java.lang.Object ref = anywhereCacheId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + anywhereCacheId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ZONE_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object zone_ = ""; + + /** + * + * + *
+   * The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * optional string zone = 5; + * + * @return Whether the zone field is set. + */ + @java.lang.Override + public boolean hasZone() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * optional string zone = 5; + * + * @return The zone. + */ + @java.lang.Override + public java.lang.String getZone() { + java.lang.Object ref = zone_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + zone_ = s; + return s; + } + } + + /** + * + * + *
+   * The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * optional string zone = 5; + * + * @return The bytes for zone. + */ + @java.lang.Override + public com.google.protobuf.ByteString getZoneBytes() { + java.lang.Object ref = zone_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + zone_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TTL_FIELD_NUMBER = 3; + private com.google.protobuf.Duration ttl_; + + /** + * + * + *
+   * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+   * is applied to all new cache entries on admission. If `ttl` is pending
+   * update, this field equals to the new value specified in the Update request.
+   * 
+ * + * optional .google.protobuf.Duration ttl = 3; + * + * @return Whether the ttl field is set. + */ + @java.lang.Override + public boolean hasTtl() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+   * is applied to all new cache entries on admission. If `ttl` is pending
+   * update, this field equals to the new value specified in the Update request.
+   * 
+ * + * optional .google.protobuf.Duration ttl = 3; + * + * @return The ttl. + */ + @java.lang.Override + public com.google.protobuf.Duration getTtl() { + return ttl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : ttl_; + } + + /** + * + * + *
+   * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+   * is applied to all new cache entries on admission. If `ttl` is pending
+   * update, this field equals to the new value specified in the Update request.
+   * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getTtlOrBuilder() { + return ttl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : ttl_; + } + + public static final int ADMISSION_POLICY_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object admissionPolicy_ = ""; + + /** + * + * + *
+   * L4 Cache entry Admission Policy in kebab-case (e.g.,
+   * "admit-on-first-miss"). If `admission_policy` is pending
+   * update, this field equals to the new value specified in the Update request.
+   * 
+ * + * optional string admission_policy = 4; + * + * @return Whether the admissionPolicy field is set. + */ + @java.lang.Override + public boolean hasAdmissionPolicy() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * L4 Cache entry Admission Policy in kebab-case (e.g.,
+   * "admit-on-first-miss"). If `admission_policy` is pending
+   * update, this field equals to the new value specified in the Update request.
+   * 
+ * + * optional string admission_policy = 4; + * + * @return The admissionPolicy. + */ + @java.lang.Override + public java.lang.String getAdmissionPolicy() { + java.lang.Object ref = admissionPolicy_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + admissionPolicy_ = s; + return s; + } + } + + /** + * + * + *
+   * L4 Cache entry Admission Policy in kebab-case (e.g.,
+   * "admit-on-first-miss"). If `admission_policy` is pending
+   * update, this field equals to the new value specified in the Update request.
+   * 
+ * + * optional string admission_policy = 4; + * + * @return The bytes for admissionPolicy. + */ + @java.lang.Override + public com.google.protobuf.ByteString getAdmissionPolicyBytes() { + java.lang.Object ref = admissionPolicy_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + admissionPolicy_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getCommonMetadata()); + } + if (((bitField0_ & 0x00000002) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, anywhereCacheId_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(3, getTtl()); + } + if (((bitField0_ & 0x00000010) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, admissionPolicy_); + } + if (((bitField0_ & 0x00000004) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, zone_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCommonMetadata()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, anywhereCacheId_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getTtl()); 
+ } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, admissionPolicy_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, zone_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.UpdateAnywhereCacheMetadata)) { + return super.equals(obj); + } + com.google.storage.control.v2.UpdateAnywhereCacheMetadata other = + (com.google.storage.control.v2.UpdateAnywhereCacheMetadata) obj; + + if (hasCommonMetadata() != other.hasCommonMetadata()) return false; + if (hasCommonMetadata()) { + if (!getCommonMetadata().equals(other.getCommonMetadata())) return false; + } + if (hasAnywhereCacheId() != other.hasAnywhereCacheId()) return false; + if (hasAnywhereCacheId()) { + if (!getAnywhereCacheId().equals(other.getAnywhereCacheId())) return false; + } + if (hasZone() != other.hasZone()) return false; + if (hasZone()) { + if (!getZone().equals(other.getZone())) return false; + } + if (hasTtl() != other.hasTtl()) return false; + if (hasTtl()) { + if (!getTtl().equals(other.getTtl())) return false; + } + if (hasAdmissionPolicy() != other.hasAdmissionPolicy()) return false; + if (hasAdmissionPolicy()) { + if (!getAdmissionPolicy().equals(other.getAdmissionPolicy())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasCommonMetadata()) { + hash = (37 * hash) + COMMON_METADATA_FIELD_NUMBER; + hash = (53 * hash) + getCommonMetadata().hashCode(); + } + if (hasAnywhereCacheId()) { + hash = (37 * hash) + 
ANYWHERE_CACHE_ID_FIELD_NUMBER; + hash = (53 * hash) + getAnywhereCacheId().hashCode(); + } + if (hasZone()) { + hash = (37 * hash) + ZONE_FIELD_NUMBER; + hash = (53 * hash) + getZone().hashCode(); + } + if (hasTtl()) { + hash = (37 * hash) + TTL_FIELD_NUMBER; + hash = (53 * hash) + getTtl().hashCode(); + } + if (hasAdmissionPolicy()) { + hash = (37 * hash) + ADMISSION_POLICY_FIELD_NUMBER; + hash = (53 * hash) + getAdmissionPolicy().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + 
} + + public static Builder newBuilder( + com.google.storage.control.v2.UpdateAnywhereCacheMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Message returned in the metadata field of the Operation resource for
+   * UpdateAnywhereCache operation.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.UpdateAnywhereCacheMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.UpdateAnywhereCacheMetadata) + com.google.storage.control.v2.UpdateAnywhereCacheMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateAnywhereCacheMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateAnywhereCacheMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.UpdateAnywhereCacheMetadata.class, + com.google.storage.control.v2.UpdateAnywhereCacheMetadata.Builder.class); + } + + // Construct using com.google.storage.control.v2.UpdateAnywhereCacheMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCommonMetadataFieldBuilder(); + internalGetTtlFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + commonMetadata_ = null; + if (commonMetadataBuilder_ != null) { + commonMetadataBuilder_.dispose(); + commonMetadataBuilder_ = null; + } + anywhereCacheId_ = ""; + zone_ = ""; + ttl_ = null; + if (ttlBuilder_ != null) { + ttlBuilder_.dispose(); + ttlBuilder_ = null; + } + admissionPolicy_ = ""; + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateAnywhereCacheMetadata_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateAnywhereCacheMetadata getDefaultInstanceForType() { + return com.google.storage.control.v2.UpdateAnywhereCacheMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateAnywhereCacheMetadata build() { + com.google.storage.control.v2.UpdateAnywhereCacheMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateAnywhereCacheMetadata buildPartial() { + com.google.storage.control.v2.UpdateAnywhereCacheMetadata result = + new com.google.storage.control.v2.UpdateAnywhereCacheMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.UpdateAnywhereCacheMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.commonMetadata_ = + commonMetadataBuilder_ == null ? commonMetadata_ : commonMetadataBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.anywhereCacheId_ = anywhereCacheId_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.zone_ = zone_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.ttl_ = ttlBuilder_ == null ? 
ttl_ : ttlBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.admissionPolicy_ = admissionPolicy_; + to_bitField0_ |= 0x00000010; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.UpdateAnywhereCacheMetadata) { + return mergeFrom((com.google.storage.control.v2.UpdateAnywhereCacheMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.UpdateAnywhereCacheMetadata other) { + if (other == com.google.storage.control.v2.UpdateAnywhereCacheMetadata.getDefaultInstance()) + return this; + if (other.hasCommonMetadata()) { + mergeCommonMetadata(other.getCommonMetadata()); + } + if (other.hasAnywhereCacheId()) { + anywhereCacheId_ = other.anywhereCacheId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasZone()) { + zone_ = other.zone_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasTtl()) { + mergeTtl(other.getTtl()); + } + if (other.hasAdmissionPolicy()) { + admissionPolicy_ = other.admissionPolicy_; + bitField0_ |= 0x00000010; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetCommonMetadataFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + 
case 18: + { + anywhereCacheId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage(internalGetTtlFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 26 + case 34: + { + admissionPolicy_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 34 + case 42: + { + zone_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.control.v2.CommonLongRunningOperationMetadata commonMetadata_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.CommonLongRunningOperationMetadata, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder, + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder> + commonMetadataBuilder_; + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * + * @return Whether the commonMetadata field is set. + */ + public boolean hasCommonMetadata() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * + * @return The commonMetadata. + */ + public com.google.storage.control.v2.CommonLongRunningOperationMetadata getCommonMetadata() { + if (commonMetadataBuilder_ == null) { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } else { + return commonMetadataBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder setCommonMetadata( + com.google.storage.control.v2.CommonLongRunningOperationMetadata value) { + if (commonMetadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonMetadata_ = value; + } else { + commonMetadataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder setCommonMetadata( + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder builderForValue) { + if (commonMetadataBuilder_ == null) { + commonMetadata_ = builderForValue.build(); + } else { + commonMetadataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder mergeCommonMetadata( + com.google.storage.control.v2.CommonLongRunningOperationMetadata value) { + if (commonMetadataBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && commonMetadata_ != null + && commonMetadata_ + != com.google.storage.control.v2.CommonLongRunningOperationMetadata + .getDefaultInstance()) { + getCommonMetadataBuilder().mergeFrom(value); + } else { + commonMetadata_ = value; + } + } else { + commonMetadataBuilder_.mergeFrom(value); + } + if (commonMetadata_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public Builder clearCommonMetadata() { + bitField0_ = (bitField0_ & ~0x00000001); + commonMetadata_ = null; + if (commonMetadataBuilder_ != null) { + commonMetadataBuilder_.dispose(); + commonMetadataBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder + getCommonMetadataBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetCommonMetadataFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + public com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder + getCommonMetadataOrBuilder() { + if (commonMetadataBuilder_ != null) { + return commonMetadataBuilder_.getMessageOrBuilder(); + } else { + return commonMetadata_ == null + ? com.google.storage.control.v2.CommonLongRunningOperationMetadata.getDefaultInstance() + : commonMetadata_; + } + } + + /** + * + * + *
+     * Generic metadata for the long running operation.
+     * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.CommonLongRunningOperationMetadata, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder, + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder> + internalGetCommonMetadataFieldBuilder() { + if (commonMetadataBuilder_ == null) { + commonMetadataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.CommonLongRunningOperationMetadata, + com.google.storage.control.v2.CommonLongRunningOperationMetadata.Builder, + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder>( + getCommonMetadata(), getParentForChildren(), isClean()); + commonMetadata_ = null; + } + return commonMetadataBuilder_; + } + + private java.lang.Object anywhereCacheId_ = ""; + + /** + * + * + *
+     * Anywhere Cache ID.
+     * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return Whether the anywhereCacheId field is set. + */ + public boolean hasAnywhereCacheId() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Anywhere Cache ID.
+     * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return The anywhereCacheId. + */ + public java.lang.String getAnywhereCacheId() { + java.lang.Object ref = anywhereCacheId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + anywhereCacheId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Anywhere Cache ID.
+     * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return The bytes for anywhereCacheId. + */ + public com.google.protobuf.ByteString getAnywhereCacheIdBytes() { + java.lang.Object ref = anywhereCacheId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + anywhereCacheId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Anywhere Cache ID.
+     * 
+ * + * optional string anywhere_cache_id = 2; + * + * @param value The anywhereCacheId to set. + * @return This builder for chaining. + */ + public Builder setAnywhereCacheId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + anywhereCacheId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Anywhere Cache ID.
+     * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearAnywhereCacheId() { + anywhereCacheId_ = getDefaultInstance().getAnywhereCacheId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Anywhere Cache ID.
+     * 
+ * + * optional string anywhere_cache_id = 2; + * + * @param value The bytes for anywhereCacheId to set. + * @return This builder for chaining. + */ + public Builder setAnywhereCacheIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + anywhereCacheId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object zone_ = ""; + + /** + * + * + *
+     * The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * optional string zone = 5; + * + * @return Whether the zone field is set. + */ + public boolean hasZone() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * optional string zone = 5; + * + * @return The zone. + */ + public java.lang.String getZone() { + java.lang.Object ref = zone_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + zone_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * optional string zone = 5; + * + * @return The bytes for zone. + */ + public com.google.protobuf.ByteString getZoneBytes() { + java.lang.Object ref = zone_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + zone_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * optional string zone = 5; + * + * @param value The zone to set. + * @return This builder for chaining. + */ + public Builder setZone(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + zone_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * optional string zone = 5; + * + * @return This builder for chaining. + */ + public Builder clearZone() { + zone_ = getDefaultInstance().getZone(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * The zone in which the cache instance is running. For example,
+     * us-central1-a.
+     * 
+ * + * optional string zone = 5; + * + * @param value The bytes for zone to set. + * @return This builder for chaining. + */ + public Builder setZoneBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + zone_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.Duration ttl_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + ttlBuilder_; + + /** + * + * + *
+     * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+     * is applied to all new cache entries on admission. If `ttl` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + * + * @return Whether the ttl field is set. + */ + public boolean hasTtl() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+     * is applied to all new cache entries on admission. If `ttl` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + * + * @return The ttl. + */ + public com.google.protobuf.Duration getTtl() { + if (ttlBuilder_ == null) { + return ttl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : ttl_; + } else { + return ttlBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+     * is applied to all new cache entries on admission. If `ttl` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + public Builder setTtl(com.google.protobuf.Duration value) { + if (ttlBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ttl_ = value; + } else { + ttlBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+     * is applied to all new cache entries on admission. If `ttl` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + public Builder setTtl(com.google.protobuf.Duration.Builder builderForValue) { + if (ttlBuilder_ == null) { + ttl_ = builderForValue.build(); + } else { + ttlBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+     * is applied to all new cache entries on admission. If `ttl` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + public Builder mergeTtl(com.google.protobuf.Duration value) { + if (ttlBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && ttl_ != null + && ttl_ != com.google.protobuf.Duration.getDefaultInstance()) { + getTtlBuilder().mergeFrom(value); + } else { + ttl_ = value; + } + } else { + ttlBuilder_.mergeFrom(value); + } + if (ttl_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+     * is applied to all new cache entries on admission. If `ttl` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + public Builder clearTtl() { + bitField0_ = (bitField0_ & ~0x00000008); + ttl_ = null; + if (ttlBuilder_ != null) { + ttlBuilder_.dispose(); + ttlBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+     * is applied to all new cache entries on admission. If `ttl` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + public com.google.protobuf.Duration.Builder getTtlBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetTtlFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+     * is applied to all new cache entries on admission. If `ttl` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + public com.google.protobuf.DurationOrBuilder getTtlOrBuilder() { + if (ttlBuilder_ != null) { + return ttlBuilder_.getMessageOrBuilder(); + } else { + return ttl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : ttl_; + } + } + + /** + * + * + *
+     * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+     * is applied to all new cache entries on admission. If `ttl` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + internalGetTtlFieldBuilder() { + if (ttlBuilder_ == null) { + ttlBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>(getTtl(), getParentForChildren(), isClean()); + ttl_ = null; + } + return ttlBuilder_; + } + + private java.lang.Object admissionPolicy_ = ""; + + /** + * + * + *
+     * L4 Cache entry Admission Policy in kebab-case (e.g.,
+     * "admit-on-first-miss"). If `admission_policy` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional string admission_policy = 4; + * + * @return Whether the admissionPolicy field is set. + */ + public boolean hasAdmissionPolicy() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * L4 Cache entry Admission Policy in kebab-case (e.g.,
+     * "admit-on-first-miss"). If `admission_policy` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional string admission_policy = 4; + * + * @return The admissionPolicy. + */ + public java.lang.String getAdmissionPolicy() { + java.lang.Object ref = admissionPolicy_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + admissionPolicy_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * L4 Cache entry Admission Policy in kebab-case (e.g.,
+     * "admit-on-first-miss"). If `admission_policy` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional string admission_policy = 4; + * + * @return The bytes for admissionPolicy. + */ + public com.google.protobuf.ByteString getAdmissionPolicyBytes() { + java.lang.Object ref = admissionPolicy_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + admissionPolicy_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * L4 Cache entry Admission Policy in kebab-case (e.g.,
+     * "admit-on-first-miss"). If `admission_policy` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional string admission_policy = 4; + * + * @param value The admissionPolicy to set. + * @return This builder for chaining. + */ + public Builder setAdmissionPolicy(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + admissionPolicy_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * L4 Cache entry Admission Policy in kebab-case (e.g.,
+     * "admit-on-first-miss"). If `admission_policy` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional string admission_policy = 4; + * + * @return This builder for chaining. + */ + public Builder clearAdmissionPolicy() { + admissionPolicy_ = getDefaultInstance().getAdmissionPolicy(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * L4 Cache entry Admission Policy in kebab-case (e.g.,
+     * "admit-on-first-miss"). If `admission_policy` is pending
+     * update, this field equals to the new value specified in the Update request.
+     * 
+ * + * optional string admission_policy = 4; + * + * @param value The bytes for admissionPolicy to set. + * @return This builder for chaining. + */ + public Builder setAdmissionPolicyBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + admissionPolicy_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.UpdateAnywhereCacheMetadata) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.UpdateAnywhereCacheMetadata) + private static final com.google.storage.control.v2.UpdateAnywhereCacheMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.UpdateAnywhereCacheMetadata(); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateAnywhereCacheMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateAnywhereCacheMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateAnywhereCacheMetadataOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateAnywhereCacheMetadataOrBuilder.java new file mode 100644 index 000000000000..2cc4c5d36a64 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateAnywhereCacheMetadataOrBuilder.java @@ -0,0 +1,235 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface UpdateAnywhereCacheMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.UpdateAnywhereCacheMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return Whether the commonMetadata field is set. + */ + boolean hasCommonMetadata(); + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + * + * @return The commonMetadata. + */ + com.google.storage.control.v2.CommonLongRunningOperationMetadata getCommonMetadata(); + + /** + * + * + *
+   * Generic metadata for the long running operation.
+   * 
+ * + * .google.storage.control.v2.CommonLongRunningOperationMetadata common_metadata = 1; + */ + com.google.storage.control.v2.CommonLongRunningOperationMetadataOrBuilder + getCommonMetadataOrBuilder(); + + /** + * + * + *
+   * Anywhere Cache ID.
+   * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return Whether the anywhereCacheId field is set. + */ + boolean hasAnywhereCacheId(); + + /** + * + * + *
+   * Anywhere Cache ID.
+   * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return The anywhereCacheId. + */ + java.lang.String getAnywhereCacheId(); + + /** + * + * + *
+   * Anywhere Cache ID.
+   * 
+ * + * optional string anywhere_cache_id = 2; + * + * @return The bytes for anywhereCacheId. + */ + com.google.protobuf.ByteString getAnywhereCacheIdBytes(); + + /** + * + * + *
+   * The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * optional string zone = 5; + * + * @return Whether the zone field is set. + */ + boolean hasZone(); + + /** + * + * + *
+   * The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * optional string zone = 5; + * + * @return The zone. + */ + java.lang.String getZone(); + + /** + * + * + *
+   * The zone in which the cache instance is running. For example,
+   * us-central1-a.
+   * 
+ * + * optional string zone = 5; + * + * @return The bytes for zone. + */ + com.google.protobuf.ByteString getZoneBytes(); + + /** + * + * + *
+   * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+   * is applied to all new cache entries on admission. If `ttl` is pending
+   * update, this field equals to the new value specified in the Update request.
+   * 
+ * + * optional .google.protobuf.Duration ttl = 3; + * + * @return Whether the ttl field is set. + */ + boolean hasTtl(); + + /** + * + * + *
+   * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+   * is applied to all new cache entries on admission. If `ttl` is pending
+   * update, this field equals to the new value specified in the Update request.
+   * 
+ * + * optional .google.protobuf.Duration ttl = 3; + * + * @return The ttl. + */ + com.google.protobuf.Duration getTtl(); + + /** + * + * + *
+   * Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that
+   * is applied to all new cache entries on admission. If `ttl` is pending
+   * update, this field equals to the new value specified in the Update request.
+   * 
+ * + * optional .google.protobuf.Duration ttl = 3; + */ + com.google.protobuf.DurationOrBuilder getTtlOrBuilder(); + + /** + * + * + *
+   * L4 Cache entry Admission Policy in kebab-case (e.g.,
+   * "admit-on-first-miss"). If `admission_policy` is pending
+   * update, this field equals to the new value specified in the Update request.
+   * 
+ * + * optional string admission_policy = 4; + * + * @return Whether the admissionPolicy field is set. + */ + boolean hasAdmissionPolicy(); + + /** + * + * + *
+   * L4 Cache entry Admission Policy in kebab-case (e.g.,
+   * "admit-on-first-miss"). If `admission_policy` is pending
+   * update, this field equals to the new value specified in the Update request.
+   * 
+ * + * optional string admission_policy = 4; + * + * @return The admissionPolicy. + */ + java.lang.String getAdmissionPolicy(); + + /** + * + * + *
+   * L4 Cache entry Admission Policy in kebab-case (e.g.,
+   * "admit-on-first-miss"). If `admission_policy` is pending
+   * update, this field equals to the new value specified in the Update request.
+   * 
+ * + * optional string admission_policy = 4; + * + * @return The bytes for admissionPolicy. + */ + com.google.protobuf.ByteString getAdmissionPolicyBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateAnywhereCacheRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateAnywhereCacheRequest.java new file mode 100644 index 000000000000..3f2d54241086 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateAnywhereCacheRequest.java @@ -0,0 +1,1328 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message for UpdateAnywhereCache.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.UpdateAnywhereCacheRequest} + */ +@com.google.protobuf.Generated +public final class UpdateAnywhereCacheRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.UpdateAnywhereCacheRequest) + UpdateAnywhereCacheRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateAnywhereCacheRequest"); + } + + // Use UpdateAnywhereCacheRequest.newBuilder() to construct. + private UpdateAnywhereCacheRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateAnywhereCacheRequest() { + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateAnywhereCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.UpdateAnywhereCacheRequest.class, + com.google.storage.control.v2.UpdateAnywhereCacheRequest.Builder.class); + } + + private int bitField0_; + public static final int ANYWHERE_CACHE_FIELD_NUMBER = 1; + private com.google.storage.control.v2.AnywhereCache anywhereCache_; + + /** + * + * + *
+   * Required. The Anywhere Cache instance to be updated.
+   * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the anywhereCache field is set. + */ + @java.lang.Override + public boolean hasAnywhereCache() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The Anywhere Cache instance to be updated.
+   * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The anywhereCache. + */ + @java.lang.Override + public com.google.storage.control.v2.AnywhereCache getAnywhereCache() { + return anywhereCache_ == null + ? com.google.storage.control.v2.AnywhereCache.getDefaultInstance() + : anywhereCache_; + } + + /** + * + * + *
+   * Required. The Anywhere Cache instance to be updated.
+   * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.storage.control.v2.AnywhereCacheOrBuilder getAnywhereCacheOrBuilder() { + return anywhereCache_ == null + ? com.google.storage.control.v2.AnywhereCache.getDefaultInstance() + : anywhereCache_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
+   * Required. List of fields to be updated. Mutable fields of AnywhereCache
+   * include `ttl` and `admission_policy`.
+   *
+   * To specify ALL fields, specify a single field with the value `*`. Note: We
+   * recommend against doing this. If a new field is introduced at a later time,
+   * an older client updating with the `*` may accidentally reset the new
+   * field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Required. List of fields to be updated. Mutable fields of AnywhereCache
+   * include `ttl` and `admission_policy`.
+   *
+   * To specify ALL fields, specify a single field with the value `*`. Note: We
+   * recommend against doing this. If a new field is introduced at a later time,
+   * an older client updating with the `*` may accidentally reset the new
+   * field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
+   * Required. List of fields to be updated. Mutable fields of AnywhereCache
+   * include `ttl` and `admission_policy`.
+   *
+   * To specify ALL fields, specify a single field with the value `*`. Note: We
+   * recommend against doing this. If a new field is introduced at a later time,
+   * an older client updating with the `*` may accidentally reset the new
+   * field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + public static final int REQUEST_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getAnywhereCache()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getAnywhereCache()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public 
boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.UpdateAnywhereCacheRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.UpdateAnywhereCacheRequest other = + (com.google.storage.control.v2.UpdateAnywhereCacheRequest) obj; + + if (hasAnywhereCache() != other.hasAnywhereCache()) return false; + if (hasAnywhereCache()) { + if (!getAnywhereCache().equals(other.getAnywhereCache())) return false; + } + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasAnywhereCache()) { + hash = (37 * hash) + ANYWHERE_CACHE_FIELD_NUMBER; + hash = (53 * hash) + getAnywhereCache().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.storage.control.v2.UpdateAnywhereCacheRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException 
{ + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.UpdateAnywhereCacheRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for UpdateAnywhereCache.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.UpdateAnywhereCacheRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.UpdateAnywhereCacheRequest) + com.google.storage.control.v2.UpdateAnywhereCacheRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateAnywhereCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.UpdateAnywhereCacheRequest.class, + com.google.storage.control.v2.UpdateAnywhereCacheRequest.Builder.class); + } + + // Construct using com.google.storage.control.v2.UpdateAnywhereCacheRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetAnywhereCacheFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + anywhereCache_ = null; + if (anywhereCacheBuilder_ != null) { + anywhereCacheBuilder_.dispose(); + anywhereCacheBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + requestId_ = ""; + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateAnywhereCacheRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateAnywhereCacheRequest getDefaultInstanceForType() { + return com.google.storage.control.v2.UpdateAnywhereCacheRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateAnywhereCacheRequest build() { + com.google.storage.control.v2.UpdateAnywhereCacheRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateAnywhereCacheRequest buildPartial() { + com.google.storage.control.v2.UpdateAnywhereCacheRequest result = + new com.google.storage.control.v2.UpdateAnywhereCacheRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.control.v2.UpdateAnywhereCacheRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.anywhereCache_ = + anywhereCacheBuilder_ == null ? anywhereCache_ : anywhereCacheBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.UpdateAnywhereCacheRequest) { + return mergeFrom((com.google.storage.control.v2.UpdateAnywhereCacheRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.control.v2.UpdateAnywhereCacheRequest other) { + if (other == com.google.storage.control.v2.UpdateAnywhereCacheRequest.getDefaultInstance()) + return this; + if (other.hasAnywhereCache()) { + mergeAnywhereCache(other.getAnywhereCache()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetAnywhereCacheFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; 
+ break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.control.v2.AnywhereCache anywhereCache_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.AnywhereCache, + com.google.storage.control.v2.AnywhereCache.Builder, + com.google.storage.control.v2.AnywhereCacheOrBuilder> + anywhereCacheBuilder_; + + /** + * + * + *
+     * Required. The Anywhere Cache instance to be updated.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the anywhereCache field is set. + */ + public boolean hasAnywhereCache() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. The Anywhere Cache instance to be updated.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The anywhereCache. + */ + public com.google.storage.control.v2.AnywhereCache getAnywhereCache() { + if (anywhereCacheBuilder_ == null) { + return anywhereCache_ == null + ? com.google.storage.control.v2.AnywhereCache.getDefaultInstance() + : anywhereCache_; + } else { + return anywhereCacheBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The Anywhere Cache instance to be updated.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setAnywhereCache(com.google.storage.control.v2.AnywhereCache value) { + if (anywhereCacheBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + anywhereCache_ = value; + } else { + anywhereCacheBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Anywhere Cache instance to be updated.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setAnywhereCache( + com.google.storage.control.v2.AnywhereCache.Builder builderForValue) { + if (anywhereCacheBuilder_ == null) { + anywhereCache_ = builderForValue.build(); + } else { + anywhereCacheBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Anywhere Cache instance to be updated.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeAnywhereCache(com.google.storage.control.v2.AnywhereCache value) { + if (anywhereCacheBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && anywhereCache_ != null + && anywhereCache_ != com.google.storage.control.v2.AnywhereCache.getDefaultInstance()) { + getAnywhereCacheBuilder().mergeFrom(value); + } else { + anywhereCache_ = value; + } + } else { + anywhereCacheBuilder_.mergeFrom(value); + } + if (anywhereCache_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The Anywhere Cache instance to be updated.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearAnywhereCache() { + bitField0_ = (bitField0_ & ~0x00000001); + anywhereCache_ = null; + if (anywhereCacheBuilder_ != null) { + anywhereCacheBuilder_.dispose(); + anywhereCacheBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The Anywhere Cache instance to be updated.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.AnywhereCache.Builder getAnywhereCacheBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetAnywhereCacheFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The Anywhere Cache instance to be updated.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.AnywhereCacheOrBuilder getAnywhereCacheOrBuilder() { + if (anywhereCacheBuilder_ != null) { + return anywhereCacheBuilder_.getMessageOrBuilder(); + } else { + return anywhereCache_ == null + ? com.google.storage.control.v2.AnywhereCache.getDefaultInstance() + : anywhereCache_; + } + } + + /** + * + * + *
+     * Required. The Anywhere Cache instance to be updated.
+     * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.AnywhereCache, + com.google.storage.control.v2.AnywhereCache.Builder, + com.google.storage.control.v2.AnywhereCacheOrBuilder> + internalGetAnywhereCacheFieldBuilder() { + if (anywhereCacheBuilder_ == null) { + anywhereCacheBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.AnywhereCache, + com.google.storage.control.v2.AnywhereCache.Builder, + com.google.storage.control.v2.AnywhereCacheOrBuilder>( + getAnywhereCache(), getParentForChildren(), isClean()); + anywhereCache_ = null; + } + return anywhereCacheBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
+     * Required. List of fields to be updated. Mutable fields of AnywhereCache
+     * include `ttl` and `admission_policy`.
+     *
+     * To specify ALL fields, specify a single field with the value `*`. Note: We
+     * recommend against doing this. If a new field is introduced at a later time,
+     * an older client updating with the `*` may accidentally reset the new
+     * field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. List of fields to be updated. Mutable fields of AnywhereCache
+     * include `ttl` and `admission_policy`.
+     *
+     * To specify ALL fields, specify a single field with the value `*`. Note: We
+     * recommend against doing this. If a new field is introduced at a later time,
+     * an older client updating with the `*` may accidentally reset the new
+     * field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. List of fields to be updated. Mutable fields of AnywhereCache
+     * include `ttl` and `admission_policy`.
+     *
+     * To specify ALL fields, specify a single field with the value `*`. Note: We
+     * recommend against doing this. If a new field is introduced at a later time,
+     * an older client updating with the `*` may accidentally reset the new
+     * field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. List of fields to be updated. Mutable fields of AnywhereCache
+     * include `ttl` and `admission_policy`.
+     *
+     * To specify ALL fields, specify a single field with the value `*`. Note: We
+     * recommend against doing this. If a new field is introduced at a later time,
+     * an older client updating with the `*` may accidentally reset the new
+     * field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. List of fields to be updated. Mutable fields of AnywhereCache
+     * include `ttl` and `admission_policy`.
+     *
+     * To specify ALL fields, specify a single field with the value `*`. Note: We
+     * recommend against doing this. If a new field is introduced at a later time,
+     * an older client updating with the `*` may accidentally reset the new
+     * field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. List of fields to be updated. Mutable fields of AnywhereCache
+     * include `ttl` and `admission_policy`.
+     *
+     * To specify ALL fields, specify a single field with the value `*`. Note: We
+     * recommend against doing this. If a new field is introduced at a later time,
+     * an older client updating with the `*` may accidentally reset the new
+     * field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. List of fields to be updated. Mutable fields of AnywhereCache
+     * include `ttl` and `admission_policy`.
+     *
+     * To specify ALL fields, specify a single field with the value `*`. Note: We
+     * recommend against doing this. If a new field is introduced at a later time,
+     * an older client updating with the `*` may accidentally reset the new
+     * field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. List of fields to be updated. Mutable fields of AnywhereCache
+     * include `ttl` and `admission_policy`.
+     *
+     * To specify ALL fields, specify a single field with the value `*`. Note: We
+     * recommend against doing this. If a new field is introduced at a later time,
+     * an older client updating with the `*` may accidentally reset the new
+     * field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
+     * Required. List of fields to be updated. Mutable fields of AnywhereCache
+     * include `ttl` and `admission_policy`.
+     *
+     * To specify ALL fields, specify a single field with the value `*`. Note: We
+     * recommend against doing this. If a new field is introduced at a later time,
+     * an older client updating with the `*` may accidentally reset the new
+     * field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A unique identifier for this request. UUID is the recommended
+     * format, but other formats are still accepted. This request is only
+     * idempotent if a `request_id` is provided.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.UpdateAnywhereCacheRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.UpdateAnywhereCacheRequest) + private static final com.google.storage.control.v2.UpdateAnywhereCacheRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.UpdateAnywhereCacheRequest(); + } + + public static com.google.storage.control.v2.UpdateAnywhereCacheRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateAnywhereCacheRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateAnywhereCacheRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateAnywhereCacheRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateAnywhereCacheRequestOrBuilder.java new file mode 100644 index 000000000000..bc2ed9652c31 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateAnywhereCacheRequestOrBuilder.java @@ -0,0 +1,169 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface UpdateAnywhereCacheRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.UpdateAnywhereCacheRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The Anywhere Cache instance to be updated.
+   * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the anywhereCache field is set. + */ + boolean hasAnywhereCache(); + + /** + * + * + *
+   * Required. The Anywhere Cache instance to be updated.
+   * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The anywhereCache. + */ + com.google.storage.control.v2.AnywhereCache getAnywhereCache(); + + /** + * + * + *
+   * Required. The Anywhere Cache instance to be updated.
+   * 
+ * + * + * .google.storage.control.v2.AnywhereCache anywhere_cache = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.storage.control.v2.AnywhereCacheOrBuilder getAnywhereCacheOrBuilder(); + + /** + * + * + *
+   * Required. List of fields to be updated. Mutable fields of AnywhereCache
+   * include `ttl` and `admission_policy`.
+   *
+   * To specify ALL fields, specify a single field with the value `*`. Note: We
+   * recommend against doing this. If a new field is introduced at a later time,
+   * an older client updating with the `*` may accidentally reset the new
+   * field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
+   * Required. List of fields to be updated. Mutable fields of AnywhereCache
+   * include `ttl` and `admission_policy`.
+   *
+   * To specify ALL fields, specify a single field with the value `*`. Note: We
+   * recommend against doing this. If a new field is introduced at a later time,
+   * an older client updating with the `*` may accidentally reset the new
+   * field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
+   * Required. List of fields to be updated. Mutable fields of AnywhereCache
+   * include `ttl` and `admission_policy`.
+   *
+   * To specify ALL fields, specify a single field with the value `*`. Note: We
+   * recommend against doing this. If a new field is introduced at a later time,
+   * an older client updating with the `*` may accidentally reset the new
+   * field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. A unique identifier for this request. UUID is the recommended
+   * format, but other formats are still accepted. This request is only
+   * idempotent if a `request_id` is provided.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateFolderIntelligenceConfigRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateFolderIntelligenceConfigRequest.java new file mode 100644 index 000000000000..954e279b9b94 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateFolderIntelligenceConfigRequest.java @@ -0,0 +1,1281 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message to update the `IntelligenceConfig` resource associated with
+ * your folder.
+ *
+ * **IAM Permissions**:
+ *
+ * Requires `storage.intelligenceConfigs.update`
+ * [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on
+ * the folder.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.UpdateFolderIntelligenceConfigRequest} + */ +@com.google.protobuf.Generated +public final class UpdateFolderIntelligenceConfigRequest + extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.UpdateFolderIntelligenceConfigRequest) + UpdateFolderIntelligenceConfigRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateFolderIntelligenceConfigRequest"); + } + + // Use UpdateFolderIntelligenceConfigRequest.newBuilder() to construct. + private UpdateFolderIntelligenceConfigRequest( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateFolderIntelligenceConfigRequest() { + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateFolderIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateFolderIntelligenceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest.class, + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest.Builder.class); + } + + private int bitField0_; + public static final int INTELLIGENCE_CONFIG_FIELD_NUMBER = 1; + private com.google.storage.control.v2.IntelligenceConfig intelligenceConfig_; + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the intelligenceConfig field is set. + */ + @java.lang.Override + public boolean hasIntelligenceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The intelligenceConfig. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig getIntelligenceConfig() { + return intelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance() + : intelligenceConfig_; + } + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfigOrBuilder + getIntelligenceConfigOrBuilder() { + return intelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance() + : intelligenceConfig_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + public static final int REQUEST_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. The ID that uniquely identifies the request, preventing duplicate
+   * processing.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The ID that uniquely identifies the request, preventing duplicate
+   * processing.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getIntelligenceConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getIntelligenceConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + 
public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest other = + (com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest) obj; + + if (hasIntelligenceConfig() != other.hasIntelligenceConfig()) return false; + if (hasIntelligenceConfig()) { + if (!getIntelligenceConfig().equals(other.getIntelligenceConfig())) return false; + } + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasIntelligenceConfig()) { + hash = (37 * hash) + INTELLIGENCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getIntelligenceConfig().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public 
static com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message to update the `IntelligenceConfig` resource associated with
+   * your folder.
+   *
+   * **IAM Permissions**:
+   *
+   * Requires `storage.intelligenceConfigs.update`
+   * [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on
+   * the folder.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.UpdateFolderIntelligenceConfigRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.UpdateFolderIntelligenceConfigRequest) + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateFolderIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateFolderIntelligenceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest.class, + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest.Builder.class); + } + + // Construct using + // com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetIntelligenceConfigFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + intelligenceConfig_ = null; + if (intelligenceConfigBuilder_ != null) { + intelligenceConfigBuilder_.dispose(); + intelligenceConfigBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + 
updateMaskBuilder_ = null; + } + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateFolderIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest + getDefaultInstanceForType() { + return com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest build() { + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest buildPartial() { + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest result = + new com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.intelligenceConfig_ = + intelligenceConfigBuilder_ == null + ? intelligenceConfig_ + : intelligenceConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest) { + return mergeFrom( + (com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest other) { + if (other + == com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest + .getDefaultInstance()) return this; + if (other.hasIntelligenceConfig()) { + mergeIntelligenceConfig(other.getIntelligenceConfig()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetIntelligenceConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + 
requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.control.v2.IntelligenceConfig intelligenceConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig, + com.google.storage.control.v2.IntelligenceConfig.Builder, + com.google.storage.control.v2.IntelligenceConfigOrBuilder> + intelligenceConfigBuilder_; + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the intelligenceConfig field is set. + */ + public boolean hasIntelligenceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The intelligenceConfig. + */ + public com.google.storage.control.v2.IntelligenceConfig getIntelligenceConfig() { + if (intelligenceConfigBuilder_ == null) { + return intelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance() + : intelligenceConfig_; + } else { + return intelligenceConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setIntelligenceConfig(com.google.storage.control.v2.IntelligenceConfig value) { + if (intelligenceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + intelligenceConfig_ = value; + } else { + intelligenceConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setIntelligenceConfig( + com.google.storage.control.v2.IntelligenceConfig.Builder builderForValue) { + if (intelligenceConfigBuilder_ == null) { + intelligenceConfig_ = builderForValue.build(); + } else { + intelligenceConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeIntelligenceConfig(com.google.storage.control.v2.IntelligenceConfig value) { + if (intelligenceConfigBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && intelligenceConfig_ != null + && intelligenceConfig_ + != com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance()) { + getIntelligenceConfigBuilder().mergeFrom(value); + } else { + intelligenceConfig_ = value; + } + } else { + intelligenceConfigBuilder_.mergeFrom(value); + } + if (intelligenceConfig_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearIntelligenceConfig() { + bitField0_ = (bitField0_ & ~0x00000001); + intelligenceConfig_ = null; + if (intelligenceConfigBuilder_ != null) { + intelligenceConfigBuilder_.dispose(); + intelligenceConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.IntelligenceConfig.Builder getIntelligenceConfigBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetIntelligenceConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.IntelligenceConfigOrBuilder + getIntelligenceConfigOrBuilder() { + if (intelligenceConfigBuilder_ != null) { + return intelligenceConfigBuilder_.getMessageOrBuilder(); + } else { + return intelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance() + : intelligenceConfig_; + } + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig, + com.google.storage.control.v2.IntelligenceConfig.Builder, + com.google.storage.control.v2.IntelligenceConfigOrBuilder> + internalGetIntelligenceConfigFieldBuilder() { + if (intelligenceConfigBuilder_ == null) { + intelligenceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig, + com.google.storage.control.v2.IntelligenceConfig.Builder, + com.google.storage.control.v2.IntelligenceConfigOrBuilder>( + getIntelligenceConfig(), getParentForChildren(), isClean()); + intelligenceConfig_ = null; + } + return intelligenceConfigBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.UpdateFolderIntelligenceConfigRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.UpdateFolderIntelligenceConfigRequest) + private static final com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest(); + } + + public static com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateFolderIntelligenceConfigRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public 
static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateFolderIntelligenceConfigRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateFolderIntelligenceConfigRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateFolderIntelligenceConfigRequestOrBuilder.java new file mode 100644 index 000000000000..bd99decdc99c --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateFolderIntelligenceConfigRequestOrBuilder.java @@ -0,0 +1,149 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface UpdateFolderIntelligenceConfigRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.UpdateFolderIntelligenceConfigRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the intelligenceConfig field is set. + */ + boolean hasIntelligenceConfig(); + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The intelligenceConfig. + */ + com.google.storage.control.v2.IntelligenceConfig getIntelligenceConfig(); + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.storage.control.v2.IntelligenceConfigOrBuilder getIntelligenceConfigOrBuilder(); + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); + + /** + * + * + *
+   * Optional. The ID that uniquely identifies the request, preventing duplicate
+   * processing.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. The ID that uniquely identifies the request, preventing duplicate
+   * processing.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateOrganizationIntelligenceConfigRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateOrganizationIntelligenceConfigRequest.java new file mode 100644 index 000000000000..dd04e8a5eca4 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateOrganizationIntelligenceConfigRequest.java @@ -0,0 +1,1291 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message to update the `IntelligenceConfig` resource associated with
+ * your organization.
+ *
+ * **IAM Permissions**:
+ *
+ * Requires `storage.intelligenceConfigs.update`
+ * [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on
+ * the organization.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest} + */ +@com.google.protobuf.Generated +public final class UpdateOrganizationIntelligenceConfigRequest + extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest) + UpdateOrganizationIntelligenceConfigRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateOrganizationIntelligenceConfigRequest"); + } + + // Use UpdateOrganizationIntelligenceConfigRequest.newBuilder() to construct. + private UpdateOrganizationIntelligenceConfigRequest( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateOrganizationIntelligenceConfigRequest() { + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateOrganizationIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateOrganizationIntelligenceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest.class, + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest.Builder + .class); + } + + private int bitField0_; + public static final int INTELLIGENCE_CONFIG_FIELD_NUMBER = 1; + private com.google.storage.control.v2.IntelligenceConfig intelligenceConfig_; + + 
/** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the intelligenceConfig field is set. + */ + @java.lang.Override + public boolean hasIntelligenceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The intelligenceConfig. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig getIntelligenceConfig() { + return intelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance() + : intelligenceConfig_; + } + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfigOrBuilder + getIntelligenceConfigOrBuilder() { + return intelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance() + : intelligenceConfig_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + public static final int REQUEST_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. The ID that uniquely identifies the request, preventing duplicate
+   * processing.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The ID that uniquely identifies the request, preventing duplicate
+   * processing.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getIntelligenceConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getIntelligenceConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + 
public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest other = + (com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest) obj; + + if (hasIntelligenceConfig() != other.hasIntelligenceConfig()) return false; + if (hasIntelligenceConfig()) { + if (!getIntelligenceConfig().equals(other.getIntelligenceConfig())) return false; + } + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasIntelligenceConfig()) { + hash = (37 * hash) + INTELLIGENCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getIntelligenceConfig().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message to update the `IntelligenceConfig` resource associated with
+   * your organization.
+   *
+   * **IAM Permissions**:
+   *
+   * Requires `storage.intelligenceConfigs.update`
+   * [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on
+   * the organization.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest) + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateOrganizationIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateOrganizationIntelligenceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest.class, + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest.Builder + .class); + } + + // Construct using + // com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetIntelligenceConfigFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + intelligenceConfig_ = null; + if (intelligenceConfigBuilder_ != null) { + intelligenceConfigBuilder_.dispose(); + intelligenceConfigBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != 
null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateOrganizationIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest + getDefaultInstanceForType() { + return com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest build() { + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest + buildPartial() { + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest result = + new com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.intelligenceConfig_ = + intelligenceConfigBuilder_ == null + ? intelligenceConfig_ + : intelligenceConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest) { + return mergeFrom( + (com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest other) { + if (other + == com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest + .getDefaultInstance()) return this; + if (other.hasIntelligenceConfig()) { + mergeIntelligenceConfig(other.getIntelligenceConfig()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetIntelligenceConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } 
// case 18 + case 26: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.control.v2.IntelligenceConfig intelligenceConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig, + com.google.storage.control.v2.IntelligenceConfig.Builder, + com.google.storage.control.v2.IntelligenceConfigOrBuilder> + intelligenceConfigBuilder_; + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the intelligenceConfig field is set. + */ + public boolean hasIntelligenceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The intelligenceConfig. + */ + public com.google.storage.control.v2.IntelligenceConfig getIntelligenceConfig() { + if (intelligenceConfigBuilder_ == null) { + return intelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance() + : intelligenceConfig_; + } else { + return intelligenceConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setIntelligenceConfig(com.google.storage.control.v2.IntelligenceConfig value) { + if (intelligenceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + intelligenceConfig_ = value; + } else { + intelligenceConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setIntelligenceConfig( + com.google.storage.control.v2.IntelligenceConfig.Builder builderForValue) { + if (intelligenceConfigBuilder_ == null) { + intelligenceConfig_ = builderForValue.build(); + } else { + intelligenceConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeIntelligenceConfig(com.google.storage.control.v2.IntelligenceConfig value) { + if (intelligenceConfigBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && intelligenceConfig_ != null + && intelligenceConfig_ + != com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance()) { + getIntelligenceConfigBuilder().mergeFrom(value); + } else { + intelligenceConfig_ = value; + } + } else { + intelligenceConfigBuilder_.mergeFrom(value); + } + if (intelligenceConfig_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearIntelligenceConfig() { + bitField0_ = (bitField0_ & ~0x00000001); + intelligenceConfig_ = null; + if (intelligenceConfigBuilder_ != null) { + intelligenceConfigBuilder_.dispose(); + intelligenceConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.IntelligenceConfig.Builder getIntelligenceConfigBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetIntelligenceConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.IntelligenceConfigOrBuilder + getIntelligenceConfigOrBuilder() { + if (intelligenceConfigBuilder_ != null) { + return intelligenceConfigBuilder_.getMessageOrBuilder(); + } else { + return intelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance() + : intelligenceConfig_; + } + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig, + com.google.storage.control.v2.IntelligenceConfig.Builder, + com.google.storage.control.v2.IntelligenceConfigOrBuilder> + internalGetIntelligenceConfigFieldBuilder() { + if (intelligenceConfigBuilder_ == null) { + intelligenceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig, + com.google.storage.control.v2.IntelligenceConfig.Builder, + com.google.storage.control.v2.IntelligenceConfigOrBuilder>( + getIntelligenceConfig(), getParentForChildren(), isClean()); + intelligenceConfig_ = null; + } + return intelligenceConfigBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest) + private static final com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest(); + } + + public static com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateOrganizationIntelligenceConfigRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser + getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateOrganizationIntelligenceConfigRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateOrganizationIntelligenceConfigRequestOrBuilder.java new file mode 100644 index 000000000000..2a734d479417 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateOrganizationIntelligenceConfigRequestOrBuilder.java @@ -0,0 +1,149 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface UpdateOrganizationIntelligenceConfigRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.UpdateOrganizationIntelligenceConfigRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the intelligenceConfig field is set. + */ + boolean hasIntelligenceConfig(); + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The intelligenceConfig. + */ + com.google.storage.control.v2.IntelligenceConfig getIntelligenceConfig(); + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.storage.control.v2.IntelligenceConfigOrBuilder getIntelligenceConfigOrBuilder(); + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); + + /** + * + * + *
+   * Optional. The ID that uniquely identifies the request, preventing duplicate
+   * processing.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. The ID that uniquely identifies the request, preventing duplicate
+   * processing.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateProjectIntelligenceConfigRequest.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateProjectIntelligenceConfigRequest.java new file mode 100644 index 000000000000..04df490c4600 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateProjectIntelligenceConfigRequest.java @@ -0,0 +1,1281 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +/** + * + * + *
+ * Request message to update the `IntelligenceConfig` resource associated with
+ * your project.
+ *
+ * **IAM Permissions**:
+ *
+ * Requires `storage.intelligenceConfigs.update`
+ * [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on
+ * the folder.
+ * 
+ * + * Protobuf type {@code google.storage.control.v2.UpdateProjectIntelligenceConfigRequest} + */ +@com.google.protobuf.Generated +public final class UpdateProjectIntelligenceConfigRequest + extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.control.v2.UpdateProjectIntelligenceConfigRequest) + UpdateProjectIntelligenceConfigRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateProjectIntelligenceConfigRequest"); + } + + // Use UpdateProjectIntelligenceConfigRequest.newBuilder() to construct. + private UpdateProjectIntelligenceConfigRequest( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateProjectIntelligenceConfigRequest() { + requestId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateProjectIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateProjectIntelligenceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest.class, + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest.Builder.class); + } + + private int bitField0_; + public static final int INTELLIGENCE_CONFIG_FIELD_NUMBER = 1; + private com.google.storage.control.v2.IntelligenceConfig intelligenceConfig_; + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the intelligenceConfig field is set. + */ + @java.lang.Override + public boolean hasIntelligenceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The intelligenceConfig. + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfig getIntelligenceConfig() { + return intelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance() + : intelligenceConfig_; + } + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.storage.control.v2.IntelligenceConfigOrBuilder + getIntelligenceConfigOrBuilder() { + return intelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance() + : intelligenceConfig_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + public static final int REQUEST_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + + /** + * + * + *
+   * Optional. The ID that uniquely identifies the request, preventing duplicate
+   * processing.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The ID that uniquely identifies the request, preventing duplicate
+   * processing.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getIntelligenceConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getIntelligenceConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + 
public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest)) { + return super.equals(obj); + } + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest other = + (com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest) obj; + + if (hasIntelligenceConfig() != other.hasIntelligenceConfig()) return false; + if (hasIntelligenceConfig()) { + if (!getIntelligenceConfig().equals(other.getIntelligenceConfig())) return false; + } + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasIntelligenceConfig()) { + hash = (37 * hash) + INTELLIGENCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getIntelligenceConfig().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + 
public static com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message to update the `IntelligenceConfig` resource associated with
+   * your project.
+   *
+   * **IAM Permissions**:
+   *
+   * Requires `storage.intelligenceConfigs.update`
+   * [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on
+   * the folder.
+   * 
+ * + * Protobuf type {@code google.storage.control.v2.UpdateProjectIntelligenceConfigRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.control.v2.UpdateProjectIntelligenceConfigRequest) + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateProjectIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateProjectIntelligenceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest.class, + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest.Builder.class); + } + + // Construct using + // com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetIntelligenceConfigFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + intelligenceConfig_ = null; + if (intelligenceConfigBuilder_ != null) { + intelligenceConfigBuilder_.dispose(); + intelligenceConfigBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + 
updateMaskBuilder_ = null; + } + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.control.v2.StorageControlProto + .internal_static_google_storage_control_v2_UpdateProjectIntelligenceConfigRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest + getDefaultInstanceForType() { + return com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest build() { + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest buildPartial() { + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest result = + new com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.intelligenceConfig_ = + intelligenceConfigBuilder_ == null + ? intelligenceConfig_ + : intelligenceConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest) { + return mergeFrom( + (com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest other) { + if (other + == com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest + .getDefaultInstance()) return this; + if (other.hasIntelligenceConfig()) { + mergeIntelligenceConfig(other.getIntelligenceConfig()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetIntelligenceConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + 
{ + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.control.v2.IntelligenceConfig intelligenceConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig, + com.google.storage.control.v2.IntelligenceConfig.Builder, + com.google.storage.control.v2.IntelligenceConfigOrBuilder> + intelligenceConfigBuilder_; + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the intelligenceConfig field is set. + */ + public boolean hasIntelligenceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The intelligenceConfig. + */ + public com.google.storage.control.v2.IntelligenceConfig getIntelligenceConfig() { + if (intelligenceConfigBuilder_ == null) { + return intelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance() + : intelligenceConfig_; + } else { + return intelligenceConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setIntelligenceConfig(com.google.storage.control.v2.IntelligenceConfig value) { + if (intelligenceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + intelligenceConfig_ = value; + } else { + intelligenceConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setIntelligenceConfig( + com.google.storage.control.v2.IntelligenceConfig.Builder builderForValue) { + if (intelligenceConfigBuilder_ == null) { + intelligenceConfig_ = builderForValue.build(); + } else { + intelligenceConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeIntelligenceConfig(com.google.storage.control.v2.IntelligenceConfig value) { + if (intelligenceConfigBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && intelligenceConfig_ != null + && intelligenceConfig_ + != com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance()) { + getIntelligenceConfigBuilder().mergeFrom(value); + } else { + intelligenceConfig_ = value; + } + } else { + intelligenceConfigBuilder_.mergeFrom(value); + } + if (intelligenceConfig_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearIntelligenceConfig() { + bitField0_ = (bitField0_ & ~0x00000001); + intelligenceConfig_ = null; + if (intelligenceConfigBuilder_ != null) { + intelligenceConfigBuilder_.dispose(); + intelligenceConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.IntelligenceConfig.Builder getIntelligenceConfigBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetIntelligenceConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.control.v2.IntelligenceConfigOrBuilder + getIntelligenceConfigOrBuilder() { + if (intelligenceConfigBuilder_ != null) { + return intelligenceConfigBuilder_.getMessageOrBuilder(); + } else { + return intelligenceConfig_ == null + ? com.google.storage.control.v2.IntelligenceConfig.getDefaultInstance() + : intelligenceConfig_; + } + } + + /** + * + * + *
+     * Required. The `IntelligenceConfig` resource to be updated.
+     * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig, + com.google.storage.control.v2.IntelligenceConfig.Builder, + com.google.storage.control.v2.IntelligenceConfigOrBuilder> + internalGetIntelligenceConfigFieldBuilder() { + if (intelligenceConfigBuilder_ == null) { + intelligenceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.control.v2.IntelligenceConfig, + com.google.storage.control.v2.IntelligenceConfig.Builder, + com.google.storage.control.v2.IntelligenceConfigOrBuilder>( + getIntelligenceConfig(), getParentForChildren(), isClean()); + intelligenceConfig_ = null; + } + return intelligenceConfigBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
+     * Required. The `update_mask` that specifies the fields within the
+     * `IntelligenceConfig` resource that should be modified by this update. Only
+     * the listed fields are updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + private java.lang.Object requestId_ = ""; + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID that uniquely identifies the request, preventing duplicate
+     * processing.
+     * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.control.v2.UpdateProjectIntelligenceConfigRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.control.v2.UpdateProjectIntelligenceConfigRequest) + private static final com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest(); + } + + public static com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateProjectIntelligenceConfigRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + 
public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.control.v2.UpdateProjectIntelligenceConfigRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateProjectIntelligenceConfigRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateProjectIntelligenceConfigRequestOrBuilder.java new file mode 100644 index 000000000000..763927383b7f --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/java/com/google/storage/control/v2/UpdateProjectIntelligenceConfigRequestOrBuilder.java @@ -0,0 +1,149 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/control/v2/storage_control.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.control.v2; + +@com.google.protobuf.Generated +public interface UpdateProjectIntelligenceConfigRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.control.v2.UpdateProjectIntelligenceConfigRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the intelligenceConfig field is set. + */ + boolean hasIntelligenceConfig(); + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The intelligenceConfig. + */ + com.google.storage.control.v2.IntelligenceConfig getIntelligenceConfig(); + + /** + * + * + *
+   * Required. The `IntelligenceConfig` resource to be updated.
+   * 
+ * + * + * .google.storage.control.v2.IntelligenceConfig intelligence_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.storage.control.v2.IntelligenceConfigOrBuilder getIntelligenceConfigOrBuilder(); + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
+   * Required. The `update_mask` that specifies the fields within the
+   * `IntelligenceConfig` resource that should be modified by this update. Only
+   * the listed fields are updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); + + /** + * + * + *
+   * Optional. The ID that uniquely identifies the request, preventing duplicate
+   * processing.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + + /** + * + * + *
+   * Optional. The ID that uniquely identifies the request, preventing duplicate
+   * processing.
+   * 
+ * + * + * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-control-v2/src/main/proto/google/storage/control/v2/storage_control.proto b/java-storage/proto-google-cloud-storage-control-v2/src/main/proto/google/storage/control/v2/storage_control.proto new file mode 100644 index 000000000000..015ad010b03f --- /dev/null +++ b/java-storage/proto-google-cloud-storage-control-v2/src/main/proto/google/storage/control/v2/storage_control.proto @@ -0,0 +1,1464 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.storage.control.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/field_info.proto"; +import "google/api/resource.proto"; +import "google/api/routing.proto"; +import "google/iam/v1/iam_policy.proto"; +import "google/iam/v1/policy.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.Storage.Control.V2"; +option go_package = "cloud.google.com/go/storage/control/apiv2/controlpb;controlpb"; +option java_multiple_files = true; +option java_outer_classname = "StorageControlProto"; +option java_package = "com.google.storage.control.v2"; +option php_namespace = "Google\\Cloud\\Storage\\Control\\V2"; +option ruby_package = "Google::Cloud::Storage::Control::V2"; +option (google.api.resource_definition) = { + type: "storage.googleapis.com/Bucket" + pattern: "projects/{project}/buckets/{bucket}" +}; + +// StorageControl service includes selected control plane operations. +service StorageControl { + option (google.api.default_host) = "storage.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only," + "https://www.googleapis.com/auth/devstorage.full_control," + "https://www.googleapis.com/auth/devstorage.read_only," + "https://www.googleapis.com/auth/devstorage.read_write"; + + // Creates a new folder. This operation is only applicable to a hierarchical + // namespace enabled bucket. 
+ rpc CreateFolder(CreateFolderRequest) returns (Folder) { + option (google.api.routing) = { + routing_parameters { field: "parent" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "parent,folder,folder_id"; + } + + // Permanently deletes an empty folder. This operation is only applicable to a + // hierarchical namespace enabled bucket. + rpc DeleteFolder(DeleteFolderRequest) returns (google.protobuf.Empty) { + option (google.api.routing) = { + routing_parameters { + field: "name" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "name"; + } + + // Returns metadata for the specified folder. This operation is only + // applicable to a hierarchical namespace enabled bucket. + rpc GetFolder(GetFolderRequest) returns (Folder) { + option (google.api.routing) = { + routing_parameters { + field: "name" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "name"; + } + + // Retrieves a list of folders. This operation is only applicable to a + // hierarchical namespace enabled bucket. + rpc ListFolders(ListFoldersRequest) returns (ListFoldersResponse) { + option (google.api.routing) = { + routing_parameters { field: "parent" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "parent"; + } + + // Renames a source folder to a destination folder. This operation is only + // applicable to a hierarchical namespace enabled bucket. During a rename, the + // source and destination folders are locked until the long running operation + // completes. 
+ rpc RenameFolder(RenameFolderRequest) returns (google.longrunning.Operation) { + option (google.api.routing) = { + routing_parameters { + field: "name" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "name,destination_folder_id"; + option (google.longrunning.operation_info) = { + response_type: "Folder" + metadata_type: "RenameFolderMetadata" + }; + } + + // Deletes a folder recursively. This operation is only applicable to a + // hierarchical namespace enabled bucket. + rpc DeleteFolderRecursive(DeleteFolderRecursiveRequest) + returns (google.longrunning.Operation) { + option (google.api.routing) = { + routing_parameters { + field: "name" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "name"; + option (google.longrunning.operation_info) = { + response_type: "google.protobuf.Empty" + metadata_type: "DeleteFolderRecursiveMetadata" + }; + } + + // Returns the storage layout configuration for a given bucket. + rpc GetStorageLayout(GetStorageLayoutRequest) returns (StorageLayout) { + option (google.api.routing) = { + routing_parameters { + field: "name" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "name"; + } + + // Creates a new managed folder. + rpc CreateManagedFolder(CreateManagedFolderRequest) returns (ManagedFolder) { + option (google.api.routing) = { + routing_parameters { field: "parent" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = + "parent,managed_folder,managed_folder_id"; + } + + // Permanently deletes an empty managed folder. + rpc DeleteManagedFolder(DeleteManagedFolderRequest) + returns (google.protobuf.Empty) { + option (google.api.routing) = { + routing_parameters { + field: "name" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "name"; + } + + // Returns metadata for the specified managed folder. 
+ rpc GetManagedFolder(GetManagedFolderRequest) returns (ManagedFolder) { + option (google.api.routing) = { + routing_parameters { + field: "name" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "name"; + } + + // Retrieves a list of managed folders for a given bucket. + rpc ListManagedFolders(ListManagedFoldersRequest) + returns (ListManagedFoldersResponse) { + option (google.api.routing) = { + routing_parameters { field: "parent" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "parent"; + } + + // Creates an Anywhere Cache instance. + rpc CreateAnywhereCache(CreateAnywhereCacheRequest) + returns (google.longrunning.Operation) { + option (google.api.routing) = { + routing_parameters { field: "parent" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "parent,anywhere_cache"; + option (google.longrunning.operation_info) = { + response_type: "AnywhereCache" + metadata_type: "CreateAnywhereCacheMetadata" + }; + } + + // Updates an Anywhere Cache instance. Mutable fields include `ttl` and + // `admission_policy`. + rpc UpdateAnywhereCache(UpdateAnywhereCacheRequest) + returns (google.longrunning.Operation) { + option (google.api.routing) = { + routing_parameters { + field: "anywhere_cache.name" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "anywhere_cache,update_mask"; + option (google.longrunning.operation_info) = { + response_type: "AnywhereCache" + metadata_type: "UpdateAnywhereCacheMetadata" + }; + } + + // Disables an Anywhere Cache instance. A disabled instance is read-only. The + // disablement could be revoked by calling ResumeAnywhereCache. The cache + // instance will be deleted automatically if it remains in the disabled state + // for at least one hour. 
+ rpc DisableAnywhereCache(DisableAnywhereCacheRequest) + returns (AnywhereCache) { + option (google.api.routing) = { + routing_parameters { + field: "name" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "name"; + } + + // Pauses an Anywhere Cache instance. + rpc PauseAnywhereCache(PauseAnywhereCacheRequest) returns (AnywhereCache) { + option (google.api.routing) = { + routing_parameters { + field: "name" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "name"; + } + + // Resumes a disabled or paused Anywhere Cache instance. + rpc ResumeAnywhereCache(ResumeAnywhereCacheRequest) returns (AnywhereCache) { + option (google.api.routing) = { + routing_parameters { + field: "name" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "name"; + } + + // Gets an Anywhere Cache instance. + rpc GetAnywhereCache(GetAnywhereCacheRequest) returns (AnywhereCache) { + option (google.api.routing) = { + routing_parameters { + field: "name" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "name"; + } + + // Lists Anywhere Cache instances for a given bucket. + rpc ListAnywhereCaches(ListAnywhereCachesRequest) + returns (ListAnywhereCachesResponse) { + option (google.api.routing) = { + routing_parameters { field: "parent" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "parent"; + } + + // Returns the Project scoped singleton IntelligenceConfig resource. + rpc GetProjectIntelligenceConfig(GetProjectIntelligenceConfigRequest) + returns (IntelligenceConfig) { + option (google.api.http) = { + get: "/v2/{name=projects/*/locations/*/intelligenceConfig}" + }; + option (google.api.method_signature) = "name"; + } + + // Updates the Project scoped singleton IntelligenceConfig resource. 
+ rpc UpdateProjectIntelligenceConfig(UpdateProjectIntelligenceConfigRequest) + returns (IntelligenceConfig) { + option (google.api.http) = { + patch: "/v2/{intelligence_config.name=projects/*/locations/*/intelligenceConfig}" + body: "intelligence_config" + }; + option (google.api.method_signature) = "intelligence_config,update_mask"; + } + + // Returns the Folder scoped singleton IntelligenceConfig resource. + rpc GetFolderIntelligenceConfig(GetFolderIntelligenceConfigRequest) + returns (IntelligenceConfig) { + option (google.api.http) = { + get: "/v2/{name=folders/*/locations/*/intelligenceConfig}" + }; + option (google.api.method_signature) = "name"; + } + + // Updates the Folder scoped singleton IntelligenceConfig resource. + rpc UpdateFolderIntelligenceConfig(UpdateFolderIntelligenceConfigRequest) + returns (IntelligenceConfig) { + option (google.api.http) = { + patch: "/v2/{intelligence_config.name=folders/*/locations/*/intelligenceConfig}" + body: "intelligence_config" + }; + option (google.api.method_signature) = "intelligence_config,update_mask"; + } + + // Returns the Organization scoped singleton IntelligenceConfig resource. + rpc GetOrganizationIntelligenceConfig( + GetOrganizationIntelligenceConfigRequest) returns (IntelligenceConfig) { + option (google.api.http) = { + get: "/v2/{name=organizations/*/locations/*/intelligenceConfig}" + }; + option (google.api.method_signature) = "name"; + } + + // Updates the Organization scoped singleton IntelligenceConfig resource. + rpc UpdateOrganizationIntelligenceConfig( + UpdateOrganizationIntelligenceConfigRequest) + returns (IntelligenceConfig) { + option (google.api.http) = { + patch: "/v2/{intelligence_config.name=organizations/*/locations/*/intelligenceConfig}" + body: "intelligence_config" + }; + option (google.api.method_signature) = "intelligence_config,update_mask"; + } + + // Gets the IAM policy for a specified bucket. 
+ // The `resource` field in the request should be + // `projects/_/buckets/{bucket}` for a bucket, or + // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` + // for a managed folder. + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) + returns (google.iam.v1.Policy) { + option (google.api.routing) = { + routing_parameters { field: "resource" path_template: "{bucket=**}" } + routing_parameters { + field: "resource" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "resource"; + } + + // Updates an IAM policy for the specified bucket. + // The `resource` field in the request should be + // `projects/_/buckets/{bucket}` for a bucket, or + // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` + // for a managed folder. + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) + returns (google.iam.v1.Policy) { + option (google.api.routing) = { + routing_parameters { field: "resource" path_template: "{bucket=**}" } + routing_parameters { + field: "resource" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "resource,policy"; + } + + // Tests a set of permissions on the given bucket, object, or managed folder + // to see which, if any, are held by the caller. + // The `resource` field in the request should be + // `projects/_/buckets/{bucket}` for a bucket, + // `projects/_/buckets/{bucket}/objects/{object}` for an object, or + // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` + // for a managed folder. 
+ rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) + returns (google.iam.v1.TestIamPermissionsResponse) { + option (google.api.routing) = { + routing_parameters { field: "resource" path_template: "{bucket=**}" } + routing_parameters { + field: "resource" + path_template: "{bucket=projects/*/buckets/*}/objects/**" + } + routing_parameters { + field: "resource" + path_template: "{bucket=projects/*/buckets/*}/managedFolders/**" + } + }; + option (google.api.method_signature) = "resource,permissions"; + } +} + +// Contains information about a pending rename operation. +message PendingRenameInfo { + // Output only. The name of the rename operation. + string operation = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// A folder resource. This resource can only exist in a hierarchical namespace +// enabled bucket. +message Folder { + option (google.api.resource) = { + type: "storage.googleapis.com/Folder" + pattern: "projects/{project}/buckets/{bucket}/folders/{folder=**}" + plural: "folders" + singular: "folder" + }; + + // Identifier. The name of this folder. + // Format: `projects/{project}/buckets/{bucket}/folders/{folder}` + string name = 1 [(google.api.field_behavior) = IDENTIFIER]; + + // Output only. The version of the metadata for this folder. Used for + // preconditions and for detecting changes in metadata. + int64 metageneration = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The creation time of the folder. + google.protobuf.Timestamp create_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The modification time of the folder. + google.protobuf.Timestamp update_time = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Only present if the folder is part of an ongoing RenameFolder + // operation. Contains information which can be used to query the operation + // status. 
The presence of this field also indicates all write operations are + // blocked for this folder, including folder, managed folder, and object + // operations. + PendingRenameInfo pending_rename_info = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Request message for GetFolder. This operation is only applicable to a +// hierarchical namespace enabled bucket. +message GetFolderRequest { + // Required. Name of the folder. + // Format: `projects/{project}/buckets/{bucket}/folders/{folder}` + string name = 6 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Folder" } + ]; + + // Makes the operation only succeed conditional on whether the folder's + // current metageneration matches the given value. + optional int64 if_metageneration_match = 3; + + // Makes the operation only succeed conditional on whether the folder's + // current metageneration does not match the given value. + optional int64 if_metageneration_not_match = 4; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. + string request_id = 5 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message for CreateFolder. This operation is only applicable to a +// hierarchical namespace enabled bucket. +message CreateFolderRequest { + // Required. Name of the bucket in which the folder will reside. The bucket + // must be a hierarchical namespace enabled bucket. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "storage.googleapis.com/Folder" + } + ]; + + // Required. Properties of the new folder being created. + // The bucket and name of the folder are specified in the parent and folder_id + // fields, respectively. Populating those fields in `folder` will result in an + // error. 
+ Folder folder = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The full name of a folder, including all its parent folders. + // Folders use single '/' characters as a delimiter. + // The folder_id must end with a slash. + // For example, the folder_id of "books/biographies/" would create a new + // "biographies/" folder under the "books/" folder. + string folder_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. If true, parent folder doesn't have to be present and all missing + // ancestor folders will be created atomically. + bool recursive = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. + string request_id = 5 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message for DeleteFolder. This operation is only applicable to a +// hierarchical namespace enabled bucket. +message DeleteFolderRequest { + // Required. Name of the folder. + // Format: `projects/{project}/buckets/{bucket}/folders/{folder}` + string name = 6 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Folder" } + ]; + + // Makes the operation only succeed conditional on whether the folder's + // current metageneration matches the given value. + optional int64 if_metageneration_match = 3; + + // Makes the operation only succeed conditional on whether the folder's + // current metageneration does not match the given value. + optional int64 if_metageneration_not_match = 4; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. + string request_id = 5 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message for ListFolders. 
This operation is only applicable to a +// hierarchical namespace enabled bucket. +message ListFoldersRequest { + // Required. Name of the bucket in which to look for folders. The bucket must + // be a hierarchical namespace enabled bucket. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "storage.googleapis.com/Folder" + } + ]; + + // Optional. Maximum number of folders to return in a single response. The + // service will use this parameter or 1,000 items, whichever is smaller. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A previously-returned page token representing part of the larger + // set of results to view. + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Filter results to folders whose names begin with this prefix. + // If set, the value must either be an empty string or end with a '/'. + string prefix = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set, returns results in a directory-like mode. The results + // will only include folders that either exactly match the above prefix, or + // are one level below the prefix. The only supported value is '/'. + string delimiter = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Filter results to folders whose names are lexicographically equal + // to or after lexicographic_start. If lexicographic_end is also set, the + // folders listed have names between lexicographic_start (inclusive) and + // lexicographic_end (exclusive). + string lexicographic_start = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Filter results to folders whose names are lexicographically + // before lexicographic_end. If lexicographic_start is also set, the folders + // listed have names between lexicographic_start (inclusive) and + // lexicographic_end (exclusive). + string lexicographic_end = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. + string request_id = 9 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Response message for ListFolders. +message ListFoldersResponse { + // The list of child folders + repeated Folder folders = 1; + + // The continuation token, used to page through large result sets. Provide + // this value in a subsequent request to return the next page of results. + string next_page_token = 2; +} + +// Request message for RenameFolder. This operation is only applicable to a +// hierarchical namespace enabled bucket. +message RenameFolderRequest { + // Required. Name of the source folder being renamed. + // Format: `projects/{project}/buckets/{bucket}/folders/{folder}` + string name = 7 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Folder" } + ]; + + // Required. The destination folder ID, e.g. `foo/bar/`. + string destination_folder_id = 8 [(google.api.field_behavior) = REQUIRED]; + + // Makes the operation only succeed conditional on whether the source + // folder's current metageneration matches the given value. + optional int64 if_metageneration_match = 4; + + // Makes the operation only succeed conditional on whether the source + // folder's current metageneration does not match the given value. + optional int64 if_metageneration_not_match = 5; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. This request is only + // idempotent if a `request_id` is provided. + string request_id = 6 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message for DeleteFolderRecursive. +message DeleteFolderRecursiveRequest { + // Required. Name of the folder being deleted, however all of its contents + // will be deleted too. 
Format: + // `projects/{project}/buckets/{bucket}/folders/{folder}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Folder" } + ]; + + // Optional. Makes the operation only succeed conditional on whether the root + // folder's current metageneration matches the given value. + optional int64 if_metageneration_match = 2 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Makes the operation only succeed conditional on whether the root + // folder's current metageneration does not match the given value. + optional int64 if_metageneration_not_match = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. + string request_id = 4 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// The message contains metadata that is common to all Storage Control +// long-running operations, present in its `google.longrunning.Operation` +// messages, and accessible via `metadata.common_metadata`. +message CommonLongRunningOperationMetadata { + // Output only. The time the operation was created. + google.protobuf.Timestamp create_time = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time the operation finished running. + google.protobuf.Timestamp end_time = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time the operation was last modified. + google.protobuf.Timestamp update_time = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The type of operation invoked. + string type = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Identifies whether the user has requested cancellation. + bool requested_cancellation = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The estimated progress of the operation in percentage [0, + // 100]. 
The value -1 means the progress is unknown. + int32 progress_percent = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Message returned in the metadata field of the Operation resource for +// RenameFolder operations. +message RenameFolderMetadata { + // Generic metadata for the long running operation. + CommonLongRunningOperationMetadata common_metadata = 1; + + // The path of the source folder. + string source_folder_id = 2; + + // The path of the destination folder. + string destination_folder_id = 3; +} + +// Message returned in the metadata field of the Operation resource for +// DeleteFolderRecursive operations. +message DeleteFolderRecursiveMetadata { + // Generic metadata for the long running operation. + CommonLongRunningOperationMetadata common_metadata = 1; + + // The path of the folder recursively deleted. + string folder_id = 2; +} + +// The storage layout configuration of a bucket. +message StorageLayout { + option (google.api.resource) = { + type: "storage.googleapis.com/StorageLayout" + pattern: "projects/{project}/buckets/{bucket}/storageLayout" + plural: "storageLayouts" + singular: "storageLayout" + }; + + // Configuration for Custom Dual Regions. It should specify precisely two + // eligible regions within the same Multiregion. More information on regions + // may be found [here](https://cloud.google.com/storage/docs/locations). + message CustomPlacementConfig { + // List of locations to use for data placement. + repeated string data_locations = 1; + } + + // Configuration for a bucket's hierarchical namespace feature. + message HierarchicalNamespace { + // Enables the hierarchical namespace feature. + bool enabled = 1; + } + + // Output only. The name of the StorageLayout resource. + // Format: `projects/{project}/buckets/{bucket}/storageLayout` + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The location of the bucket. 
+ string location = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The location type of the bucket (region, dual-region, + // multi-region, etc). + string location_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The data placement configuration for custom dual region. If + // there is no configuration, this is not a custom dual region bucket. + CustomPlacementConfig custom_placement_config = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The bucket's hierarchical namespace configuration. If there is + // no configuration, the hierarchical namespace is disabled. + HierarchicalNamespace hierarchical_namespace = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Request message for GetStorageLayout. +message GetStorageLayoutRequest { + // Required. The name of the StorageLayout resource. + // Format: `projects/{project}/buckets/{bucket}/storageLayout` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "storage.googleapis.com/StorageLayout" + } + ]; + + // An optional prefix used for permission check. It is useful when the caller + // only has limited permissions under a specific prefix. + string prefix = 2; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. + string request_id = 3 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// A managed folder. +message ManagedFolder { + option (google.api.resource) = { + type: "storage.googleapis.com/ManagedFolder" + pattern: "projects/{project}/buckets/{bucket}/managedFolders/{managed_folder=**}" + plural: "managedFolders" + singular: "managedFolder" + }; + + // Identifier. The name of this managed folder. + // Format: + // `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}` + string name = 1 [(google.api.field_behavior) = IDENTIFIER]; + + // Output only. 
The metadata version of this managed folder. It increases + // whenever the metadata is updated. Used for preconditions and for detecting + // changes in metadata. Managed folders don't have a generation number. + int64 metageneration = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The creation time of the managed folder. + google.protobuf.Timestamp create_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The modification time of the managed folder. + google.protobuf.Timestamp update_time = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Request message for GetManagedFolder. +message GetManagedFolderRequest { + // Required. Name of the managed folder. + // Format: + // `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}` + string name = 6 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "storage.googleapis.com/ManagedFolder" + } + ]; + + // The operation succeeds conditional on the managed folder's current + // metageneration matching the value here specified. + optional int64 if_metageneration_match = 3; + + // The operation succeeds conditional on the managed folder's current + // metageneration NOT matching the value here specified. + optional int64 if_metageneration_not_match = 4; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. + string request_id = 5 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message for CreateManagedFolder. +message CreateManagedFolderRequest { + // Required. Name of the bucket this managed folder belongs to. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "storage.googleapis.com/ManagedFolder" + } + ]; + + // Required. Properties of the managed folder being created. 
+ // The bucket and managed folder names are specified in the `parent` and + // `managed_folder_id` fields. Populating these fields in `managed_folder` + // will result in an error. + ManagedFolder managed_folder = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The name of the managed folder. It uses a single `/` as delimiter + // and leading and trailing `/` are allowed. + string managed_folder_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. + string request_id = 4 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// DeleteManagedFolder RPC request message. +message DeleteManagedFolderRequest { + // Required. Name of the managed folder. + // Format: + // `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder}` + string name = 7 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "storage.googleapis.com/ManagedFolder" + } + ]; + + // The operation succeeds conditional on the managed folder's current + // metageneration matching the value here specified. + optional int64 if_metageneration_match = 3; + + // The operation succeeds conditional on the managed folder's current + // metageneration NOT matching the value here specified. + optional int64 if_metageneration_not_match = 4; + + // Allows deletion of a managed folder even if it is not empty. + // A managed folder is empty if it manages no child managed folders or + // objects. Caller must have permission for + // storage.managedFolders.setIamPolicy. + bool allow_non_empty = 5; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. + string request_id = 6 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message for ListManagedFolders. 
+message ListManagedFoldersRequest { + // Required. Name of the bucket this managed folder belongs to. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "storage.googleapis.com/ManagedFolder" + } + ]; + + // Optional. Maximum number of managed folders to return in a single response. + // The service will use this parameter or 1,000 items, whichever is smaller. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A previously-returned page token representing part of the larger + // set of results to view. + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Filter results to match managed folders with name starting with + // this prefix. + string prefix = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. + string request_id = 5 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Response message for ListManagedFolders. +message ListManagedFoldersResponse { + // The list of matching managed folders + repeated ManagedFolder managed_folders = 1; + + // The continuation token, used to page through large result sets. Provide + // this value in a subsequent request to return the next page of results. + string next_page_token = 2; +} + +// Message returned in the metadata field of the Operation resource for +// CreateAnywhereCache operations. +message CreateAnywhereCacheMetadata { + // Generic metadata for the long running operation. + CommonLongRunningOperationMetadata common_metadata = 1; + + // Anywhere Cache ID. + optional string anywhere_cache_id = 2; + + // The zone in which the cache instance is running. For example, + // us-central1-a. + optional string zone = 6; + + // Anywhere Cache entry's TTL. 
A cache-level config that is applied to all new + // cache entries on admission. Default ttl value (24hrs) is applied if not + // specified in the create request. + optional google.protobuf.Duration ttl = 3; + + // Anywhere Cache entry Admission Policy in kebab-case (e.g., + // "admit-on-first-miss"). Default admission policy (admit-on-first-miss) is + // applied if not specified in the create request. + optional string admission_policy = 5; +} + +// Message returned in the metadata field of the Operation resource for +// UpdateAnywhereCache operation. +message UpdateAnywhereCacheMetadata { + // Generic metadata for the long running operation. + CommonLongRunningOperationMetadata common_metadata = 1; + + // Anywhere Cache ID. + optional string anywhere_cache_id = 2; + + // The zone in which the cache instance is running. For example, + // us-central1-a. + optional string zone = 5; + + // Anywhere Cache entry's TTL between 1h and 7days. A cache-level config that + // is applied to all new cache entries on admission. If `ttl` is pending + // update, this field equals to the new value specified in the Update request. + optional google.protobuf.Duration ttl = 3; + + // L4 Cache entry Admission Policy in kebab-case (e.g., + // "admit-on-first-miss"). If `admission_policy` is pending + // update, this field equals to the new value specified in the Update request. + optional string admission_policy = 4; +} + +// An Anywhere Cache Instance. +message AnywhereCache { + option (google.api.resource) = { + type: "storage.googleapis.com/AnywhereCache" + pattern: "projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}" + plural: "anywhereCaches" + singular: "anywhereCache" + }; + + // Immutable. The resource name of this AnywhereCache. + // Format: + // `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}` + string name = 1 [(google.api.field_behavior) = IMMUTABLE]; + + // Immutable. The zone in which the cache instance is running. 
For example, + // us-central1-a. + string zone = 10 [(google.api.field_behavior) = IMMUTABLE]; + + // Cache entry TTL (ranges between 1h to 7d). This is a cache-level config + // that defines how long a cache entry can live. Default ttl value (24hrs) + // is applied if not specified in the create request. TTL must be in whole + // seconds. + google.protobuf.Duration ttl = 3; + + // Cache admission policy. Valid policies includes: + // `admit-on-first-miss` and `admit-on-second-miss`. Defaults to + // `admit-on-first-miss`. Default value is applied if not specified in the + // create request. + string admission_policy = 9; + + // Output only. Cache state including RUNNING, CREATING, DISABLED and PAUSED. + string state = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Time when Anywhere cache instance is allocated. + google.protobuf.Timestamp create_time = 6 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Time when Anywhere cache instance is last updated, including + // creation. + google.protobuf.Timestamp update_time = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. True if there is an active update operation against this cache + // instance. Subsequential update requests will be rejected if this field is + // true. Output only. + bool pending_update = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Request message for CreateAnywhereCache. +message CreateAnywhereCacheRequest { + // Required. The bucket to which this cache belongs. + // Format: `projects/{project}/buckets/{bucket}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "storage.googleapis.com/AnywhereCache" + } + ]; + + // Required. Properties of the Anywhere Cache instance being created. + // The parent bucket name is specified in the `parent` field. Server uses the + // default value of `ttl` or `admission_policy` if not specified in + // request. 
+ AnywhereCache anywhere_cache = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. This request is only + // idempotent if a `request_id` is provided. + string request_id = 4 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message for UpdateAnywhereCache. +message UpdateAnywhereCacheRequest { + // Required. The Anywhere Cache instance to be updated. + AnywhereCache anywhere_cache = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. List of fields to be updated. Mutable fields of AnywhereCache + // include `ttl` and `admission_policy`. + // + // To specify ALL fields, specify a single field with the value `*`. Note: We + // recommend against doing this. If a new field is introduced at a later time, + // an older client updating with the `*` may accidentally reset the new + // field's value. + // + // Not specifying any fields is an error. + google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. This request is only + // idempotent if a `request_id` is provided. + string request_id = 3 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message for DisableAnywhereCache. +message DisableAnywhereCacheRequest { + // Required. The name field in the request should be: + // `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "storage.googleapis.com/AnywhereCache" + } + ]; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. 
This request is only + // idempotent if a `request_id` is provided. + string request_id = 2 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message for PauseAnywhereCache. +message PauseAnywhereCacheRequest { + // Required. The name field in the request should be: + // `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "storage.googleapis.com/AnywhereCache" + } + ]; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. This request is only + // idempotent if a `request_id` is provided. + string request_id = 2 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message for ResumeAnywhereCache. +message ResumeAnywhereCacheRequest { + // Required. The name field in the request should be: + // `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "storage.googleapis.com/AnywhereCache" + } + ]; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. This request is only + // idempotent if a `request_id` is provided. + string request_id = 2 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message for GetAnywhereCache. +message GetAnywhereCacheRequest { + // Required. The name field in the request should be: + // `projects/{project}/buckets/{bucket}/anywhereCaches/{anywhere_cache}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "storage.googleapis.com/AnywhereCache" + } + ]; + + // Optional. A unique identifier for this request. 
UUID is the recommended + // format, but other formats are still accepted. + string request_id = 2 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message for ListAnywhereCaches. +message ListAnywhereCachesRequest { + // Required. The bucket to which this cache belongs. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "storage.googleapis.com/AnywhereCache" + } + ]; + + // Maximum number of caches to return in a single response. + // The service will use this parameter or 1,000 items, whichever is smaller. + int32 page_size = 2; + + // A previously-returned page token representing part of the larger set of + // results to view. + string page_token = 3; + + // Optional. A unique identifier for this request. UUID is the recommended + // format, but other formats are still accepted. + string request_id = 4 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Response message for ListAnywhereCaches. +message ListAnywhereCachesResponse { + // The list of items. + repeated AnywhereCache anywhere_caches = 1; + + // A token, which can be sent as `page_token` to retrieve the next page. + // If this field is omitted, there are no subsequent pages. + string next_page_token = 2; +} + +// The `IntelligenceConfig` resource associated with your organization, folder, +// or project. +message IntelligenceConfig { + option (google.api.resource) = { + type: "storage.googleapis.com/IntelligenceConfig" + pattern: "folders/{folder}/locations/{location}/intelligenceConfig" + pattern: "organizations/{org}/locations/{location}/intelligenceConfig" + pattern: "projects/{project}/locations/{location}/intelligenceConfig" + plural: "intelligenceConfigs" + singular: "intelligenceConfig" + }; + + // The edition configuration of the `IntelligenceConfig` resource. 
This + // signifies the edition used for configuring the `IntelligenceConfig` + // resource and can only take the following values: + // `EDITION_CONFIG_UNSPECIFIED`, `INHERIT`, `DISABLED`, `STANDARD` and + // `TRIAL`. + enum EditionConfig { + // This is an unknown edition of the resource. + EDITION_CONFIG_UNSPECIFIED = 0; + + // The inherited edition from the parent and filters. This is the default + // edition when there is no `IntelligenceConfig` setup for a GCP resource. + INHERIT = 1; + + // The edition configuration is disabled for the `IntelligenceConfig` + // resource and its children. Filters are not applicable. + DISABLED = 2; + + // The `IntelligenceConfig` resource is of STANDARD edition. + STANDARD = 3; + + // The `IntelligenceConfig` resource is available in `TRIAL` edition. During + // the trial period, Cloud Storage does not charge for Storage Intelligence + // usage. You can specify the buckets to include in the trial period by + // using filters. At the end of the trial period, the `IntelligenceConfig` + // resource is upgraded to `STANDARD` edition. + TRIAL = 5; + } + + // Filter over location and bucket using include or exclude semantics. + // Resources that match the include or exclude filter are exclusively included + // or excluded from the Storage Intelligence plan. + message Filter { + // Collection of bucket locations. + message CloudStorageLocations { + // Optional. Bucket locations. Location can be any of the Cloud Storage + // regions specified in lower case format. For example, `us-east1`, + // `us-west1`. + repeated string locations = 1 [(google.api.field_behavior) = OPTIONAL]; + } + + // Collection of buckets. + message CloudStorageBuckets { + // Optional. A regex pattern for matching bucket names. Regex should + // follow the syntax specified in + // [google/re2](https://github.com/google/re2). 
For example, + // `^sample_.*` matches all buckets of the form + // `gs://sample_bucket-1`, `gs://sample_bucket-2`, + // `gs://sample_bucket-n` but not `gs://test_sample_bucket`. + // If you want to match a single bucket, say `gs://sample_bucket`, + // use `sample_bucket`. + repeated string bucket_id_regexes = 1 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Bucket locations to include or exclude. + oneof cloud_storage_locations { + // Bucket locations to include. + CloudStorageLocations included_cloud_storage_locations = 1; + + // Bucket locations to exclude. + CloudStorageLocations excluded_cloud_storage_locations = 2; + } + + // Buckets to include or exclude. + oneof cloud_storage_buckets { + // Buckets to include. + CloudStorageBuckets included_cloud_storage_buckets = 3; + + // Buckets to exclude. + CloudStorageBuckets excluded_cloud_storage_buckets = 4; + } + } + + // The effective `IntelligenceConfig` for the resource. + message EffectiveIntelligenceConfig { + // The effective edition of the `IntelligenceConfig` resource. + enum EffectiveEdition { + // This is an unknown edition of the resource. + EFFECTIVE_EDITION_UNSPECIFIED = 0; + + // No edition. + NONE = 1; + + // The `IntelligenceConfig` resource is of STANDARD edition. + STANDARD = 2; + } + + // Output only. The `IntelligenceConfig` edition that is applicable for the + // resource. + EffectiveEdition effective_edition = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The `IntelligenceConfig` resource that is applied for the + // target resource. Format: + // `{organizations|folders|projects}/{id}/locations/{location}/intelligenceConfig` + string intelligence_config = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // The trial configuration of the `IntelligenceConfig` resource. + message TrialConfig { + // Output only. The time at which the trial expires. + google.protobuf.Timestamp expire_time = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // Identifier. 
The name of the `IntelligenceConfig` resource associated with + // your organization, folder, or project. + // + // The name format varies based on the GCP resource hierarchy as follows: + // + // * For project: + // `projects/{project_number}/locations/global/intelligenceConfig` + // * For organization: + // `organizations/{org_id}/locations/global/intelligenceConfig` + // * For folder: `folders/{folder_id}/locations/global/intelligenceConfig` + string name = 1 [(google.api.field_behavior) = IDENTIFIER]; + + // Optional. The edition configuration of the `IntelligenceConfig` resource. + EditionConfig edition_config = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The time at which the `IntelligenceConfig` resource is last + // updated. + google.protobuf.Timestamp update_time = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Filter over location and bucket. + Filter filter = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The `IntelligenceConfig` resource that is applicable for the + // resource. + EffectiveIntelligenceConfig effective_intelligence_config = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // The trial configuration of the `IntelligenceConfig` resource. + TrialConfig trial_config = 7; +} + +// Request message to update the `IntelligenceConfig` resource associated with +// your organization. +// +// **IAM Permissions**: +// +// Requires `storage.intelligenceConfigs.update` +// [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on +// the organization. +message UpdateOrganizationIntelligenceConfigRequest { + // Required. The `IntelligenceConfig` resource to be updated. + IntelligenceConfig intelligence_config = 1 + [(google.api.field_behavior) = REQUIRED]; + + // Required. The `update_mask` that specifies the fields within the + // `IntelligenceConfig` resource that should be modified by this update. Only + // the listed fields are updated. 
+ google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. The ID that uniquely identifies the request, preventing duplicate + // processing. + string request_id = 3 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message to update the `IntelligenceConfig` resource associated with +// your folder. +// +// **IAM Permissions**: +// +// Requires `storage.intelligenceConfigs.update` +// [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on +// the folder. +message UpdateFolderIntelligenceConfigRequest { + // Required. The `IntelligenceConfig` resource to be updated. + IntelligenceConfig intelligence_config = 1 + [(google.api.field_behavior) = REQUIRED]; + + // Required. The `update_mask` that specifies the fields within the + // `IntelligenceConfig` resource that should be modified by this update. Only + // the listed fields are updated. + google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. The ID that uniquely identifies the request, preventing duplicate + // processing. + string request_id = 3 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message to update the `IntelligenceConfig` resource associated with +// your project. +// +// **IAM Permissions**: +// +// Requires `storage.intelligenceConfigs.update` +// [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on +// the folder. +message UpdateProjectIntelligenceConfigRequest { + // Required. The `IntelligenceConfig` resource to be updated. + IntelligenceConfig intelligence_config = 1 + [(google.api.field_behavior) = REQUIRED]; + + // Required. The `update_mask` that specifies the fields within the + // `IntelligenceConfig` resource that should be modified by this update. Only + // the listed fields are updated. 
+ google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. The ID that uniquely identifies the request, preventing duplicate + // processing. + string request_id = 3 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request message to get the `IntelligenceConfig` resource associated with your +// organization. +// +// **IAM Permissions** +// +// Requires `storage.intelligenceConfigs.get` +// [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on +// the organization. +message GetOrganizationIntelligenceConfigRequest { + // Required. The name of the `IntelligenceConfig` resource associated with + // your organization. + // + // Format: `organizations/{org_id}/locations/global/intelligenceConfig` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "storage.googleapis.com/IntelligenceConfig" + } + ]; +} + +// Request message to get the `IntelligenceConfig` resource associated with your +// folder. +// +// **IAM Permissions** +// +// Requires `storage.intelligenceConfigs.get` +// [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission on +// the folder. +message GetFolderIntelligenceConfigRequest { + // Required. The name of the `IntelligenceConfig` resource associated with + // your folder. + // + // Format: `folders/{id}/locations/global/intelligenceConfig` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "storage.googleapis.com/IntelligenceConfig" + } + ]; +} + +// Request message to get the `IntelligenceConfig` resource associated with your +// project. +// +// **IAM Permissions**: +// +// Requires `storage.intelligenceConfigs.get` +// [IAM](https://cloud.google.com/iam/docs/overview#permissions) permission +// on the project. +message GetProjectIntelligenceConfigRequest { + // Required. 
The name of the `IntelligenceConfig` resource associated with + // your project. + // + // Format: `projects/{id}/locations/global/intelligenceConfig` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "storage.googleapis.com/IntelligenceConfig" + } + ]; +} diff --git a/java-storage/proto-google-cloud-storage-v2/clirr-ignored-differences.xml b/java-storage/proto-google-cloud-storage-v2/clirr-ignored-differences.xml new file mode 100644 index 000000000000..8004339fc86d --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/clirr-ignored-differences.xml @@ -0,0 +1,111 @@ + + + + + 7012 + com/google/storage/v2/*OrBuilder + * get*(*) + + + 7012 + com/google/storage/v2/*OrBuilder + boolean contains*(*) + + + 7012 + com/google/storage/v2/*OrBuilder + boolean has*(*) + + + + + 6011 + com/google/storage/v2/** + *_FIELD_NUMBER + + + 7002 + com/google/storage/v2/** + * clear*(*) + + + 7002 + com/google/storage/v2/** + * get*(*) + + + 7002 + com/google/storage/v2/** + * has*(*) + + + 7002 + com/google/storage/v2/** + * set*(*) + + + + + 7006 + com/google/storage/v2/** + * getDefaultInstanceForType() + ** + + + 7006 + com/google/storage/v2/** + * addRepeatedField(*) + ** + + + 7006 + com/google/storage/v2/** + * clear() + ** + + + 7006 + com/google/storage/v2/** + * clearField(*) + ** + + + 7006 + com/google/storage/v2/** + * clearOneof(*) + ** + + + 7006 + com/google/storage/v2/** + * clone() + ** + + + 7006 + com/google/storage/v2/** + * mergeUnknownFields(*) + ** + + + 7006 + com/google/storage/v2/** + * setField(*) + ** + + + 7006 + com/google/storage/v2/** + * setRepeatedField(*) + ** + + + 7006 + com/google/storage/v2/** + * setUnknownFields(*) + ** + + diff --git a/java-storage/proto-google-cloud-storage-v2/pom.xml b/java-storage/proto-google-cloud-storage-v2/pom.xml new file mode 100644 index 000000000000..86f29b6dde2b --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/pom.xml @@ -0,0 +1,37 @@ 
+ + 4.0.0 + com.google.api.grpc + proto-google-cloud-storage-v2 + 2.64.1-SNAPSHOT + proto-google-cloud-storage-v2 + PROTO library for proto-google-cloud-storage-v2 + + com.google.cloud + google-cloud-storage-parent + 2.64.1-SNAPSHOT + + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api.grpc + proto-google-iam-v1 + + + com.google.api + api-common + + + com.google.guava + guava + + + diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/AppendObjectSpec.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/AppendObjectSpec.java new file mode 100644 index 000000000000..7c273a490990 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/AppendObjectSpec.java @@ -0,0 +1,1788 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Describes an attempt to append to an object, possibly over multiple requests.
+ * 
+ * + * Protobuf type {@code google.storage.v2.AppendObjectSpec} + */ +@com.google.protobuf.Generated +public final class AppendObjectSpec extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.AppendObjectSpec) + AppendObjectSpecOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "AppendObjectSpec"); + } + + // Use AppendObjectSpec.newBuilder() to construct. + private AppendObjectSpec(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private AppendObjectSpec() { + bucket_ = ""; + object_ = ""; + routingToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_AppendObjectSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_AppendObjectSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.AppendObjectSpec.class, + com.google.storage.v2.AppendObjectSpec.Builder.class); + } + + private int bitField0_; + public static final int BUCKET_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object bucket_ = ""; + + /** + * + * + *
+   * Required. The name of the bucket containing the object to write.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + @java.lang.Override + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the bucket containing the object to write.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OBJECT_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object object_ = ""; + + /** + * + * + *
+   * Required. The name of the object to open for writing.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + @java.lang.Override + public java.lang.String getObject() { + java.lang.Object ref = object_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + object_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the object to open for writing.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + @java.lang.Override + public com.google.protobuf.ByteString getObjectBytes() { + java.lang.Object ref = object_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + object_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int GENERATION_FIELD_NUMBER = 3; + private long generation_ = 0L; + + /** + * + * + *
+   * Required. The generation number of the object to open for writing.
+   * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 4; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 5; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int ROUTING_TOKEN_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object routingToken_ = ""; + + /** + * + * + *
+   * An optional routing token that influences request routing for the stream.
+   * Must be provided if a `BidiWriteObjectRedirectedError` is returned.
+   * 
+ * + * optional string routing_token = 6; + * + * @return Whether the routingToken field is set. + */ + @java.lang.Override + public boolean hasRoutingToken() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * An optional routing token that influences request routing for the stream.
+   * Must be provided if a `BidiWriteObjectRedirectedError` is returned.
+   * 
+ * + * optional string routing_token = 6; + * + * @return The routingToken. + */ + @java.lang.Override + public java.lang.String getRoutingToken() { + java.lang.Object ref = routingToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + routingToken_ = s; + return s; + } + } + + /** + * + * + *
+   * An optional routing token that influences request routing for the stream.
+   * Must be provided if a `BidiWriteObjectRedirectedError` is returned.
+   * 
+ * + * optional string routing_token = 6; + * + * @return The bytes for routingToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRoutingTokenBytes() { + java.lang.Object ref = routingToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + routingToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int WRITE_HANDLE_FIELD_NUMBER = 7; + private com.google.storage.v2.BidiWriteHandle writeHandle_; + + /** + * + * + *
+   * An optional write handle returned from a previous BidiWriteObjectResponse
+   * message or a BidiWriteObjectRedirectedError error.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + * + * @return Whether the writeHandle field is set. + */ + @java.lang.Override + public boolean hasWriteHandle() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * An optional write handle returned from a previous BidiWriteObjectResponse
+   * message or a BidiWriteObjectRedirectedError error.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + * + * @return The writeHandle. + */ + @java.lang.Override + public com.google.storage.v2.BidiWriteHandle getWriteHandle() { + return writeHandle_ == null + ? com.google.storage.v2.BidiWriteHandle.getDefaultInstance() + : writeHandle_; + } + + /** + * + * + *
+   * An optional write handle returned from a previous BidiWriteObjectResponse
+   * message or a BidiWriteObjectRedirectedError error.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + */ + @java.lang.Override + public com.google.storage.v2.BidiWriteHandleOrBuilder getWriteHandleOrBuilder() { + return writeHandle_ == null + ? com.google.storage.v2.BidiWriteHandle.getDefaultInstance() + : writeHandle_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, bucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(object_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, object_); + } + if (generation_ != 0L) { + output.writeInt64(3, generation_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(4, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(5, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, routingToken_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(7, getWriteHandle()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, bucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(object_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, object_); + } + if (generation_ != 0L) { + size += 
com.google.protobuf.CodedOutputStream.computeInt64Size(3, generation_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(5, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, routingToken_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getWriteHandle()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.AppendObjectSpec)) { + return super.equals(obj); + } + com.google.storage.v2.AppendObjectSpec other = (com.google.storage.v2.AppendObjectSpec) obj; + + if (!getBucket().equals(other.getBucket())) return false; + if (!getObject().equals(other.getObject())) return false; + if (getGeneration() != other.getGeneration()) return false; + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (hasRoutingToken() != other.hasRoutingToken()) return false; + if (hasRoutingToken()) { + if (!getRoutingToken().equals(other.getRoutingToken())) return false; + } + if (hasWriteHandle() != other.hasWriteHandle()) return false; + if (hasWriteHandle()) { + if (!getWriteHandle().equals(other.getWriteHandle())) return false; + } + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getBucket().hashCode(); + hash = (37 * hash) + OBJECT_FIELD_NUMBER; + hash = (53 * hash) + getObject().hashCode(); + hash = (37 * hash) + GENERATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getGeneration()); + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + if (hasRoutingToken()) { + hash = (37 * hash) + ROUTING_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getRoutingToken().hashCode(); + } + if (hasWriteHandle()) { + hash = (37 * hash) + WRITE_HANDLE_FIELD_NUMBER; + hash = (53 * hash) + getWriteHandle().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.AppendObjectSpec parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.AppendObjectSpec parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.AppendObjectSpec parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.storage.v2.AppendObjectSpec parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.AppendObjectSpec parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.AppendObjectSpec parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.AppendObjectSpec parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.AppendObjectSpec parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.AppendObjectSpec parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.AppendObjectSpec parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.AppendObjectSpec parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.AppendObjectSpec parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.AppendObjectSpec prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Describes an attempt to append to an object, possibly over multiple requests.
+   * 
+ * + * Protobuf type {@code google.storage.v2.AppendObjectSpec} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.AppendObjectSpec) + com.google.storage.v2.AppendObjectSpecOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_AppendObjectSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_AppendObjectSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.AppendObjectSpec.class, + com.google.storage.v2.AppendObjectSpec.Builder.class); + } + + // Construct using com.google.storage.v2.AppendObjectSpec.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetWriteHandleFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + bucket_ = ""; + object_ = ""; + generation_ = 0L; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + routingToken_ = ""; + writeHandle_ = null; + if (writeHandleBuilder_ != null) { + writeHandleBuilder_.dispose(); + writeHandleBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_AppendObjectSpec_descriptor; + } + + @java.lang.Override + public 
com.google.storage.v2.AppendObjectSpec getDefaultInstanceForType() { + return com.google.storage.v2.AppendObjectSpec.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.AppendObjectSpec build() { + com.google.storage.v2.AppendObjectSpec result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.AppendObjectSpec buildPartial() { + com.google.storage.v2.AppendObjectSpec result = + new com.google.storage.v2.AppendObjectSpec(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.AppendObjectSpec result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.bucket_ = bucket_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.object_ = object_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.generation_ = generation_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.routingToken_ = routingToken_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.writeHandle_ = + writeHandleBuilder_ == null ? 
writeHandle_ : writeHandleBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.AppendObjectSpec) { + return mergeFrom((com.google.storage.v2.AppendObjectSpec) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.AppendObjectSpec other) { + if (other == com.google.storage.v2.AppendObjectSpec.getDefaultInstance()) return this; + if (!other.getBucket().isEmpty()) { + bucket_ = other.bucket_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getObject().isEmpty()) { + object_ = other.object_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getGeneration() != 0L) { + setGeneration(other.getGeneration()); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (other.hasRoutingToken()) { + routingToken_ = other.routingToken_; + bitField0_ |= 0x00000020; + onChanged(); + } + if (other.hasWriteHandle()) { + mergeWriteHandle(other.getWriteHandle()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + bucket_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { 
+ object_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + generation_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 40: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 50: + { + routingToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 58: + { + input.readMessage( + internalGetWriteHandleFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 58 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object bucket_ = ""; + + /** + * + * + *
+     * Required. The name of the bucket containing the object to write.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the bucket containing the object to write.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the bucket containing the object to write.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bucket to set. + * @return This builder for chaining. + */ + public Builder setBucket(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the bucket containing the object to write.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearBucket() { + bucket_ = getDefaultInstance().getBucket(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the bucket containing the object to write.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for bucket to set. + * @return This builder for chaining. + */ + public Builder setBucketBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object object_ = ""; + + /** + * + * + *
+     * Required. The name of the object to open for writing.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + public java.lang.String getObject() { + java.lang.Object ref = object_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + object_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the object to open for writing.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + public com.google.protobuf.ByteString getObjectBytes() { + java.lang.Object ref = object_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + object_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the object to open for writing.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The object to set. + * @return This builder for chaining. + */ + public Builder setObject(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + object_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the object to open for writing.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearObject() { + object_ = getDefaultInstance().getObject(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the object to open for writing.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for object to set. + * @return This builder for chaining. + */ + public Builder setObjectBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + object_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private long generation_; + + /** + * + * + *
+     * Required. The generation number of the object to open for writing.
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + /** + * + * + *
+     * Required. The generation number of the object to open for writing.
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The generation to set. + * @return This builder for chaining. + */ + public Builder setGeneration(long value) { + + generation_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The generation number of the object to open for writing.
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearGeneration() { + bitField0_ = (bitField0_ & ~0x00000004); + generation_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000008); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000010); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object routingToken_ = ""; + + /** + * + * + *
+     * An optional routing token that influences request routing for the stream.
+     * Must be provided if a `BidiWriteObjectRedirectedError` is returned.
+     * 
+ * + * optional string routing_token = 6; + * + * @return Whether the routingToken field is set. + */ + public boolean hasRoutingToken() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+     * An optional routing token that influences request routing for the stream.
+     * Must be provided if a `BidiWriteObjectRedirectedError` is returned.
+     * 
+ * + * optional string routing_token = 6; + * + * @return The routingToken. + */ + public java.lang.String getRoutingToken() { + java.lang.Object ref = routingToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + routingToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * An optional routing token that influences request routing for the stream.
+     * Must be provided if a `BidiWriteObjectRedirectedError` is returned.
+     * 
+ * + * optional string routing_token = 6; + * + * @return The bytes for routingToken. + */ + public com.google.protobuf.ByteString getRoutingTokenBytes() { + java.lang.Object ref = routingToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + routingToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * An optional routing token that influences request routing for the stream.
+     * Must be provided if a `BidiWriteObjectRedirectedError` is returned.
+     * 
+ * + * optional string routing_token = 6; + * + * @param value The routingToken to set. + * @return This builder for chaining. + */ + public Builder setRoutingToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + routingToken_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * An optional routing token that influences request routing for the stream.
+     * Must be provided if a `BidiWriteObjectRedirectedError` is returned.
+     * 
+ * + * optional string routing_token = 6; + * + * @return This builder for chaining. + */ + public Builder clearRoutingToken() { + routingToken_ = getDefaultInstance().getRoutingToken(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
+     * An optional routing token that influences request routing for the stream.
+     * Must be provided if a `BidiWriteObjectRedirectedError` is returned.
+     * 
+ * + * optional string routing_token = 6; + * + * @param value The bytes for routingToken to set. + * @return This builder for chaining. + */ + public Builder setRoutingTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + routingToken_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private com.google.storage.v2.BidiWriteHandle writeHandle_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiWriteHandle, + com.google.storage.v2.BidiWriteHandle.Builder, + com.google.storage.v2.BidiWriteHandleOrBuilder> + writeHandleBuilder_; + + /** + * + * + *
+     * An optional write handle returned from a previous BidiWriteObjectResponse
+     * message or a BidiWriteObjectRedirectedError error.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + * + * @return Whether the writeHandle field is set. + */ + public boolean hasWriteHandle() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * An optional write handle returned from a previous BidiWriteObjectResponse
+     * message or a BidiWriteObjectRedirectedError error.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + * + * @return The writeHandle. + */ + public com.google.storage.v2.BidiWriteHandle getWriteHandle() { + if (writeHandleBuilder_ == null) { + return writeHandle_ == null + ? com.google.storage.v2.BidiWriteHandle.getDefaultInstance() + : writeHandle_; + } else { + return writeHandleBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * An optional write handle returned from a previous BidiWriteObjectResponse
+     * message or a BidiWriteObjectRedirectedError error.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + */ + public Builder setWriteHandle(com.google.storage.v2.BidiWriteHandle value) { + if (writeHandleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writeHandle_ = value; + } else { + writeHandleBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * An optional write handle returned from a previous BidiWriteObjectResponse
+     * message or a BidiWriteObjectRedirectedError error.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + */ + public Builder setWriteHandle(com.google.storage.v2.BidiWriteHandle.Builder builderForValue) { + if (writeHandleBuilder_ == null) { + writeHandle_ = builderForValue.build(); + } else { + writeHandleBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * An optional write handle returned from a previous BidiWriteObjectResponse
+     * message or a BidiWriteObjectRedirectedError error.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + */ + public Builder mergeWriteHandle(com.google.storage.v2.BidiWriteHandle value) { + if (writeHandleBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && writeHandle_ != null + && writeHandle_ != com.google.storage.v2.BidiWriteHandle.getDefaultInstance()) { + getWriteHandleBuilder().mergeFrom(value); + } else { + writeHandle_ = value; + } + } else { + writeHandleBuilder_.mergeFrom(value); + } + if (writeHandle_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * An optional write handle returned from a previous BidiWriteObjectResponse
+     * message or a BidiWriteObjectRedirectedError error.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + */ + public Builder clearWriteHandle() { + bitField0_ = (bitField0_ & ~0x00000040); + writeHandle_ = null; + if (writeHandleBuilder_ != null) { + writeHandleBuilder_.dispose(); + writeHandleBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * An optional write handle returned from a previous BidiWriteObjectResponse
+     * message or a BidiWriteObjectRedirectedError error.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + */ + public com.google.storage.v2.BidiWriteHandle.Builder getWriteHandleBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return internalGetWriteHandleFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * An optional write handle returned from a previous BidiWriteObjectResponse
+     * message or a BidiWriteObjectRedirectedError error.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + */ + public com.google.storage.v2.BidiWriteHandleOrBuilder getWriteHandleOrBuilder() { + if (writeHandleBuilder_ != null) { + return writeHandleBuilder_.getMessageOrBuilder(); + } else { + return writeHandle_ == null + ? com.google.storage.v2.BidiWriteHandle.getDefaultInstance() + : writeHandle_; + } + } + + /** + * + * + *
+     * An optional write handle returned from a previous BidiWriteObjectResponse
+     * message or a BidiWriteObjectRedirectedError error.
+     *
+     * Note that metageneration preconditions are only checked if `write_handle`
+     * is empty.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiWriteHandle, + com.google.storage.v2.BidiWriteHandle.Builder, + com.google.storage.v2.BidiWriteHandleOrBuilder> + internalGetWriteHandleFieldBuilder() { + if (writeHandleBuilder_ == null) { + writeHandleBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiWriteHandle, + com.google.storage.v2.BidiWriteHandle.Builder, + com.google.storage.v2.BidiWriteHandleOrBuilder>( + getWriteHandle(), getParentForChildren(), isClean()); + writeHandle_ = null; + } + return writeHandleBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.AppendObjectSpec) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.AppendObjectSpec) + private static final com.google.storage.v2.AppendObjectSpec DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.AppendObjectSpec(); + } + + public static com.google.storage.v2.AppendObjectSpec getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AppendObjectSpec parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.AppendObjectSpec getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/AppendObjectSpecOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/AppendObjectSpecOrBuilder.java new file mode 100644 index 000000000000..220c350bfb72 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/AppendObjectSpecOrBuilder.java @@ -0,0 +1,256 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface AppendObjectSpecOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.AppendObjectSpec) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the bucket containing the object to write.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + java.lang.String getBucket(); + + /** + * + * + *
+   * Required. The name of the bucket containing the object to write.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + com.google.protobuf.ByteString getBucketBytes(); + + /** + * + * + *
+   * Required. The name of the object to open for writing.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + java.lang.String getObject(); + + /** + * + * + *
+   * Required. The name of the object to open for writing.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + com.google.protobuf.ByteString getObjectBytes(); + + /** + * + * + *
+   * Required. The generation number of the object to open for writing.
+   * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The generation. + */ + long getGeneration(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * An optional routing token that influences request routing for the stream.
+   * Must be provided if a `BidiWriteObjectRedirectedError` is returned.
+   * 
+ * + * optional string routing_token = 6; + * + * @return Whether the routingToken field is set. + */ + boolean hasRoutingToken(); + + /** + * + * + *
+   * An optional routing token that influences request routing for the stream.
+   * Must be provided if a `BidiWriteObjectRedirectedError` is returned.
+   * 
+ * + * optional string routing_token = 6; + * + * @return The routingToken. + */ + java.lang.String getRoutingToken(); + + /** + * + * + *
+   * An optional routing token that influences request routing for the stream.
+   * Must be provided if a `BidiWriteObjectRedirectedError` is returned.
+   * 
+ * + * optional string routing_token = 6; + * + * @return The bytes for routingToken. + */ + com.google.protobuf.ByteString getRoutingTokenBytes(); + + /** + * + * + *
+   * An optional write handle returned from a previous BidiWriteObjectResponse
+   * message or a BidiWriteObjectRedirectedError error.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + * + * @return Whether the writeHandle field is set. + */ + boolean hasWriteHandle(); + + /** + * + * + *
+   * An optional write handle returned from a previous BidiWriteObjectResponse
+   * message or a BidiWriteObjectRedirectedError error.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + * + * @return The writeHandle. + */ + com.google.storage.v2.BidiWriteHandle getWriteHandle(); + + /** + * + * + *
+   * An optional write handle returned from a previous BidiWriteObjectResponse
+   * message or a BidiWriteObjectRedirectedError error.
+   *
+   * Note that metageneration preconditions are only checked if `write_handle`
+   * is empty.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 7; + */ + com.google.storage.v2.BidiWriteHandleOrBuilder getWriteHandleOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadHandle.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadHandle.java new file mode 100644 index 000000000000..37d8fc04bd98 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadHandle.java @@ -0,0 +1,507 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * `BidiReadHandle` contains a handle from a previous `BiDiReadObject`
+ * invocation. The client can use this instead of `BidiReadObjectSpec` as an
+ * optimized way of opening subsequent bidirectional streams to the same object.
+ * 
+ * + * Protobuf type {@code google.storage.v2.BidiReadHandle} + */ +@com.google.protobuf.Generated +public final class BidiReadHandle extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.BidiReadHandle) + BidiReadHandleOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BidiReadHandle"); + } + + // Use BidiReadHandle.newBuilder() to construct. + private BidiReadHandle(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BidiReadHandle() { + handle_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadHandle_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadHandle_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiReadHandle.class, + com.google.storage.v2.BidiReadHandle.Builder.class); + } + + public static final int HANDLE_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString handle_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * Required. Opaque value describing a previous read.
+   * 
+ * + * bytes handle = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The handle. + */ + @java.lang.Override + public com.google.protobuf.ByteString getHandle() { + return handle_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!handle_.isEmpty()) { + output.writeBytes(1, handle_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!handle_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, handle_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.BidiReadHandle)) { + return super.equals(obj); + } + com.google.storage.v2.BidiReadHandle other = (com.google.storage.v2.BidiReadHandle) obj; + + if (!getHandle().equals(other.getHandle())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + HANDLE_FIELD_NUMBER; + hash = (53 * hash) + getHandle().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.BidiReadHandle parseFrom(java.nio.ByteBuffer data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadHandle parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadHandle parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadHandle parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadHandle parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadHandle parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadHandle parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadHandle parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadHandle parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadHandle parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadHandle parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadHandle parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.BidiReadHandle prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * `BidiReadHandle` contains a handle from a previous `BiDiReadObject`
+   * invocation. The client can use this instead of `BidiReadObjectSpec` as an
+   * optimized way of opening subsequent bidirectional streams to the same object.
+   * 
+ * + * Protobuf type {@code google.storage.v2.BidiReadHandle} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.BidiReadHandle) + com.google.storage.v2.BidiReadHandleOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadHandle_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadHandle_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiReadHandle.class, + com.google.storage.v2.BidiReadHandle.Builder.class); + } + + // Construct using com.google.storage.v2.BidiReadHandle.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + handle_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadHandle_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadHandle getDefaultInstanceForType() { + return com.google.storage.v2.BidiReadHandle.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.BidiReadHandle build() { + com.google.storage.v2.BidiReadHandle result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadHandle buildPartial() { + com.google.storage.v2.BidiReadHandle 
result = new com.google.storage.v2.BidiReadHandle(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.BidiReadHandle result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.handle_ = handle_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.BidiReadHandle) { + return mergeFrom((com.google.storage.v2.BidiReadHandle) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.BidiReadHandle other) { + if (other == com.google.storage.v2.BidiReadHandle.getDefaultInstance()) return this; + if (!other.getHandle().isEmpty()) { + setHandle(other.getHandle()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + handle_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString handle_ = com.google.protobuf.ByteString.EMPTY; 
+ + /** + * + * + *
+     * Required. Opaque value describing a previous read.
+     * 
+ * + * bytes handle = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The handle. + */ + @java.lang.Override + public com.google.protobuf.ByteString getHandle() { + return handle_; + } + + /** + * + * + *
+     * Required. Opaque value describing a previous read.
+     * 
+ * + * bytes handle = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The handle to set. + * @return This builder for chaining. + */ + public Builder setHandle(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + handle_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Opaque value describing a previous read.
+     * 
+ * + * bytes handle = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearHandle() { + bitField0_ = (bitField0_ & ~0x00000001); + handle_ = getDefaultInstance().getHandle(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.BidiReadHandle) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.BidiReadHandle) + private static final com.google.storage.v2.BidiReadHandle DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.BidiReadHandle(); + } + + public static com.google.storage.v2.BidiReadHandle getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BidiReadHandle parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadHandle getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadHandleOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadHandleOrBuilder.java new file mode 100644 index 000000000000..e10b91efd0c2 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadHandleOrBuilder.java @@ -0,0 +1,41 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface BidiReadHandleOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.BidiReadHandle) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Opaque value describing a previous read.
+   * 
+ * + * bytes handle = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The handle. + */ + com.google.protobuf.ByteString getHandle(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectError.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectError.java new file mode 100644 index 000000000000..30df9f38ea36 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectError.java @@ -0,0 +1,927 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Error extension proto containing details for all outstanding reads on the
+ * failed stream
+ * 
+ * + * Protobuf type {@code google.storage.v2.BidiReadObjectError} + */ +@com.google.protobuf.Generated +public final class BidiReadObjectError extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.BidiReadObjectError) + BidiReadObjectErrorOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BidiReadObjectError"); + } + + // Use BidiReadObjectError.newBuilder() to construct. + private BidiReadObjectError(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BidiReadObjectError() { + readRangeErrors_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiReadObjectError.class, + com.google.storage.v2.BidiReadObjectError.Builder.class); + } + + public static final int READ_RANGE_ERRORS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List readRangeErrors_; + + /** + * + * + *
+   * The error code for each outstanding read_range
+   * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + @java.lang.Override + public java.util.List getReadRangeErrorsList() { + return readRangeErrors_; + } + + /** + * + * + *
+   * The error code for each outstanding read_range
+   * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + @java.lang.Override + public java.util.List + getReadRangeErrorsOrBuilderList() { + return readRangeErrors_; + } + + /** + * + * + *
+   * The error code for each outstanding read_range
+   * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + @java.lang.Override + public int getReadRangeErrorsCount() { + return readRangeErrors_.size(); + } + + /** + * + * + *
+   * The error code for each outstanding read_range
+   * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + @java.lang.Override + public com.google.storage.v2.ReadRangeError getReadRangeErrors(int index) { + return readRangeErrors_.get(index); + } + + /** + * + * + *
+   * The error code for each outstanding read_range
+   * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + @java.lang.Override + public com.google.storage.v2.ReadRangeErrorOrBuilder getReadRangeErrorsOrBuilder(int index) { + return readRangeErrors_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < readRangeErrors_.size(); i++) { + output.writeMessage(1, readRangeErrors_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < readRangeErrors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, readRangeErrors_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.BidiReadObjectError)) { + return super.equals(obj); + } + com.google.storage.v2.BidiReadObjectError other = + (com.google.storage.v2.BidiReadObjectError) obj; + + if (!getReadRangeErrorsList().equals(other.getReadRangeErrorsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getReadRangeErrorsCount() > 0) { + hash = (37 * hash) + READ_RANGE_ERRORS_FIELD_NUMBER; + hash = (53 * hash) + 
getReadRangeErrorsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.BidiReadObjectError parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadObjectError parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectError parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadObjectError parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectError parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadObjectError parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectError parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadObjectError parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectError parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadObjectError parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectError parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadObjectError parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.BidiReadObjectError prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Error extension proto containing details for all outstanding reads on the
+   * failed stream
+   * 
+ * + * Protobuf type {@code google.storage.v2.BidiReadObjectError} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.BidiReadObjectError) + com.google.storage.v2.BidiReadObjectErrorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiReadObjectError.class, + com.google.storage.v2.BidiReadObjectError.Builder.class); + } + + // Construct using com.google.storage.v2.BidiReadObjectError.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (readRangeErrorsBuilder_ == null) { + readRangeErrors_ = java.util.Collections.emptyList(); + } else { + readRangeErrors_ = null; + readRangeErrorsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectError_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectError getDefaultInstanceForType() { + return com.google.storage.v2.BidiReadObjectError.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectError build() { + com.google.storage.v2.BidiReadObjectError result = buildPartial(); + 
if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectError buildPartial() { + com.google.storage.v2.BidiReadObjectError result = + new com.google.storage.v2.BidiReadObjectError(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.storage.v2.BidiReadObjectError result) { + if (readRangeErrorsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + readRangeErrors_ = java.util.Collections.unmodifiableList(readRangeErrors_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.readRangeErrors_ = readRangeErrors_; + } else { + result.readRangeErrors_ = readRangeErrorsBuilder_.build(); + } + } + + private void buildPartial0(com.google.storage.v2.BidiReadObjectError result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.BidiReadObjectError) { + return mergeFrom((com.google.storage.v2.BidiReadObjectError) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.BidiReadObjectError other) { + if (other == com.google.storage.v2.BidiReadObjectError.getDefaultInstance()) return this; + if (readRangeErrorsBuilder_ == null) { + if (!other.readRangeErrors_.isEmpty()) { + if (readRangeErrors_.isEmpty()) { + readRangeErrors_ = other.readRangeErrors_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureReadRangeErrorsIsMutable(); + readRangeErrors_.addAll(other.readRangeErrors_); + } + onChanged(); + } + } else { + if (!other.readRangeErrors_.isEmpty()) { + if (readRangeErrorsBuilder_.isEmpty()) { + readRangeErrorsBuilder_.dispose(); + readRangeErrorsBuilder_ = null; + readRangeErrors_ = other.readRangeErrors_; + bitField0_ 
= (bitField0_ & ~0x00000001); + readRangeErrorsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetReadRangeErrorsFieldBuilder() + : null; + } else { + readRangeErrorsBuilder_.addAllMessages(other.readRangeErrors_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.storage.v2.ReadRangeError m = + input.readMessage( + com.google.storage.v2.ReadRangeError.parser(), extensionRegistry); + if (readRangeErrorsBuilder_ == null) { + ensureReadRangeErrorsIsMutable(); + readRangeErrors_.add(m); + } else { + readRangeErrorsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List readRangeErrors_ = + java.util.Collections.emptyList(); + + private void ensureReadRangeErrorsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + readRangeErrors_ = + new java.util.ArrayList(readRangeErrors_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ReadRangeError, + com.google.storage.v2.ReadRangeError.Builder, + 
com.google.storage.v2.ReadRangeErrorOrBuilder> + readRangeErrorsBuilder_; + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public java.util.List getReadRangeErrorsList() { + if (readRangeErrorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(readRangeErrors_); + } else { + return readRangeErrorsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public int getReadRangeErrorsCount() { + if (readRangeErrorsBuilder_ == null) { + return readRangeErrors_.size(); + } else { + return readRangeErrorsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public com.google.storage.v2.ReadRangeError getReadRangeErrors(int index) { + if (readRangeErrorsBuilder_ == null) { + return readRangeErrors_.get(index); + } else { + return readRangeErrorsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public Builder setReadRangeErrors(int index, com.google.storage.v2.ReadRangeError value) { + if (readRangeErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReadRangeErrorsIsMutable(); + readRangeErrors_.set(index, value); + onChanged(); + } else { + readRangeErrorsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public Builder setReadRangeErrors( + int index, com.google.storage.v2.ReadRangeError.Builder builderForValue) { + if (readRangeErrorsBuilder_ == null) { + ensureReadRangeErrorsIsMutable(); + readRangeErrors_.set(index, builderForValue.build()); + onChanged(); + } else { + readRangeErrorsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public Builder addReadRangeErrors(com.google.storage.v2.ReadRangeError value) { + if (readRangeErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReadRangeErrorsIsMutable(); + readRangeErrors_.add(value); + onChanged(); + } else { + readRangeErrorsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public Builder addReadRangeErrors(int index, com.google.storage.v2.ReadRangeError value) { + if (readRangeErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReadRangeErrorsIsMutable(); + readRangeErrors_.add(index, value); + onChanged(); + } else { + readRangeErrorsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public Builder addReadRangeErrors( + com.google.storage.v2.ReadRangeError.Builder builderForValue) { + if (readRangeErrorsBuilder_ == null) { + ensureReadRangeErrorsIsMutable(); + readRangeErrors_.add(builderForValue.build()); + onChanged(); + } else { + readRangeErrorsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public Builder addReadRangeErrors( + int index, com.google.storage.v2.ReadRangeError.Builder builderForValue) { + if (readRangeErrorsBuilder_ == null) { + ensureReadRangeErrorsIsMutable(); + readRangeErrors_.add(index, builderForValue.build()); + onChanged(); + } else { + readRangeErrorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public Builder addAllReadRangeErrors( + java.lang.Iterable values) { + if (readRangeErrorsBuilder_ == null) { + ensureReadRangeErrorsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, readRangeErrors_); + onChanged(); + } else { + readRangeErrorsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public Builder clearReadRangeErrors() { + if (readRangeErrorsBuilder_ == null) { + readRangeErrors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + readRangeErrorsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public Builder removeReadRangeErrors(int index) { + if (readRangeErrorsBuilder_ == null) { + ensureReadRangeErrorsIsMutable(); + readRangeErrors_.remove(index); + onChanged(); + } else { + readRangeErrorsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public com.google.storage.v2.ReadRangeError.Builder getReadRangeErrorsBuilder(int index) { + return internalGetReadRangeErrorsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public com.google.storage.v2.ReadRangeErrorOrBuilder getReadRangeErrorsOrBuilder(int index) { + if (readRangeErrorsBuilder_ == null) { + return readRangeErrors_.get(index); + } else { + return readRangeErrorsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public java.util.List + getReadRangeErrorsOrBuilderList() { + if (readRangeErrorsBuilder_ != null) { + return readRangeErrorsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(readRangeErrors_); + } + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public com.google.storage.v2.ReadRangeError.Builder addReadRangeErrorsBuilder() { + return internalGetReadRangeErrorsFieldBuilder() + .addBuilder(com.google.storage.v2.ReadRangeError.getDefaultInstance()); + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public com.google.storage.v2.ReadRangeError.Builder addReadRangeErrorsBuilder(int index) { + return internalGetReadRangeErrorsFieldBuilder() + .addBuilder(index, com.google.storage.v2.ReadRangeError.getDefaultInstance()); + } + + /** + * + * + *
+     * The error code for each outstanding read_range
+     * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + public java.util.List + getReadRangeErrorsBuilderList() { + return internalGetReadRangeErrorsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ReadRangeError, + com.google.storage.v2.ReadRangeError.Builder, + com.google.storage.v2.ReadRangeErrorOrBuilder> + internalGetReadRangeErrorsFieldBuilder() { + if (readRangeErrorsBuilder_ == null) { + readRangeErrorsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ReadRangeError, + com.google.storage.v2.ReadRangeError.Builder, + com.google.storage.v2.ReadRangeErrorOrBuilder>( + readRangeErrors_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + readRangeErrors_ = null; + } + return readRangeErrorsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.BidiReadObjectError) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.BidiReadObjectError) + private static final com.google.storage.v2.BidiReadObjectError DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.BidiReadObjectError(); + } + + public static com.google.storage.v2.BidiReadObjectError getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BidiReadObjectError parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectError getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectErrorOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectErrorOrBuilder.java new file mode 100644 index 000000000000..bc99e5c887fc --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectErrorOrBuilder.java @@ -0,0 +1,84 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface BidiReadObjectErrorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.BidiReadObjectError) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The error code for each outstanding read_range
+   * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + java.util.List getReadRangeErrorsList(); + + /** + * + * + *
+   * The error code for each outstanding read_range
+   * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + com.google.storage.v2.ReadRangeError getReadRangeErrors(int index); + + /** + * + * + *
+   * The error code for each outstanding read_range
+   * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + int getReadRangeErrorsCount(); + + /** + * + * + *
+   * The error code for each outstanding read_range
+   * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + java.util.List + getReadRangeErrorsOrBuilderList(); + + /** + * + * + *
+   * The error code for each outstanding read_range
+   * 
+ * + * repeated .google.storage.v2.ReadRangeError read_range_errors = 1; + */ + com.google.storage.v2.ReadRangeErrorOrBuilder getReadRangeErrorsOrBuilder(int index); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectRedirectedError.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectRedirectedError.java new file mode 100644 index 000000000000..be2a27448a20 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectRedirectedError.java @@ -0,0 +1,957 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Error proto containing details for a redirected read. This error might be
+ * attached as details for an ABORTED response to BidiReadObject.
+ * 
+ * + * Protobuf type {@code google.storage.v2.BidiReadObjectRedirectedError} + */ +@com.google.protobuf.Generated +public final class BidiReadObjectRedirectedError extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.BidiReadObjectRedirectedError) + BidiReadObjectRedirectedErrorOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BidiReadObjectRedirectedError"); + } + + // Use BidiReadObjectRedirectedError.newBuilder() to construct. + private BidiReadObjectRedirectedError(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BidiReadObjectRedirectedError() { + routingToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectRedirectedError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectRedirectedError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiReadObjectRedirectedError.class, + com.google.storage.v2.BidiReadObjectRedirectedError.Builder.class); + } + + private int bitField0_; + public static final int READ_HANDLE_FIELD_NUMBER = 1; + private com.google.storage.v2.BidiReadHandle readHandle_; + + /** + * + * + *
+   * The read handle for the redirected read. If set, the client might use this
+   * in the BidiReadObjectSpec when retrying the read stream.
+   * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + * + * @return Whether the readHandle field is set. + */ + @java.lang.Override + public boolean hasReadHandle() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * The read handle for the redirected read. If set, the client might use this
+   * in the BidiReadObjectSpec when retrying the read stream.
+   * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + * + * @return The readHandle. + */ + @java.lang.Override + public com.google.storage.v2.BidiReadHandle getReadHandle() { + return readHandle_ == null + ? com.google.storage.v2.BidiReadHandle.getDefaultInstance() + : readHandle_; + } + + /** + * + * + *
+   * The read handle for the redirected read. If set, the client might use this
+   * in the BidiReadObjectSpec when retrying the read stream.
+   * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + */ + @java.lang.Override + public com.google.storage.v2.BidiReadHandleOrBuilder getReadHandleOrBuilder() { + return readHandle_ == null + ? com.google.storage.v2.BidiReadHandle.getDefaultInstance() + : readHandle_; + } + + public static final int ROUTING_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object routingToken_ = ""; + + /** + * + * + *
+   * The routing token the client must use when retrying the read stream.
+   * This value must be provided in the header `x-goog-request-params`, with key
+   * `routing_token` and this string verbatim as the value.
+   * 
+ * + * optional string routing_token = 2; + * + * @return Whether the routingToken field is set. + */ + @java.lang.Override + public boolean hasRoutingToken() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * The routing token the client must use when retrying the read stream.
+   * This value must be provided in the header `x-goog-request-params`, with key
+   * `routing_token` and this string verbatim as the value.
+   * 
+ * + * optional string routing_token = 2; + * + * @return The routingToken. + */ + @java.lang.Override + public java.lang.String getRoutingToken() { + java.lang.Object ref = routingToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + routingToken_ = s; + return s; + } + } + + /** + * + * + *
+   * The routing token the client must use when retrying the read stream.
+   * This value must be provided in the header `x-goog-request-params`, with key
+   * `routing_token` and this string verbatim as the value.
+   * 
+ * + * optional string routing_token = 2; + * + * @return The bytes for routingToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRoutingTokenBytes() { + java.lang.Object ref = routingToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + routingToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getReadHandle()); + } + if (((bitField0_ & 0x00000002) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, routingToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getReadHandle()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, routingToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.BidiReadObjectRedirectedError)) { + return super.equals(obj); + } + com.google.storage.v2.BidiReadObjectRedirectedError other = + (com.google.storage.v2.BidiReadObjectRedirectedError) obj; + + if (hasReadHandle() != 
other.hasReadHandle()) return false; + if (hasReadHandle()) { + if (!getReadHandle().equals(other.getReadHandle())) return false; + } + if (hasRoutingToken() != other.hasRoutingToken()) return false; + if (hasRoutingToken()) { + if (!getRoutingToken().equals(other.getRoutingToken())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasReadHandle()) { + hash = (37 * hash) + READ_HANDLE_FIELD_NUMBER; + hash = (53 * hash) + getReadHandle().hashCode(); + } + if (hasRoutingToken()) { + hash = (37 * hash) + ROUTING_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getRoutingToken().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.BidiReadObjectRedirectedError parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadObjectRedirectedError parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectRedirectedError parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadObjectRedirectedError parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.storage.v2.BidiReadObjectRedirectedError parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadObjectRedirectedError parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectRedirectedError parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadObjectRedirectedError parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectRedirectedError parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadObjectRedirectedError parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectRedirectedError parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadObjectRedirectedError parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.BidiReadObjectRedirectedError prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Error proto containing details for a redirected read. This error might be
+   * attached as details for an ABORTED response to BidiReadObject.
+   * 
+ * + * Protobuf type {@code google.storage.v2.BidiReadObjectRedirectedError} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.BidiReadObjectRedirectedError) + com.google.storage.v2.BidiReadObjectRedirectedErrorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectRedirectedError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectRedirectedError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiReadObjectRedirectedError.class, + com.google.storage.v2.BidiReadObjectRedirectedError.Builder.class); + } + + // Construct using com.google.storage.v2.BidiReadObjectRedirectedError.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetReadHandleFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + readHandle_ = null; + if (readHandleBuilder_ != null) { + readHandleBuilder_.dispose(); + readHandleBuilder_ = null; + } + routingToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectRedirectedError_descriptor; + } + + @java.lang.Override + public 
com.google.storage.v2.BidiReadObjectRedirectedError getDefaultInstanceForType() { + return com.google.storage.v2.BidiReadObjectRedirectedError.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectRedirectedError build() { + com.google.storage.v2.BidiReadObjectRedirectedError result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectRedirectedError buildPartial() { + com.google.storage.v2.BidiReadObjectRedirectedError result = + new com.google.storage.v2.BidiReadObjectRedirectedError(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.BidiReadObjectRedirectedError result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.readHandle_ = readHandleBuilder_ == null ? 
readHandle_ : readHandleBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.routingToken_ = routingToken_; + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.BidiReadObjectRedirectedError) { + return mergeFrom((com.google.storage.v2.BidiReadObjectRedirectedError) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.BidiReadObjectRedirectedError other) { + if (other == com.google.storage.v2.BidiReadObjectRedirectedError.getDefaultInstance()) + return this; + if (other.hasReadHandle()) { + mergeReadHandle(other.getReadHandle()); + } + if (other.hasRoutingToken()) { + routingToken_ = other.routingToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetReadHandleFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + routingToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.v2.BidiReadHandle readHandle_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiReadHandle, + com.google.storage.v2.BidiReadHandle.Builder, + com.google.storage.v2.BidiReadHandleOrBuilder> + readHandleBuilder_; + + /** + * + * + *
+     * The read handle for the redirected read. If set, the client might use this
+     * in the BidiReadObjectSpec when retrying the read stream.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + * + * @return Whether the readHandle field is set. + */ + public boolean hasReadHandle() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * The read handle for the redirected read. If set, the client might use this
+     * in the BidiReadObjectSpec when retrying the read stream.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + * + * @return The readHandle. + */ + public com.google.storage.v2.BidiReadHandle getReadHandle() { + if (readHandleBuilder_ == null) { + return readHandle_ == null + ? com.google.storage.v2.BidiReadHandle.getDefaultInstance() + : readHandle_; + } else { + return readHandleBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * The read handle for the redirected read. If set, the client might use this
+     * in the BidiReadObjectSpec when retrying the read stream.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + */ + public Builder setReadHandle(com.google.storage.v2.BidiReadHandle value) { + if (readHandleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readHandle_ = value; + } else { + readHandleBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The read handle for the redirected read. If set, the client might use this
+     * in the BidiReadObjectSpec when retrying the read stream.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + */ + public Builder setReadHandle(com.google.storage.v2.BidiReadHandle.Builder builderForValue) { + if (readHandleBuilder_ == null) { + readHandle_ = builderForValue.build(); + } else { + readHandleBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The read handle for the redirected read. If set, the client might use this
+     * in the BidiReadObjectSpec when retrying the read stream.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + */ + public Builder mergeReadHandle(com.google.storage.v2.BidiReadHandle value) { + if (readHandleBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && readHandle_ != null + && readHandle_ != com.google.storage.v2.BidiReadHandle.getDefaultInstance()) { + getReadHandleBuilder().mergeFrom(value); + } else { + readHandle_ = value; + } + } else { + readHandleBuilder_.mergeFrom(value); + } + if (readHandle_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * The read handle for the redirected read. If set, the client might use this
+     * in the BidiReadObjectSpec when retrying the read stream.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + */ + public Builder clearReadHandle() { + bitField0_ = (bitField0_ & ~0x00000001); + readHandle_ = null; + if (readHandleBuilder_ != null) { + readHandleBuilder_.dispose(); + readHandleBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * The read handle for the redirected read. If set, the client might use this
+     * in the BidiReadObjectSpec when retrying the read stream.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + */ + public com.google.storage.v2.BidiReadHandle.Builder getReadHandleBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetReadHandleFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The read handle for the redirected read. If set, the client might use this
+     * in the BidiReadObjectSpec when retrying the read stream.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + */ + public com.google.storage.v2.BidiReadHandleOrBuilder getReadHandleOrBuilder() { + if (readHandleBuilder_ != null) { + return readHandleBuilder_.getMessageOrBuilder(); + } else { + return readHandle_ == null + ? com.google.storage.v2.BidiReadHandle.getDefaultInstance() + : readHandle_; + } + } + + /** + * + * + *
+     * The read handle for the redirected read. If set, the client might use this
+     * in the BidiReadObjectSpec when retrying the read stream.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiReadHandle, + com.google.storage.v2.BidiReadHandle.Builder, + com.google.storage.v2.BidiReadHandleOrBuilder> + internalGetReadHandleFieldBuilder() { + if (readHandleBuilder_ == null) { + readHandleBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiReadHandle, + com.google.storage.v2.BidiReadHandle.Builder, + com.google.storage.v2.BidiReadHandleOrBuilder>( + getReadHandle(), getParentForChildren(), isClean()); + readHandle_ = null; + } + return readHandleBuilder_; + } + + private java.lang.Object routingToken_ = ""; + + /** + * + * + *
+     * The routing token the client must use when retrying the read stream.
+     * This value must be provided in the header `x-goog-request-params`, with key
+     * `routing_token` and this string verbatim as the value.
+     * 
+ * + * optional string routing_token = 2; + * + * @return Whether the routingToken field is set. + */ + public boolean hasRoutingToken() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * The routing token the client must use when retrying the read stream.
+     * This value must be provided in the header `x-goog-request-params`, with key
+     * `routing_token` and this string verbatim as the value.
+     * 
+ * + * optional string routing_token = 2; + * + * @return The routingToken. + */ + public java.lang.String getRoutingToken() { + java.lang.Object ref = routingToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + routingToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The routing token the client must use when retrying the read stream.
+     * This value must be provided in the header `x-goog-request-params`, with key
+     * `routing_token` and this string verbatim as the value.
+     * 
+ * + * optional string routing_token = 2; + * + * @return The bytes for routingToken. + */ + public com.google.protobuf.ByteString getRoutingTokenBytes() { + java.lang.Object ref = routingToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + routingToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The routing token the client must use when retrying the read stream.
+     * This value must be provided in the header `x-goog-request-params`, with key
+     * `routing_token` and this string verbatim as the value.
+     * 
+ * + * optional string routing_token = 2; + * + * @param value The routingToken to set. + * @return This builder for chaining. + */ + public Builder setRoutingToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + routingToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The routing token the client must use when retrying the read stream.
+     * This value must be provided in the header `x-goog-request-params`, with key
+     * `routing_token` and this string verbatim as the value.
+     * 
+ * + * optional string routing_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearRoutingToken() { + routingToken_ = getDefaultInstance().getRoutingToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * The routing token the client must use when retrying the read stream.
+     * This value must be provided in the header `x-goog-request-params`, with key
+     * `routing_token` and this string verbatim as the value.
+     * 
+ * + * optional string routing_token = 2; + * + * @param value The bytes for routingToken to set. + * @return This builder for chaining. + */ + public Builder setRoutingTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + routingToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.BidiReadObjectRedirectedError) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.BidiReadObjectRedirectedError) + private static final com.google.storage.v2.BidiReadObjectRedirectedError DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.BidiReadObjectRedirectedError(); + } + + public static com.google.storage.v2.BidiReadObjectRedirectedError getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BidiReadObjectRedirectedError parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return 
PARSER; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectRedirectedError getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectRedirectedErrorOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectRedirectedErrorOrBuilder.java new file mode 100644 index 000000000000..396618c63ec1 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectRedirectedErrorOrBuilder.java @@ -0,0 +1,113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface BidiReadObjectRedirectedErrorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.BidiReadObjectRedirectedError) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The read handle for the redirected read. If set, the client might use this
+   * in the BidiReadObjectSpec when retrying the read stream.
+   * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + * + * @return Whether the readHandle field is set. + */ + boolean hasReadHandle(); + + /** + * + * + *
+   * The read handle for the redirected read. If set, the client might use this
+   * in the BidiReadObjectSpec when retrying the read stream.
+   * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + * + * @return The readHandle. + */ + com.google.storage.v2.BidiReadHandle getReadHandle(); + + /** + * + * + *
+   * The read handle for the redirected read. If set, the client might use this
+   * in the BidiReadObjectSpec when retrying the read stream.
+   * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 1; + */ + com.google.storage.v2.BidiReadHandleOrBuilder getReadHandleOrBuilder(); + + /** + * + * + *
+   * The routing token the client must use when retrying the read stream.
+   * This value must be provided in the header `x-goog-request-params`, with key
+   * `routing_token` and this string verbatim as the value.
+   * 
+ * + * optional string routing_token = 2; + * + * @return Whether the routingToken field is set. + */ + boolean hasRoutingToken(); + + /** + * + * + *
+   * The routing token the client must use when retrying the read stream.
+   * This value must be provided in the header `x-goog-request-params`, with key
+   * `routing_token` and this string verbatim as the value.
+   * 
+ * + * optional string routing_token = 2; + * + * @return The routingToken. + */ + java.lang.String getRoutingToken(); + + /** + * + * + *
+   * The routing token the client must use when retrying the read stream.
+   * This value must be provided in the header `x-goog-request-params`, with key
+   * `routing_token` and this string verbatim as the value.
+   * 
+ * + * optional string routing_token = 2; + * + * @return The bytes for routingToken. + */ + com.google.protobuf.ByteString getRoutingTokenBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectRequest.java new file mode 100644 index 000000000000..e3cea310d92b --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectRequest.java @@ -0,0 +1,1424 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for
+ * [BidiReadObject][google.storage.v2.Storage.BidiReadObject].
+ * 
+ * + * Protobuf type {@code google.storage.v2.BidiReadObjectRequest} + */ +@com.google.protobuf.Generated +public final class BidiReadObjectRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.BidiReadObjectRequest) + BidiReadObjectRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BidiReadObjectRequest"); + } + + // Use BidiReadObjectRequest.newBuilder() to construct. + private BidiReadObjectRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BidiReadObjectRequest() { + readRanges_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiReadObjectRequest.class, + com.google.storage.v2.BidiReadObjectRequest.Builder.class); + } + + private int bitField0_; + public static final int READ_OBJECT_SPEC_FIELD_NUMBER = 1; + private com.google.storage.v2.BidiReadObjectSpec readObjectSpec_; + + /** + * + * + *
+   * Optional. The first message of each stream should set this field. If this
+   * is not the first message, an error is returned. Describes the object to
+   * read.
+   * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the readObjectSpec field is set. + */ + @java.lang.Override + public boolean hasReadObjectSpec() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. The first message of each stream should set this field. If this
+   * is not the first message, an error is returned. Describes the object to
+   * read.
+   * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The readObjectSpec. + */ + @java.lang.Override + public com.google.storage.v2.BidiReadObjectSpec getReadObjectSpec() { + return readObjectSpec_ == null + ? com.google.storage.v2.BidiReadObjectSpec.getDefaultInstance() + : readObjectSpec_; + } + + /** + * + * + *
+   * Optional. The first message of each stream should set this field. If this
+   * is not the first message, an error is returned. Describes the object to
+   * read.
+   * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.BidiReadObjectSpecOrBuilder getReadObjectSpecOrBuilder() { + return readObjectSpec_ == null + ? com.google.storage.v2.BidiReadObjectSpec.getDefaultInstance() + : readObjectSpec_; + } + + public static final int READ_RANGES_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private java.util.List readRanges_; + + /** + * + * + *
+   * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+   * single range is large enough to require multiple responses, they are
+   * delivered in increasing offset order. There are no ordering guarantees
+   * across ranges. When no ranges are provided, the response message
+   * doesn't  include `ObjectRangeData`. For full object downloads, the
+   * offset and size can be set to `0`.
+   * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List getReadRangesList() { + return readRanges_; + } + + /** + * + * + *
+   * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+   * single range is large enough to require multiple responses, they are
+   * delivered in increasing offset order. There are no ordering guarantees
+   * across ranges. When no ranges are provided, the response message
+   * doesn't  include `ObjectRangeData`. For full object downloads, the
+   * offset and size can be set to `0`.
+   * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getReadRangesOrBuilderList() { + return readRanges_; + } + + /** + * + * + *
+   * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+   * single range is large enough to require multiple responses, they are
+   * delivered in increasing offset order. There are no ordering guarantees
+   * across ranges. When no ranges are provided, the response message
+   * doesn't  include `ObjectRangeData`. For full object downloads, the
+   * offset and size can be set to `0`.
+   * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getReadRangesCount() { + return readRanges_.size(); + } + + /** + * + * + *
+   * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+   * single range is large enough to require multiple responses, they are
+   * delivered in increasing offset order. There are no ordering guarantees
+   * across ranges. When no ranges are provided, the response message
+   * doesn't  include `ObjectRangeData`. For full object downloads, the
+   * offset and size can be set to `0`.
+   * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ReadRange getReadRanges(int index) { + return readRanges_.get(index); + } + + /** + * + * + *
+   * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+   * single range is large enough to require multiple responses, they are
+   * delivered in increasing offset order. There are no ordering guarantees
+   * across ranges. When no ranges are provided, the response message
+   * doesn't  include `ObjectRangeData`. For full object downloads, the
+   * offset and size can be set to `0`.
+   * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ReadRangeOrBuilder getReadRangesOrBuilder(int index) { + return readRanges_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getReadObjectSpec()); + } + for (int i = 0; i < readRanges_.size(); i++) { + output.writeMessage(8, readRanges_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getReadObjectSpec()); + } + for (int i = 0; i < readRanges_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, readRanges_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.BidiReadObjectRequest)) { + return super.equals(obj); + } + com.google.storage.v2.BidiReadObjectRequest other = + (com.google.storage.v2.BidiReadObjectRequest) obj; + + if (hasReadObjectSpec() != other.hasReadObjectSpec()) return false; + if (hasReadObjectSpec()) { + if (!getReadObjectSpec().equals(other.getReadObjectSpec())) return false; + } + if (!getReadRangesList().equals(other.getReadRangesList())) return false; + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasReadObjectSpec()) { + hash = (37 * hash) + READ_OBJECT_SPEC_FIELD_NUMBER; + hash = (53 * hash) + getReadObjectSpec().hashCode(); + } + if (getReadRangesCount() > 0) { + hash = (37 * hash) + READ_RANGES_FIELD_NUMBER; + hash = (53 * hash) + getReadRangesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.BidiReadObjectRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadObjectRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadObjectRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadObjectRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadObjectRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadObjectRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.BidiReadObjectRequest 
prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for
+   * [BidiReadObject][google.storage.v2.Storage.BidiReadObject].
+   * 
+ * + * Protobuf type {@code google.storage.v2.BidiReadObjectRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.BidiReadObjectRequest) + com.google.storage.v2.BidiReadObjectRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiReadObjectRequest.class, + com.google.storage.v2.BidiReadObjectRequest.Builder.class); + } + + // Construct using com.google.storage.v2.BidiReadObjectRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetReadObjectSpecFieldBuilder(); + internalGetReadRangesFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + readObjectSpec_ = null; + if (readObjectSpecBuilder_ != null) { + readObjectSpecBuilder_.dispose(); + readObjectSpecBuilder_ = null; + } + if (readRangesBuilder_ == null) { + readRanges_ = java.util.Collections.emptyList(); + } else { + readRanges_ = null; + readRangesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectRequest getDefaultInstanceForType() { + return com.google.storage.v2.BidiReadObjectRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectRequest build() { + com.google.storage.v2.BidiReadObjectRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectRequest buildPartial() { + com.google.storage.v2.BidiReadObjectRequest result = + new com.google.storage.v2.BidiReadObjectRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.storage.v2.BidiReadObjectRequest result) { + if (readRangesBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + readRanges_ = java.util.Collections.unmodifiableList(readRanges_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.readRanges_ = readRanges_; + } else { + result.readRanges_ = readRangesBuilder_.build(); + } + } + + private void buildPartial0(com.google.storage.v2.BidiReadObjectRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.readObjectSpec_ = + readObjectSpecBuilder_ == null ? 
readObjectSpec_ : readObjectSpecBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.BidiReadObjectRequest) { + return mergeFrom((com.google.storage.v2.BidiReadObjectRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.BidiReadObjectRequest other) { + if (other == com.google.storage.v2.BidiReadObjectRequest.getDefaultInstance()) return this; + if (other.hasReadObjectSpec()) { + mergeReadObjectSpec(other.getReadObjectSpec()); + } + if (readRangesBuilder_ == null) { + if (!other.readRanges_.isEmpty()) { + if (readRanges_.isEmpty()) { + readRanges_ = other.readRanges_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureReadRangesIsMutable(); + readRanges_.addAll(other.readRanges_); + } + onChanged(); + } + } else { + if (!other.readRanges_.isEmpty()) { + if (readRangesBuilder_.isEmpty()) { + readRangesBuilder_.dispose(); + readRangesBuilder_ = null; + readRanges_ = other.readRanges_; + bitField0_ = (bitField0_ & ~0x00000002); + readRangesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetReadRangesFieldBuilder() + : null; + } else { + readRangesBuilder_.addAllMessages(other.readRanges_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetReadObjectSpecFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 66: + { + com.google.storage.v2.ReadRange m = + input.readMessage(com.google.storage.v2.ReadRange.parser(), extensionRegistry); + if (readRangesBuilder_ == null) { + ensureReadRangesIsMutable(); + readRanges_.add(m); + } else { + readRangesBuilder_.addMessage(m); + } + break; + } // case 66 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.v2.BidiReadObjectSpec readObjectSpec_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiReadObjectSpec, + com.google.storage.v2.BidiReadObjectSpec.Builder, + com.google.storage.v2.BidiReadObjectSpecOrBuilder> + readObjectSpecBuilder_; + + /** + * + * + *
+     * Optional. The first message of each stream should set this field. If this
+     * is not the first message, an error is returned. Describes the object to
+     * read.
+     * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the readObjectSpec field is set. + */ + public boolean hasReadObjectSpec() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Optional. The first message of each stream should set this field. If this
+     * is not the first message, an error is returned. Describes the object to
+     * read.
+     * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The readObjectSpec. + */ + public com.google.storage.v2.BidiReadObjectSpec getReadObjectSpec() { + if (readObjectSpecBuilder_ == null) { + return readObjectSpec_ == null + ? com.google.storage.v2.BidiReadObjectSpec.getDefaultInstance() + : readObjectSpec_; + } else { + return readObjectSpecBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The first message of each stream should set this field. If this
+     * is not the first message, an error is returned. Describes the object to
+     * read.
+     * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setReadObjectSpec(com.google.storage.v2.BidiReadObjectSpec value) { + if (readObjectSpecBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readObjectSpec_ = value; + } else { + readObjectSpecBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The first message of each stream should set this field. If this
+     * is not the first message, an error is returned. Describes the object to
+     * read.
+     * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setReadObjectSpec( + com.google.storage.v2.BidiReadObjectSpec.Builder builderForValue) { + if (readObjectSpecBuilder_ == null) { + readObjectSpec_ = builderForValue.build(); + } else { + readObjectSpecBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The first message of each stream should set this field. If this
+     * is not the first message, an error is returned. Describes the object to
+     * read.
+     * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeReadObjectSpec(com.google.storage.v2.BidiReadObjectSpec value) { + if (readObjectSpecBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && readObjectSpec_ != null + && readObjectSpec_ != com.google.storage.v2.BidiReadObjectSpec.getDefaultInstance()) { + getReadObjectSpecBuilder().mergeFrom(value); + } else { + readObjectSpec_ = value; + } + } else { + readObjectSpecBuilder_.mergeFrom(value); + } + if (readObjectSpec_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The first message of each stream should set this field. If this
+     * is not the first message, an error is returned. Describes the object to
+     * read.
+     * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearReadObjectSpec() { + bitField0_ = (bitField0_ & ~0x00000001); + readObjectSpec_ = null; + if (readObjectSpecBuilder_ != null) { + readObjectSpecBuilder_.dispose(); + readObjectSpecBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The first message of each stream should set this field. If this
+     * is not the first message, an error is returned. Describes the object to
+     * read.
+     * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.BidiReadObjectSpec.Builder getReadObjectSpecBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetReadObjectSpecFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The first message of each stream should set this field. If this
+     * is not the first message, an error is returned. Describes the object to
+     * read.
+     * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.BidiReadObjectSpecOrBuilder getReadObjectSpecOrBuilder() { + if (readObjectSpecBuilder_ != null) { + return readObjectSpecBuilder_.getMessageOrBuilder(); + } else { + return readObjectSpec_ == null + ? com.google.storage.v2.BidiReadObjectSpec.getDefaultInstance() + : readObjectSpec_; + } + } + + /** + * + * + *
+     * Optional. The first message of each stream should set this field. If this
+     * is not the first message, an error is returned. Describes the object to
+     * read.
+     * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiReadObjectSpec, + com.google.storage.v2.BidiReadObjectSpec.Builder, + com.google.storage.v2.BidiReadObjectSpecOrBuilder> + internalGetReadObjectSpecFieldBuilder() { + if (readObjectSpecBuilder_ == null) { + readObjectSpecBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiReadObjectSpec, + com.google.storage.v2.BidiReadObjectSpec.Builder, + com.google.storage.v2.BidiReadObjectSpecOrBuilder>( + getReadObjectSpec(), getParentForChildren(), isClean()); + readObjectSpec_ = null; + } + return readObjectSpecBuilder_; + } + + private java.util.List readRanges_ = + java.util.Collections.emptyList(); + + private void ensureReadRangesIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + readRanges_ = new java.util.ArrayList(readRanges_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ReadRange, + com.google.storage.v2.ReadRange.Builder, + com.google.storage.v2.ReadRangeOrBuilder> + readRangesBuilder_; + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getReadRangesList() { + if (readRangesBuilder_ == null) { + return java.util.Collections.unmodifiableList(readRanges_); + } else { + return readRangesBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getReadRangesCount() { + if (readRangesBuilder_ == null) { + return readRanges_.size(); + } else { + return readRangesBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ReadRange getReadRanges(int index) { + if (readRangesBuilder_ == null) { + return readRanges_.get(index); + } else { + return readRangesBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setReadRanges(int index, com.google.storage.v2.ReadRange value) { + if (readRangesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReadRangesIsMutable(); + readRanges_.set(index, value); + onChanged(); + } else { + readRangesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setReadRanges( + int index, com.google.storage.v2.ReadRange.Builder builderForValue) { + if (readRangesBuilder_ == null) { + ensureReadRangesIsMutable(); + readRanges_.set(index, builderForValue.build()); + onChanged(); + } else { + readRangesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addReadRanges(com.google.storage.v2.ReadRange value) { + if (readRangesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReadRangesIsMutable(); + readRanges_.add(value); + onChanged(); + } else { + readRangesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addReadRanges(int index, com.google.storage.v2.ReadRange value) { + if (readRangesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReadRangesIsMutable(); + readRanges_.add(index, value); + onChanged(); + } else { + readRangesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addReadRanges(com.google.storage.v2.ReadRange.Builder builderForValue) { + if (readRangesBuilder_ == null) { + ensureReadRangesIsMutable(); + readRanges_.add(builderForValue.build()); + onChanged(); + } else { + readRangesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addReadRanges( + int index, com.google.storage.v2.ReadRange.Builder builderForValue) { + if (readRangesBuilder_ == null) { + ensureReadRangesIsMutable(); + readRanges_.add(index, builderForValue.build()); + onChanged(); + } else { + readRangesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllReadRanges( + java.lang.Iterable values) { + if (readRangesBuilder_ == null) { + ensureReadRangesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, readRanges_); + onChanged(); + } else { + readRangesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearReadRanges() { + if (readRangesBuilder_ == null) { + readRanges_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + readRangesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeReadRanges(int index) { + if (readRangesBuilder_ == null) { + ensureReadRangesIsMutable(); + readRanges_.remove(index); + onChanged(); + } else { + readRangesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ReadRange.Builder getReadRangesBuilder(int index) { + return internalGetReadRangesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ReadRangeOrBuilder getReadRangesOrBuilder(int index) { + if (readRangesBuilder_ == null) { + return readRanges_.get(index); + } else { + return readRangesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getReadRangesOrBuilderList() { + if (readRangesBuilder_ != null) { + return readRangesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(readRanges_); + } + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ReadRange.Builder addReadRangesBuilder() { + return internalGetReadRangesFieldBuilder() + .addBuilder(com.google.storage.v2.ReadRange.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ReadRange.Builder addReadRangesBuilder(int index) { + return internalGetReadRangesFieldBuilder() + .addBuilder(index, com.google.storage.v2.ReadRange.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+     * single range is large enough to require multiple responses, they are
+     * delivered in increasing offset order. There are no ordering guarantees
+     * across ranges. When no ranges are provided, the response message
+     * doesn't  include `ObjectRangeData`. For full object downloads, the
+     * offset and size can be set to `0`.
+     * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getReadRangesBuilderList() { + return internalGetReadRangesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ReadRange, + com.google.storage.v2.ReadRange.Builder, + com.google.storage.v2.ReadRangeOrBuilder> + internalGetReadRangesFieldBuilder() { + if (readRangesBuilder_ == null) { + readRangesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ReadRange, + com.google.storage.v2.ReadRange.Builder, + com.google.storage.v2.ReadRangeOrBuilder>( + readRanges_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + readRanges_ = null; + } + return readRangesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.BidiReadObjectRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.BidiReadObjectRequest) + private static final com.google.storage.v2.BidiReadObjectRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.BidiReadObjectRequest(); + } + + public static com.google.storage.v2.BidiReadObjectRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BidiReadObjectRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectRequestOrBuilder.java new file mode 100644 index 000000000000..e21822928968 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectRequestOrBuilder.java @@ -0,0 +1,167 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface BidiReadObjectRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.BidiReadObjectRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. The first message of each stream should set this field. If this
+   * is not the first message, an error is returned. Describes the object to
+   * read.
+   * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the readObjectSpec field is set. + */ + boolean hasReadObjectSpec(); + + /** + * + * + *
+   * Optional. The first message of each stream should set this field. If this
+   * is not the first message, an error is returned. Describes the object to
+   * read.
+   * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The readObjectSpec. + */ + com.google.storage.v2.BidiReadObjectSpec getReadObjectSpec(); + + /** + * + * + *
+   * Optional. The first message of each stream should set this field. If this
+   * is not the first message, an error is returned. Describes the object to
+   * read.
+   * 
+ * + * + * .google.storage.v2.BidiReadObjectSpec read_object_spec = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.BidiReadObjectSpecOrBuilder getReadObjectSpecOrBuilder(); + + /** + * + * + *
+   * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+   * single range is large enough to require multiple responses, they are
+   * delivered in increasing offset order. There are no ordering guarantees
+   * across ranges. When no ranges are provided, the response message
+   * doesn't  include `ObjectRangeData`. For full object downloads, the
+   * offset and size can be set to `0`.
+   * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getReadRangesList(); + + /** + * + * + *
+   * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+   * single range is large enough to require multiple responses, they are
+   * delivered in increasing offset order. There are no ordering guarantees
+   * across ranges. When no ranges are provided, the response message
+   * doesn't  include `ObjectRangeData`. For full object downloads, the
+   * offset and size can be set to `0`.
+   * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ReadRange getReadRanges(int index); + + /** + * + * + *
+   * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+   * single range is large enough to require multiple responses, they are
+   * delivered in increasing offset order. There are no ordering guarantees
+   * across ranges. When no ranges are provided, the response message
+   * doesn't  include `ObjectRangeData`. For full object downloads, the
+   * offset and size can be set to `0`.
+   * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getReadRangesCount(); + + /** + * + * + *
+   * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+   * single range is large enough to require multiple responses, they are
+   * delivered in increasing offset order. There are no ordering guarantees
+   * across ranges. When no ranges are provided, the response message
+   * doesn't  include `ObjectRangeData`. For full object downloads, the
+   * offset and size can be set to `0`.
+   * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getReadRangesOrBuilderList(); + + /** + * + * + *
+   * Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+   * single range is large enough to require multiple responses, they are
+   * delivered in increasing offset order. There are no ordering guarantees
+   * across ranges. When no ranges are provided, the response message
+   * doesn't  include `ObjectRangeData`. For full object downloads, the
+   * offset and size can be set to `0`.
+   * 
+ * + * + * repeated .google.storage.v2.ReadRange read_ranges = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ReadRangeOrBuilder getReadRangesOrBuilder(int index); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectResponse.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectResponse.java new file mode 100644 index 000000000000..14e9ec26a814 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectResponse.java @@ -0,0 +1,1679 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Response message for
+ * [BidiReadObject][google.storage.v2.Storage.BidiReadObject].
+ * 
+ * + * Protobuf type {@code google.storage.v2.BidiReadObjectResponse} + */ +@com.google.protobuf.Generated +public final class BidiReadObjectResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.BidiReadObjectResponse) + BidiReadObjectResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BidiReadObjectResponse"); + } + + // Use BidiReadObjectResponse.newBuilder() to construct. + private BidiReadObjectResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BidiReadObjectResponse() { + objectDataRanges_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiReadObjectResponse.class, + com.google.storage.v2.BidiReadObjectResponse.Builder.class); + } + + private int bitField0_; + public static final int OBJECT_DATA_RANGES_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private java.util.List objectDataRanges_; + + /** + * + * + *
+   * A portion of the object's data. The service might leave data
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * The service might pipeline multiple responses belonging to different read
+   * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+   * to the same value as the corresponding source read request.
+   * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + @java.lang.Override + public java.util.List getObjectDataRangesList() { + return objectDataRanges_; + } + + /** + * + * + *
+   * A portion of the object's data. The service might leave data
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * The service might pipeline multiple responses belonging to different read
+   * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+   * to the same value as the corresponding source read request.
+   * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + @java.lang.Override + public java.util.List + getObjectDataRangesOrBuilderList() { + return objectDataRanges_; + } + + /** + * + * + *
+   * A portion of the object's data. The service might leave data
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * The service might pipeline multiple responses belonging to different read
+   * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+   * to the same value as the corresponding source read request.
+   * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + @java.lang.Override + public int getObjectDataRangesCount() { + return objectDataRanges_.size(); + } + + /** + * + * + *
+   * A portion of the object's data. The service might leave data
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * The service might pipeline multiple responses belonging to different read
+   * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+   * to the same value as the corresponding source read request.
+   * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + @java.lang.Override + public com.google.storage.v2.ObjectRangeData getObjectDataRanges(int index) { + return objectDataRanges_.get(index); + } + + /** + * + * + *
+   * A portion of the object's data. The service might leave data
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * The service might pipeline multiple responses belonging to different read
+   * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+   * to the same value as the corresponding source read request.
+   * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + @java.lang.Override + public com.google.storage.v2.ObjectRangeDataOrBuilder getObjectDataRangesOrBuilder(int index) { + return objectDataRanges_.get(index); + } + + public static final int METADATA_FIELD_NUMBER = 4; + private com.google.storage.v2.Object metadata_; + + /** + * + * + *
+   * Metadata of the object whose media is being returned.
+   * Only populated in the first response in the stream and not populated when
+   * the stream is opened with a read handle.
+   * 
+ * + * .google.storage.v2.Object metadata = 4; + * + * @return Whether the metadata field is set. + */ + @java.lang.Override + public boolean hasMetadata() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Metadata of the object whose media is being returned.
+   * Only populated in the first response in the stream and not populated when
+   * the stream is opened with a read handle.
+   * 
+ * + * .google.storage.v2.Object metadata = 4; + * + * @return The metadata. + */ + @java.lang.Override + public com.google.storage.v2.Object getMetadata() { + return metadata_ == null ? com.google.storage.v2.Object.getDefaultInstance() : metadata_; + } + + /** + * + * + *
+   * Metadata of the object whose media is being returned.
+   * Only populated in the first response in the stream and not populated when
+   * the stream is opened with a read handle.
+   * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getMetadataOrBuilder() { + return metadata_ == null ? com.google.storage.v2.Object.getDefaultInstance() : metadata_; + } + + public static final int READ_HANDLE_FIELD_NUMBER = 7; + private com.google.storage.v2.BidiReadHandle readHandle_; + + /** + * + * + *
+   * This field is periodically refreshed, however it might not be set in
+   * every response. It allows the client to more efficiently open subsequent
+   * bidirectional streams to the same object.
+   * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + * + * @return Whether the readHandle field is set. + */ + @java.lang.Override + public boolean hasReadHandle() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * This field is periodically refreshed, however it might not be set in
+   * every response. It allows the client to more efficiently open subsequent
+   * bidirectional streams to the same object.
+   * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + * + * @return The readHandle. + */ + @java.lang.Override + public com.google.storage.v2.BidiReadHandle getReadHandle() { + return readHandle_ == null + ? com.google.storage.v2.BidiReadHandle.getDefaultInstance() + : readHandle_; + } + + /** + * + * + *
+   * This field is periodically refreshed, however it might not be set in
+   * every response. It allows the client to more efficiently open subsequent
+   * bidirectional streams to the same object.
+   * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + */ + @java.lang.Override + public com.google.storage.v2.BidiReadHandleOrBuilder getReadHandleOrBuilder() { + return readHandle_ == null + ? com.google.storage.v2.BidiReadHandle.getDefaultInstance() + : readHandle_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getMetadata()); + } + for (int i = 0; i < objectDataRanges_.size(); i++) { + output.writeMessage(6, objectDataRanges_.get(i)); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(7, getReadHandle()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getMetadata()); + } + for (int i = 0; i < objectDataRanges_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, objectDataRanges_.get(i)); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getReadHandle()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.BidiReadObjectResponse)) { + return super.equals(obj); + } + com.google.storage.v2.BidiReadObjectResponse other = + (com.google.storage.v2.BidiReadObjectResponse) obj; + + if 
(!getObjectDataRangesList().equals(other.getObjectDataRangesList())) return false; + if (hasMetadata() != other.hasMetadata()) return false; + if (hasMetadata()) { + if (!getMetadata().equals(other.getMetadata())) return false; + } + if (hasReadHandle() != other.hasReadHandle()) return false; + if (hasReadHandle()) { + if (!getReadHandle().equals(other.getReadHandle())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getObjectDataRangesCount() > 0) { + hash = (37 * hash) + OBJECT_DATA_RANGES_FIELD_NUMBER; + hash = (53 * hash) + getObjectDataRangesList().hashCode(); + } + if (hasMetadata()) { + hash = (37 * hash) + METADATA_FIELD_NUMBER; + hash = (53 * hash) + getMetadata().hashCode(); + } + if (hasReadHandle()) { + hash = (37 * hash) + READ_HANDLE_FIELD_NUMBER; + hash = (53 * hash) + getReadHandle().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.BidiReadObjectResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadObjectResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadObjectResponse parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiReadObjectResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadObjectResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiReadObjectResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiReadObjectResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.storage.v2.BidiReadObjectResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.BidiReadObjectResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for
+   * [BidiReadObject][google.storage.v2.Storage.BidiReadObject].
+   * 
+ * + * Protobuf type {@code google.storage.v2.BidiReadObjectResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.BidiReadObjectResponse) + com.google.storage.v2.BidiReadObjectResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiReadObjectResponse.class, + com.google.storage.v2.BidiReadObjectResponse.Builder.class); + } + + // Construct using com.google.storage.v2.BidiReadObjectResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetObjectDataRangesFieldBuilder(); + internalGetMetadataFieldBuilder(); + internalGetReadHandleFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (objectDataRangesBuilder_ == null) { + objectDataRanges_ = java.util.Collections.emptyList(); + } else { + objectDataRanges_ = null; + objectDataRangesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + metadata_ = null; + if (metadataBuilder_ != null) { + metadataBuilder_.dispose(); + metadataBuilder_ = null; + } + readHandle_ = null; + if (readHandleBuilder_ != null) { + readHandleBuilder_.dispose(); + 
readHandleBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectResponse_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectResponse getDefaultInstanceForType() { + return com.google.storage.v2.BidiReadObjectResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectResponse build() { + com.google.storage.v2.BidiReadObjectResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectResponse buildPartial() { + com.google.storage.v2.BidiReadObjectResponse result = + new com.google.storage.v2.BidiReadObjectResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.storage.v2.BidiReadObjectResponse result) { + if (objectDataRangesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + objectDataRanges_ = java.util.Collections.unmodifiableList(objectDataRanges_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.objectDataRanges_ = objectDataRanges_; + } else { + result.objectDataRanges_ = objectDataRangesBuilder_.build(); + } + } + + private void buildPartial0(com.google.storage.v2.BidiReadObjectResponse result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.metadata_ = metadataBuilder_ == null ? metadata_ : metadataBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.readHandle_ = readHandleBuilder_ == null ? 
readHandle_ : readHandleBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.BidiReadObjectResponse) { + return mergeFrom((com.google.storage.v2.BidiReadObjectResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.BidiReadObjectResponse other) { + if (other == com.google.storage.v2.BidiReadObjectResponse.getDefaultInstance()) return this; + if (objectDataRangesBuilder_ == null) { + if (!other.objectDataRanges_.isEmpty()) { + if (objectDataRanges_.isEmpty()) { + objectDataRanges_ = other.objectDataRanges_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureObjectDataRangesIsMutable(); + objectDataRanges_.addAll(other.objectDataRanges_); + } + onChanged(); + } + } else { + if (!other.objectDataRanges_.isEmpty()) { + if (objectDataRangesBuilder_.isEmpty()) { + objectDataRangesBuilder_.dispose(); + objectDataRangesBuilder_ = null; + objectDataRanges_ = other.objectDataRanges_; + bitField0_ = (bitField0_ & ~0x00000001); + objectDataRangesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetObjectDataRangesFieldBuilder() + : null; + } else { + objectDataRangesBuilder_.addAllMessages(other.objectDataRanges_); + } + } + } + if (other.hasMetadata()) { + mergeMetadata(other.getMetadata()); + } + if (other.hasReadHandle()) { + mergeReadHandle(other.getReadHandle()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 34: + { + input.readMessage( + internalGetMetadataFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 34 + case 50: + { + com.google.storage.v2.ObjectRangeData m = + input.readMessage( + com.google.storage.v2.ObjectRangeData.parser(), extensionRegistry); + if (objectDataRangesBuilder_ == null) { + ensureObjectDataRangesIsMutable(); + objectDataRanges_.add(m); + } else { + objectDataRangesBuilder_.addMessage(m); + } + break; + } // case 50 + case 58: + { + input.readMessage( + internalGetReadHandleFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 58 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List objectDataRanges_ = + java.util.Collections.emptyList(); + + private 
void ensureObjectDataRangesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + objectDataRanges_ = + new java.util.ArrayList(objectDataRanges_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ObjectRangeData, + com.google.storage.v2.ObjectRangeData.Builder, + com.google.storage.v2.ObjectRangeDataOrBuilder> + objectDataRangesBuilder_; + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public java.util.List getObjectDataRangesList() { + if (objectDataRangesBuilder_ == null) { + return java.util.Collections.unmodifiableList(objectDataRanges_); + } else { + return objectDataRangesBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public int getObjectDataRangesCount() { + if (objectDataRangesBuilder_ == null) { + return objectDataRanges_.size(); + } else { + return objectDataRangesBuilder_.getCount(); + } + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public com.google.storage.v2.ObjectRangeData getObjectDataRanges(int index) { + if (objectDataRangesBuilder_ == null) { + return objectDataRanges_.get(index); + } else { + return objectDataRangesBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public Builder setObjectDataRanges(int index, com.google.storage.v2.ObjectRangeData value) { + if (objectDataRangesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureObjectDataRangesIsMutable(); + objectDataRanges_.set(index, value); + onChanged(); + } else { + objectDataRangesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public Builder setObjectDataRanges( + int index, com.google.storage.v2.ObjectRangeData.Builder builderForValue) { + if (objectDataRangesBuilder_ == null) { + ensureObjectDataRangesIsMutable(); + objectDataRanges_.set(index, builderForValue.build()); + onChanged(); + } else { + objectDataRangesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public Builder addObjectDataRanges(com.google.storage.v2.ObjectRangeData value) { + if (objectDataRangesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureObjectDataRangesIsMutable(); + objectDataRanges_.add(value); + onChanged(); + } else { + objectDataRangesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public Builder addObjectDataRanges(int index, com.google.storage.v2.ObjectRangeData value) { + if (objectDataRangesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureObjectDataRangesIsMutable(); + objectDataRanges_.add(index, value); + onChanged(); + } else { + objectDataRangesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public Builder addObjectDataRanges( + com.google.storage.v2.ObjectRangeData.Builder builderForValue) { + if (objectDataRangesBuilder_ == null) { + ensureObjectDataRangesIsMutable(); + objectDataRanges_.add(builderForValue.build()); + onChanged(); + } else { + objectDataRangesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public Builder addObjectDataRanges( + int index, com.google.storage.v2.ObjectRangeData.Builder builderForValue) { + if (objectDataRangesBuilder_ == null) { + ensureObjectDataRangesIsMutable(); + objectDataRanges_.add(index, builderForValue.build()); + onChanged(); + } else { + objectDataRangesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public Builder addAllObjectDataRanges( + java.lang.Iterable values) { + if (objectDataRangesBuilder_ == null) { + ensureObjectDataRangesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, objectDataRanges_); + onChanged(); + } else { + objectDataRangesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public Builder clearObjectDataRanges() { + if (objectDataRangesBuilder_ == null) { + objectDataRanges_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + objectDataRangesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public Builder removeObjectDataRanges(int index) { + if (objectDataRangesBuilder_ == null) { + ensureObjectDataRangesIsMutable(); + objectDataRanges_.remove(index); + onChanged(); + } else { + objectDataRangesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public com.google.storage.v2.ObjectRangeData.Builder getObjectDataRangesBuilder(int index) { + return internalGetObjectDataRangesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public com.google.storage.v2.ObjectRangeDataOrBuilder getObjectDataRangesOrBuilder(int index) { + if (objectDataRangesBuilder_ == null) { + return objectDataRanges_.get(index); + } else { + return objectDataRangesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public java.util.List + getObjectDataRangesOrBuilderList() { + if (objectDataRangesBuilder_ != null) { + return objectDataRangesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(objectDataRanges_); + } + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public com.google.storage.v2.ObjectRangeData.Builder addObjectDataRangesBuilder() { + return internalGetObjectDataRangesFieldBuilder() + .addBuilder(com.google.storage.v2.ObjectRangeData.getDefaultInstance()); + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public com.google.storage.v2.ObjectRangeData.Builder addObjectDataRangesBuilder(int index) { + return internalGetObjectDataRangesFieldBuilder() + .addBuilder(index, com.google.storage.v2.ObjectRangeData.getDefaultInstance()); + } + + /** + * + * + *
+     * A portion of the object's data. The service might leave data
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * The service might pipeline multiple responses belonging to different read
+     * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+     * to the same value as the corresponding source read request.
+     * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + public java.util.List + getObjectDataRangesBuilderList() { + return internalGetObjectDataRangesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ObjectRangeData, + com.google.storage.v2.ObjectRangeData.Builder, + com.google.storage.v2.ObjectRangeDataOrBuilder> + internalGetObjectDataRangesFieldBuilder() { + if (objectDataRangesBuilder_ == null) { + objectDataRangesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ObjectRangeData, + com.google.storage.v2.ObjectRangeData.Builder, + com.google.storage.v2.ObjectRangeDataOrBuilder>( + objectDataRanges_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + objectDataRanges_ = null; + } + return objectDataRangesBuilder_; + } + + private com.google.storage.v2.Object metadata_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + metadataBuilder_; + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream and not populated when
+     * the stream is opened with a read handle.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + * + * @return Whether the metadata field is set. + */ + public boolean hasMetadata() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream and not populated when
+     * the stream is opened with a read handle.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + * + * @return The metadata. + */ + public com.google.storage.v2.Object getMetadata() { + if (metadataBuilder_ == null) { + return metadata_ == null ? com.google.storage.v2.Object.getDefaultInstance() : metadata_; + } else { + return metadataBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream and not populated when
+     * the stream is opened with a read handle.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + public Builder setMetadata(com.google.storage.v2.Object value) { + if (metadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + metadata_ = value; + } else { + metadataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream and not populated when
+     * the stream is opened with a read handle.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + public Builder setMetadata(com.google.storage.v2.Object.Builder builderForValue) { + if (metadataBuilder_ == null) { + metadata_ = builderForValue.build(); + } else { + metadataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream and not populated when
+     * the stream is opened with a read handle.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + public Builder mergeMetadata(com.google.storage.v2.Object value) { + if (metadataBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && metadata_ != null + && metadata_ != com.google.storage.v2.Object.getDefaultInstance()) { + getMetadataBuilder().mergeFrom(value); + } else { + metadata_ = value; + } + } else { + metadataBuilder_.mergeFrom(value); + } + if (metadata_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream and not populated when
+     * the stream is opened with a read handle.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + public Builder clearMetadata() { + bitField0_ = (bitField0_ & ~0x00000002); + metadata_ = null; + if (metadataBuilder_ != null) { + metadataBuilder_.dispose(); + metadataBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream and not populated when
+     * the stream is opened with a read handle.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + public com.google.storage.v2.Object.Builder getMetadataBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetMetadataFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream and not populated when
+     * the stream is opened with a read handle.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + public com.google.storage.v2.ObjectOrBuilder getMetadataOrBuilder() { + if (metadataBuilder_ != null) { + return metadataBuilder_.getMessageOrBuilder(); + } else { + return metadata_ == null ? com.google.storage.v2.Object.getDefaultInstance() : metadata_; + } + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream and not populated when
+     * the stream is opened with a read handle.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + internalGetMetadataFieldBuilder() { + if (metadataBuilder_ == null) { + metadataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder>( + getMetadata(), getParentForChildren(), isClean()); + metadata_ = null; + } + return metadataBuilder_; + } + + private com.google.storage.v2.BidiReadHandle readHandle_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiReadHandle, + com.google.storage.v2.BidiReadHandle.Builder, + com.google.storage.v2.BidiReadHandleOrBuilder> + readHandleBuilder_; + + /** + * + * + *
+     * This field is periodically refreshed, however it might not be set in
+     * every response. It allows the client to more efficiently open subsequent
+     * bidirectional streams to the same object.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + * + * @return Whether the readHandle field is set. + */ + public boolean hasReadHandle() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * This field is periodically refreshed, however it might not be set in
+     * every response. It allows the client to more efficiently open subsequent
+     * bidirectional streams to the same object.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + * + * @return The readHandle. + */ + public com.google.storage.v2.BidiReadHandle getReadHandle() { + if (readHandleBuilder_ == null) { + return readHandle_ == null + ? com.google.storage.v2.BidiReadHandle.getDefaultInstance() + : readHandle_; + } else { + return readHandleBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * This field is periodically refreshed, however it might not be set in
+     * every response. It allows the client to more efficiently open subsequent
+     * bidirectional streams to the same object.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + */ + public Builder setReadHandle(com.google.storage.v2.BidiReadHandle value) { + if (readHandleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readHandle_ = value; + } else { + readHandleBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * This field is periodically refreshed, however it might not be set in
+     * every response. It allows the client to more efficiently open subsequent
+     * bidirectional streams to the same object.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + */ + public Builder setReadHandle(com.google.storage.v2.BidiReadHandle.Builder builderForValue) { + if (readHandleBuilder_ == null) { + readHandle_ = builderForValue.build(); + } else { + readHandleBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * This field is periodically refreshed, however it might not be set in
+     * every response. It allows the client to more efficiently open subsequent
+     * bidirectional streams to the same object.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + */ + public Builder mergeReadHandle(com.google.storage.v2.BidiReadHandle value) { + if (readHandleBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && readHandle_ != null + && readHandle_ != com.google.storage.v2.BidiReadHandle.getDefaultInstance()) { + getReadHandleBuilder().mergeFrom(value); + } else { + readHandle_ = value; + } + } else { + readHandleBuilder_.mergeFrom(value); + } + if (readHandle_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * This field is periodically refreshed, however it might not be set in
+     * every response. It allows the client to more efficiently open subsequent
+     * bidirectional streams to the same object.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + */ + public Builder clearReadHandle() { + bitField0_ = (bitField0_ & ~0x00000004); + readHandle_ = null; + if (readHandleBuilder_ != null) { + readHandleBuilder_.dispose(); + readHandleBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * This field is periodically refreshed, however it might not be set in
+     * every response. It allows the client to more efficiently open subsequent
+     * bidirectional streams to the same object.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + */ + public com.google.storage.v2.BidiReadHandle.Builder getReadHandleBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetReadHandleFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * This field is periodically refreshed, however it might not be set in
+     * every response. It allows the client to more efficiently open subsequent
+     * bidirectional streams to the same object.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + */ + public com.google.storage.v2.BidiReadHandleOrBuilder getReadHandleOrBuilder() { + if (readHandleBuilder_ != null) { + return readHandleBuilder_.getMessageOrBuilder(); + } else { + return readHandle_ == null + ? com.google.storage.v2.BidiReadHandle.getDefaultInstance() + : readHandle_; + } + } + + /** + * + * + *
+     * This field is periodically refreshed, however it might not be set in
+     * every response. It allows the client to more efficiently open subsequent
+     * bidirectional streams to the same object.
+     * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiReadHandle, + com.google.storage.v2.BidiReadHandle.Builder, + com.google.storage.v2.BidiReadHandleOrBuilder> + internalGetReadHandleFieldBuilder() { + if (readHandleBuilder_ == null) { + readHandleBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiReadHandle, + com.google.storage.v2.BidiReadHandle.Builder, + com.google.storage.v2.BidiReadHandleOrBuilder>( + getReadHandle(), getParentForChildren(), isClean()); + readHandle_ = null; + } + return readHandleBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.BidiReadObjectResponse) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.BidiReadObjectResponse) + private static final com.google.storage.v2.BidiReadObjectResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.BidiReadObjectResponse(); + } + + public static com.google.storage.v2.BidiReadObjectResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BidiReadObjectResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + 
return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.BidiReadObjectResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectResponseOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectResponseOrBuilder.java new file mode 100644 index 000000000000..05981180405d --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectResponseOrBuilder.java @@ -0,0 +1,200 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface BidiReadObjectResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.BidiReadObjectResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * A portion of the object's data. The service might leave data
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * The service might pipeline multiple responses belonging to different read
+   * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+   * to the same value as the corresponding source read request.
+   * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + java.util.List getObjectDataRangesList(); + + /** + * + * + *
+   * A portion of the object's data. The service might leave data
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * The service might pipeline multiple responses belonging to different read
+   * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+   * to the same value as the corresponding source read request.
+   * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + com.google.storage.v2.ObjectRangeData getObjectDataRanges(int index); + + /** + * + * + *
+   * A portion of the object's data. The service might leave data
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * The service might pipeline multiple responses belonging to different read
+   * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+   * to the same value as the corresponding source read request.
+   * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + int getObjectDataRangesCount(); + + /** + * + * + *
+   * A portion of the object's data. The service might leave data
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * The service might pipeline multiple responses belonging to different read
+   * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+   * to the same value as the corresponding source read request.
+   * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + java.util.List + getObjectDataRangesOrBuilderList(); + + /** + * + * + *
+   * A portion of the object's data. The service might leave data
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * The service might pipeline multiple responses belonging to different read
+   * requests. Each `ObjectRangeData` entry has a `read_id` that is set
+   * to the same value as the corresponding source read request.
+   * 
+ * + * repeated .google.storage.v2.ObjectRangeData object_data_ranges = 6; + */ + com.google.storage.v2.ObjectRangeDataOrBuilder getObjectDataRangesOrBuilder(int index); + + /** + * + * + *
+   * Metadata of the object whose media is being returned.
+   * Only populated in the first response in the stream and not populated when
+   * the stream is opened with a read handle.
+   * 
+ * + * .google.storage.v2.Object metadata = 4; + * + * @return Whether the metadata field is set. + */ + boolean hasMetadata(); + + /** + * + * + *
+   * Metadata of the object whose media is being returned.
+   * Only populated in the first response in the stream and not populated when
+   * the stream is opened with a read handle.
+   * 
+ * + * .google.storage.v2.Object metadata = 4; + * + * @return The metadata. + */ + com.google.storage.v2.Object getMetadata(); + + /** + * + * + *
+   * Metadata of the object whose media is being returned.
+   * Only populated in the first response in the stream and not populated when
+   * the stream is opened with a read handle.
+   * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + com.google.storage.v2.ObjectOrBuilder getMetadataOrBuilder(); + + /** + * + * + *
+   * This field is periodically refreshed, however it might not be set in
+   * every response. It allows the client to more efficiently open subsequent
+   * bidirectional streams to the same object.
+   * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + * + * @return Whether the readHandle field is set. + */ + boolean hasReadHandle(); + + /** + * + * + *
+   * This field is periodically refreshed, however it might not be set in
+   * every response. It allows the client to more efficiently open subsequent
+   * bidirectional streams to the same object.
+   * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + * + * @return The readHandle. + */ + com.google.storage.v2.BidiReadHandle getReadHandle(); + + /** + * + * + *
+   * This field is periodically refreshed, however it might not be set in
+   * every response. It allows the client to more efficiently open subsequent
+   * bidirectional streams to the same object.
+   * 
+ * + * .google.storage.v2.BidiReadHandle read_handle = 7; + */ + com.google.storage.v2.BidiReadHandleOrBuilder getReadHandleOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectSpec.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectSpec.java new file mode 100644 index 000000000000..7acba521fc40 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectSpec.java @@ -0,0 +1,2726 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Describes the object to read in a BidiReadObject request.
+ * 
+ * + * Protobuf type {@code google.storage.v2.BidiReadObjectSpec} + */ +@com.google.protobuf.Generated +public final class BidiReadObjectSpec extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.BidiReadObjectSpec) + BidiReadObjectSpecOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BidiReadObjectSpec"); + } + + // Use BidiReadObjectSpec.newBuilder() to construct. + private BidiReadObjectSpec(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BidiReadObjectSpec() { + bucket_ = ""; + object_ = ""; + routingToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiReadObjectSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiReadObjectSpec.class, + com.google.storage.v2.BidiReadObjectSpec.Builder.class); + } + + private int bitField0_; + public static final int BUCKET_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object bucket_ = ""; + + /** + * + * + *
+   * Required. The name of the bucket containing the object to read.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + @java.lang.Override + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the bucket containing the object to read.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OBJECT_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object object_ = ""; + + /** + * + * + *
+   * Required. The name of the object to read.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + @java.lang.Override + public java.lang.String getObject() { + java.lang.Object ref = object_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + object_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the object to read.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + @java.lang.Override + public com.google.protobuf.ByteString getObjectBytes() { + java.lang.Object ref = object_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + object_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int GENERATION_FIELD_NUMBER = 3; + private long generation_ = 0L; + + /** + * + * + *
+   * Optional. If present, selects a specific revision of this object (as
+   * opposed to the latest version, the default).
+   * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + public static final int IF_GENERATION_MATCH_FIELD_NUMBER = 4; + private long ifGenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + public static final int IF_GENERATION_NOT_MATCH_FIELD_NUMBER = 5; + private long ifGenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 6; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 7; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER = 8; + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + @java.lang.Override + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + public static final int READ_MASK_FIELD_NUMBER = 12; + private com.google.protobuf.FieldMask readMask_; + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * The `checksummed_data` field and its children are always present.
+   * If no mask is specified, it defaults to all fields except
+   * `metadata.owner` and `metadata.acl`.
+   * `*` might be used to mean "all fields".
+   * As per https://google.aip.dev/161, this field is deprecated.
+   * As an alternative, `grpc metadata` can be used:
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + * + * @deprecated google.storage.v2.BidiReadObjectSpec.read_mask is deprecated. See + * google/storage/v2/storage.proto;l=1189 + * @return Whether the readMask field is set. + */ + @java.lang.Override + @java.lang.Deprecated + public boolean hasReadMask() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * The `checksummed_data` field and its children are always present.
+   * If no mask is specified, it defaults to all fields except
+   * `metadata.owner` and `metadata.acl`.
+   * `*` might be used to mean "all fields".
+   * As per https://google.aip.dev/161, this field is deprecated.
+   * As an alternative, `grpc metadata` can be used:
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + * + * @deprecated google.storage.v2.BidiReadObjectSpec.read_mask is deprecated. See + * google/storage/v2/storage.proto;l=1189 + * @return The readMask. + */ + @java.lang.Override + @java.lang.Deprecated + public com.google.protobuf.FieldMask getReadMask() { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * The `checksummed_data` field and its children are always present.
+   * If no mask is specified, it defaults to all fields except
+   * `metadata.owner` and `metadata.acl`.
+   * `*` might be used to mean "all fields".
+   * As per https://google.aip.dev/161, this field is deprecated.
+   * As an alternative, `grpc metadata` can be used:
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + */ + @java.lang.Override + @java.lang.Deprecated + public com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder() { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + + public static final int READ_HANDLE_FIELD_NUMBER = 13; + private com.google.storage.v2.BidiReadHandle readHandle_; + + /** + * + * + *
+   * The client can optionally set this field. The read handle is an optimized
+   * way of creating new streams. Read handles are generated and periodically
+   * refreshed from prior reads.
+   * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + * + * @return Whether the readHandle field is set. + */ + @java.lang.Override + public boolean hasReadHandle() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+   * The client can optionally set this field. The read handle is an optimized
+   * way of creating new streams. Read handles are generated and periodically
+   * refreshed from prior reads.
+   * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + * + * @return The readHandle. + */ + @java.lang.Override + public com.google.storage.v2.BidiReadHandle getReadHandle() { + return readHandle_ == null + ? com.google.storage.v2.BidiReadHandle.getDefaultInstance() + : readHandle_; + } + + /** + * + * + *
+   * The client can optionally set this field. The read handle is an optimized
+   * way of creating new streams. Read handles are generated and periodically
+   * refreshed from prior reads.
+   * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + */ + @java.lang.Override + public com.google.storage.v2.BidiReadHandleOrBuilder getReadHandleOrBuilder() { + return readHandle_ == null + ? com.google.storage.v2.BidiReadHandle.getDefaultInstance() + : readHandle_; + } + + public static final int ROUTING_TOKEN_FIELD_NUMBER = 14; + + @SuppressWarnings("serial") + private volatile java.lang.Object routingToken_ = ""; + + /** + * + * + *
+   * The routing token that influences request routing for the stream. Must be
+   * provided if a BidiReadObjectRedirectedError is returned.
+   * 
+ * + * optional string routing_token = 14; + * + * @return Whether the routingToken field is set. + */ + @java.lang.Override + public boolean hasRoutingToken() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+   * The routing token that influences request routing for the stream. Must be
+   * provided if a BidiReadObjectRedirectedError is returned.
+   * 
+ * + * optional string routing_token = 14; + * + * @return The routingToken. + */ + @java.lang.Override + public java.lang.String getRoutingToken() { + java.lang.Object ref = routingToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + routingToken_ = s; + return s; + } + } + + /** + * + * + *
+   * The routing token that influences request routing for the stream. Must be
+   * provided if a BidiReadObjectRedirectedError is returned.
   * </pre>
   *
   * <code>optional string routing_token = 14;</code>
   *
   * @return The bytes for routingToken.
   */
  @java.lang.Override
  public com.google.protobuf.ByteString getRoutingTokenBytes() {
    java.lang.Object ref = routingToken_;
    if (ref instanceof java.lang.String) {
      // Lazily convert the cached String form to a ByteString and memoize it.
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
      routingToken_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }

  // Memoized result of isInitialized(): -1 = not computed, 0 = false, 1 = true.
  private byte memoizedIsInitialized = -1;

  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  // Serializes set fields in field-number order (1..14); explicit-presence
  // fields are gated on their bitField0_ presence bits.
  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) {
      com.google.protobuf.GeneratedMessage.writeString(output, 1, bucket_);
    }
    if (!com.google.protobuf.GeneratedMessage.isStringEmpty(object_)) {
      com.google.protobuf.GeneratedMessage.writeString(output, 2, object_);
    }
    if (generation_ != 0L) {
      output.writeInt64(3, generation_);
    }
    if (((bitField0_ & 0x00000001) != 0)) {
      output.writeInt64(4, ifGenerationMatch_);
    }
    if (((bitField0_ & 0x00000002) != 0)) {
      output.writeInt64(5, ifGenerationNotMatch_);
    }
    if (((bitField0_ & 0x00000004) != 0)) {
      output.writeInt64(6, ifMetagenerationMatch_);
    }
    if (((bitField0_ & 0x00000008) != 0)) {
      output.writeInt64(7, ifMetagenerationNotMatch_);
    }
    if (((bitField0_ & 0x00000010) != 0)) {
      output.writeMessage(8, getCommonObjectRequestParams());
    }
    if (((bitField0_ & 0x00000020) != 0)) {
      output.writeMessage(12, getReadMask());
    }
    if (((bitField0_ & 0x00000040) != 0)) {
      output.writeMessage(13, getReadHandle());
    }
    if (((bitField0_ & 0x00000080) != 0)) {
      com.google.protobuf.GeneratedMessage.writeString(output, 14, routingToken_);
    }
    getUnknownFields().writeTo(output);
  }

  // Computes (and memoizes in memoizedSize) the serialized byte size; mirrors
  // the field/presence checks in writeTo exactly.
  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) {
      size += com.google.protobuf.GeneratedMessage.computeStringSize(1, bucket_);
    }
    if (!com.google.protobuf.GeneratedMessage.isStringEmpty(object_)) {
      size += com.google.protobuf.GeneratedMessage.computeStringSize(2, object_);
    }
    if (generation_ != 0L) {
      size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, generation_);
    }
    if (((bitField0_ & 0x00000001) != 0)) {
      size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ifGenerationMatch_);
    }
    if (((bitField0_ & 0x00000002) != 0)) {
      size += com.google.protobuf.CodedOutputStream.computeInt64Size(5, ifGenerationNotMatch_);
    }
    if (((bitField0_ & 0x00000004) != 0)) {
      size += com.google.protobuf.CodedOutputStream.computeInt64Size(6, ifMetagenerationMatch_);
    }
    if (((bitField0_ & 0x00000008) != 0)) {
      size += com.google.protobuf.CodedOutputStream.computeInt64Size(7, ifMetagenerationNotMatch_);
    }
    if (((bitField0_ & 0x00000010) != 0)) {
      size +=
          com.google.protobuf.CodedOutputStream.computeMessageSize(
              8, getCommonObjectRequestParams());
    }
    if (((bitField0_ & 0x00000020) != 0)) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(12, getReadMask());
    }
    if (((bitField0_ & 0x00000040) != 0)) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(13, getReadHandle());
    }
    if (((bitField0_ & 0x00000080) != 0)) {
      size += com.google.protobuf.GeneratedMessage.computeStringSize(14, routingToken_);
    }
    size += getUnknownFields().getSerializedSize();
    memoizedSize = size;
    return size;
  }

  // Field-by-field equality: presence flags must match before values are
  // compared for explicit-presence fields; unknown fields also participate.
  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.storage.v2.BidiReadObjectSpec)) {
      return super.equals(obj);
    }
    com.google.storage.v2.BidiReadObjectSpec other = (com.google.storage.v2.BidiReadObjectSpec) obj;

    if (!getBucket().equals(other.getBucket())) return false;
    if (!getObject().equals(other.getObject())) return false;
    if (getGeneration() != other.getGeneration()) return false;
    if (hasIfGenerationMatch() != other.hasIfGenerationMatch()) return false;
    if (hasIfGenerationMatch()) {
      if (getIfGenerationMatch() != other.getIfGenerationMatch()) return false;
    }
    if (hasIfGenerationNotMatch() != other.hasIfGenerationNotMatch()) return false;
    if (hasIfGenerationNotMatch()) {
      if (getIfGenerationNotMatch() != other.getIfGenerationNotMatch()) return false;
    }
    if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false;
    if (hasIfMetagenerationMatch()) {
      if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false;
    }
    if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false;
    if (hasIfMetagenerationNotMatch()) {
      if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false;
    }
    if (hasCommonObjectRequestParams() != other.hasCommonObjectRequestParams()) return false;
    if (hasCommonObjectRequestParams()) {
      if (!getCommonObjectRequestParams().equals(other.getCommonObjectRequestParams()))
        return false;
    }
    if (hasReadMask() != other.hasReadMask()) return false;
    if (hasReadMask()) {
      if (!getReadMask().equals(other.getReadMask())) return false;
    }
    if (hasReadHandle() != other.hasReadHandle()) return false;
    if (hasReadHandle()) {
      if (!getReadHandle().equals(other.getReadHandle())) return false;
    }
    if (hasRoutingToken() != other.hasRoutingToken()) return false;
    if (hasRoutingToken()) {
      if (!getRoutingToken().equals(other.getRoutingToken())) return false;
    }
    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
    return true;
  }

  // Hash mixes each present field keyed by its field number; result is
  // memoized in memoizedHashCode (0 means "not yet computed").
  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    hash = (37 * hash) + BUCKET_FIELD_NUMBER;
    hash = (53 * hash) + getBucket().hashCode();
    hash = (37 * hash) + OBJECT_FIELD_NUMBER;
    hash = (53 * hash) + getObject().hashCode();
    hash = (37 * hash) + GENERATION_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getGeneration());
    if (hasIfGenerationMatch()) {
      hash = (37 * hash) + IF_GENERATION_MATCH_FIELD_NUMBER;
      hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationMatch());
    }
    if (hasIfGenerationNotMatch()) {
      hash = (37 * hash) + IF_GENERATION_NOT_MATCH_FIELD_NUMBER;
      hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationNotMatch());
    }
    if (hasIfMetagenerationMatch()) {
      hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER;
      hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch());
    }
    if (hasIfMetagenerationNotMatch()) {
      hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER;
      hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch());
    }
    if (hasCommonObjectRequestParams()) {
      hash = (37 * hash) + COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER;
      hash = (53 * hash) + getCommonObjectRequestParams().hashCode();
    }
    if (hasReadMask()) {
      hash = (37 * hash) + READ_MASK_FIELD_NUMBER;
      hash = (53 * hash) + getReadMask().hashCode();
    }
    if (hasReadHandle()) {
      hash = (37 * hash) + READ_HANDLE_FIELD_NUMBER;
      hash = (53 * hash) + getReadHandle().hashCode();
    }
    if (hasRoutingToken()) {
      hash = (37 * hash) + ROUTING_TOKEN_FIELD_NUMBER;
      hash = (53 * hash) + getRoutingToken().hashCode();
    }
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  // Standard generated parse entry points; all delegate to PARSER.

  public static com.google.storage.v2.BidiReadObjectSpec parseFrom(java.nio.ByteBuffer data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.storage.v2.BidiReadObjectSpec parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.storage.v2.BidiReadObjectSpec parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.storage.v2.BidiReadObjectSpec parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.storage.v2.BidiReadObjectSpec parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.storage.v2.BidiReadObjectSpec parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.storage.v2.BidiReadObjectSpec parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input);
  }

  public static com.google.storage.v2.BidiReadObjectSpec parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessage.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.storage.v2.BidiReadObjectSpec parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.storage.v2.BidiReadObjectSpec parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.storage.v2.BidiReadObjectSpec parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input);
  }

  public static com.google.storage.v2.BidiReadObjectSpec parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessage.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }

  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  public static Builder newBuilder(com.google.storage.v2.BidiReadObjectSpec prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  @java.lang.Override
  public Builder toBuilder() {
    // The default instance shares no state, so a fresh Builder suffices for it.
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }

  /**
   *
   *
   * <pre>
   * Describes the object to read in a BidiReadObject request.
   * </pre>
   *
   * Protobuf type {@code google.storage.v2.BidiReadObjectSpec}
   */
  public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder>
      implements
      // @@protoc_insertion_point(builder_implements:google.storage.v2.BidiReadObjectSpec)
      com.google.storage.v2.BidiReadObjectSpecOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.storage.v2.StorageProto
          .internal_static_google_storage_v2_BidiReadObjectSpec_descriptor;
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.storage.v2.StorageProto
          .internal_static_google_storage_v2_BidiReadObjectSpec_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.storage.v2.BidiReadObjectSpec.class,
              com.google.storage.v2.BidiReadObjectSpec.Builder.class);
    }

    // Construct using com.google.storage.v2.BidiReadObjectSpec.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }

    private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }

    // Eagerly creates the nested-message field builders when the runtime
    // requires field builders to always be used.
    private void maybeForceBuilderInitialization() {
      if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        internalGetCommonObjectRequestParamsFieldBuilder();
        internalGetReadMaskFieldBuilder();
        internalGetReadHandleFieldBuilder();
      }
    }

    // Resets every field (and presence bit) to its default; nested builders
    // are disposed so they are re-created lazily on next use.
    @java.lang.Override
    public Builder clear() {
      super.clear();
      bitField0_ = 0;
      bucket_ = "";
      object_ = "";
      generation_ = 0L;
      ifGenerationMatch_ = 0L;
      ifGenerationNotMatch_ = 0L;
      ifMetagenerationMatch_ = 0L;
      ifMetagenerationNotMatch_ = 0L;
      commonObjectRequestParams_ = null;
      if (commonObjectRequestParamsBuilder_ != null) {
        commonObjectRequestParamsBuilder_.dispose();
        commonObjectRequestParamsBuilder_ = null;
      }
      readMask_ = null;
      if (readMaskBuilder_ != null) {
        readMaskBuilder_.dispose();
        readMaskBuilder_ = null;
      }
      readHandle_ = null;
      if (readHandleBuilder_ != null) {
        readHandleBuilder_.dispose();
        readHandleBuilder_ = null;
      }
      routingToken_ = "";
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.storage.v2.StorageProto
          .internal_static_google_storage_v2_BidiReadObjectSpec_descriptor;
    }

    @java.lang.Override
    public com.google.storage.v2.BidiReadObjectSpec getDefaultInstanceForType() {
      return com.google.storage.v2.BidiReadObjectSpec.getDefaultInstance();
    }

    @java.lang.Override
    public com.google.storage.v2.BidiReadObjectSpec build() {
      com.google.storage.v2.BidiReadObjectSpec result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public com.google.storage.v2.BidiReadObjectSpec buildPartial() {
      com.google.storage.v2.BidiReadObjectSpec result =
          new com.google.storage.v2.BidiReadObjectSpec(this);
      if (bitField0_ != 0) {
        buildPartial0(result);
      }
      onBuilt();
      return result;
    }

    // Copies set fields from the builder into the message, translating the
    // builder's bitField0_ bits into the message's (differently numbered)
    // presence bits.
    private void buildPartial0(com.google.storage.v2.BidiReadObjectSpec result) {
      int from_bitField0_ = bitField0_;
      if (((from_bitField0_ & 0x00000001) != 0)) {
        result.bucket_ = bucket_;
      }
      if (((from_bitField0_ & 0x00000002) != 0)) {
        result.object_ = object_;
      }
      if (((from_bitField0_ & 0x00000004) != 0)) {
        result.generation_ = generation_;
      }
      int to_bitField0_ = 0;
      if (((from_bitField0_ & 0x00000008) != 0)) {
        result.ifGenerationMatch_ = ifGenerationMatch_;
        to_bitField0_ |= 0x00000001;
      }
      if (((from_bitField0_ & 0x00000010) != 0)) {
        result.ifGenerationNotMatch_ = ifGenerationNotMatch_;
        to_bitField0_ |= 0x00000002;
      }
      if (((from_bitField0_ & 0x00000020) != 0)) {
        result.ifMetagenerationMatch_ = ifMetagenerationMatch_;
        to_bitField0_ |= 0x00000004;
      }
      if (((from_bitField0_ & 0x00000040) != 0)) {
        result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_;
        to_bitField0_ |= 0x00000008;
      }
      if (((from_bitField0_ & 0x00000080) != 0)) {
        result.commonObjectRequestParams_ =
            commonObjectRequestParamsBuilder_ == null
                ? commonObjectRequestParams_
                : commonObjectRequestParamsBuilder_.build();
        to_bitField0_ |= 0x00000010;
      }
      if (((from_bitField0_ & 0x00000100) != 0)) {
        result.readMask_ = readMaskBuilder_ == null ? readMask_ : readMaskBuilder_.build();
        to_bitField0_ |= 0x00000020;
      }
      if (((from_bitField0_ & 0x00000200) != 0)) {
        result.readHandle_ = readHandleBuilder_ == null ? readHandle_ : readHandleBuilder_.build();
        to_bitField0_ |= 0x00000040;
      }
      if (((from_bitField0_ & 0x00000400) != 0)) {
        result.routingToken_ = routingToken_;
        to_bitField0_ |= 0x00000080;
      }
      result.bitField0_ |= to_bitField0_;
    }

    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.storage.v2.BidiReadObjectSpec) {
        return mergeFrom((com.google.storage.v2.BidiReadObjectSpec) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    // Merges set/non-default fields from 'other' into this builder; default
    // values in 'other' leave existing values untouched.
    public Builder mergeFrom(com.google.storage.v2.BidiReadObjectSpec other) {
      if (other == com.google.storage.v2.BidiReadObjectSpec.getDefaultInstance()) return this;
      if (!other.getBucket().isEmpty()) {
        bucket_ = other.bucket_;
        bitField0_ |= 0x00000001;
        onChanged();
      }
      if (!other.getObject().isEmpty()) {
        object_ = other.object_;
        bitField0_ |= 0x00000002;
        onChanged();
      }
      if (other.getGeneration() != 0L) {
        setGeneration(other.getGeneration());
      }
      if (other.hasIfGenerationMatch()) {
        setIfGenerationMatch(other.getIfGenerationMatch());
      }
      if (other.hasIfGenerationNotMatch()) {
        setIfGenerationNotMatch(other.getIfGenerationNotMatch());
      }
      if (other.hasIfMetagenerationMatch()) {
        setIfMetagenerationMatch(other.getIfMetagenerationMatch());
      }
      if (other.hasIfMetagenerationNotMatch()) {
        setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch());
      }
      if (other.hasCommonObjectRequestParams()) {
        mergeCommonObjectRequestParams(other.getCommonObjectRequestParams());
      }
      if (other.hasReadMask()) {
        mergeReadMask(other.getReadMask());
      }
      if (other.hasReadHandle()) {
        mergeReadHandle(other.getReadHandle());
      }
      if (other.hasRoutingToken()) {
        routingToken_ = other.routingToken_;
        bitField0_ |= 0x00000400;
        onChanged();
      }
      this.mergeUnknownFields(other.getUnknownFields());
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    // Wire-format parse loop: each case label is the field's tag
    // (field number << 3 | wire type); unknown tags go to unknown fields.
    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 10:
              {
                bucket_ = input.readStringRequireUtf8();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
            case 18:
              {
                object_ = input.readStringRequireUtf8();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
            case 24:
              {
                generation_ = input.readInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
            case 32:
              {
                ifGenerationMatch_ = input.readInt64();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
            case 40:
              {
                ifGenerationNotMatch_ = input.readInt64();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
            case 48:
              {
                ifMetagenerationMatch_ = input.readInt64();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
            case 56:
              {
                ifMetagenerationNotMatch_ = input.readInt64();
                bitField0_ |= 0x00000040;
                break;
              } // case 56
            case 66:
              {
                input.readMessage(
                    internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000080;
                break;
              } // case 66
            case 98:
              {
                input.readMessage(
                    internalGetReadMaskFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00000100;
                break;
              } // case 98
            case 106:
              {
                input.readMessage(
                    internalGetReadHandleFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00000200;
                break;
              } // case 106
            case 114:
              {
                routingToken_ = input.readStringRequireUtf8();
                bitField0_ |= 0x00000400;
                break;
              } // case 114
            default:
              {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
          } // switch (tag)
        } // while (!done)
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.unwrapIOException();
      } finally {
        onChanged();
      } // finally
      return this;
    }

    private int bitField0_;

    private java.lang.Object bucket_ = "";

    /**
     *
     *
     * <pre>
     * Required. The name of the bucket containing the object to read.
     * </pre>
     *
     * <code>
     * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
     * </code>
     *
     * @return The bucket.
     */
    public java.lang.String getBucket() {
      java.lang.Object ref = bucket_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        bucket_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }

    /**
     *
     *
     * <pre>
     * Required. The name of the bucket containing the object to read.
     * </pre>
     *
     * <code>
     * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
     * </code>
     *
     * @return The bytes for bucket.
     */
    public com.google.protobuf.ByteString getBucketBytes() {
      java.lang.Object ref = bucket_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        bucket_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }

    /**
     *
     *
     * <pre>
     * Required. The name of the bucket containing the object to read.
     * </pre>
     *
     * <code>
     * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
     * </code>
     *
     * @param value The bucket to set.
     * @return This builder for chaining.
     */
    public Builder setBucket(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bucket_ = value;
      bitField0_ |= 0x00000001;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Required. The name of the bucket containing the object to read.
     * </pre>
     *
     * <code>
     * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
     * </code>
     *
     * @return This builder for chaining.
     */
    public Builder clearBucket() {
      bucket_ = getDefaultInstance().getBucket();
      bitField0_ = (bitField0_ & ~0x00000001);
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Required. The name of the bucket containing the object to read.
     * </pre>
     *
     * <code>
     * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
     * </code>
     *
     * @param value The bytes for bucket to set.
     * @return This builder for chaining.
     */
    public Builder setBucketBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      bucket_ = value;
      bitField0_ |= 0x00000001;
      onChanged();
      return this;
    }

    private java.lang.Object object_ = "";

    /**
     *
     *
     * <pre>
     * Required. The name of the object to read.
     * </pre>
     *
     * <code>string object = 2 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return The object.
     */
    public java.lang.String getObject() {
      java.lang.Object ref = object_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        object_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }

    /**
     *
     *
     * <pre>
     * Required. The name of the object to read.
     * </pre>
     *
     * <code>string object = 2 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return The bytes for object.
     */
    public com.google.protobuf.ByteString getObjectBytes() {
      java.lang.Object ref = object_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        object_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }

    /**
     *
     *
     * <pre>
     * Required. The name of the object to read.
     * </pre>
     *
     * <code>string object = 2 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @param value The object to set.
     * @return This builder for chaining.
     */
    public Builder setObject(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      object_ = value;
      bitField0_ |= 0x00000002;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Required. The name of the object to read.
     * </pre>
     *
     * <code>string object = 2 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearObject() {
      object_ = getDefaultInstance().getObject();
      bitField0_ = (bitField0_ & ~0x00000002);
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Required. The name of the object to read.
     * </pre>
     *
     * <code>string object = 2 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @param value The bytes for object to set.
     * @return This builder for chaining.
     */
    public Builder setObjectBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      object_ = value;
      bitField0_ |= 0x00000002;
      onChanged();
      return this;
    }

    private long generation_;

    /**
     *
     *
     * <pre>
     * Optional. If present, selects a specific revision of this object (as
     * opposed to the latest version, the default).
     * </pre>
     *
     * <code>int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL];</code>
     *
     * @return The generation.
     */
    @java.lang.Override
    public long getGeneration() {
      return generation_;
    }

    /**
     *
     *
     * <pre>
     * Optional. If present, selects a specific revision of this object (as
     * opposed to the latest version, the default).
     * </pre>
     *
     * <code>int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL];</code>
     *
     * @param value The generation to set.
     * @return This builder for chaining.
     */
    public Builder setGeneration(long value) {

      generation_ = value;
      bitField0_ |= 0x00000004;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Optional. If present, selects a specific revision of this object (as
     * opposed to the latest version, the default).
     * </pre>
     *
     * <code>int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL];</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearGeneration() {
      bitField0_ = (bitField0_ & ~0x00000004);
      generation_ = 0L;
      onChanged();
      return this;
    }

    private long ifGenerationMatch_;

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's current generation
     * matches the given value. Setting to 0 makes the operation succeed only if
     * there are no live versions of the object.
     * </pre>
     *
     * <code>optional int64 if_generation_match = 4;</code>
     *
     * @return Whether the ifGenerationMatch field is set.
     */
    @java.lang.Override
    public boolean hasIfGenerationMatch() {
      return ((bitField0_ & 0x00000008) != 0);
    }

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's current generation
     * matches the given value. Setting to 0 makes the operation succeed only if
     * there are no live versions of the object.
     * </pre>
     *
     * <code>optional int64 if_generation_match = 4;</code>
     *
     * @return The ifGenerationMatch.
     */
    @java.lang.Override
    public long getIfGenerationMatch() {
      return ifGenerationMatch_;
    }

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's current generation
     * matches the given value. Setting to 0 makes the operation succeed only if
     * there are no live versions of the object.
     * </pre>
     *
     * <code>optional int64 if_generation_match = 4;</code>
     *
     * @param value The ifGenerationMatch to set.
     * @return This builder for chaining.
     */
    public Builder setIfGenerationMatch(long value) {

      ifGenerationMatch_ = value;
      bitField0_ |= 0x00000008;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's current generation
     * matches the given value. Setting to 0 makes the operation succeed only if
     * there are no live versions of the object.
     * </pre>
     *
     * <code>optional int64 if_generation_match = 4;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearIfGenerationMatch() {
      bitField0_ = (bitField0_ & ~0x00000008);
      ifGenerationMatch_ = 0L;
      onChanged();
      return this;
    }

    private long ifGenerationNotMatch_;

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's live generation
     * does not match the given value. If no live object exists, the precondition
     * fails. Setting to 0 makes the operation succeed only if there is a live
     * version of the object.
     * </pre>
     *
     * <code>optional int64 if_generation_not_match = 5;</code>
     *
     * @return Whether the ifGenerationNotMatch field is set.
     */
    @java.lang.Override
    public boolean hasIfGenerationNotMatch() {
      return ((bitField0_ & 0x00000010) != 0);
    }

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's live generation
     * does not match the given value. If no live object exists, the precondition
     * fails. Setting to 0 makes the operation succeed only if there is a live
     * version of the object.
     * </pre>
     *
     * <code>optional int64 if_generation_not_match = 5;</code>
     *
     * @return The ifGenerationNotMatch.
     */
    @java.lang.Override
    public long getIfGenerationNotMatch() {
      return ifGenerationNotMatch_;
    }

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's live generation
     * does not match the given value. If no live object exists, the precondition
     * fails. Setting to 0 makes the operation succeed only if there is a live
     * version of the object.
     * </pre>
     *
     * <code>optional int64 if_generation_not_match = 5;</code>
     *
     * @param value The ifGenerationNotMatch to set.
     * @return This builder for chaining.
     */
    public Builder setIfGenerationNotMatch(long value) {

      ifGenerationNotMatch_ = value;
      bitField0_ |= 0x00000010;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's live generation
     * does not match the given value. If no live object exists, the precondition
     * fails. Setting to 0 makes the operation succeed only if there is a live
     * version of the object.
     * </pre>
     *
     * <code>optional int64 if_generation_not_match = 5;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearIfGenerationNotMatch() {
      bitField0_ = (bitField0_ & ~0x00000010);
      ifGenerationNotMatch_ = 0L;
      onChanged();
      return this;
    }

    private long ifMetagenerationMatch_;

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's current
     * metageneration matches the given value.
     * </pre>
     *
     * <code>optional int64 if_metageneration_match = 6;</code>
     *
     * @return Whether the ifMetagenerationMatch field is set.
     */
    @java.lang.Override
    public boolean hasIfMetagenerationMatch() {
      return ((bitField0_ & 0x00000020) != 0);
    }

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's current
     * metageneration matches the given value.
     * </pre>
     *
     * <code>optional int64 if_metageneration_match = 6;</code>
     *
     * @return The ifMetagenerationMatch.
     */
    @java.lang.Override
    public long getIfMetagenerationMatch() {
      return ifMetagenerationMatch_;
    }

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's current
     * metageneration matches the given value.
     * </pre>
     *
     * <code>optional int64 if_metageneration_match = 6;</code>
     *
     * @param value The ifMetagenerationMatch to set.
     * @return This builder for chaining.
     */
    public Builder setIfMetagenerationMatch(long value) {

      ifMetagenerationMatch_ = value;
      bitField0_ |= 0x00000020;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's current
     * metageneration matches the given value.
     * </pre>
     *
     * <code>optional int64 if_metageneration_match = 6;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearIfMetagenerationMatch() {
      bitField0_ = (bitField0_ & ~0x00000020);
      ifMetagenerationMatch_ = 0L;
      onChanged();
      return this;
    }

    private long ifMetagenerationNotMatch_;

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's current
     * metageneration does not match the given value.
     * </pre>
     *
     * <code>optional int64 if_metageneration_not_match = 7;</code>
     *
     * @return Whether the ifMetagenerationNotMatch field is set.
     */
    @java.lang.Override
    public boolean hasIfMetagenerationNotMatch() {
      return ((bitField0_ & 0x00000040) != 0);
    }

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's current
     * metageneration does not match the given value.
     * </pre>
     *
     * <code>optional int64 if_metageneration_not_match = 7;</code>
     *
     * @return The ifMetagenerationNotMatch.
     */
    @java.lang.Override
    public long getIfMetagenerationNotMatch() {
      return ifMetagenerationNotMatch_;
    }

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's current
     * metageneration does not match the given value.
     * </pre>
     *
     * <code>optional int64 if_metageneration_not_match = 7;</code>
     *
     * @param value The ifMetagenerationNotMatch to set.
     * @return This builder for chaining.
     */
    public Builder setIfMetagenerationNotMatch(long value) {

      ifMetagenerationNotMatch_ = value;
      bitField0_ |= 0x00000040;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Makes the operation conditional on whether the object's current
     * metageneration does not match the given value.
     * </pre>
     *
     * <code>optional int64 if_metageneration_not_match = 7;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearIfMetagenerationNotMatch() {
      bitField0_ = (bitField0_ & ~0x00000040);
      ifMetagenerationNotMatch_ = 0L;
      onChanged();
      return this;
    }

    // Message field 8: either the plain field or the lazily created
    // SingleFieldBuilder is authoritative (builder wins once created).
    private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_;
    private com.google.protobuf.SingleFieldBuilder<
            com.google.storage.v2.CommonObjectRequestParams,
            com.google.storage.v2.CommonObjectRequestParams.Builder,
            com.google.storage.v2.CommonObjectRequestParamsOrBuilder>
        commonObjectRequestParamsBuilder_;

    /**
     *
     *
     * <pre>
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + if (commonObjectRequestParamsBuilder_ == null) { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } else { + return commonObjectRequestParamsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonObjectRequestParams_ = value; + } else { + commonObjectRequestParamsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams.Builder builderForValue) { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParams_ = builderForValue.build(); + } else { + commonObjectRequestParamsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0) + && commonObjectRequestParams_ != null + && commonObjectRequestParams_ + != com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance()) { + getCommonObjectRequestParamsBuilder().mergeFrom(value); + } else { + commonObjectRequestParams_ = value; + } + } else { + commonObjectRequestParamsBuilder_.mergeFrom(value); + } + if (commonObjectRequestParams_ != null) { + bitField0_ |= 0x00000080; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCommonObjectRequestParams() { + bitField0_ = (bitField0_ & ~0x00000080); + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParams.Builder + getCommonObjectRequestParamsBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + if (commonObjectRequestParamsBuilder_ != null) { + return commonObjectRequestParamsBuilder_.getMessageOrBuilder(); + } else { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + internalGetCommonObjectRequestParamsFieldBuilder() { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParamsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder>( + getCommonObjectRequestParams(), getParentForChildren(), isClean()); + commonObjectRequestParams_ = null; + } + return commonObjectRequestParamsBuilder_; + } + + private com.google.protobuf.FieldMask readMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + readMaskBuilder_; + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * As per https://google.aip.dev/161, this field is deprecated.
+     * As an alternative, `grpc metadata` can be used:
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + * + * @deprecated google.storage.v2.BidiReadObjectSpec.read_mask is deprecated. See + * google/storage/v2/storage.proto;l=1189 + * @return Whether the readMask field is set. + */ + @java.lang.Deprecated + public boolean hasReadMask() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * As per https://google.aip.dev/161, this field is deprecated.
+     * As an alternative, `grpc metadata` can be used:
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + * + * @deprecated google.storage.v2.BidiReadObjectSpec.read_mask is deprecated. See + * google/storage/v2/storage.proto;l=1189 + * @return The readMask. + */ + @java.lang.Deprecated + public com.google.protobuf.FieldMask getReadMask() { + if (readMaskBuilder_ == null) { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } else { + return readMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * As per https://google.aip.dev/161, this field is deprecated.
+     * As an alternative, `grpc metadata` can be used:
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + */ + @java.lang.Deprecated + public Builder setReadMask(com.google.protobuf.FieldMask value) { + if (readMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readMask_ = value; + } else { + readMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * As per https://google.aip.dev/161, this field is deprecated.
+     * As an alternative, `grpc metadata` can be used:
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + */ + @java.lang.Deprecated + public Builder setReadMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (readMaskBuilder_ == null) { + readMask_ = builderForValue.build(); + } else { + readMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * As per https://google.aip.dev/161, this field is deprecated.
+     * As an alternative, `grpc metadata` can be used:
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + */ + @java.lang.Deprecated + public Builder mergeReadMask(com.google.protobuf.FieldMask value) { + if (readMaskBuilder_ == null) { + if (((bitField0_ & 0x00000100) != 0) + && readMask_ != null + && readMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getReadMaskBuilder().mergeFrom(value); + } else { + readMask_ = value; + } + } else { + readMaskBuilder_.mergeFrom(value); + } + if (readMask_ != null) { + bitField0_ |= 0x00000100; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * As per https://google.aip.dev/161, this field is deprecated.
+     * As an alternative, `grpc metadata` can be used:
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + */ + @java.lang.Deprecated + public Builder clearReadMask() { + bitField0_ = (bitField0_ & ~0x00000100); + readMask_ = null; + if (readMaskBuilder_ != null) { + readMaskBuilder_.dispose(); + readMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * As per https://google.aip.dev/161, this field is deprecated.
+     * As an alternative, `grpc metadata` can be used:
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + */ + @java.lang.Deprecated + public com.google.protobuf.FieldMask.Builder getReadMaskBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return internalGetReadMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * As per https://google.aip.dev/161, this field is deprecated.
+     * As an alternative, `grpc metadata` can be used:
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + */ + @java.lang.Deprecated + public com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder() { + if (readMaskBuilder_ != null) { + return readMaskBuilder_.getMessageOrBuilder(); + } else { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * As per https://google.aip.dev/161, this field is deprecated.
+     * As an alternative, `grpc metadata` can be used:
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetReadMaskFieldBuilder() { + if (readMaskBuilder_ == null) { + readMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getReadMask(), getParentForChildren(), isClean()); + readMask_ = null; + } + return readMaskBuilder_; + } + + private com.google.storage.v2.BidiReadHandle readHandle_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiReadHandle, + com.google.storage.v2.BidiReadHandle.Builder, + com.google.storage.v2.BidiReadHandleOrBuilder> + readHandleBuilder_; + + /** + * + * + *
+     * The client can optionally set this field. The read handle is an optimized
+     * way of creating new streams. Read handles are generated and periodically
+     * refreshed from prior reads.
+     * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + * + * @return Whether the readHandle field is set. + */ + public boolean hasReadHandle() { + return ((bitField0_ & 0x00000200) != 0); + } + + /** + * + * + *
+     * The client can optionally set this field. The read handle is an optimized
+     * way of creating new streams. Read handles are generated and periodically
+     * refreshed from prior reads.
+     * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + * + * @return The readHandle. + */ + public com.google.storage.v2.BidiReadHandle getReadHandle() { + if (readHandleBuilder_ == null) { + return readHandle_ == null + ? com.google.storage.v2.BidiReadHandle.getDefaultInstance() + : readHandle_; + } else { + return readHandleBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * The client can optionally set this field. The read handle is an optimized
+     * way of creating new streams. Read handles are generated and periodically
+     * refreshed from prior reads.
+     * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + */ + public Builder setReadHandle(com.google.storage.v2.BidiReadHandle value) { + if (readHandleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readHandle_ = value; + } else { + readHandleBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * The client can optionally set this field. The read handle is an optimized
+     * way of creating new streams. Read handles are generated and periodically
+     * refreshed from prior reads.
+     * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + */ + public Builder setReadHandle(com.google.storage.v2.BidiReadHandle.Builder builderForValue) { + if (readHandleBuilder_ == null) { + readHandle_ = builderForValue.build(); + } else { + readHandleBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * The client can optionally set this field. The read handle is an optimized
+     * way of creating new streams. Read handles are generated and periodically
+     * refreshed from prior reads.
+     * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + */ + public Builder mergeReadHandle(com.google.storage.v2.BidiReadHandle value) { + if (readHandleBuilder_ == null) { + if (((bitField0_ & 0x00000200) != 0) + && readHandle_ != null + && readHandle_ != com.google.storage.v2.BidiReadHandle.getDefaultInstance()) { + getReadHandleBuilder().mergeFrom(value); + } else { + readHandle_ = value; + } + } else { + readHandleBuilder_.mergeFrom(value); + } + if (readHandle_ != null) { + bitField0_ |= 0x00000200; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * The client can optionally set this field. The read handle is an optimized
+     * way of creating new streams. Read handles are generated and periodically
+     * refreshed from prior reads.
+     * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + */ + public Builder clearReadHandle() { + bitField0_ = (bitField0_ & ~0x00000200); + readHandle_ = null; + if (readHandleBuilder_ != null) { + readHandleBuilder_.dispose(); + readHandleBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * The client can optionally set this field. The read handle is an optimized
+     * way of creating new streams. Read handles are generated and periodically
+     * refreshed from prior reads.
+     * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + */ + public com.google.storage.v2.BidiReadHandle.Builder getReadHandleBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return internalGetReadHandleFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The client can optionally set this field. The read handle is an optimized
+     * way of creating new streams. Read handles are generated and periodically
+     * refreshed from prior reads.
+     * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + */ + public com.google.storage.v2.BidiReadHandleOrBuilder getReadHandleOrBuilder() { + if (readHandleBuilder_ != null) { + return readHandleBuilder_.getMessageOrBuilder(); + } else { + return readHandle_ == null + ? com.google.storage.v2.BidiReadHandle.getDefaultInstance() + : readHandle_; + } + } + + /** + * + * + *
+     * The client can optionally set this field. The read handle is an optimized
+     * way of creating new streams. Read handles are generated and periodically
+     * refreshed from prior reads.
+     * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiReadHandle, + com.google.storage.v2.BidiReadHandle.Builder, + com.google.storage.v2.BidiReadHandleOrBuilder> + internalGetReadHandleFieldBuilder() { + if (readHandleBuilder_ == null) { + readHandleBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiReadHandle, + com.google.storage.v2.BidiReadHandle.Builder, + com.google.storage.v2.BidiReadHandleOrBuilder>( + getReadHandle(), getParentForChildren(), isClean()); + readHandle_ = null; + } + return readHandleBuilder_; + } + + private java.lang.Object routingToken_ = ""; + + /** + * + * + *
+     * The routing token that influences request routing for the stream. Must be
+     * provided if a BidiReadObjectRedirectedError is returned.
+     * 
+ * + * optional string routing_token = 14; + * + * @return Whether the routingToken field is set. + */ + public boolean hasRoutingToken() { + return ((bitField0_ & 0x00000400) != 0); + } + + /** + * + * + *
+     * The routing token that influences request routing for the stream. Must be
+     * provided if a BidiReadObjectRedirectedError is returned.
+     * 
+ * + * optional string routing_token = 14; + * + * @return The routingToken. + */ + public java.lang.String getRoutingToken() { + java.lang.Object ref = routingToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + routingToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The routing token that influences request routing for the stream. Must be
+     * provided if a BidiReadObjectRedirectedError is returned.
+     * 
+ * + * optional string routing_token = 14; + * + * @return The bytes for routingToken. + */ + public com.google.protobuf.ByteString getRoutingTokenBytes() { + java.lang.Object ref = routingToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + routingToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The routing token that influences request routing for the stream. Must be
+     * provided if a BidiReadObjectRedirectedError is returned.
+     * 
+ * + * optional string routing_token = 14; + * + * @param value The routingToken to set. + * @return This builder for chaining. + */ + public Builder setRoutingToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + routingToken_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+     * The routing token that influences request routing for the stream. Must be
+     * provided if a BidiReadObjectRedirectedError is returned.
+     * 
+ * + * optional string routing_token = 14; + * + * @return This builder for chaining. + */ + public Builder clearRoutingToken() { + routingToken_ = getDefaultInstance().getRoutingToken(); + bitField0_ = (bitField0_ & ~0x00000400); + onChanged(); + return this; + } + + /** + * + * + *
+     * The routing token that influences request routing for the stream. Must be
+     * provided if a BidiReadObjectRedirectedError is returned.
+     * 
+ * + * optional string routing_token = 14; + * + * @param value The bytes for routingToken to set. + * @return This builder for chaining. + */ + public Builder setRoutingTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + routingToken_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.BidiReadObjectSpec) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.BidiReadObjectSpec) + private static final com.google.storage.v2.BidiReadObjectSpec DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.BidiReadObjectSpec(); + } + + public static com.google.storage.v2.BidiReadObjectSpec getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BidiReadObjectSpec parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.storage.v2.BidiReadObjectSpec getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectSpecOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectSpecOrBuilder.java new file mode 100644 index 000000000000..334f9313ccb3 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiReadObjectSpecOrBuilder.java @@ -0,0 +1,409 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface BidiReadObjectSpecOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.BidiReadObjectSpec) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the bucket containing the object to read.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + java.lang.String getBucket(); + + /** + * + * + *
+   * Required. The name of the bucket containing the object to read.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + com.google.protobuf.ByteString getBucketBytes(); + + /** + * + * + *
+   * Required. The name of the object to read.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + java.lang.String getObject(); + + /** + * + * + *
+   * Required. The name of the object to read.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + com.google.protobuf.ByteString getObjectBytes(); + + /** + * + * + *
+   * Optional. If present, selects a specific revision of this object (as
+   * opposed to the latest version, the default).
+   * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + long getGeneration(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return Whether the ifGenerationMatch field is set. + */ + boolean hasIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return The ifGenerationMatch. + */ + long getIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + boolean hasIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return The ifGenerationNotMatch. + */ + long getIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + boolean hasCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.CommonObjectRequestParamsOrBuilder getCommonObjectRequestParamsOrBuilder(); + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * The `checksummed_data` field and its children are always present.
+   * If no mask is specified, it defaults to all fields except `metadata.
+   * owner` and `metadata.acl`.
+   * `*` might be used to mean "all fields".
+   * As per https://google.aip.dev/161, this field is deprecated.
+   * As an alternative, `grpc metadata` can be used:
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + * + * @deprecated google.storage.v2.BidiReadObjectSpec.read_mask is deprecated. See + * google/storage/v2/storage.proto;l=1189 + * @return Whether the readMask field is set. + */ + @java.lang.Deprecated + boolean hasReadMask(); + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * The `checksummed_data` field and its children are always present.
+   * If no mask is specified, it defaults to all fields except `metadata.
+   * owner` and `metadata.acl`.
+   * `*` might be used to mean "all fields".
+   * As per https://google.aip.dev/161, this field is deprecated.
+   * As an alternative, `grpc metadata` can be used:
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + * + * @deprecated google.storage.v2.BidiReadObjectSpec.read_mask is deprecated. See + * google/storage/v2/storage.proto;l=1189 + * @return The readMask. + */ + @java.lang.Deprecated + com.google.protobuf.FieldMask getReadMask(); + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * The `checksummed_data` field and its children are always present.
+   * If no mask is specified, it defaults to all fields except `metadata.
+   * owner` and `metadata.acl`.
+   * `*` might be used to mean "all fields".
+   * As per https://google.aip.dev/161, this field is deprecated.
+   * As an alternative, `grpc metadata` can be used:
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12 [deprecated = true]; + */ + @java.lang.Deprecated + com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder(); + + /** + * + * + *
+   * The client can optionally set this field. The read handle is an optimized
+   * way of creating new streams. Read handles are generated and periodically
+   * refreshed from prior reads.
+   * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + * + * @return Whether the readHandle field is set. + */ + boolean hasReadHandle(); + + /** + * + * + *
+   * The client can optionally set this field. The read handle is an optimized
+   * way of creating new streams. Read handles are generated and periodically
+   * refreshed from prior reads.
+   * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + * + * @return The readHandle. + */ + com.google.storage.v2.BidiReadHandle getReadHandle(); + + /** + * + * + *
+   * The client can optionally set this field. The read handle is an optimized
+   * way of creating new streams. Read handles are generated and periodically
+   * refreshed from prior reads.
+   * 
+ * + * optional .google.storage.v2.BidiReadHandle read_handle = 13; + */ + com.google.storage.v2.BidiReadHandleOrBuilder getReadHandleOrBuilder(); + + /** + * + * + *
+   * The routing token that influences request routing for the stream. Must be
+   * provided if a BidiReadObjectRedirectedError is returned.
+   * 
+ * + * optional string routing_token = 14; + * + * @return Whether the routingToken field is set. + */ + boolean hasRoutingToken(); + + /** + * + * + *
+   * The routing token that influences request routing for the stream. Must be
+   * provided if a BidiReadObjectRedirectedError is returned.
+   * 
+ * + * optional string routing_token = 14; + * + * @return The routingToken. + */ + java.lang.String getRoutingToken(); + + /** + * + * + *
+   * The routing token that influences request routing for the stream. Must be
+   * provided if a BidiReadObjectRedirectedError is returned.
+   * 
+ * + * optional string routing_token = 14; + * + * @return The bytes for routingToken. + */ + com.google.protobuf.ByteString getRoutingTokenBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteHandle.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteHandle.java new file mode 100644 index 000000000000..98865e7381df --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteHandle.java @@ -0,0 +1,508 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * `BidiWriteHandle` contains a handle from a previous `BidiWriteObject`
+ * invocation. The client can use this instead of `BidiReadObjectSpec` as an
+ * optimized way of opening subsequent bidirectional streams to the same object.
+ * 
+ * + * Protobuf type {@code google.storage.v2.BidiWriteHandle} + */ +@com.google.protobuf.Generated +public final class BidiWriteHandle extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.BidiWriteHandle) + BidiWriteHandleOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BidiWriteHandle"); + } + + // Use BidiWriteHandle.newBuilder() to construct. + private BidiWriteHandle(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BidiWriteHandle() { + handle_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteHandle_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteHandle_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiWriteHandle.class, + com.google.storage.v2.BidiWriteHandle.Builder.class); + } + + public static final int HANDLE_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString handle_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * Required. Opaque value describing a previous write.
+   * 
+ * + * bytes handle = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The handle. + */ + @java.lang.Override + public com.google.protobuf.ByteString getHandle() { + return handle_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!handle_.isEmpty()) { + output.writeBytes(1, handle_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!handle_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, handle_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.BidiWriteHandle)) { + return super.equals(obj); + } + com.google.storage.v2.BidiWriteHandle other = (com.google.storage.v2.BidiWriteHandle) obj; + + if (!getHandle().equals(other.getHandle())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + HANDLE_FIELD_NUMBER; + hash = (53 * hash) + getHandle().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.BidiWriteHandle parseFrom(java.nio.ByteBuffer data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiWriteHandle parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteHandle parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiWriteHandle parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteHandle parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiWriteHandle parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteHandle parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiWriteHandle parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteHandle parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiWriteHandle parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteHandle parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiWriteHandle parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.BidiWriteHandle prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * `BidiWriteHandle` contains a handle from a previous `BidiWriteObject`
+   * invocation. The client can use this instead of `BidiReadObjectSpec` as an
+   * optimized way of opening subsequent bidirectional streams to the same object.
+   * 
+ * + * Protobuf type {@code google.storage.v2.BidiWriteHandle} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.BidiWriteHandle) + com.google.storage.v2.BidiWriteHandleOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteHandle_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteHandle_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiWriteHandle.class, + com.google.storage.v2.BidiWriteHandle.Builder.class); + } + + // Construct using com.google.storage.v2.BidiWriteHandle.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + handle_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteHandle_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteHandle getDefaultInstanceForType() { + return com.google.storage.v2.BidiWriteHandle.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteHandle build() { + com.google.storage.v2.BidiWriteHandle result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteHandle buildPartial() { + 
com.google.storage.v2.BidiWriteHandle result = + new com.google.storage.v2.BidiWriteHandle(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.BidiWriteHandle result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.handle_ = handle_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.BidiWriteHandle) { + return mergeFrom((com.google.storage.v2.BidiWriteHandle) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.BidiWriteHandle other) { + if (other == com.google.storage.v2.BidiWriteHandle.getDefaultInstance()) return this; + if (!other.getHandle().isEmpty()) { + setHandle(other.getHandle()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + handle_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString 
handle_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * Required. Opaque value describing a previous write.
+     * 
+ * + * bytes handle = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The handle. + */ + @java.lang.Override + public com.google.protobuf.ByteString getHandle() { + return handle_; + } + + /** + * + * + *
+     * Required. Opaque value describing a previous write.
+     * 
+ * + * bytes handle = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The handle to set. + * @return This builder for chaining. + */ + public Builder setHandle(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + handle_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Opaque value describing a previous write.
+     * 
+ * + * bytes handle = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearHandle() { + bitField0_ = (bitField0_ & ~0x00000001); + handle_ = getDefaultInstance().getHandle(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.BidiWriteHandle) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.BidiWriteHandle) + private static final com.google.storage.v2.BidiWriteHandle DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.BidiWriteHandle(); + } + + public static com.google.storage.v2.BidiWriteHandle getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BidiWriteHandle parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteHandle getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteHandleOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteHandleOrBuilder.java new file mode 100644 index 000000000000..f83a642ecbc6 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteHandleOrBuilder.java @@ -0,0 +1,41 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface BidiWriteHandleOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.BidiWriteHandle) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Opaque value describing a previous write.
+   * 
+ * + * bytes handle = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The handle. + */ + com.google.protobuf.ByteString getHandle(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectRedirectedError.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectRedirectedError.java new file mode 100644 index 000000000000..9f9502a6eccc --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectRedirectedError.java @@ -0,0 +1,1117 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Error proto containing details for a redirected write. This error might be
+ * attached as details for an ABORTED response to BidiWriteObject.
+ * 
+ * + * Protobuf type {@code google.storage.v2.BidiWriteObjectRedirectedError} + */ +@com.google.protobuf.Generated +public final class BidiWriteObjectRedirectedError extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.BidiWriteObjectRedirectedError) + BidiWriteObjectRedirectedErrorOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BidiWriteObjectRedirectedError"); + } + + // Use BidiWriteObjectRedirectedError.newBuilder() to construct. + private BidiWriteObjectRedirectedError(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BidiWriteObjectRedirectedError() { + routingToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectRedirectedError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectRedirectedError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiWriteObjectRedirectedError.class, + com.google.storage.v2.BidiWriteObjectRedirectedError.Builder.class); + } + + private int bitField0_; + public static final int ROUTING_TOKEN_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object routingToken_ = ""; + + /** + * + * + *
+   * The routing token the client must use when retrying the write stream.
+   * This value must be provided in the header `x-goog-request-params`, with key
+   * `routing_token` and this string verbatim as the value.
+   * 
+ * + * optional string routing_token = 1; + * + * @return Whether the routingToken field is set. + */ + @java.lang.Override + public boolean hasRoutingToken() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * The routing token the client must use when retrying the write stream.
+   * This value must be provided in the header `x-goog-request-params`, with key
+   * `routing_token` and this string verbatim as the value.
+   * 
+ * + * optional string routing_token = 1; + * + * @return The routingToken. + */ + @java.lang.Override + public java.lang.String getRoutingToken() { + java.lang.Object ref = routingToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + routingToken_ = s; + return s; + } + } + + /** + * + * + *
+   * The routing token the client must use when retrying the write stream.
+   * This value must be provided in the header `x-goog-request-params`, with key
+   * `routing_token` and this string verbatim as the value.
+   * 
+ * + * optional string routing_token = 1; + * + * @return The bytes for routingToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRoutingTokenBytes() { + java.lang.Object ref = routingToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + routingToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int WRITE_HANDLE_FIELD_NUMBER = 2; + private com.google.storage.v2.BidiWriteHandle writeHandle_; + + /** + * + * + *
+   * Opaque value describing a previous write. If set, the client must use this
+   * in an AppendObjectSpec first_message when retrying the write stream. If not
+   * set, clients might retry the original request.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + * + * @return Whether the writeHandle field is set. + */ + @java.lang.Override + public boolean hasWriteHandle() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Opaque value describing a previous write. If set, the client must use this
+   * in an AppendObjectSpec first_message when retrying the write stream. If not
+   * set, clients might retry the original request.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + * + * @return The writeHandle. + */ + @java.lang.Override + public com.google.storage.v2.BidiWriteHandle getWriteHandle() { + return writeHandle_ == null + ? com.google.storage.v2.BidiWriteHandle.getDefaultInstance() + : writeHandle_; + } + + /** + * + * + *
+   * Opaque value describing a previous write. If set, the client must use this
+   * in an AppendObjectSpec first_message when retrying the write stream. If not
+   * set, clients might retry the original request.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + */ + @java.lang.Override + public com.google.storage.v2.BidiWriteHandleOrBuilder getWriteHandleOrBuilder() { + return writeHandle_ == null + ? com.google.storage.v2.BidiWriteHandle.getDefaultInstance() + : writeHandle_; + } + + public static final int GENERATION_FIELD_NUMBER = 3; + private long generation_ = 0L; + + /** + * + * + *
+   * The generation of the object that triggered the redirect. This is set
+   * iff `write_handle` is set. If set, the client must use this in an
+   * `AppendObjectSpec` first_message when retrying the write stream.
+   * 
+ * + * optional int64 generation = 3; + * + * @return Whether the generation field is set. + */ + @java.lang.Override + public boolean hasGeneration() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * The generation of the object that triggered the redirect. This is set
+   * iff `write_handle` is set. If set, the client must use this in an
+   * `AppendObjectSpec` first_message when retrying the write stream.
+   * 
+ * + * optional int64 generation = 3; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, routingToken_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getWriteHandle()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt64(3, generation_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, routingToken_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getWriteHandle()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, generation_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.BidiWriteObjectRedirectedError)) { + return super.equals(obj); + } + com.google.storage.v2.BidiWriteObjectRedirectedError other = + (com.google.storage.v2.BidiWriteObjectRedirectedError) obj; + + if (hasRoutingToken() != other.hasRoutingToken()) return false; + if (hasRoutingToken()) { + if 
(!getRoutingToken().equals(other.getRoutingToken())) return false; + } + if (hasWriteHandle() != other.hasWriteHandle()) return false; + if (hasWriteHandle()) { + if (!getWriteHandle().equals(other.getWriteHandle())) return false; + } + if (hasGeneration() != other.hasGeneration()) return false; + if (hasGeneration()) { + if (getGeneration() != other.getGeneration()) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRoutingToken()) { + hash = (37 * hash) + ROUTING_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getRoutingToken().hashCode(); + } + if (hasWriteHandle()) { + hash = (37 * hash) + WRITE_HANDLE_FIELD_NUMBER; + hash = (53 * hash) + getWriteHandle().hashCode(); + } + if (hasGeneration()) { + hash = (37 * hash) + GENERATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getGeneration()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.BidiWriteObjectRedirectedError parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiWriteObjectRedirectedError parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectRedirectedError parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiWriteObjectRedirectedError parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectRedirectedError parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiWriteObjectRedirectedError parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectRedirectedError parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiWriteObjectRedirectedError parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectRedirectedError parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiWriteObjectRedirectedError parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectRedirectedError parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiWriteObjectRedirectedError parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.BidiWriteObjectRedirectedError prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Error proto containing details for a redirected write. This error might be
+   * attached as details for an ABORTED response to BidiWriteObject.
+   * 
+ * + * Protobuf type {@code google.storage.v2.BidiWriteObjectRedirectedError} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.BidiWriteObjectRedirectedError) + com.google.storage.v2.BidiWriteObjectRedirectedErrorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectRedirectedError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectRedirectedError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiWriteObjectRedirectedError.class, + com.google.storage.v2.BidiWriteObjectRedirectedError.Builder.class); + } + + // Construct using com.google.storage.v2.BidiWriteObjectRedirectedError.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetWriteHandleFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + routingToken_ = ""; + writeHandle_ = null; + if (writeHandleBuilder_ != null) { + writeHandleBuilder_.dispose(); + writeHandleBuilder_ = null; + } + generation_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectRedirectedError_descriptor; + } + + @java.lang.Override + public 
com.google.storage.v2.BidiWriteObjectRedirectedError getDefaultInstanceForType() { + return com.google.storage.v2.BidiWriteObjectRedirectedError.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteObjectRedirectedError build() { + com.google.storage.v2.BidiWriteObjectRedirectedError result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteObjectRedirectedError buildPartial() { + com.google.storage.v2.BidiWriteObjectRedirectedError result = + new com.google.storage.v2.BidiWriteObjectRedirectedError(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.BidiWriteObjectRedirectedError result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.routingToken_ = routingToken_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.writeHandle_ = + writeHandleBuilder_ == null ? 
writeHandle_ : writeHandleBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.generation_ = generation_; + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.BidiWriteObjectRedirectedError) { + return mergeFrom((com.google.storage.v2.BidiWriteObjectRedirectedError) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.BidiWriteObjectRedirectedError other) { + if (other == com.google.storage.v2.BidiWriteObjectRedirectedError.getDefaultInstance()) + return this; + if (other.hasRoutingToken()) { + routingToken_ = other.routingToken_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasWriteHandle()) { + mergeWriteHandle(other.getWriteHandle()); + } + if (other.hasGeneration()) { + setGeneration(other.getGeneration()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + routingToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetWriteHandleFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + generation_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object routingToken_ = ""; + + /** + * + * + *
+     * The routing token the client must use when retrying the write stream.
+     * This value must be provided in the header `x-goog-request-params`, with key
+     * `routing_token` and this string verbatim as the value.
+     * 
+ * + * optional string routing_token = 1; + * + * @return Whether the routingToken field is set. + */ + public boolean hasRoutingToken() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * The routing token the client must use when retrying the write stream.
+     * This value must be provided in the header `x-goog-request-params`, with key
+     * `routing_token` and this string verbatim as the value.
+     * 
+ * + * optional string routing_token = 1; + * + * @return The routingToken. + */ + public java.lang.String getRoutingToken() { + java.lang.Object ref = routingToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + routingToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The routing token the client must use when retrying the write stream.
+     * This value must be provided in the header `x-goog-request-params`, with key
+     * `routing_token` and this string verbatim as the value.
+     * 
+ * + * optional string routing_token = 1; + * + * @return The bytes for routingToken. + */ + public com.google.protobuf.ByteString getRoutingTokenBytes() { + java.lang.Object ref = routingToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + routingToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The routing token the client must use when retrying the write stream.
+     * This value must be provided in the header `x-goog-request-params`, with key
+     * `routing_token` and this string verbatim as the value.
+     * 
+ * + * optional string routing_token = 1; + * + * @param value The routingToken to set. + * @return This builder for chaining. + */ + public Builder setRoutingToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + routingToken_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The routing token the client must use when retrying the write stream.
+     * This value must be provided in the header `x-goog-request-params`, with key
+     * `routing_token` and this string verbatim as the value.
+     * 
+ * + * optional string routing_token = 1; + * + * @return This builder for chaining. + */ + public Builder clearRoutingToken() { + routingToken_ = getDefaultInstance().getRoutingToken(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * The routing token the client must use when retrying the write stream.
+     * This value must be provided in the header `x-goog-request-params`, with key
+     * `routing_token` and this string verbatim as the value.
+     * 
+ * + * optional string routing_token = 1; + * + * @param value The bytes for routingToken to set. + * @return This builder for chaining. + */ + public Builder setRoutingTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + routingToken_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.storage.v2.BidiWriteHandle writeHandle_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiWriteHandle, + com.google.storage.v2.BidiWriteHandle.Builder, + com.google.storage.v2.BidiWriteHandleOrBuilder> + writeHandleBuilder_; + + /** + * + * + *
+     * Opaque value describing a previous write. If set, the client must use this
+     * in an AppendObjectSpec first_message when retrying the write stream. If not
+     * set, clients might retry the original request.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + * + * @return Whether the writeHandle field is set. + */ + public boolean hasWriteHandle() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Opaque value describing a previous write. If set, the client must use this
+     * in an AppendObjectSpec first_message when retrying the write stream. If not
+     * set, clients might retry the original request.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + * + * @return The writeHandle. + */ + public com.google.storage.v2.BidiWriteHandle getWriteHandle() { + if (writeHandleBuilder_ == null) { + return writeHandle_ == null + ? com.google.storage.v2.BidiWriteHandle.getDefaultInstance() + : writeHandle_; + } else { + return writeHandleBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Opaque value describing a previous write. If set, the client must use this
+     * in an AppendObjectSpec first_message when retrying the write stream. If not
+     * set, clients might retry the original request.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + */ + public Builder setWriteHandle(com.google.storage.v2.BidiWriteHandle value) { + if (writeHandleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writeHandle_ = value; + } else { + writeHandleBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Opaque value describing a previous write. If set, the client must use this
+     * in an AppendObjectSpec first_message when retrying the write stream. If not
+     * set, clients might retry the original request.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + */ + public Builder setWriteHandle(com.google.storage.v2.BidiWriteHandle.Builder builderForValue) { + if (writeHandleBuilder_ == null) { + writeHandle_ = builderForValue.build(); + } else { + writeHandleBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Opaque value describing a previous write. If set, the client must use this
+     * in an AppendObjectSpec first_message when retrying the write stream. If not
+     * set, clients might retry the original request.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + */ + public Builder mergeWriteHandle(com.google.storage.v2.BidiWriteHandle value) { + if (writeHandleBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && writeHandle_ != null + && writeHandle_ != com.google.storage.v2.BidiWriteHandle.getDefaultInstance()) { + getWriteHandleBuilder().mergeFrom(value); + } else { + writeHandle_ = value; + } + } else { + writeHandleBuilder_.mergeFrom(value); + } + if (writeHandle_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Opaque value describing a previous write. If set, the client must use this
+     * in an AppendObjectSpec first_message when retrying the write stream. If not
+     * set, clients might retry the original request.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + */ + public Builder clearWriteHandle() { + bitField0_ = (bitField0_ & ~0x00000002); + writeHandle_ = null; + if (writeHandleBuilder_ != null) { + writeHandleBuilder_.dispose(); + writeHandleBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Opaque value describing a previous write. If set, the client must use this
+     * in an AppendObjectSpec first_message when retrying the write stream. If not
+     * set, clients might retry the original request.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + */ + public com.google.storage.v2.BidiWriteHandle.Builder getWriteHandleBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetWriteHandleFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Opaque value describing a previous write. If set, the client must use this
+     * in an AppendObjectSpec first_message when retrying the write stream. If not
+     * set, clients might retry the original request.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + */ + public com.google.storage.v2.BidiWriteHandleOrBuilder getWriteHandleOrBuilder() { + if (writeHandleBuilder_ != null) { + return writeHandleBuilder_.getMessageOrBuilder(); + } else { + return writeHandle_ == null + ? com.google.storage.v2.BidiWriteHandle.getDefaultInstance() + : writeHandle_; + } + } + + /** + * + * + *
+     * Opaque value describing a previous write. If set, the client must use this
+     * in an AppendObjectSpec first_message when retrying the write stream. If not
+     * set, clients might retry the original request.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiWriteHandle, + com.google.storage.v2.BidiWriteHandle.Builder, + com.google.storage.v2.BidiWriteHandleOrBuilder> + internalGetWriteHandleFieldBuilder() { + if (writeHandleBuilder_ == null) { + writeHandleBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiWriteHandle, + com.google.storage.v2.BidiWriteHandle.Builder, + com.google.storage.v2.BidiWriteHandleOrBuilder>( + getWriteHandle(), getParentForChildren(), isClean()); + writeHandle_ = null; + } + return writeHandleBuilder_; + } + + private long generation_; + + /** + * + * + *
+     * The generation of the object that triggered the redirect. This is set
+     * iff `write_handle` is set. If set, the client must use this in an
+     * `AppendObjectSpec` first_message when retrying the write stream.
+     * 
+ * + * optional int64 generation = 3; + * + * @return Whether the generation field is set. + */ + @java.lang.Override + public boolean hasGeneration() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * The generation of the object that triggered the redirect. This is set
+     * iff `write_handle` is set. If set, the client must use this in an
+     * `AppendObjectSpec` first_message when retrying the write stream.
+     * 
+ * + * optional int64 generation = 3; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + /** + * + * + *
+     * The generation of the object that triggered the redirect. This is set
+     * iff `write_handle` is set. If set, the client must use this in an
+     * `AppendObjectSpec` first_message when retrying the write stream.
+     * 
+ * + * optional int64 generation = 3; + * + * @param value The generation to set. + * @return This builder for chaining. + */ + public Builder setGeneration(long value) { + + generation_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * The generation of the object that triggered the redirect. This is set
+     * iff `write_handle` is set. If set, the client must use this in an
+     * `AppendObjectSpec` first_message when retrying the write stream.
+     * 
+ * + * optional int64 generation = 3; + * + * @return This builder for chaining. + */ + public Builder clearGeneration() { + bitField0_ = (bitField0_ & ~0x00000004); + generation_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.BidiWriteObjectRedirectedError) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.BidiWriteObjectRedirectedError) + private static final com.google.storage.v2.BidiWriteObjectRedirectedError DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.BidiWriteObjectRedirectedError(); + } + + public static com.google.storage.v2.BidiWriteObjectRedirectedError getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BidiWriteObjectRedirectedError parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteObjectRedirectedError getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectRedirectedErrorOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectRedirectedErrorOrBuilder.java new file mode 100644 index 000000000000..deebedf68588 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectRedirectedErrorOrBuilder.java @@ -0,0 +1,146 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface BidiWriteObjectRedirectedErrorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.BidiWriteObjectRedirectedError) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The routing token the client must use when retrying the write stream.
+   * This value must be provided in the header `x-goog-request-params`, with key
+   * `routing_token` and this string verbatim as the value.
+   * 
+ * + * optional string routing_token = 1; + * + * @return Whether the routingToken field is set. + */ + boolean hasRoutingToken(); + + /** + * + * + *
+   * The routing token the client must use when retrying the write stream.
+   * This value must be provided in the header `x-goog-request-params`, with key
+   * `routing_token` and this string verbatim as the value.
+   * 
+ * + * optional string routing_token = 1; + * + * @return The routingToken. + */ + java.lang.String getRoutingToken(); + + /** + * + * + *
+   * The routing token the client must use when retrying the write stream.
+   * This value must be provided in the header `x-goog-request-params`, with key
+   * `routing_token` and this string verbatim as the value.
+   * 
+ * + * optional string routing_token = 1; + * + * @return The bytes for routingToken. + */ + com.google.protobuf.ByteString getRoutingTokenBytes(); + + /** + * + * + *
+   * Opaque value describing a previous write. If set, the client must use this
+   * in an AppendObjectSpec first_message when retrying the write stream. If not
+   * set, clients might retry the original request.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + * + * @return Whether the writeHandle field is set. + */ + boolean hasWriteHandle(); + + /** + * + * + *
+   * Opaque value describing a previous write. If set, the client must use this
+   * in an AppendObjectSpec first_message when retrying the write stream. If not
+   * set, clients might retry the original request.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + * + * @return The writeHandle. + */ + com.google.storage.v2.BidiWriteHandle getWriteHandle(); + + /** + * + * + *
+   * Opaque value describing a previous write. If set, the client must use this
+   * in an AppendObjectSpec first_message when retrying the write stream. If not
+   * set, clients might retry the original request.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 2; + */ + com.google.storage.v2.BidiWriteHandleOrBuilder getWriteHandleOrBuilder(); + + /** + * + * + *
+   * The generation of the object that triggered the redirect. This is set
+   * iff `write_handle` is set. If set, the client must use this in an
+   * `AppendObjectSpec` first_message when retrying the write stream.
+   * 
+ * + * optional int64 generation = 3; + * + * @return Whether the generation field is set. + */ + boolean hasGeneration(); + + /** + * + * + *
+   * The generation of the object that triggered the redirect. This is set
+   * iff `write_handle` is set. If set, the client must use this in an
+   * `AppendObjectSpec` first_message when retrying the write stream.
+   * 
+ * + * optional int64 generation = 3; + * + * @return The generation. + */ + long getGeneration(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectRequest.java new file mode 100644 index 000000000000..5a1f0c7d0cdd --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectRequest.java @@ -0,0 +1,2950 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for
+ * [BidiWriteObject][google.storage.v2.Storage.BidiWriteObject].
+ * 
+ * + * Protobuf type {@code google.storage.v2.BidiWriteObjectRequest} + */ +@com.google.protobuf.Generated +public final class BidiWriteObjectRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.BidiWriteObjectRequest) + BidiWriteObjectRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BidiWriteObjectRequest"); + } + + // Use BidiWriteObjectRequest.newBuilder() to construct. + private BidiWriteObjectRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BidiWriteObjectRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiWriteObjectRequest.class, + com.google.storage.v2.BidiWriteObjectRequest.Builder.class); + } + + private int bitField0_; + private int firstMessageCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object firstMessage_; + + public enum FirstMessageCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + UPLOAD_ID(1), + WRITE_OBJECT_SPEC(2), + APPEND_OBJECT_SPEC(11), + FIRSTMESSAGE_NOT_SET(0); + private final int value; + + private FirstMessageCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. 
+ * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static FirstMessageCase valueOf(int value) { + return forNumber(value); + } + + public static FirstMessageCase forNumber(int value) { + switch (value) { + case 1: + return UPLOAD_ID; + case 2: + return WRITE_OBJECT_SPEC; + case 11: + return APPEND_OBJECT_SPEC; + case 0: + return FIRSTMESSAGE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public FirstMessageCase getFirstMessageCase() { + return FirstMessageCase.forNumber(firstMessageCase_); + } + + private int dataCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object data_; + + public enum DataCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + CHECKSUMMED_DATA(4), + DATA_NOT_SET(0); + private final int value; + + private DataCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static DataCase valueOf(int value) { + return forNumber(value); + } + + public static DataCase forNumber(int value) { + switch (value) { + case 4: + return CHECKSUMMED_DATA; + case 0: + return DATA_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public DataCase getDataCase() { + return DataCase.forNumber(dataCase_); + } + + public static final int UPLOAD_ID_FIELD_NUMBER = 1; + + /** + * + * + *
+   * For resumable uploads. This should be the `upload_id` returned from a
+   * call to `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1; + * + * @return Whether the uploadId field is set. + */ + public boolean hasUploadId() { + return firstMessageCase_ == 1; + } + + /** + * + * + *
+   * For resumable uploads. This should be the `upload_id` returned from a
+   * call to `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1; + * + * @return The uploadId. + */ + public java.lang.String getUploadId() { + java.lang.Object ref = ""; + if (firstMessageCase_ == 1) { + ref = firstMessage_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (firstMessageCase_ == 1) { + firstMessage_ = s; + } + return s; + } + } + + /** + * + * + *
+   * For resumable uploads. This should be the `upload_id` returned from a
+   * call to `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1; + * + * @return The bytes for uploadId. + */ + public com.google.protobuf.ByteString getUploadIdBytes() { + java.lang.Object ref = ""; + if (firstMessageCase_ == 1) { + ref = firstMessage_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (firstMessageCase_ == 1) { + firstMessage_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int WRITE_OBJECT_SPEC_FIELD_NUMBER = 2; + + /** + * + * + *
+   * For non-resumable uploads. Describes the overall upload, including the
+   * destination bucket and object name, preconditions, etc.
+   * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + * + * @return Whether the writeObjectSpec field is set. + */ + @java.lang.Override + public boolean hasWriteObjectSpec() { + return firstMessageCase_ == 2; + } + + /** + * + * + *
+   * For non-resumable uploads. Describes the overall upload, including the
+   * destination bucket and object name, preconditions, etc.
+   * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + * + * @return The writeObjectSpec. + */ + @java.lang.Override + public com.google.storage.v2.WriteObjectSpec getWriteObjectSpec() { + if (firstMessageCase_ == 2) { + return (com.google.storage.v2.WriteObjectSpec) firstMessage_; + } + return com.google.storage.v2.WriteObjectSpec.getDefaultInstance(); + } + + /** + * + * + *
+   * For non-resumable uploads. Describes the overall upload, including the
+   * destination bucket and object name, preconditions, etc.
+   * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + @java.lang.Override + public com.google.storage.v2.WriteObjectSpecOrBuilder getWriteObjectSpecOrBuilder() { + if (firstMessageCase_ == 2) { + return (com.google.storage.v2.WriteObjectSpec) firstMessage_; + } + return com.google.storage.v2.WriteObjectSpec.getDefaultInstance(); + } + + public static final int APPEND_OBJECT_SPEC_FIELD_NUMBER = 11; + + /** + * + * + *
+   * For appendable uploads. Describes the object to append to.
+   * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + * + * @return Whether the appendObjectSpec field is set. + */ + @java.lang.Override + public boolean hasAppendObjectSpec() { + return firstMessageCase_ == 11; + } + + /** + * + * + *
+   * For appendable uploads. Describes the object to append to.
+   * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + * + * @return The appendObjectSpec. + */ + @java.lang.Override + public com.google.storage.v2.AppendObjectSpec getAppendObjectSpec() { + if (firstMessageCase_ == 11) { + return (com.google.storage.v2.AppendObjectSpec) firstMessage_; + } + return com.google.storage.v2.AppendObjectSpec.getDefaultInstance(); + } + + /** + * + * + *
+   * For appendable uploads. Describes the object to append to.
+   * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + */ + @java.lang.Override + public com.google.storage.v2.AppendObjectSpecOrBuilder getAppendObjectSpecOrBuilder() { + if (firstMessageCase_ == 11) { + return (com.google.storage.v2.AppendObjectSpec) firstMessage_; + } + return com.google.storage.v2.AppendObjectSpec.getDefaultInstance(); + } + + public static final int WRITE_OFFSET_FIELD_NUMBER = 3; + private long writeOffset_ = 0L; + + /** + * + * + *
+   * Required. The offset from the beginning of the object at which the data
+   * should be written.
+   *
+   * In the first `WriteObjectRequest` of a `WriteObject()` action, it
+   * indicates the initial offset for the `Write()` call. The value must be
+   * equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+   * return (0 if this is the first write to the object).
+   *
+   * On subsequent calls, this value must be no larger than the sum of the
+   * first `write_offset` and the sizes of all `data` chunks sent previously on
+   * this stream.
+   *
+   * An invalid value causes an error.
+   * 
+ * + * int64 write_offset = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The writeOffset. + */ + @java.lang.Override + public long getWriteOffset() { + return writeOffset_; + } + + public static final int CHECKSUMMED_DATA_FIELD_NUMBER = 4; + + /** + * + * + *
+   * The data to insert. If a crc32c checksum is provided that doesn't match
+   * the checksum computed by the service, the request fails.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + * + * @return Whether the checksummedData field is set. + */ + @java.lang.Override + public boolean hasChecksummedData() { + return dataCase_ == 4; + } + + /** + * + * + *
+   * The data to insert. If a crc32c checksum is provided that doesn't match
+   * the checksum computed by the service, the request fails.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + * + * @return The checksummedData. + */ + @java.lang.Override + public com.google.storage.v2.ChecksummedData getChecksummedData() { + if (dataCase_ == 4) { + return (com.google.storage.v2.ChecksummedData) data_; + } + return com.google.storage.v2.ChecksummedData.getDefaultInstance(); + } + + /** + * + * + *
+   * The data to insert. If a crc32c checksum is provided that doesn't match
+   * the checksum computed by the service, the request fails.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + @java.lang.Override + public com.google.storage.v2.ChecksummedDataOrBuilder getChecksummedDataOrBuilder() { + if (dataCase_ == 4) { + return (com.google.storage.v2.ChecksummedData) data_; + } + return com.google.storage.v2.ChecksummedData.getDefaultInstance(); + } + + public static final int OBJECT_CHECKSUMS_FIELD_NUMBER = 6; + private com.google.storage.v2.ObjectChecksums objectChecksums_; + + /** + * + * + *
+   * Optional. Checksums for the complete object. If the checksums computed by
+   * the service don't match the specified checksums the call fails. Might only
+   * be provided in the first request or the last request (with finish_write
+   * set).
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + @java.lang.Override + public boolean hasObjectChecksums() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. Checksums for the complete object. If the checksums computed by
+   * the service don't match the specified checksums the call fails. Might only
+   * be provided in the first request or the last request (with finish_write
+   * set).
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksums getObjectChecksums() { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + + /** + * + * + *
+   * Optional. Checksums for the complete object. If the checksums computed by
+   * the service don't match the specified checksums the call fails. Might only
+   * be provided in the first request or the last request (with finish_write
+   * set).
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder() { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + + public static final int STATE_LOOKUP_FIELD_NUMBER = 7; + private boolean stateLookup_ = false; + + /** + * + * + *
+   * Optional. For each `BidiWriteObjectRequest` where `state_lookup` is `true`
+   * or the client closes the stream, the service sends a
+   * `BidiWriteObjectResponse` containing the current persisted size. The
+   * persisted size sent in responses covers all the bytes the server has
+   * persisted thus far and can be used to decide what data is safe for the
+   * client to drop. Note that the object's current size reported by the
+   * `BidiWriteObjectResponse` might lag behind the number of bytes written by
+   * the client. This field is ignored if `finish_write` is set to true.
+   * 
+ * + * bool state_lookup = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The stateLookup. + */ + @java.lang.Override + public boolean getStateLookup() { + return stateLookup_; + } + + public static final int FLUSH_FIELD_NUMBER = 8; + private boolean flush_ = false; + + /** + * + * + *
+   * Optional. Persists data written on the stream, up to and including the
+   * current message, to permanent storage. This option should be used sparingly
+   * as it might reduce performance. Ongoing writes are periodically persisted
+   * on the server even when `flush` is not set. This field is ignored if
+   * `finish_write` is set to true since there's no need to checkpoint or flush
+   * if this message completes the write.
+   * 
+ * + * bool flush = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The flush. + */ + @java.lang.Override + public boolean getFlush() { + return flush_; + } + + public static final int FINISH_WRITE_FIELD_NUMBER = 9; + private boolean finishWrite_ = false; + + /** + * + * + *
+   * Optional. If `true`, this indicates that the write is complete. Sending any
+   * `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+   * causes an error.
+   * For a non-resumable write (where the `upload_id` was not set in the first
+   * message), it is an error not to set this field in the final message of the
+   * stream.
+   * 
+ * + * bool finish_write = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The finishWrite. + */ + @java.lang.Override + public boolean getFinishWrite() { + return finishWrite_; + } + + public static final int COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER = 10; + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + @java.lang.Override + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (firstMessageCase_ == 1) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, firstMessage_); + } + if (firstMessageCase_ == 2) { + output.writeMessage(2, (com.google.storage.v2.WriteObjectSpec) firstMessage_); + } + if (writeOffset_ != 0L) { + output.writeInt64(3, writeOffset_); + } + if (dataCase_ == 4) { + output.writeMessage(4, (com.google.storage.v2.ChecksummedData) data_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(6, getObjectChecksums()); + } + if (stateLookup_ != false) { + output.writeBool(7, stateLookup_); + } + if (flush_ != false) { + output.writeBool(8, flush_); + } + if (finishWrite_ != false) { + output.writeBool(9, finishWrite_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(10, getCommonObjectRequestParams()); + } + if (firstMessageCase_ == 11) { + output.writeMessage(11, (com.google.storage.v2.AppendObjectSpec) firstMessage_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (firstMessageCase_ == 1) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(1, firstMessage_); + } + if (firstMessageCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.storage.v2.WriteObjectSpec) firstMessage_); + } + if (writeOffset_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, writeOffset_); + } + if (dataCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.storage.v2.ChecksummedData) data_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getObjectChecksums()); + } + if (stateLookup_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(7, stateLookup_); + } + if (flush_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(8, flush_); + } + if (finishWrite_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(9, finishWrite_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 10, getCommonObjectRequestParams()); + } + if (firstMessageCase_ == 11) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 11, (com.google.storage.v2.AppendObjectSpec) firstMessage_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.BidiWriteObjectRequest)) { + return super.equals(obj); + } + com.google.storage.v2.BidiWriteObjectRequest other = + (com.google.storage.v2.BidiWriteObjectRequest) obj; + + if (getWriteOffset() != other.getWriteOffset()) return false; + if (hasObjectChecksums() != other.hasObjectChecksums()) return false; + if (hasObjectChecksums()) { + if (!getObjectChecksums().equals(other.getObjectChecksums())) return false; + } + if 
(getStateLookup() != other.getStateLookup()) return false; + if (getFlush() != other.getFlush()) return false; + if (getFinishWrite() != other.getFinishWrite()) return false; + if (hasCommonObjectRequestParams() != other.hasCommonObjectRequestParams()) return false; + if (hasCommonObjectRequestParams()) { + if (!getCommonObjectRequestParams().equals(other.getCommonObjectRequestParams())) + return false; + } + if (!getFirstMessageCase().equals(other.getFirstMessageCase())) return false; + switch (firstMessageCase_) { + case 1: + if (!getUploadId().equals(other.getUploadId())) return false; + break; + case 2: + if (!getWriteObjectSpec().equals(other.getWriteObjectSpec())) return false; + break; + case 11: + if (!getAppendObjectSpec().equals(other.getAppendObjectSpec())) return false; + break; + case 0: + default: + } + if (!getDataCase().equals(other.getDataCase())) return false; + switch (dataCase_) { + case 4: + if (!getChecksummedData().equals(other.getChecksummedData())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + WRITE_OFFSET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getWriteOffset()); + if (hasObjectChecksums()) { + hash = (37 * hash) + OBJECT_CHECKSUMS_FIELD_NUMBER; + hash = (53 * hash) + getObjectChecksums().hashCode(); + } + hash = (37 * hash) + STATE_LOOKUP_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getStateLookup()); + hash = (37 * hash) + FLUSH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getFlush()); + hash = (37 * hash) + FINISH_WRITE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getFinishWrite()); + if 
(hasCommonObjectRequestParams()) { + hash = (37 * hash) + COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getCommonObjectRequestParams().hashCode(); + } + switch (firstMessageCase_) { + case 1: + hash = (37 * hash) + UPLOAD_ID_FIELD_NUMBER; + hash = (53 * hash) + getUploadId().hashCode(); + break; + case 2: + hash = (37 * hash) + WRITE_OBJECT_SPEC_FIELD_NUMBER; + hash = (53 * hash) + getWriteObjectSpec().hashCode(); + break; + case 11: + hash = (37 * hash) + APPEND_OBJECT_SPEC_FIELD_NUMBER; + hash = (53 * hash) + getAppendObjectSpec().hashCode(); + break; + case 0: + default: + } + switch (dataCase_) { + case 4: + hash = (37 * hash) + CHECKSUMMED_DATA_FIELD_NUMBER; + hash = (53 * hash) + getChecksummedData().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.BidiWriteObjectRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiWriteObjectRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiWriteObjectRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiWriteObjectRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiWriteObjectRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiWriteObjectRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiWriteObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + 
+ @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.BidiWriteObjectRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for
+   * [BidiWriteObject][google.storage.v2.Storage.BidiWriteObject].
+   * 
+ * + * Protobuf type {@code google.storage.v2.BidiWriteObjectRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.BidiWriteObjectRequest) + com.google.storage.v2.BidiWriteObjectRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiWriteObjectRequest.class, + com.google.storage.v2.BidiWriteObjectRequest.Builder.class); + } + + // Construct using com.google.storage.v2.BidiWriteObjectRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetObjectChecksumsFieldBuilder(); + internalGetCommonObjectRequestParamsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (writeObjectSpecBuilder_ != null) { + writeObjectSpecBuilder_.clear(); + } + if (appendObjectSpecBuilder_ != null) { + appendObjectSpecBuilder_.clear(); + } + writeOffset_ = 0L; + if (checksummedDataBuilder_ != null) { + checksummedDataBuilder_.clear(); + } + objectChecksums_ = null; + if (objectChecksumsBuilder_ != null) { + objectChecksumsBuilder_.dispose(); + objectChecksumsBuilder_ = null; + } + stateLookup_ = false; + flush_ = false; + finishWrite_ = 
false; + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + firstMessageCase_ = 0; + firstMessage_ = null; + dataCase_ = 0; + data_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteObjectRequest getDefaultInstanceForType() { + return com.google.storage.v2.BidiWriteObjectRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteObjectRequest build() { + com.google.storage.v2.BidiWriteObjectRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteObjectRequest buildPartial() { + com.google.storage.v2.BidiWriteObjectRequest result = + new com.google.storage.v2.BidiWriteObjectRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.BidiWriteObjectRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.writeOffset_ = writeOffset_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000020) != 0)) { + result.objectChecksums_ = + objectChecksumsBuilder_ == null ? 
objectChecksums_ : objectChecksumsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.stateLookup_ = stateLookup_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.flush_ = flush_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.finishWrite_ = finishWrite_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.commonObjectRequestParams_ = + commonObjectRequestParamsBuilder_ == null + ? commonObjectRequestParams_ + : commonObjectRequestParamsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.storage.v2.BidiWriteObjectRequest result) { + result.firstMessageCase_ = firstMessageCase_; + result.firstMessage_ = this.firstMessage_; + if (firstMessageCase_ == 2 && writeObjectSpecBuilder_ != null) { + result.firstMessage_ = writeObjectSpecBuilder_.build(); + } + if (firstMessageCase_ == 11 && appendObjectSpecBuilder_ != null) { + result.firstMessage_ = appendObjectSpecBuilder_.build(); + } + result.dataCase_ = dataCase_; + result.data_ = this.data_; + if (dataCase_ == 4 && checksummedDataBuilder_ != null) { + result.data_ = checksummedDataBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.BidiWriteObjectRequest) { + return mergeFrom((com.google.storage.v2.BidiWriteObjectRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.BidiWriteObjectRequest other) { + if (other == com.google.storage.v2.BidiWriteObjectRequest.getDefaultInstance()) return this; + if (other.getWriteOffset() != 0L) { + setWriteOffset(other.getWriteOffset()); + } + if (other.hasObjectChecksums()) { + mergeObjectChecksums(other.getObjectChecksums()); + } + if (other.getStateLookup() != false) { + setStateLookup(other.getStateLookup()); + } + if 
(other.getFlush() != false) { + setFlush(other.getFlush()); + } + if (other.getFinishWrite() != false) { + setFinishWrite(other.getFinishWrite()); + } + if (other.hasCommonObjectRequestParams()) { + mergeCommonObjectRequestParams(other.getCommonObjectRequestParams()); + } + switch (other.getFirstMessageCase()) { + case UPLOAD_ID: + { + firstMessageCase_ = 1; + firstMessage_ = other.firstMessage_; + onChanged(); + break; + } + case WRITE_OBJECT_SPEC: + { + mergeWriteObjectSpec(other.getWriteObjectSpec()); + break; + } + case APPEND_OBJECT_SPEC: + { + mergeAppendObjectSpec(other.getAppendObjectSpec()); + break; + } + case FIRSTMESSAGE_NOT_SET: + { + break; + } + } + switch (other.getDataCase()) { + case CHECKSUMMED_DATA: + { + mergeChecksummedData(other.getChecksummedData()); + break; + } + case DATA_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + firstMessageCase_ = 1; + firstMessage_ = s; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetWriteObjectSpecFieldBuilder().getBuilder(), extensionRegistry); + firstMessageCase_ = 2; + break; + } // case 18 + case 24: + { + writeOffset_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 24 + case 34: + { + input.readMessage( + internalGetChecksummedDataFieldBuilder().getBuilder(), extensionRegistry); + dataCase_ = 4; + break; + } // case 34 + case 50: + { + 
input.readMessage( + internalGetObjectChecksumsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 56: + { + stateLookup_ = input.readBool(); + bitField0_ |= 0x00000040; + break; + } // case 56 + case 64: + { + flush_ = input.readBool(); + bitField0_ |= 0x00000080; + break; + } // case 64 + case 72: + { + finishWrite_ = input.readBool(); + bitField0_ |= 0x00000100; + break; + } // case 72 + case 82: + { + input.readMessage( + internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000200; + break; + } // case 82 + case 90: + { + input.readMessage( + internalGetAppendObjectSpecFieldBuilder().getBuilder(), extensionRegistry); + firstMessageCase_ = 11; + break; + } // case 90 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int firstMessageCase_ = 0; + private java.lang.Object firstMessage_; + + public FirstMessageCase getFirstMessageCase() { + return FirstMessageCase.forNumber(firstMessageCase_); + } + + public Builder clearFirstMessage() { + firstMessageCase_ = 0; + firstMessage_ = null; + onChanged(); + return this; + } + + private int dataCase_ = 0; + private java.lang.Object data_; + + public DataCase getDataCase() { + return DataCase.forNumber(dataCase_); + } + + public Builder clearData() { + dataCase_ = 0; + data_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + * + * + *
+     * For resumable uploads. This should be the `upload_id` returned from a
+     * call to `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1; + * + * @return Whether the uploadId field is set. + */ + @java.lang.Override + public boolean hasUploadId() { + return firstMessageCase_ == 1; + } + + /** + * + * + *
+     * For resumable uploads. This should be the `upload_id` returned from a
+     * call to `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1; + * + * @return The uploadId. + */ + @java.lang.Override + public java.lang.String getUploadId() { + java.lang.Object ref = ""; + if (firstMessageCase_ == 1) { + ref = firstMessage_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (firstMessageCase_ == 1) { + firstMessage_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * For resumable uploads. This should be the `upload_id` returned from a
+     * call to `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1; + * + * @return The bytes for uploadId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getUploadIdBytes() { + java.lang.Object ref = ""; + if (firstMessageCase_ == 1) { + ref = firstMessage_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (firstMessageCase_ == 1) { + firstMessage_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * For resumable uploads. This should be the `upload_id` returned from a
+     * call to `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1; + * + * @param value The uploadId to set. + * @return This builder for chaining. + */ + public Builder setUploadId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + firstMessageCase_ = 1; + firstMessage_ = value; + onChanged(); + return this; + } + + /** + * + * + *
+     * For resumable uploads. This should be the `upload_id` returned from a
+     * call to `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearUploadId() { + if (firstMessageCase_ == 1) { + firstMessageCase_ = 0; + firstMessage_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * For resumable uploads. This should be the `upload_id` returned from a
+     * call to `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1; + * + * @param value The bytes for uploadId to set. + * @return This builder for chaining. + */ + public Builder setUploadIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + firstMessageCase_ = 1; + firstMessage_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.WriteObjectSpec, + com.google.storage.v2.WriteObjectSpec.Builder, + com.google.storage.v2.WriteObjectSpecOrBuilder> + writeObjectSpecBuilder_; + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + * + * @return Whether the writeObjectSpec field is set. + */ + @java.lang.Override + public boolean hasWriteObjectSpec() { + return firstMessageCase_ == 2; + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + * + * @return The writeObjectSpec. + */ + @java.lang.Override + public com.google.storage.v2.WriteObjectSpec getWriteObjectSpec() { + if (writeObjectSpecBuilder_ == null) { + if (firstMessageCase_ == 2) { + return (com.google.storage.v2.WriteObjectSpec) firstMessage_; + } + return com.google.storage.v2.WriteObjectSpec.getDefaultInstance(); + } else { + if (firstMessageCase_ == 2) { + return writeObjectSpecBuilder_.getMessage(); + } + return com.google.storage.v2.WriteObjectSpec.getDefaultInstance(); + } + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + public Builder setWriteObjectSpec(com.google.storage.v2.WriteObjectSpec value) { + if (writeObjectSpecBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + firstMessage_ = value; + onChanged(); + } else { + writeObjectSpecBuilder_.setMessage(value); + } + firstMessageCase_ = 2; + return this; + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + public Builder setWriteObjectSpec( + com.google.storage.v2.WriteObjectSpec.Builder builderForValue) { + if (writeObjectSpecBuilder_ == null) { + firstMessage_ = builderForValue.build(); + onChanged(); + } else { + writeObjectSpecBuilder_.setMessage(builderForValue.build()); + } + firstMessageCase_ = 2; + return this; + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + public Builder mergeWriteObjectSpec(com.google.storage.v2.WriteObjectSpec value) { + if (writeObjectSpecBuilder_ == null) { + if (firstMessageCase_ == 2 + && firstMessage_ != com.google.storage.v2.WriteObjectSpec.getDefaultInstance()) { + firstMessage_ = + com.google.storage.v2.WriteObjectSpec.newBuilder( + (com.google.storage.v2.WriteObjectSpec) firstMessage_) + .mergeFrom(value) + .buildPartial(); + } else { + firstMessage_ = value; + } + onChanged(); + } else { + if (firstMessageCase_ == 2) { + writeObjectSpecBuilder_.mergeFrom(value); + } else { + writeObjectSpecBuilder_.setMessage(value); + } + } + firstMessageCase_ = 2; + return this; + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + public Builder clearWriteObjectSpec() { + if (writeObjectSpecBuilder_ == null) { + if (firstMessageCase_ == 2) { + firstMessageCase_ = 0; + firstMessage_ = null; + onChanged(); + } + } else { + if (firstMessageCase_ == 2) { + firstMessageCase_ = 0; + firstMessage_ = null; + } + writeObjectSpecBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + public com.google.storage.v2.WriteObjectSpec.Builder getWriteObjectSpecBuilder() { + return internalGetWriteObjectSpecFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + @java.lang.Override + public com.google.storage.v2.WriteObjectSpecOrBuilder getWriteObjectSpecOrBuilder() { + if ((firstMessageCase_ == 2) && (writeObjectSpecBuilder_ != null)) { + return writeObjectSpecBuilder_.getMessageOrBuilder(); + } else { + if (firstMessageCase_ == 2) { + return (com.google.storage.v2.WriteObjectSpec) firstMessage_; + } + return com.google.storage.v2.WriteObjectSpec.getDefaultInstance(); + } + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.WriteObjectSpec, + com.google.storage.v2.WriteObjectSpec.Builder, + com.google.storage.v2.WriteObjectSpecOrBuilder> + internalGetWriteObjectSpecFieldBuilder() { + if (writeObjectSpecBuilder_ == null) { + if (!(firstMessageCase_ == 2)) { + firstMessage_ = com.google.storage.v2.WriteObjectSpec.getDefaultInstance(); + } + writeObjectSpecBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.WriteObjectSpec, + com.google.storage.v2.WriteObjectSpec.Builder, + com.google.storage.v2.WriteObjectSpecOrBuilder>( + (com.google.storage.v2.WriteObjectSpec) firstMessage_, + getParentForChildren(), + isClean()); + firstMessage_ = null; + } + firstMessageCase_ = 2; + onChanged(); + return writeObjectSpecBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.AppendObjectSpec, + com.google.storage.v2.AppendObjectSpec.Builder, + com.google.storage.v2.AppendObjectSpecOrBuilder> + appendObjectSpecBuilder_; + + /** + * + * + *
+     * For appendable uploads. Describes the object to append to.
+     * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + * + * @return Whether the appendObjectSpec field is set. + */ + @java.lang.Override + public boolean hasAppendObjectSpec() { + return firstMessageCase_ == 11; + } + + /** + * + * + *
+     * For appendable uploads. Describes the object to append to.
+     * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + * + * @return The appendObjectSpec. + */ + @java.lang.Override + public com.google.storage.v2.AppendObjectSpec getAppendObjectSpec() { + if (appendObjectSpecBuilder_ == null) { + if (firstMessageCase_ == 11) { + return (com.google.storage.v2.AppendObjectSpec) firstMessage_; + } + return com.google.storage.v2.AppendObjectSpec.getDefaultInstance(); + } else { + if (firstMessageCase_ == 11) { + return appendObjectSpecBuilder_.getMessage(); + } + return com.google.storage.v2.AppendObjectSpec.getDefaultInstance(); + } + } + + /** + * + * + *
+     * For appendable uploads. Describes the object to append to.
+     * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + */ + public Builder setAppendObjectSpec(com.google.storage.v2.AppendObjectSpec value) { + if (appendObjectSpecBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + firstMessage_ = value; + onChanged(); + } else { + appendObjectSpecBuilder_.setMessage(value); + } + firstMessageCase_ = 11; + return this; + } + + /** + * + * + *
+     * For appendable uploads. Describes the object to append to.
+     * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + */ + public Builder setAppendObjectSpec( + com.google.storage.v2.AppendObjectSpec.Builder builderForValue) { + if (appendObjectSpecBuilder_ == null) { + firstMessage_ = builderForValue.build(); + onChanged(); + } else { + appendObjectSpecBuilder_.setMessage(builderForValue.build()); + } + firstMessageCase_ = 11; + return this; + } + + /** + * + * + *
+     * For appendable uploads. Describes the object to append to.
+     * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + */ + public Builder mergeAppendObjectSpec(com.google.storage.v2.AppendObjectSpec value) { + if (appendObjectSpecBuilder_ == null) { + if (firstMessageCase_ == 11 + && firstMessage_ != com.google.storage.v2.AppendObjectSpec.getDefaultInstance()) { + firstMessage_ = + com.google.storage.v2.AppendObjectSpec.newBuilder( + (com.google.storage.v2.AppendObjectSpec) firstMessage_) + .mergeFrom(value) + .buildPartial(); + } else { + firstMessage_ = value; + } + onChanged(); + } else { + if (firstMessageCase_ == 11) { + appendObjectSpecBuilder_.mergeFrom(value); + } else { + appendObjectSpecBuilder_.setMessage(value); + } + } + firstMessageCase_ = 11; + return this; + } + + /** + * + * + *
+     * For appendable uploads. Describes the object to append to.
+     * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + */ + public Builder clearAppendObjectSpec() { + if (appendObjectSpecBuilder_ == null) { + if (firstMessageCase_ == 11) { + firstMessageCase_ = 0; + firstMessage_ = null; + onChanged(); + } + } else { + if (firstMessageCase_ == 11) { + firstMessageCase_ = 0; + firstMessage_ = null; + } + appendObjectSpecBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * For appendable uploads. Describes the object to append to.
+     * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + */ + public com.google.storage.v2.AppendObjectSpec.Builder getAppendObjectSpecBuilder() { + return internalGetAppendObjectSpecFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * For appendable uploads. Describes the object to append to.
+     * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + */ + @java.lang.Override + public com.google.storage.v2.AppendObjectSpecOrBuilder getAppendObjectSpecOrBuilder() { + if ((firstMessageCase_ == 11) && (appendObjectSpecBuilder_ != null)) { + return appendObjectSpecBuilder_.getMessageOrBuilder(); + } else { + if (firstMessageCase_ == 11) { + return (com.google.storage.v2.AppendObjectSpec) firstMessage_; + } + return com.google.storage.v2.AppendObjectSpec.getDefaultInstance(); + } + } + + /** + * + * + *
+     * For appendable uploads. Describes the object to append to.
+     * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.AppendObjectSpec, + com.google.storage.v2.AppendObjectSpec.Builder, + com.google.storage.v2.AppendObjectSpecOrBuilder> + internalGetAppendObjectSpecFieldBuilder() { + if (appendObjectSpecBuilder_ == null) { + if (!(firstMessageCase_ == 11)) { + firstMessage_ = com.google.storage.v2.AppendObjectSpec.getDefaultInstance(); + } + appendObjectSpecBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.AppendObjectSpec, + com.google.storage.v2.AppendObjectSpec.Builder, + com.google.storage.v2.AppendObjectSpecOrBuilder>( + (com.google.storage.v2.AppendObjectSpec) firstMessage_, + getParentForChildren(), + isClean()); + firstMessage_ = null; + } + firstMessageCase_ = 11; + onChanged(); + return appendObjectSpecBuilder_; + } + + private long writeOffset_; + + /** + * + * + *
+     * Required. The offset from the beginning of the object at which the data
+     * should be written.
+     *
+     * In the first `WriteObjectRequest` of a `WriteObject()` action, it
+     * indicates the initial offset for the `Write()` call. The value must be
+     * equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+     * return (0 if this is the first write to the object).
+     *
+     * On subsequent calls, this value must be no larger than the sum of the
+     * first `write_offset` and the sizes of all `data` chunks sent previously on
+     * this stream.
+     *
+     * An invalid value causes an error.
+     * 
+ * + * int64 write_offset = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The writeOffset. + */ + @java.lang.Override + public long getWriteOffset() { + return writeOffset_; + } + + /** + * + * + *
+     * Required. The offset from the beginning of the object at which the data
+     * should be written.
+     *
+     * In the first `WriteObjectRequest` of a `WriteObject()` action, it
+     * indicates the initial offset for the `Write()` call. The value must be
+     * equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+     * return (0 if this is the first write to the object).
+     *
+     * On subsequent calls, this value must be no larger than the sum of the
+     * first `write_offset` and the sizes of all `data` chunks sent previously on
+     * this stream.
+     *
+     * An invalid value causes an error.
+     * 
+ * + * int64 write_offset = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The writeOffset to set. + * @return This builder for chaining. + */ + public Builder setWriteOffset(long value) { + + writeOffset_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The offset from the beginning of the object at which the data
+     * should be written.
+     *
+     * In the first `WriteObjectRequest` of a `WriteObject()` action, it
+     * indicates the initial offset for the `Write()` call. The value must be
+     * equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+     * return (0 if this is the first write to the object).
+     *
+     * On subsequent calls, this value must be no larger than the sum of the
+     * first `write_offset` and the sizes of all `data` chunks sent previously on
+     * this stream.
+     *
+     * An invalid value causes an error.
+     * 
+ * + * int64 write_offset = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearWriteOffset() { + bitField0_ = (bitField0_ & ~0x00000008); + writeOffset_ = 0L; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ChecksummedData, + com.google.storage.v2.ChecksummedData.Builder, + com.google.storage.v2.ChecksummedDataOrBuilder> + checksummedDataBuilder_; + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + * + * @return Whether the checksummedData field is set. + */ + @java.lang.Override + public boolean hasChecksummedData() { + return dataCase_ == 4; + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + * + * @return The checksummedData. + */ + @java.lang.Override + public com.google.storage.v2.ChecksummedData getChecksummedData() { + if (checksummedDataBuilder_ == null) { + if (dataCase_ == 4) { + return (com.google.storage.v2.ChecksummedData) data_; + } + return com.google.storage.v2.ChecksummedData.getDefaultInstance(); + } else { + if (dataCase_ == 4) { + return checksummedDataBuilder_.getMessage(); + } + return com.google.storage.v2.ChecksummedData.getDefaultInstance(); + } + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + public Builder setChecksummedData(com.google.storage.v2.ChecksummedData value) { + if (checksummedDataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + data_ = value; + onChanged(); + } else { + checksummedDataBuilder_.setMessage(value); + } + dataCase_ = 4; + return this; + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + public Builder setChecksummedData( + com.google.storage.v2.ChecksummedData.Builder builderForValue) { + if (checksummedDataBuilder_ == null) { + data_ = builderForValue.build(); + onChanged(); + } else { + checksummedDataBuilder_.setMessage(builderForValue.build()); + } + dataCase_ = 4; + return this; + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + public Builder mergeChecksummedData(com.google.storage.v2.ChecksummedData value) { + if (checksummedDataBuilder_ == null) { + if (dataCase_ == 4 && data_ != com.google.storage.v2.ChecksummedData.getDefaultInstance()) { + data_ = + com.google.storage.v2.ChecksummedData.newBuilder( + (com.google.storage.v2.ChecksummedData) data_) + .mergeFrom(value) + .buildPartial(); + } else { + data_ = value; + } + onChanged(); + } else { + if (dataCase_ == 4) { + checksummedDataBuilder_.mergeFrom(value); + } else { + checksummedDataBuilder_.setMessage(value); + } + } + dataCase_ = 4; + return this; + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + public Builder clearChecksummedData() { + if (checksummedDataBuilder_ == null) { + if (dataCase_ == 4) { + dataCase_ = 0; + data_ = null; + onChanged(); + } + } else { + if (dataCase_ == 4) { + dataCase_ = 0; + data_ = null; + } + checksummedDataBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + public com.google.storage.v2.ChecksummedData.Builder getChecksummedDataBuilder() { + return internalGetChecksummedDataFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + @java.lang.Override + public com.google.storage.v2.ChecksummedDataOrBuilder getChecksummedDataOrBuilder() { + if ((dataCase_ == 4) && (checksummedDataBuilder_ != null)) { + return checksummedDataBuilder_.getMessageOrBuilder(); + } else { + if (dataCase_ == 4) { + return (com.google.storage.v2.ChecksummedData) data_; + } + return com.google.storage.v2.ChecksummedData.getDefaultInstance(); + } + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ChecksummedData, + com.google.storage.v2.ChecksummedData.Builder, + com.google.storage.v2.ChecksummedDataOrBuilder> + internalGetChecksummedDataFieldBuilder() { + if (checksummedDataBuilder_ == null) { + if (!(dataCase_ == 4)) { + data_ = com.google.storage.v2.ChecksummedData.getDefaultInstance(); + } + checksummedDataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ChecksummedData, + com.google.storage.v2.ChecksummedData.Builder, + com.google.storage.v2.ChecksummedDataOrBuilder>( + (com.google.storage.v2.ChecksummedData) data_, getParentForChildren(), isClean()); + data_ = null; + } + dataCase_ = 4; + onChanged(); + return checksummedDataBuilder_; + } + + private com.google.storage.v2.ObjectChecksums objectChecksums_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + objectChecksumsBuilder_; + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. Might only
+     * be provided in the first request or the last request (with finish_write
+     * set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + public boolean hasObjectChecksums() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. Might only
+     * be provided in the first request or the last request (with finish_write
+     * set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + public com.google.storage.v2.ObjectChecksums getObjectChecksums() { + if (objectChecksumsBuilder_ == null) { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } else { + return objectChecksumsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. Might only
+     * be provided in the first request or the last request (with finish_write
+     * set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectChecksums(com.google.storage.v2.ObjectChecksums value) { + if (objectChecksumsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + objectChecksums_ = value; + } else { + objectChecksumsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. Might only
+     * be provided in the first request or the last request (with finish_write
+     * set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectChecksums( + com.google.storage.v2.ObjectChecksums.Builder builderForValue) { + if (objectChecksumsBuilder_ == null) { + objectChecksums_ = builderForValue.build(); + } else { + objectChecksumsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. Might only
+     * be provided in the first request or the last request (with finish_write
+     * set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeObjectChecksums(com.google.storage.v2.ObjectChecksums value) { + if (objectChecksumsBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && objectChecksums_ != null + && objectChecksums_ != com.google.storage.v2.ObjectChecksums.getDefaultInstance()) { + getObjectChecksumsBuilder().mergeFrom(value); + } else { + objectChecksums_ = value; + } + } else { + objectChecksumsBuilder_.mergeFrom(value); + } + if (objectChecksums_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. Might only
+     * be provided in the first request or the last request (with finish_write
+     * set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearObjectChecksums() { + bitField0_ = (bitField0_ & ~0x00000020); + objectChecksums_ = null; + if (objectChecksumsBuilder_ != null) { + objectChecksumsBuilder_.dispose(); + objectChecksumsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. Might only
+     * be provided in the first request or the last request (with finish_write
+     * set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectChecksums.Builder getObjectChecksumsBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetObjectChecksumsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. Might only
+     * be provided in the first request or the last request (with finish_write
+     * set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder() { + if (objectChecksumsBuilder_ != null) { + return objectChecksumsBuilder_.getMessageOrBuilder(); + } else { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. Might only
+     * be provided in the first request or the last request (with finish_write
+     * set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + internalGetObjectChecksumsFieldBuilder() { + if (objectChecksumsBuilder_ == null) { + objectChecksumsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder>( + getObjectChecksums(), getParentForChildren(), isClean()); + objectChecksums_ = null; + } + return objectChecksumsBuilder_; + } + + private boolean stateLookup_; + + /** + * + * + *
+     * Optional. For each `BidiWriteObjectRequest` where `state_lookup` is `true`
+     * or the client closes the stream, the service sends a
+     * `BidiWriteObjectResponse` containing the current persisted size. The
+     * persisted size sent in responses covers all the bytes the server has
+     * persisted thus far and can be used to decide what data is safe for the
+     * client to drop. Note that the object's current size reported by the
+     * `BidiWriteObjectResponse` might lag behind the number of bytes written by
+     * the client. This field is ignored if `finish_write` is set to true.
+     * 
+ * + * bool state_lookup = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The stateLookup. + */ + @java.lang.Override + public boolean getStateLookup() { + return stateLookup_; + } + + /** + * + * + *
+     * Optional. For each `BidiWriteObjectRequest` where `state_lookup` is `true`
+     * or the client closes the stream, the service sends a
+     * `BidiWriteObjectResponse` containing the current persisted size. The
+     * persisted size sent in responses covers all the bytes the server has
+     * persisted thus far and can be used to decide what data is safe for the
+     * client to drop. Note that the object's current size reported by the
+     * `BidiWriteObjectResponse` might lag behind the number of bytes written by
+     * the client. This field is ignored if `finish_write` is set to true.
+     * 
+ * + * bool state_lookup = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The stateLookup to set. + * @return This builder for chaining. + */ + public Builder setStateLookup(boolean value) { + + stateLookup_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. For each `BidiWriteObjectRequest` where `state_lookup` is `true`
+     * or the client closes the stream, the service sends a
+     * `BidiWriteObjectResponse` containing the current persisted size. The
+     * persisted size sent in responses covers all the bytes the server has
+     * persisted thus far and can be used to decide what data is safe for the
+     * client to drop. Note that the object's current size reported by the
+     * `BidiWriteObjectResponse` might lag behind the number of bytes written by
+     * the client. This field is ignored if `finish_write` is set to true.
+     * 
+ * + * bool state_lookup = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearStateLookup() { + bitField0_ = (bitField0_ & ~0x00000040); + stateLookup_ = false; + onChanged(); + return this; + } + + private boolean flush_; + + /** + * + * + *
+     * Optional. Persists data written on the stream, up to and including the
+     * current message, to permanent storage. This option should be used sparingly
+     * as it might reduce performance. Ongoing writes are periodically persisted
+     * on the server even when `flush` is not set. This field is ignored if
+     * `finish_write` is set to true since there's no need to checkpoint or flush
+     * if this message completes the write.
+     * 
+ * + * bool flush = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The flush. + */ + @java.lang.Override + public boolean getFlush() { + return flush_; + } + + /** + * + * + *
+     * Optional. Persists data written on the stream, up to and including the
+     * current message, to permanent storage. This option should be used sparingly
+     * as it might reduce performance. Ongoing writes are periodically persisted
+     * on the server even when `flush` is not set. This field is ignored if
+     * `finish_write` is set to true since there's no need to checkpoint or flush
+     * if this message completes the write.
+     * 
+ * + * bool flush = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The flush to set. + * @return This builder for chaining. + */ + public Builder setFlush(boolean value) { + + flush_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Persists data written on the stream, up to and including the
+     * current message, to permanent storage. This option should be used sparingly
+     * as it might reduce performance. Ongoing writes are periodically persisted
+     * on the server even when `flush` is not set. This field is ignored if
+     * `finish_write` is set to true since there's no need to checkpoint or flush
+     * if this message completes the write.
+     * 
+ * + * bool flush = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearFlush() { + bitField0_ = (bitField0_ & ~0x00000080); + flush_ = false; + onChanged(); + return this; + } + + private boolean finishWrite_; + + /** + * + * + *
+     * Optional. If `true`, this indicates that the write is complete. Sending any
+     * `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+     * causes an error.
+     * For a non-resumable write (where the `upload_id` was not set in the first
+     * message), it is an error not to set this field in the final message of the
+     * stream.
+     * 
+ * + * bool finish_write = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The finishWrite. + */ + @java.lang.Override + public boolean getFinishWrite() { + return finishWrite_; + } + + /** + * + * + *
+     * Optional. If `true`, this indicates that the write is complete. Sending any
+     * `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+     * causes an error.
+     * For a non-resumable write (where the `upload_id` was not set in the first
+     * message), it is an error not to set this field in the final message of the
+     * stream.
+     * 
+ * + * bool finish_write = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The finishWrite to set. + * @return This builder for chaining. + */ + public Builder setFinishWrite(boolean value) { + + finishWrite_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If `true`, this indicates that the write is complete. Sending any
+     * `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+     * causes an error.
+     * For a non-resumable write (where the `upload_id` was not set in the first
+     * message), it is an error not to set this field in the final message of the
+     * stream.
+     * 
+ * + * bool finish_write = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearFinishWrite() { + bitField0_ = (bitField0_ & ~0x00000100); + finishWrite_ = false; + onChanged(); + return this; + } + + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + commonObjectRequestParamsBuilder_; + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000200) != 0); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + if (commonObjectRequestParamsBuilder_ == null) { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } else { + return commonObjectRequestParamsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonObjectRequestParams_ = value; + } else { + commonObjectRequestParamsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams.Builder builderForValue) { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParams_ = builderForValue.build(); + } else { + commonObjectRequestParamsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (((bitField0_ & 0x00000200) != 0) + && commonObjectRequestParams_ != null + && commonObjectRequestParams_ + != com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance()) { + getCommonObjectRequestParamsBuilder().mergeFrom(value); + } else { + commonObjectRequestParams_ = value; + } + } else { + commonObjectRequestParamsBuilder_.mergeFrom(value); + } + if (commonObjectRequestParams_ != null) { + bitField0_ |= 0x00000200; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCommonObjectRequestParams() { + bitField0_ = (bitField0_ & ~0x00000200); + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParams.Builder + getCommonObjectRequestParamsBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + if (commonObjectRequestParamsBuilder_ != null) { + return commonObjectRequestParamsBuilder_.getMessageOrBuilder(); + } else { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + internalGetCommonObjectRequestParamsFieldBuilder() { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParamsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder>( + getCommonObjectRequestParams(), getParentForChildren(), isClean()); + commonObjectRequestParams_ = null; + } + return commonObjectRequestParamsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.BidiWriteObjectRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.BidiWriteObjectRequest) + private static final com.google.storage.v2.BidiWriteObjectRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.BidiWriteObjectRequest(); + } + + public static com.google.storage.v2.BidiWriteObjectRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BidiWriteObjectRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteObjectRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectRequestOrBuilder.java new file mode 100644 index 000000000000..95ef87da740c --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectRequestOrBuilder.java @@ -0,0 +1,370 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface BidiWriteObjectRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.BidiWriteObjectRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * For resumable uploads. This should be the `upload_id` returned from a
+   * call to `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1; + * + * @return Whether the uploadId field is set. + */ + boolean hasUploadId(); + + /** + * + * + *
+   * For resumable uploads. This should be the `upload_id` returned from a
+   * call to `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1; + * + * @return The uploadId. + */ + java.lang.String getUploadId(); + + /** + * + * + *
+   * For resumable uploads. This should be the `upload_id` returned from a
+   * call to `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1; + * + * @return The bytes for uploadId. + */ + com.google.protobuf.ByteString getUploadIdBytes(); + + /** + * + * + *
+   * For non-resumable uploads. Describes the overall upload, including the
+   * destination bucket and object name, preconditions, etc.
+   * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + * + * @return Whether the writeObjectSpec field is set. + */ + boolean hasWriteObjectSpec(); + + /** + * + * + *
+   * For non-resumable uploads. Describes the overall upload, including the
+   * destination bucket and object name, preconditions, etc.
+   * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + * + * @return The writeObjectSpec. + */ + com.google.storage.v2.WriteObjectSpec getWriteObjectSpec(); + + /** + * + * + *
+   * For non-resumable uploads. Describes the overall upload, including the
+   * destination bucket and object name, preconditions, etc.
+   * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + com.google.storage.v2.WriteObjectSpecOrBuilder getWriteObjectSpecOrBuilder(); + + /** + * + * + *
+   * For appendable uploads. Describes the object to append to.
+   * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + * + * @return Whether the appendObjectSpec field is set. + */ + boolean hasAppendObjectSpec(); + + /** + * + * + *
+   * For appendable uploads. Describes the object to append to.
+   * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + * + * @return The appendObjectSpec. + */ + com.google.storage.v2.AppendObjectSpec getAppendObjectSpec(); + + /** + * + * + *
+   * For appendable uploads. Describes the object to append to.
+   * 
+ * + * .google.storage.v2.AppendObjectSpec append_object_spec = 11; + */ + com.google.storage.v2.AppendObjectSpecOrBuilder getAppendObjectSpecOrBuilder(); + + /** + * + * + *
+   * Required. The offset from the beginning of the object at which the data
+   * should be written.
+   *
+   * In the first `WriteObjectRequest` of a `WriteObject()` action, it
+   * indicates the initial offset for the `Write()` call. The value must be
+   * equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+   * return (0 if this is the first write to the object).
+   *
+   * On subsequent calls, this value must be no larger than the sum of the
+   * first `write_offset` and the sizes of all `data` chunks sent previously on
+   * this stream.
+   *
+   * An invalid value causes an error.
+   * 
+ * + * int64 write_offset = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The writeOffset. + */ + long getWriteOffset(); + + /** + * + * + *
+   * The data to insert. If a crc32c checksum is provided that doesn't match
+   * the checksum computed by the service, the request fails.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + * + * @return Whether the checksummedData field is set. + */ + boolean hasChecksummedData(); + + /** + * + * + *
+   * The data to insert. If a crc32c checksum is provided that doesn't match
+   * the checksum computed by the service, the request fails.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + * + * @return The checksummedData. + */ + com.google.storage.v2.ChecksummedData getChecksummedData(); + + /** + * + * + *
+   * The data to insert. If a crc32c checksum is provided that doesn't match
+   * the checksum computed by the service, the request fails.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + com.google.storage.v2.ChecksummedDataOrBuilder getChecksummedDataOrBuilder(); + + /** + * + * + *
+   * Optional. Checksums for the complete object. If the checksums computed by
+   * the service don't match the specified checksums the call fails. Might only
+   * be provided in the first request or the last request (with finish_write
+   * set).
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + boolean hasObjectChecksums(); + + /** + * + * + *
+   * Optional. Checksums for the complete object. If the checksums computed by
+   * the service don't match the specified checksums the call fails. Might only
+   * be provided in the first request or the last request (with finish_write
+   * set).
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + com.google.storage.v2.ObjectChecksums getObjectChecksums(); + + /** + * + * + *
+   * Optional. Checksums for the complete object. If the checksums computed by
+   * the service don't match the specified checksums the call fails. Might only
+   * be provided in the first request or the last request (with finish_write
+   * set).
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder(); + + /** + * + * + *
+   * Optional. For each `BidiWriteObjectRequest` where `state_lookup` is `true`
+   * or the client closes the stream, the service sends a
+   * `BidiWriteObjectResponse` containing the current persisted size. The
+   * persisted size sent in responses covers all the bytes the server has
+   * persisted thus far and can be used to decide what data is safe for the
+   * client to drop. Note that the object's current size reported by the
+   * `BidiWriteObjectResponse` might lag behind the number of bytes written by
+   * the client. This field is ignored if `finish_write` is set to true.
+   * 
+ * + * bool state_lookup = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The stateLookup. + */ + boolean getStateLookup(); + + /** + * + * + *
+   * Optional. Persists data written on the stream, up to and including the
+   * current message, to permanent storage. This option should be used sparingly
+   * as it might reduce performance. Ongoing writes are periodically persisted
+   * on the server even when `flush` is not set. This field is ignored if
+   * `finish_write` is set to true since there's no need to checkpoint or flush
+   * if this message completes the write.
+   * 
+ * + * bool flush = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The flush. + */ + boolean getFlush(); + + /** + * + * + *
+   * Optional. If `true`, this indicates that the write is complete. Sending any
+   * `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+   * causes an error.
+   * For a non-resumable write (where the `upload_id` was not set in the first
+   * message), it is an error not to set this field in the final message of the
+   * stream.
+   * 
+ * + * bool finish_write = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The finishWrite. + */ + boolean getFinishWrite(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + boolean hasCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.CommonObjectRequestParamsOrBuilder getCommonObjectRequestParamsOrBuilder(); + + com.google.storage.v2.BidiWriteObjectRequest.FirstMessageCase getFirstMessageCase(); + + com.google.storage.v2.BidiWriteObjectRequest.DataCase getDataCase(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectResponse.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectResponse.java new file mode 100644 index 000000000000..e467f9b10de1 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectResponse.java @@ -0,0 +1,1264 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Response message for BidiWriteObject.
+ * 
+ * + * Protobuf type {@code google.storage.v2.BidiWriteObjectResponse} + */ +@com.google.protobuf.Generated +public final class BidiWriteObjectResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.BidiWriteObjectResponse) + BidiWriteObjectResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BidiWriteObjectResponse"); + } + + // Use BidiWriteObjectResponse.newBuilder() to construct. + private BidiWriteObjectResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BidiWriteObjectResponse() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiWriteObjectResponse.class, + com.google.storage.v2.BidiWriteObjectResponse.Builder.class); + } + + private int bitField0_; + private int writeStatusCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object writeStatus_; + + public enum WriteStatusCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + PERSISTED_SIZE(1), + RESOURCE(2), + WRITESTATUS_NOT_SET(0); + private final int value; + + private WriteStatusCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. 
+ * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static WriteStatusCase valueOf(int value) { + return forNumber(value); + } + + public static WriteStatusCase forNumber(int value) { + switch (value) { + case 1: + return PERSISTED_SIZE; + case 2: + return RESOURCE; + case 0: + return WRITESTATUS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public WriteStatusCase getWriteStatusCase() { + return WriteStatusCase.forNumber(writeStatusCase_); + } + + public static final int PERSISTED_SIZE_FIELD_NUMBER = 1; + + /** + * + * + *
+   * The total number of bytes that have been processed for the given object
+   * from all `WriteObject` calls. Only set if the upload has not finalized.
+   * 
+ * + * int64 persisted_size = 1; + * + * @return Whether the persistedSize field is set. + */ + @java.lang.Override + public boolean hasPersistedSize() { + return writeStatusCase_ == 1; + } + + /** + * + * + *
+   * The total number of bytes that have been processed for the given object
+   * from all `WriteObject` calls. Only set if the upload has not finalized.
+   * 
+ * + * int64 persisted_size = 1; + * + * @return The persistedSize. + */ + @java.lang.Override + public long getPersistedSize() { + if (writeStatusCase_ == 1) { + return (java.lang.Long) writeStatus_; + } + return 0L; + } + + public static final int RESOURCE_FIELD_NUMBER = 2; + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return Whether the resource field is set. + */ + @java.lang.Override + public boolean hasResource() { + return writeStatusCase_ == 2; + } + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return The resource. + */ + @java.lang.Override + public com.google.storage.v2.Object getResource() { + if (writeStatusCase_ == 2) { + return (com.google.storage.v2.Object) writeStatus_; + } + return com.google.storage.v2.Object.getDefaultInstance(); + } + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder() { + if (writeStatusCase_ == 2) { + return (com.google.storage.v2.Object) writeStatus_; + } + return com.google.storage.v2.Object.getDefaultInstance(); + } + + public static final int WRITE_HANDLE_FIELD_NUMBER = 3; + private com.google.storage.v2.BidiWriteHandle writeHandle_; + + /** + * + * + *
+   * An optional write handle that is returned periodically in response
+   * messages. Clients should save it for later use in establishing a new stream
+   * if a connection is interrupted.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + * + * @return Whether the writeHandle field is set. + */ + @java.lang.Override + public boolean hasWriteHandle() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * An optional write handle that is returned periodically in response
+   * messages. Clients should save it for later use in establishing a new stream
+   * if a connection is interrupted.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + * + * @return The writeHandle. + */ + @java.lang.Override + public com.google.storage.v2.BidiWriteHandle getWriteHandle() { + return writeHandle_ == null + ? com.google.storage.v2.BidiWriteHandle.getDefaultInstance() + : writeHandle_; + } + + /** + * + * + *
+   * An optional write handle that is returned periodically in response
+   * messages. Clients should save it for later use in establishing a new stream
+   * if a connection is interrupted.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + */ + @java.lang.Override + public com.google.storage.v2.BidiWriteHandleOrBuilder getWriteHandleOrBuilder() { + return writeHandle_ == null + ? com.google.storage.v2.BidiWriteHandle.getDefaultInstance() + : writeHandle_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (writeStatusCase_ == 1) { + output.writeInt64(1, (long) ((java.lang.Long) writeStatus_)); + } + if (writeStatusCase_ == 2) { + output.writeMessage(2, (com.google.storage.v2.Object) writeStatus_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getWriteHandle()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (writeStatusCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 1, (long) ((java.lang.Long) writeStatus_)); + } + if (writeStatusCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.storage.v2.Object) writeStatus_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getWriteHandle()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.BidiWriteObjectResponse)) { + return super.equals(obj); + } + com.google.storage.v2.BidiWriteObjectResponse other = + 
(com.google.storage.v2.BidiWriteObjectResponse) obj; + + if (hasWriteHandle() != other.hasWriteHandle()) return false; + if (hasWriteHandle()) { + if (!getWriteHandle().equals(other.getWriteHandle())) return false; + } + if (!getWriteStatusCase().equals(other.getWriteStatusCase())) return false; + switch (writeStatusCase_) { + case 1: + if (getPersistedSize() != other.getPersistedSize()) return false; + break; + case 2: + if (!getResource().equals(other.getResource())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasWriteHandle()) { + hash = (37 * hash) + WRITE_HANDLE_FIELD_NUMBER; + hash = (53 * hash) + getWriteHandle().hashCode(); + } + switch (writeStatusCase_) { + case 1: + hash = (37 * hash) + PERSISTED_SIZE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getPersistedSize()); + break; + case 2: + hash = (37 * hash) + RESOURCE_FIELD_NUMBER; + hash = (53 * hash) + getResource().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.BidiWriteObjectResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiWriteObjectResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectResponse parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiWriteObjectResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BidiWriteObjectResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiWriteObjectResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiWriteObjectResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BidiWriteObjectResponse parseFrom( 
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BidiWriteObjectResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.BidiWriteObjectResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for BidiWriteObject.
+   * 
+ * + * Protobuf type {@code google.storage.v2.BidiWriteObjectResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.BidiWriteObjectResponse) + com.google.storage.v2.BidiWriteObjectResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BidiWriteObjectResponse.class, + com.google.storage.v2.BidiWriteObjectResponse.Builder.class); + } + + // Construct using com.google.storage.v2.BidiWriteObjectResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetWriteHandleFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (resourceBuilder_ != null) { + resourceBuilder_.clear(); + } + writeHandle_ = null; + if (writeHandleBuilder_ != null) { + writeHandleBuilder_.dispose(); + writeHandleBuilder_ = null; + } + writeStatusCase_ = 0; + writeStatus_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BidiWriteObjectResponse_descriptor; + } + + @java.lang.Override + public 
com.google.storage.v2.BidiWriteObjectResponse getDefaultInstanceForType() { + return com.google.storage.v2.BidiWriteObjectResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteObjectResponse build() { + com.google.storage.v2.BidiWriteObjectResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteObjectResponse buildPartial() { + com.google.storage.v2.BidiWriteObjectResponse result = + new com.google.storage.v2.BidiWriteObjectResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.BidiWriteObjectResponse result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.writeHandle_ = + writeHandleBuilder_ == null ? 
writeHandle_ : writeHandleBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.storage.v2.BidiWriteObjectResponse result) { + result.writeStatusCase_ = writeStatusCase_; + result.writeStatus_ = this.writeStatus_; + if (writeStatusCase_ == 2 && resourceBuilder_ != null) { + result.writeStatus_ = resourceBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.BidiWriteObjectResponse) { + return mergeFrom((com.google.storage.v2.BidiWriteObjectResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.BidiWriteObjectResponse other) { + if (other == com.google.storage.v2.BidiWriteObjectResponse.getDefaultInstance()) return this; + if (other.hasWriteHandle()) { + mergeWriteHandle(other.getWriteHandle()); + } + switch (other.getWriteStatusCase()) { + case PERSISTED_SIZE: + { + setPersistedSize(other.getPersistedSize()); + break; + } + case RESOURCE: + { + mergeResource(other.getResource()); + break; + } + case WRITESTATUS_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + writeStatus_ = input.readInt64(); + writeStatusCase_ = 1; + break; + } // case 8 + case 18: + { + input.readMessage( + internalGetResourceFieldBuilder().getBuilder(), 
extensionRegistry); + writeStatusCase_ = 2; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetWriteHandleFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int writeStatusCase_ = 0; + private java.lang.Object writeStatus_; + + public WriteStatusCase getWriteStatusCase() { + return WriteStatusCase.forNumber(writeStatusCase_); + } + + public Builder clearWriteStatus() { + writeStatusCase_ = 0; + writeStatus_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + * + * + *
+     * The total number of bytes that have been processed for the given object
+     * from all `WriteObject` calls. Only set if the upload has not finalized.
+     * 
+ * + * int64 persisted_size = 1; + * + * @return Whether the persistedSize field is set. + */ + public boolean hasPersistedSize() { + return writeStatusCase_ == 1; + } + + /** + * + * + *
+     * The total number of bytes that have been processed for the given object
+     * from all `WriteObject` calls. Only set if the upload has not finalized.
+     * 
+ * + * int64 persisted_size = 1; + * + * @return The persistedSize. + */ + public long getPersistedSize() { + if (writeStatusCase_ == 1) { + return (java.lang.Long) writeStatus_; + } + return 0L; + } + + /** + * + * + *
+     * The total number of bytes that have been processed for the given object
+     * from all `WriteObject` calls. Only set if the upload has not finalized.
+     * 
+ * + * int64 persisted_size = 1; + * + * @param value The persistedSize to set. + * @return This builder for chaining. + */ + public Builder setPersistedSize(long value) { + + writeStatusCase_ = 1; + writeStatus_ = value; + onChanged(); + return this; + } + + /** + * + * + *
+     * The total number of bytes that have been processed for the given object
+     * from all `WriteObject` calls. Only set if the upload has not finalized.
+     * 
+ * + * int64 persisted_size = 1; + * + * @return This builder for chaining. + */ + public Builder clearPersistedSize() { + if (writeStatusCase_ == 1) { + writeStatusCase_ = 0; + writeStatus_ = null; + onChanged(); + } + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + resourceBuilder_; + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return Whether the resource field is set. + */ + @java.lang.Override + public boolean hasResource() { + return writeStatusCase_ == 2; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return The resource. + */ + @java.lang.Override + public com.google.storage.v2.Object getResource() { + if (resourceBuilder_ == null) { + if (writeStatusCase_ == 2) { + return (com.google.storage.v2.Object) writeStatus_; + } + return com.google.storage.v2.Object.getDefaultInstance(); + } else { + if (writeStatusCase_ == 2) { + return resourceBuilder_.getMessage(); + } + return com.google.storage.v2.Object.getDefaultInstance(); + } + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public Builder setResource(com.google.storage.v2.Object value) { + if (resourceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writeStatus_ = value; + onChanged(); + } else { + resourceBuilder_.setMessage(value); + } + writeStatusCase_ = 2; + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public Builder setResource(com.google.storage.v2.Object.Builder builderForValue) { + if (resourceBuilder_ == null) { + writeStatus_ = builderForValue.build(); + onChanged(); + } else { + resourceBuilder_.setMessage(builderForValue.build()); + } + writeStatusCase_ = 2; + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public Builder mergeResource(com.google.storage.v2.Object value) { + if (resourceBuilder_ == null) { + if (writeStatusCase_ == 2 + && writeStatus_ != com.google.storage.v2.Object.getDefaultInstance()) { + writeStatus_ = + com.google.storage.v2.Object.newBuilder((com.google.storage.v2.Object) writeStatus_) + .mergeFrom(value) + .buildPartial(); + } else { + writeStatus_ = value; + } + onChanged(); + } else { + if (writeStatusCase_ == 2) { + resourceBuilder_.mergeFrom(value); + } else { + resourceBuilder_.setMessage(value); + } + } + writeStatusCase_ = 2; + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public Builder clearResource() { + if (resourceBuilder_ == null) { + if (writeStatusCase_ == 2) { + writeStatusCase_ = 0; + writeStatus_ = null; + onChanged(); + } + } else { + if (writeStatusCase_ == 2) { + writeStatusCase_ = 0; + writeStatus_ = null; + } + resourceBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public com.google.storage.v2.Object.Builder getResourceBuilder() { + return internalGetResourceFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder() { + if ((writeStatusCase_ == 2) && (resourceBuilder_ != null)) { + return resourceBuilder_.getMessageOrBuilder(); + } else { + if (writeStatusCase_ == 2) { + return (com.google.storage.v2.Object) writeStatus_; + } + return com.google.storage.v2.Object.getDefaultInstance(); + } + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + internalGetResourceFieldBuilder() { + if (resourceBuilder_ == null) { + if (!(writeStatusCase_ == 2)) { + writeStatus_ = com.google.storage.v2.Object.getDefaultInstance(); + } + resourceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder>( + (com.google.storage.v2.Object) writeStatus_, getParentForChildren(), isClean()); + writeStatus_ = null; + } + writeStatusCase_ = 2; + onChanged(); + return resourceBuilder_; + } + + private com.google.storage.v2.BidiWriteHandle writeHandle_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiWriteHandle, + com.google.storage.v2.BidiWriteHandle.Builder, + com.google.storage.v2.BidiWriteHandleOrBuilder> + writeHandleBuilder_; + + /** + * + * + *
+     * An optional write handle that is returned periodically in response
+     * messages. Clients should save it for later use in establishing a new stream
+     * if a connection is interrupted.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + * + * @return Whether the writeHandle field is set. + */ + public boolean hasWriteHandle() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * An optional write handle that is returned periodically in response
+     * messages. Clients should save it for later use in establishing a new stream
+     * if a connection is interrupted.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + * + * @return The writeHandle. + */ + public com.google.storage.v2.BidiWriteHandle getWriteHandle() { + if (writeHandleBuilder_ == null) { + return writeHandle_ == null + ? com.google.storage.v2.BidiWriteHandle.getDefaultInstance() + : writeHandle_; + } else { + return writeHandleBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * An optional write handle that is returned periodically in response
+     * messages. Clients should save it for later use in establishing a new stream
+     * if a connection is interrupted.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + */ + public Builder setWriteHandle(com.google.storage.v2.BidiWriteHandle value) { + if (writeHandleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writeHandle_ = value; + } else { + writeHandleBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * An optional write handle that is returned periodically in response
+     * messages. Clients should save it for later use in establishing a new stream
+     * if a connection is interrupted.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + */ + public Builder setWriteHandle(com.google.storage.v2.BidiWriteHandle.Builder builderForValue) { + if (writeHandleBuilder_ == null) { + writeHandle_ = builderForValue.build(); + } else { + writeHandleBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * An optional write handle that is returned periodically in response
+     * messages. Clients should save it for later use in establishing a new stream
+     * if a connection is interrupted.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + */ + public Builder mergeWriteHandle(com.google.storage.v2.BidiWriteHandle value) { + if (writeHandleBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && writeHandle_ != null + && writeHandle_ != com.google.storage.v2.BidiWriteHandle.getDefaultInstance()) { + getWriteHandleBuilder().mergeFrom(value); + } else { + writeHandle_ = value; + } + } else { + writeHandleBuilder_.mergeFrom(value); + } + if (writeHandle_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * An optional write handle that is returned periodically in response
+     * messages. Clients should save it for later use in establishing a new stream
+     * if a connection is interrupted.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + */ + public Builder clearWriteHandle() { + bitField0_ = (bitField0_ & ~0x00000004); + writeHandle_ = null; + if (writeHandleBuilder_ != null) { + writeHandleBuilder_.dispose(); + writeHandleBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * An optional write handle that is returned periodically in response
+     * messages. Clients should save it for later use in establishing a new stream
+     * if a connection is interrupted.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + */ + public com.google.storage.v2.BidiWriteHandle.Builder getWriteHandleBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetWriteHandleFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * An optional write handle that is returned periodically in response
+     * messages. Clients should save it for later use in establishing a new stream
+     * if a connection is interrupted.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + */ + public com.google.storage.v2.BidiWriteHandleOrBuilder getWriteHandleOrBuilder() { + if (writeHandleBuilder_ != null) { + return writeHandleBuilder_.getMessageOrBuilder(); + } else { + return writeHandle_ == null + ? com.google.storage.v2.BidiWriteHandle.getDefaultInstance() + : writeHandle_; + } + } + + /** + * + * + *
+     * An optional write handle that is returned periodically in response
+     * messages. Clients should save it for later use in establishing a new stream
+     * if a connection is interrupted.
+     * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiWriteHandle, + com.google.storage.v2.BidiWriteHandle.Builder, + com.google.storage.v2.BidiWriteHandleOrBuilder> + internalGetWriteHandleFieldBuilder() { + if (writeHandleBuilder_ == null) { + writeHandleBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.BidiWriteHandle, + com.google.storage.v2.BidiWriteHandle.Builder, + com.google.storage.v2.BidiWriteHandleOrBuilder>( + getWriteHandle(), getParentForChildren(), isClean()); + writeHandle_ = null; + } + return writeHandleBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.BidiWriteObjectResponse) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.BidiWriteObjectResponse) + private static final com.google.storage.v2.BidiWriteObjectResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.BidiWriteObjectResponse(); + } + + public static com.google.storage.v2.BidiWriteObjectResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BidiWriteObjectResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.BidiWriteObjectResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectResponseOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectResponseOrBuilder.java new file mode 100644 index 000000000000..3bce65b843b7 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BidiWriteObjectResponseOrBuilder.java @@ -0,0 +1,141 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface BidiWriteObjectResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.BidiWriteObjectResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The total number of bytes that have been processed for the given object
+   * from all `WriteObject` calls. Only set if the upload has not finalized.
+   * 
+ * + * int64 persisted_size = 1; + * + * @return Whether the persistedSize field is set. + */ + boolean hasPersistedSize(); + + /** + * + * + *
+   * The total number of bytes that have been processed for the given object
+   * from all `WriteObject` calls. Only set if the upload has not finalized.
+   * 
+ * + * int64 persisted_size = 1; + * + * @return The persistedSize. + */ + long getPersistedSize(); + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return Whether the resource field is set. + */ + boolean hasResource(); + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return The resource. + */ + com.google.storage.v2.Object getResource(); + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + */ + com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder(); + + /** + * + * + *
+   * An optional write handle that is returned periodically in response
+   * messages. Clients should save it for later use in establishing a new stream
+   * if a connection is interrupted.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + * + * @return Whether the writeHandle field is set. + */ + boolean hasWriteHandle(); + + /** + * + * + *
+   * An optional write handle that is returned periodically in response
+   * messages. Clients should save it for later use in establishing a new stream
+   * if a connection is interrupted.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + * + * @return The writeHandle. + */ + com.google.storage.v2.BidiWriteHandle getWriteHandle(); + + /** + * + * + *
+   * An optional write handle that is returned periodically in response
+   * messages. Clients should save it for later use in establishing a new stream
+   * if a connection is interrupted.
+   * 
+ * + * optional .google.storage.v2.BidiWriteHandle write_handle = 3; + */ + com.google.storage.v2.BidiWriteHandleOrBuilder getWriteHandleOrBuilder(); + + com.google.storage.v2.BidiWriteObjectResponse.WriteStatusCase getWriteStatusCase(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/Bucket.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/Bucket.java new file mode 100644 index 000000000000..a5c8fd8f0f15 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/Bucket.java @@ -0,0 +1,37851 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * A bucket.
+ * 
+ * + * Protobuf type {@code google.storage.v2.Bucket} + */ +@com.google.protobuf.Generated +public final class Bucket extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket) + BucketOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Bucket"); + } + + // Use Bucket.newBuilder() to construct. + private Bucket(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Bucket() { + name_ = ""; + bucketId_ = ""; + etag_ = ""; + project_ = ""; + location_ = ""; + locationType_ = ""; + storageClass_ = ""; + rpo_ = ""; + acl_ = java.util.Collections.emptyList(); + defaultObjectAcl_ = java.util.Collections.emptyList(); + cors_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto.internal_static_google_storage_v2_Bucket_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 15: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.class, com.google.storage.v2.Bucket.Builder.class); + } + + public interface BillingOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Billing) + 
com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. When set to true, Requester Pays is enabled for this bucket.
+     * 
+ * + * bool requester_pays = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The requesterPays. + */ + boolean getRequesterPays(); + } + + /** + * + * + *
+   * Billing properties of a bucket.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Billing} + */ + public static final class Billing extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Billing) + BillingOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Billing"); + } + + // Use Billing.newBuilder() to construct. + private Billing(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Billing() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Billing_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Billing_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Billing.class, + com.google.storage.v2.Bucket.Billing.Builder.class); + } + + public static final int REQUESTER_PAYS_FIELD_NUMBER = 1; + private boolean requesterPays_ = false; + + /** + * + * + *
+     * Optional. When set to true, Requester Pays is enabled for this bucket.
+     * 
+ * + * bool requester_pays = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The requesterPays. + */ + @java.lang.Override + public boolean getRequesterPays() { + return requesterPays_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (requesterPays_ != false) { + output.writeBool(1, requesterPays_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (requesterPays_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, requesterPays_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.Billing)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.Billing other = (com.google.storage.v2.Bucket.Billing) obj; + + if (getRequesterPays() != other.getRequesterPays()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + REQUESTER_PAYS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getRequesterPays()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.storage.v2.Bucket.Billing parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Billing parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Billing parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Billing parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Billing parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Billing parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Billing parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Billing parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Billing parseDelimitedFrom(java.io.InputStream input) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Billing parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Billing parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Billing parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.Billing prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Billing properties of a bucket.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Billing} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Billing) + com.google.storage.v2.Bucket.BillingOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Billing_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Billing_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Billing.class, + com.google.storage.v2.Bucket.Billing.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.Billing.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + requesterPays_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Billing_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Billing getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Billing.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Billing build() { + com.google.storage.v2.Bucket.Billing result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Billing buildPartial() { + com.google.storage.v2.Bucket.Billing result = + new 
com.google.storage.v2.Bucket.Billing(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.Billing result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.requesterPays_ = requesterPays_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.Billing) { + return mergeFrom((com.google.storage.v2.Bucket.Billing) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.Billing other) { + if (other == com.google.storage.v2.Bucket.Billing.getDefaultInstance()) return this; + if (other.getRequesterPays() != false) { + setRequesterPays(other.getRequesterPays()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + requesterPays_ = input.readBool(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private boolean requesterPays_; + + /** + * + * + *
+       * Optional. When set to true, Requester Pays is enabled for this bucket.
+       * 
+ * + * bool requester_pays = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The requesterPays. + */ + @java.lang.Override + public boolean getRequesterPays() { + return requesterPays_; + } + + /** + * + * + *
+       * Optional. When set to true, Requester Pays is enabled for this bucket.
+       * 
+ * + * bool requester_pays = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The requesterPays to set. + * @return This builder for chaining. + */ + public Builder setRequesterPays(boolean value) { + + requesterPays_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. When set to true, Requester Pays is enabled for this bucket.
+       * 
+ * + * bool requester_pays = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearRequesterPays() { + bitField0_ = (bitField0_ & ~0x00000001); + requesterPays_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Billing) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Billing) + private static final com.google.storage.v2.Bucket.Billing DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.Billing(); + } + + public static com.google.storage.v2.Bucket.Billing getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Billing parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Billing getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface CorsOrBuilder + extends + // 
@@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Cors) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. The list of origins eligible to receive CORS response headers.
+     * For more information about origins, see [RFC
+     * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+     * list of origins, and means `any origin`.
+     * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the origin. + */ + java.util.List getOriginList(); + + /** + * + * + *
+     * Optional. The list of origins eligible to receive CORS response headers.
+     * For more information about origins, see [RFC
+     * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+     * list of origins, and means `any origin`.
+     * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of origin. + */ + int getOriginCount(); + + /** + * + * + *
+     * Optional. The list of origins eligible to receive CORS response headers.
+     * For more information about origins, see [RFC
+     * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+     * list of origins, and means `any origin`.
+     * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The origin at the given index. + */ + java.lang.String getOrigin(int index); + + /** + * + * + *
+     * Optional. The list of origins eligible to receive CORS response headers.
+     * For more information about origins, see [RFC
+     * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+     * list of origins, and means `any origin`.
+     * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the origin at the given index. + */ + com.google.protobuf.ByteString getOriginBytes(int index); + + /** + * + * + *
+     * Optional. The list of HTTP methods on which to include CORS response
+     * headers,
+     * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+     * methods, and means "any method".
+     * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the method. + */ + java.util.List getMethodList(); + + /** + * + * + *
+     * Optional. The list of HTTP methods on which to include CORS response
+     * headers,
+     * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+     * methods, and means "any method".
+     * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of method. + */ + int getMethodCount(); + + /** + * + * + *
+     * Optional. The list of HTTP methods on which to include CORS response
+     * headers,
+     * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+     * methods, and means "any method".
+     * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The method at the given index. + */ + java.lang.String getMethod(int index); + + /** + * + * + *
+     * Optional. The list of HTTP methods on which to include CORS response
+     * headers,
+     * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+     * methods, and means "any method".
+     * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the method at the given index. + */ + com.google.protobuf.ByteString getMethodBytes(int index); + + /** + * + * + *
+     * Optional. The list of HTTP headers other than the [simple response
+     * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+     * permission for the user-agent to share across domains.
+     * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the responseHeader. + */ + java.util.List getResponseHeaderList(); + + /** + * + * + *
+     * Optional. The list of HTTP headers other than the [simple response
+     * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+     * permission for the user-agent to share across domains.
+     * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of responseHeader. + */ + int getResponseHeaderCount(); + + /** + * + * + *
+     * Optional. The list of HTTP headers other than the [simple response
+     * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+     * permission for the user-agent to share across domains.
+     * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The responseHeader at the given index. + */ + java.lang.String getResponseHeader(int index); + + /** + * + * + *
+     * Optional. The list of HTTP headers other than the [simple response
+     * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+     * permission for the user-agent to share across domains.
+     * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the responseHeader at the given index. + */ + com.google.protobuf.ByteString getResponseHeaderBytes(int index); + + /** + * + * + *
+     * Optional. The value, in seconds, to return in the [Access-Control-Max-Age
+     * header](https://www.w3.org/TR/cors/#access-control-max-age-response-header)
+     * used in preflight responses.
+     * 
+ * + * int32 max_age_seconds = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The maxAgeSeconds. + */ + int getMaxAgeSeconds(); + } + + /** + * + * + *
+   * Cross-Origin Response sharing (CORS) properties for a bucket.
+   * For more on Cloud Storage and CORS, see
+   * https://cloud.google.com/storage/docs/cross-origin.
+   * For more on CORS in general, see https://tools.ietf.org/html/rfc6454.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Cors} + */ + public static final class Cors extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Cors) + CorsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Cors"); + } + + // Use Cors.newBuilder() to construct. + private Cors(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Cors() { + origin_ = com.google.protobuf.LazyStringArrayList.emptyList(); + method_ = com.google.protobuf.LazyStringArrayList.emptyList(); + responseHeader_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Cors_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Cors_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Cors.class, + com.google.storage.v2.Bucket.Cors.Builder.class); + } + + public static final int ORIGIN_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList origin_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+     * Optional. The list of origins eligible to receive CORS response headers.
+     * For more information about origins, see [RFC
+     * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+     * list of origins, and means `any origin`.
+     * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the origin. + */ + public com.google.protobuf.ProtocolStringList getOriginList() { + return origin_; + } + + /** + * + * + *
+     * Optional. The list of origins eligible to receive CORS response headers.
+     * For more information about origins, see [RFC
+     * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+     * list of origins, and means `any origin`.
+     * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of origin. + */ + public int getOriginCount() { + return origin_.size(); + } + + /** + * + * + *
+     * Optional. The list of origins eligible to receive CORS response headers.
+     * For more information about origins, see [RFC
+     * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+     * list of origins, and means `any origin`.
+     * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The origin at the given index. + */ + public java.lang.String getOrigin(int index) { + return origin_.get(index); + } + + /** + * + * + *
+     * Optional. The list of origins eligible to receive CORS response headers.
+     * For more information about origins, see [RFC
+     * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+     * list of origins, and means `any origin`.
+     * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the origin at the given index. + */ + public com.google.protobuf.ByteString getOriginBytes(int index) { + return origin_.getByteString(index); + } + + public static final int METHOD_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList method_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+     * Optional. The list of HTTP methods on which to include CORS response
+     * headers,
+     * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+     * methods, and means "any method".
+     * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the method. + */ + public com.google.protobuf.ProtocolStringList getMethodList() { + return method_; + } + + /** + * + * + *
+     * Optional. The list of HTTP methods on which to include CORS response
+     * headers,
+     * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+     * methods, and means "any method".
+     * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of method. + */ + public int getMethodCount() { + return method_.size(); + } + + /** + * + * + *
+     * Optional. The list of HTTP methods on which to include CORS response
+     * headers,
+     * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+     * methods, and means "any method".
+     * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The method at the given index. + */ + public java.lang.String getMethod(int index) { + return method_.get(index); + } + + /** + * + * + *
+     * Optional. The list of HTTP methods on which to include CORS response
+     * headers,
+     * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+     * methods, and means "any method".
+     * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the method at the given index. + */ + public com.google.protobuf.ByteString getMethodBytes(int index) { + return method_.getByteString(index); + } + + public static final int RESPONSE_HEADER_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList responseHeader_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+     * Optional. The list of HTTP headers other than the [simple response
+     * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+     * permission for the user-agent to share across domains.
+     * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the responseHeader. + */ + public com.google.protobuf.ProtocolStringList getResponseHeaderList() { + return responseHeader_; + } + + /** + * + * + *
+     * Optional. The list of HTTP headers other than the [simple response
+     * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+     * permission for the user-agent to share across domains.
+     * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of responseHeader. + */ + public int getResponseHeaderCount() { + return responseHeader_.size(); + } + + /** + * + * + *
+     * Optional. The list of HTTP headers other than the [simple response
+     * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+     * permission for the user-agent to share across domains.
+     * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The responseHeader at the given index. + */ + public java.lang.String getResponseHeader(int index) { + return responseHeader_.get(index); + } + + /** + * + * + *
+     * Optional. The list of HTTP headers other than the [simple response
+     * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+     * permission for the user-agent to share across domains.
+     * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the responseHeader at the given index. + */ + public com.google.protobuf.ByteString getResponseHeaderBytes(int index) { + return responseHeader_.getByteString(index); + } + + public static final int MAX_AGE_SECONDS_FIELD_NUMBER = 4; + private int maxAgeSeconds_ = 0; + + /** + * + * + *
+     * Optional. The value, in seconds, to return in the [Access-Control-Max-Age
+     * header](https://www.w3.org/TR/cors/#access-control-max-age-response-header)
+     * used in preflight responses.
+     * 
+ * + * int32 max_age_seconds = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The maxAgeSeconds. + */ + @java.lang.Override + public int getMaxAgeSeconds() { + return maxAgeSeconds_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < origin_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, origin_.getRaw(i)); + } + for (int i = 0; i < method_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, method_.getRaw(i)); + } + for (int i = 0; i < responseHeader_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, responseHeader_.getRaw(i)); + } + if (maxAgeSeconds_ != 0) { + output.writeInt32(4, maxAgeSeconds_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < origin_.size(); i++) { + dataSize += computeStringSizeNoTag(origin_.getRaw(i)); + } + size += dataSize; + size += 1 * getOriginList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < method_.size(); i++) { + dataSize += computeStringSizeNoTag(method_.getRaw(i)); + } + size += dataSize; + size += 1 * getMethodList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < responseHeader_.size(); i++) { + dataSize += computeStringSizeNoTag(responseHeader_.getRaw(i)); + } + size += dataSize; + size += 1 * getResponseHeaderList().size(); + } + if (maxAgeSeconds_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, maxAgeSeconds_); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.Cors)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.Cors other = (com.google.storage.v2.Bucket.Cors) obj; + + if (!getOriginList().equals(other.getOriginList())) return false; + if (!getMethodList().equals(other.getMethodList())) return false; + if (!getResponseHeaderList().equals(other.getResponseHeaderList())) return false; + if (getMaxAgeSeconds() != other.getMaxAgeSeconds()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getOriginCount() > 0) { + hash = (37 * hash) + ORIGIN_FIELD_NUMBER; + hash = (53 * hash) + getOriginList().hashCode(); + } + if (getMethodCount() > 0) { + hash = (37 * hash) + METHOD_FIELD_NUMBER; + hash = (53 * hash) + getMethodList().hashCode(); + } + if (getResponseHeaderCount() > 0) { + hash = (37 * hash) + RESPONSE_HEADER_FIELD_NUMBER; + hash = (53 * hash) + getResponseHeaderList().hashCode(); + } + hash = (37 * hash) + MAX_AGE_SECONDS_FIELD_NUMBER; + hash = (53 * hash) + getMaxAgeSeconds(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.Cors parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Cors parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Cors parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Cors parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Cors parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Cors parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Cors parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Cors parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Cors parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Cors parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.storage.v2.Bucket.Cors parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Cors parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.Cors prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Cross-Origin Response sharing (CORS) properties for a bucket.
+     * For more on Cloud Storage and CORS, see
+     * https://cloud.google.com/storage/docs/cross-origin.
+     * For more on CORS in general, see https://tools.ietf.org/html/rfc6454.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Cors} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Cors) + com.google.storage.v2.Bucket.CorsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Cors_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Cors_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Cors.class, + com.google.storage.v2.Bucket.Cors.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.Cors.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + origin_ = com.google.protobuf.LazyStringArrayList.emptyList(); + method_ = com.google.protobuf.LazyStringArrayList.emptyList(); + responseHeader_ = com.google.protobuf.LazyStringArrayList.emptyList(); + maxAgeSeconds_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Cors_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Cors getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Cors.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Cors build() { + com.google.storage.v2.Bucket.Cors result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return 
result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Cors buildPartial() { + com.google.storage.v2.Bucket.Cors result = new com.google.storage.v2.Bucket.Cors(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.Cors result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + origin_.makeImmutable(); + result.origin_ = origin_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + method_.makeImmutable(); + result.method_ = method_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + responseHeader_.makeImmutable(); + result.responseHeader_ = responseHeader_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.maxAgeSeconds_ = maxAgeSeconds_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.Cors) { + return mergeFrom((com.google.storage.v2.Bucket.Cors) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.Cors other) { + if (other == com.google.storage.v2.Bucket.Cors.getDefaultInstance()) return this; + if (!other.origin_.isEmpty()) { + if (origin_.isEmpty()) { + origin_ = other.origin_; + bitField0_ |= 0x00000001; + } else { + ensureOriginIsMutable(); + origin_.addAll(other.origin_); + } + onChanged(); + } + if (!other.method_.isEmpty()) { + if (method_.isEmpty()) { + method_ = other.method_; + bitField0_ |= 0x00000002; + } else { + ensureMethodIsMutable(); + method_.addAll(other.method_); + } + onChanged(); + } + if (!other.responseHeader_.isEmpty()) { + if (responseHeader_.isEmpty()) { + responseHeader_ = other.responseHeader_; + bitField0_ |= 0x00000004; + } else { + ensureResponseHeaderIsMutable(); + responseHeader_.addAll(other.responseHeader_); + } + onChanged(); + } + if (other.getMaxAgeSeconds() != 0) { + 
setMaxAgeSeconds(other.getMaxAgeSeconds()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureOriginIsMutable(); + origin_.add(s); + break; + } // case 10 + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureMethodIsMutable(); + method_.add(s); + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureResponseHeaderIsMutable(); + responseHeader_.add(s); + break; + } // case 26 + case 32: + { + maxAgeSeconds_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList origin_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureOriginIsMutable() { + if (!origin_.isModifiable()) { + origin_ = new com.google.protobuf.LazyStringArrayList(origin_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+       * Optional. The list of origins eligible to receive CORS response headers.
+       * For more information about origins, see [RFC
+       * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+       * list of origins, and means `any origin`.
+       * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the origin. + */ + public com.google.protobuf.ProtocolStringList getOriginList() { + origin_.makeImmutable(); + return origin_; + } + + /** + * + * + *
+       * Optional. The list of origins eligible to receive CORS response headers.
+       * For more information about origins, see [RFC
+       * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+       * list of origins, and means `any origin`.
+       * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of origin. + */ + public int getOriginCount() { + return origin_.size(); + } + + /** + * + * + *
+       * Optional. The list of origins eligible to receive CORS response headers.
+       * For more information about origins, see [RFC
+       * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+       * list of origins, and means `any origin`.
+       * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The origin at the given index. + */ + public java.lang.String getOrigin(int index) { + return origin_.get(index); + } + + /** + * + * + *
+       * Optional. The list of origins eligible to receive CORS response headers.
+       * For more information about origins, see [RFC
+       * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+       * list of origins, and means `any origin`.
+       * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the origin at the given index. + */ + public com.google.protobuf.ByteString getOriginBytes(int index) { + return origin_.getByteString(index); + } + + /** + * + * + *
+       * Optional. The list of origins eligible to receive CORS response headers.
+       * For more information about origins, see [RFC
+       * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+       * list of origins, and means `any origin`.
+       * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The origin to set. + * @return This builder for chaining. + */ + public Builder setOrigin(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureOriginIsMutable(); + origin_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The list of origins eligible to receive CORS response headers.
+       * For more information about origins, see [RFC
+       * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+       * list of origins, and means `any origin`.
+       * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The origin to add. + * @return This builder for chaining. + */ + public Builder addOrigin(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureOriginIsMutable(); + origin_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The list of origins eligible to receive CORS response headers.
+       * For more information about origins, see [RFC
+       * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+       * list of origins, and means `any origin`.
+       * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The origin to add. + * @return This builder for chaining. + */ + public Builder addAllOrigin(java.lang.Iterable values) { + ensureOriginIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, origin_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The list of origins eligible to receive CORS response headers.
+       * For more information about origins, see [RFC
+       * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+       * list of origins, and means `any origin`.
+       * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearOrigin() { + origin_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The list of origins eligible to receive CORS response headers.
+       * For more information about origins, see [RFC
+       * 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the
+       * list of origins, and means `any origin`.
+       * 
+ * + * repeated string origin = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the origin to add. + * @return This builder for chaining. + */ + public Builder addOriginBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureOriginIsMutable(); + origin_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList method_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureMethodIsMutable() { + if (!method_.isModifiable()) { + method_ = new com.google.protobuf.LazyStringArrayList(method_); + } + bitField0_ |= 0x00000002; + } + + /** + * + * + *
+       * Optional. The list of HTTP methods on which to include CORS response
+       * headers,
+       * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+       * methods, and means "any method".
+       * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the method. + */ + public com.google.protobuf.ProtocolStringList getMethodList() { + method_.makeImmutable(); + return method_; + } + + /** + * + * + *
+       * Optional. The list of HTTP methods on which to include CORS response
+       * headers,
+       * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+       * methods, and means "any method".
+       * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of method. + */ + public int getMethodCount() { + return method_.size(); + } + + /** + * + * + *
+       * Optional. The list of HTTP methods on which to include CORS response
+       * headers,
+       * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+       * methods, and means "any method".
+       * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The method at the given index. + */ + public java.lang.String getMethod(int index) { + return method_.get(index); + } + + /** + * + * + *
+       * Optional. The list of HTTP methods on which to include CORS response
+       * headers,
+       * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+       * methods, and means "any method".
+       * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the method at the given index. + */ + public com.google.protobuf.ByteString getMethodBytes(int index) { + return method_.getByteString(index); + } + + /** + * + * + *
+       * Optional. The list of HTTP methods on which to include CORS response
+       * headers,
+       * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+       * methods, and means "any method".
+       * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The method to set. + * @return This builder for chaining. + */ + public Builder setMethod(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMethodIsMutable(); + method_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The list of HTTP methods on which to include CORS response
+       * headers,
+       * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+       * methods, and means "any method".
+       * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The method to add. + * @return This builder for chaining. + */ + public Builder addMethod(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMethodIsMutable(); + method_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The list of HTTP methods on which to include CORS response
+       * headers,
+       * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+       * methods, and means "any method".
+       * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The method to add. + * @return This builder for chaining. + */ + public Builder addAllMethod(java.lang.Iterable values) { + ensureMethodIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, method_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The list of HTTP methods on which to include CORS response
+       * headers,
+       * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+       * methods, and means "any method".
+       * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearMethod() { + method_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The list of HTTP methods on which to include CORS response
+       * headers,
+       * (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of
+       * methods, and means "any method".
+       * 
+ * + * repeated string method = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the method to add. + * @return This builder for chaining. + */ + public Builder addMethodBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureMethodIsMutable(); + method_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList responseHeader_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureResponseHeaderIsMutable() { + if (!responseHeader_.isModifiable()) { + responseHeader_ = new com.google.protobuf.LazyStringArrayList(responseHeader_); + } + bitField0_ |= 0x00000004; + } + + /** + * + * + *
+       * Optional. The list of HTTP headers other than the [simple response
+       * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+       * permission for the user-agent to share across domains.
+       * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the responseHeader. + */ + public com.google.protobuf.ProtocolStringList getResponseHeaderList() { + responseHeader_.makeImmutable(); + return responseHeader_; + } + + /** + * + * + *
+       * Optional. The list of HTTP headers other than the [simple response
+       * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+       * permission for the user-agent to share across domains.
+       * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of responseHeader. + */ + public int getResponseHeaderCount() { + return responseHeader_.size(); + } + + /** + * + * + *
+       * Optional. The list of HTTP headers other than the [simple response
+       * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+       * permission for the user-agent to share across domains.
+       * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The responseHeader at the given index. + */ + public java.lang.String getResponseHeader(int index) { + return responseHeader_.get(index); + } + + /** + * + * + *
+       * Optional. The list of HTTP headers other than the [simple response
+       * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+       * permission for the user-agent to share across domains.
+       * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the responseHeader at the given index. + */ + public com.google.protobuf.ByteString getResponseHeaderBytes(int index) { + return responseHeader_.getByteString(index); + } + + /** + * + * + *
+       * Optional. The list of HTTP headers other than the [simple response
+       * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+       * permission for the user-agent to share across domains.
+       * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The responseHeader to set. + * @return This builder for chaining. + */ + public Builder setResponseHeader(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureResponseHeaderIsMutable(); + responseHeader_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The list of HTTP headers other than the [simple response
+       * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+       * permission for the user-agent to share across domains.
+       * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The responseHeader to add. + * @return This builder for chaining. + */ + public Builder addResponseHeader(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureResponseHeaderIsMutable(); + responseHeader_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The list of HTTP headers other than the [simple response
+       * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+       * permission for the user-agent to share across domains.
+       * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The responseHeader to add. + * @return This builder for chaining. + */ + public Builder addAllResponseHeader(java.lang.Iterable values) { + ensureResponseHeaderIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, responseHeader_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The list of HTTP headers other than the [simple response
+       * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+       * permission for the user-agent to share across domains.
+       * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearResponseHeader() { + responseHeader_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The list of HTTP headers other than the [simple response
+       * headers](https://www.w3.org/TR/cors/#simple-response-headers) to give
+       * permission for the user-agent to share across domains.
+       * 
+ * + * repeated string response_header = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the responseHeader to add. + * @return This builder for chaining. + */ + public Builder addResponseHeaderBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureResponseHeaderIsMutable(); + responseHeader_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private int maxAgeSeconds_; + + /** + * + * + *
+       * Optional. The value, in seconds, to return in the [Access-Control-Max-Age
+       * header](https://www.w3.org/TR/cors/#access-control-max-age-response-header)
+       * used in preflight responses.
+       * 
+ * + * int32 max_age_seconds = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The maxAgeSeconds. + */ + @java.lang.Override + public int getMaxAgeSeconds() { + return maxAgeSeconds_; + } + + /** + * + * + *
+       * Optional. The value, in seconds, to return in the [Access-Control-Max-Age
+       * header](https://www.w3.org/TR/cors/#access-control-max-age-response-header)
+       * used in preflight responses.
+       * 
+ * + * int32 max_age_seconds = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The maxAgeSeconds to set. + * @return This builder for chaining. + */ + public Builder setMaxAgeSeconds(int value) { + + maxAgeSeconds_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The value, in seconds, to return in the [Access-Control-Max-Age
+       * header](https://www.w3.org/TR/cors/#access-control-max-age-response-header)
+       * used in preflight responses.
+       * 
+ * + * int32 max_age_seconds = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearMaxAgeSeconds() { + bitField0_ = (bitField0_ & ~0x00000008); + maxAgeSeconds_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Cors) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Cors) + private static final com.google.storage.v2.Bucket.Cors DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.Cors(); + } + + public static com.google.storage.v2.Bucket.Cors getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Cors parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Cors getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface EncryptionOrBuilder + extends + // 
@@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Encryption) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. The name of the Cloud KMS key that is used to encrypt objects
+     * inserted into this bucket, if no encryption method is specified.
+     * 
+ * + * + * string default_kms_key = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The defaultKmsKey. + */ + java.lang.String getDefaultKmsKey(); + + /** + * + * + *
+     * Optional. The name of the Cloud KMS key that is used to encrypt objects
+     * inserted into this bucket, if no encryption method is specified.
+     * 
+ * + * + * string default_kms_key = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for defaultKmsKey. + */ + com.google.protobuf.ByteString getDefaultKmsKeyBytes(); + + /** + * + * + *
+     * Optional. If omitted, then new objects with GMEK encryption-type is
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the googleManagedEncryptionEnforcementConfig field is set. + */ + boolean hasGoogleManagedEncryptionEnforcementConfig(); + + /** + * + * + *
+     * Optional. If omitted, then new objects with GMEK encryption-type is
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The googleManagedEncryptionEnforcementConfig. + */ + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + getGoogleManagedEncryptionEnforcementConfig(); + + /** + * + * + *
+     * Optional. If omitted, then new objects with GMEK encryption-type is
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfigOrBuilder + getGoogleManagedEncryptionEnforcementConfigOrBuilder(); + + /** + * + * + *
+     * Optional. If omitted, then new objects with CMEK encryption-type is
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customerManagedEncryptionEnforcementConfig field is set. + */ + boolean hasCustomerManagedEncryptionEnforcementConfig(); + + /** + * + * + *
+     * Optional. If omitted, then new objects with CMEK encryption-type is
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customerManagedEncryptionEnforcementConfig. + */ + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + getCustomerManagedEncryptionEnforcementConfig(); + + /** + * + * + *
+     * Optional. If omitted, then new objects with CMEK encryption-type is
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfigOrBuilder + getCustomerManagedEncryptionEnforcementConfigOrBuilder(); + + /** + * + * + *
+     * Optional. If omitted, then new objects with CSEK encryption-type is
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customerSuppliedEncryptionEnforcementConfig field is set. + */ + boolean hasCustomerSuppliedEncryptionEnforcementConfig(); + + /** + * + * + *
+     * Optional. If omitted, then new objects with CSEK encryption-type is
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customerSuppliedEncryptionEnforcementConfig. + */ + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + getCustomerSuppliedEncryptionEnforcementConfig(); + + /** + * + * + *
+     * Optional. If omitted, then new objects with CSEK encryption-type is
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfigOrBuilder + getCustomerSuppliedEncryptionEnforcementConfigOrBuilder(); + } + + /** + * + * + *
+   * Encryption properties of a bucket.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Encryption} + */ + public static final class Encryption extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Encryption) + EncryptionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Encryption"); + } + + // Use Encryption.newBuilder() to construct. + private Encryption(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Encryption() { + defaultKmsKey_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Encryption.class, + com.google.storage.v2.Bucket.Encryption.Builder.class); + } + + public interface GoogleManagedEncryptionEnforcementConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * Restriction mode for google-managed encryption for new objects within
+       * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * google-managed encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using google-managed
+       * encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return Whether the restrictionMode field is set. + */ + boolean hasRestrictionMode(); + + /** + * + * + *
+       * Restriction mode for google-managed encryption for new objects within
+       * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * google-managed encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using google-managed
+       * encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return The restrictionMode. + */ + java.lang.String getRestrictionMode(); + + /** + * + * + *
+       * Restriction mode for google-managed encryption for new objects within
+       * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * google-managed encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using google-managed
+       * encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return The bytes for restrictionMode. + */ + com.google.protobuf.ByteString getRestrictionModeBytes(); + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return Whether the effectiveTime field is set. + */ + boolean hasEffectiveTime(); + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return The effectiveTime. + */ + com.google.protobuf.Timestamp getEffectiveTime(); + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder(); + } + + /** + * + * + *
+     * Google Managed Encryption (GMEK) enforcement config of a bucket.
+     * 
+ * + * Protobuf type {@code + * google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig} + */ + public static final class GoogleManagedEncryptionEnforcementConfig + extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig) + GoogleManagedEncryptionEnforcementConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GoogleManagedEncryptionEnforcementConfig"); + } + + // Use GoogleManagedEncryptionEnforcementConfig.newBuilder() to construct. + private GoogleManagedEncryptionEnforcementConfig( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GoogleManagedEncryptionEnforcementConfig() { + restrictionMode_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_GoogleManagedEncryptionEnforcementConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_GoogleManagedEncryptionEnforcementConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .class, + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .Builder.class); + } + + private int bitField0_; + public static final int RESTRICTION_MODE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object restrictionMode_ = ""; + + /** + * + * 
+ *
+       * Restriction mode for google-managed encryption for new objects within
+       * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * google-managed encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using google-managed
+       * encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return Whether the restrictionMode field is set. + */ + @java.lang.Override + public boolean hasRestrictionMode() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Restriction mode for google-managed encryption for new objects within
+       * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * google-managed encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using google-managed
+       * encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return The restrictionMode. + */ + @java.lang.Override + public java.lang.String getRestrictionMode() { + java.lang.Object ref = restrictionMode_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + restrictionMode_ = s; + return s; + } + } + + /** + * + * + *
+       * Restriction mode for google-managed encryption for new objects within
+       * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * google-managed encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using google-managed
+       * encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return The bytes for restrictionMode. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRestrictionModeBytes() { + java.lang.Object ref = restrictionMode_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + restrictionMode_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EFFECTIVE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp effectiveTime_; + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return Whether the effectiveTime field is set. + */ + @java.lang.Override + public boolean hasEffectiveTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return The effectiveTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEffectiveTime() { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder() { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getEffectiveTime()); + } + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, restrictionMode_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getEffectiveTime()); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, restrictionMode_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig other = + (com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig) obj; + + if (hasRestrictionMode() != other.hasRestrictionMode()) return false; + if (hasRestrictionMode()) { + if 
(!getRestrictionMode().equals(other.getRestrictionMode())) return false; + } + if (hasEffectiveTime() != other.hasEffectiveTime()) return false; + if (hasEffectiveTime()) { + if (!getEffectiveTime().equals(other.getEffectiveTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRestrictionMode()) { + hash = (37 * hash) + RESTRICTION_MODE_FIELD_NUMBER; + hash = (53 * hash) + getRestrictionMode().hashCode(); + } + if (hasEffectiveTime()) { + hash = (37 * hash) + EFFECTIVE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEffectiveTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+       * Google Managed Encryption (GMEK) enforcement config of a bucket.
+       * 
+ * + * Protobuf type {@code + * google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig) + com.google.storage.v2.Bucket.Encryption + .GoogleManagedEncryptionEnforcementConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_GoogleManagedEncryptionEnforcementConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_GoogleManagedEncryptionEnforcementConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .class, + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .Builder.class); + } + + // Construct using + // com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetEffectiveTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + restrictionMode_ = ""; + effectiveTime_ = null; + if (effectiveTimeBuilder_ != null) { + effectiveTimeBuilder_.dispose(); + effectiveTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_GoogleManagedEncryptionEnforcementConfig_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + build() { + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + buildPartial() { + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig result = + new com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig( + this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.restrictionMode_ = restrictionMode_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.effectiveTime_ = + effectiveTimeBuilder_ == null ? 
effectiveTime_ : effectiveTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig) { + return mergeFrom( + (com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + other) { + if (other + == com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .getDefaultInstance()) return this; + if (other.hasRestrictionMode()) { + restrictionMode_ = other.restrictionMode_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasEffectiveTime()) { + mergeEffectiveTime(other.getEffectiveTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + input.readMessage( + internalGetEffectiveTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + restrictionMode_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) 
+ } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object restrictionMode_ = ""; + + /** + * + * + *
+         * Restriction mode for google-managed encryption for new objects within
+         * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * google-managed encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using google-managed
+         * encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @return Whether the restrictionMode field is set. + */ + public boolean hasRestrictionMode() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+         * Restriction mode for google-managed encryption for new objects within
+         * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * google-managed encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using google-managed
+         * encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @return The restrictionMode. + */ + public java.lang.String getRestrictionMode() { + java.lang.Object ref = restrictionMode_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + restrictionMode_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+         * Restriction mode for google-managed encryption for new objects within
+         * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * google-managed encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using google-managed
+         * encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @return The bytes for restrictionMode. + */ + public com.google.protobuf.ByteString getRestrictionModeBytes() { + java.lang.Object ref = restrictionMode_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + restrictionMode_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+         * Restriction mode for google-managed encryption for new objects within
+         * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * google-managed encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using google-managed
+         * encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @param value The restrictionMode to set. + * @return This builder for chaining. + */ + public Builder setRestrictionMode(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + restrictionMode_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Restriction mode for google-managed encryption for new objects within
+         * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * google-managed encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using google-managed
+         * encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @return This builder for chaining. + */ + public Builder clearRestrictionMode() { + restrictionMode_ = getDefaultInstance().getRestrictionMode(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+         * Restriction mode for google-managed encryption for new objects within
+         * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * google-managed encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using google-managed
+         * encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @param value The bytes for restrictionMode to set. + * @return This builder for chaining. + */ + public Builder setRestrictionModeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + restrictionMode_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp effectiveTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + effectiveTimeBuilder_; + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return Whether the effectiveTime field is set. + */ + public boolean hasEffectiveTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return The effectiveTime. + */ + public com.google.protobuf.Timestamp getEffectiveTime() { + if (effectiveTimeBuilder_ == null) { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } else { + return effectiveTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder setEffectiveTime(com.google.protobuf.Timestamp value) { + if (effectiveTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + effectiveTime_ = value; + } else { + effectiveTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder setEffectiveTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (effectiveTimeBuilder_ == null) { + effectiveTime_ = builderForValue.build(); + } else { + effectiveTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder mergeEffectiveTime(com.google.protobuf.Timestamp value) { + if (effectiveTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && effectiveTime_ != null + && effectiveTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEffectiveTimeBuilder().mergeFrom(value); + } else { + effectiveTime_ = value; + } + } else { + effectiveTimeBuilder_.mergeFrom(value); + } + if (effectiveTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder clearEffectiveTime() { + bitField0_ = (bitField0_ & ~0x00000002); + effectiveTime_ = null; + if (effectiveTimeBuilder_ != null) { + effectiveTimeBuilder_.dispose(); + effectiveTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getEffectiveTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetEffectiveTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder() { + if (effectiveTimeBuilder_ != null) { + return effectiveTimeBuilder_.getMessageOrBuilder(); + } else { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEffectiveTimeFieldBuilder() { + if (effectiveTimeBuilder_ == null) { + effectiveTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEffectiveTime(), getParentForChildren(), isClean()); + effectiveTime_ = null; + } + return effectiveTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig) + private static final com.google.storage.v2.Bucket.Encryption + .GoogleManagedEncryptionEnforcementConfig + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig(); + } + + public static com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GoogleManagedEncryptionEnforcementConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + 
.setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser + getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface CustomerManagedEncryptionEnforcementConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * Restriction mode for customer-managed encryption for new objects within
+       * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * customer-managed encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using
+       * customer-managed encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return Whether the restrictionMode field is set. + */ + boolean hasRestrictionMode(); + + /** + * + * + *
+       * Restriction mode for customer-managed encryption for new objects within
+       * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * customer-managed encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using
+       * customer-managed encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return The restrictionMode. + */ + java.lang.String getRestrictionMode(); + + /** + * + * + *
+       * Restriction mode for customer-managed encryption for new objects within
+       * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * customer-managed encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using
+       * customer-managed encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return The bytes for restrictionMode. + */ + com.google.protobuf.ByteString getRestrictionModeBytes(); + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return Whether the effectiveTime field is set. + */ + boolean hasEffectiveTime(); + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return The effectiveTime. + */ + com.google.protobuf.Timestamp getEffectiveTime(); + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder(); + } + + /** + * + * + *
+     * Customer Managed Encryption (CMEK) enforcement config of a bucket.
+     * 
+ * + * Protobuf type {@code + * google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig} + */ + public static final class CustomerManagedEncryptionEnforcementConfig + extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig) + CustomerManagedEncryptionEnforcementConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CustomerManagedEncryptionEnforcementConfig"); + } + + // Use CustomerManagedEncryptionEnforcementConfig.newBuilder() to construct. + private CustomerManagedEncryptionEnforcementConfig( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CustomerManagedEncryptionEnforcementConfig() { + restrictionMode_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_CustomerManagedEncryptionEnforcementConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_CustomerManagedEncryptionEnforcementConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .class, + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .Builder.class); + } + + private int bitField0_; + public static final int RESTRICTION_MODE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object 
restrictionMode_ = ""; + + /** + * + * + *
+       * Restriction mode for customer-managed encryption for new objects within
+       * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * customer-managed encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using
+       * customer-managed encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return Whether the restrictionMode field is set. + */ + @java.lang.Override + public boolean hasRestrictionMode() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Restriction mode for customer-managed encryption for new objects within
+       * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * customer-managed encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using
+       * customer-managed encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return The restrictionMode. + */ + @java.lang.Override + public java.lang.String getRestrictionMode() { + java.lang.Object ref = restrictionMode_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + restrictionMode_ = s; + return s; + } + } + + /** + * + * + *
+       * Restriction mode for customer-managed encryption for new objects within
+       * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * customer-managed encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using
+       * customer-managed encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return The bytes for restrictionMode. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRestrictionModeBytes() { + java.lang.Object ref = restrictionMode_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + restrictionMode_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EFFECTIVE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp effectiveTime_; + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return Whether the effectiveTime field is set. + */ + @java.lang.Override + public boolean hasEffectiveTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return The effectiveTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEffectiveTime() { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder() { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getEffectiveTime()); + } + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, restrictionMode_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getEffectiveTime()); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, restrictionMode_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig other = + (com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig) + obj; + + if (hasRestrictionMode() != other.hasRestrictionMode()) return false; + if (hasRestrictionMode()) { + if 
(!getRestrictionMode().equals(other.getRestrictionMode())) return false; + } + if (hasEffectiveTime() != other.hasEffectiveTime()) return false; + if (hasEffectiveTime()) { + if (!getEffectiveTime().equals(other.getEffectiveTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRestrictionMode()) { + hash = (37 * hash) + RESTRICTION_MODE_FIELD_NUMBER; + hash = (53 * hash) + getRestrictionMode().hashCode(); + } + if (hasEffectiveTime()) { + hash = (37 * hash) + EFFECTIVE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEffectiveTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig 
+ parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+       * Customer Managed Encryption (CMEK) enforcement config of a bucket.
+       * 
+ * + * Protobuf type {@code + * google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig) + com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_CustomerManagedEncryptionEnforcementConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_CustomerManagedEncryptionEnforcementConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .class, + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .Builder.class); + } + + // Construct using + // com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetEffectiveTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + restrictionMode_ = ""; + effectiveTime_ = null; + if (effectiveTimeBuilder_ != null) { + effectiveTimeBuilder_.dispose(); + effectiveTimeBuilder_ = null; + } + return this; + } + + 
@java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_CustomerManagedEncryptionEnforcementConfig_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + build() { + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + buildPartial() { + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + result = + new com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.restrictionMode_ = restrictionMode_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.effectiveTime_ = + effectiveTimeBuilder_ == null ? 
effectiveTime_ : effectiveTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig) { + return mergeFrom( + (com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + other) { + if (other + == com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .getDefaultInstance()) return this; + if (other.hasRestrictionMode()) { + restrictionMode_ = other.restrictionMode_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasEffectiveTime()) { + mergeEffectiveTime(other.getEffectiveTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + input.readMessage( + internalGetEffectiveTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + restrictionMode_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // 
switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object restrictionMode_ = ""; + + /** + * + * + *
+         * Restriction mode for customer-managed encryption for new objects within
+         * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * customer-managed encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using
+         * customer-managed encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @return Whether the restrictionMode field is set. + */ + public boolean hasRestrictionMode() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+         * Restriction mode for customer-managed encryption for new objects within
+         * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * customer-managed encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using
+         * customer-managed encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @return The restrictionMode. + */ + public java.lang.String getRestrictionMode() { + java.lang.Object ref = restrictionMode_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + restrictionMode_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+         * Restriction mode for customer-managed encryption for new objects within
+         * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * customer-managed encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using
+         * customer-managed encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @return The bytes for restrictionMode. + */ + public com.google.protobuf.ByteString getRestrictionModeBytes() { + java.lang.Object ref = restrictionMode_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + restrictionMode_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+         * Restriction mode for customer-managed encryption for new objects within
+         * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * customer-managed encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using
+         * customer-managed encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @param value The restrictionMode to set. + * @return This builder for chaining. + */ + public Builder setRestrictionMode(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + restrictionMode_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Restriction mode for customer-managed encryption for new objects within
+         * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * customer-managed encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using
+         * customer-managed encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @return This builder for chaining. + */ + public Builder clearRestrictionMode() { + restrictionMode_ = getDefaultInstance().getRestrictionMode(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+         * Restriction mode for customer-managed encryption for new objects within
+         * the bucket. Valid values are: `NotRestricted` and `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * customer-managed encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using
+         * customer-managed encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @param value The bytes for restrictionMode to set. + * @return This builder for chaining. + */ + public Builder setRestrictionModeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + restrictionMode_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp effectiveTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + effectiveTimeBuilder_; + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return Whether the effectiveTime field is set. + */ + public boolean hasEffectiveTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return The effectiveTime. + */ + public com.google.protobuf.Timestamp getEffectiveTime() { + if (effectiveTimeBuilder_ == null) { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } else { + return effectiveTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder setEffectiveTime(com.google.protobuf.Timestamp value) { + if (effectiveTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + effectiveTime_ = value; + } else { + effectiveTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder setEffectiveTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (effectiveTimeBuilder_ == null) { + effectiveTime_ = builderForValue.build(); + } else { + effectiveTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder mergeEffectiveTime(com.google.protobuf.Timestamp value) { + if (effectiveTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && effectiveTime_ != null + && effectiveTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEffectiveTimeBuilder().mergeFrom(value); + } else { + effectiveTime_ = value; + } + } else { + effectiveTimeBuilder_.mergeFrom(value); + } + if (effectiveTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder clearEffectiveTime() { + bitField0_ = (bitField0_ & ~0x00000002); + effectiveTime_ = null; + if (effectiveTimeBuilder_ != null) { + effectiveTimeBuilder_.dispose(); + effectiveTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getEffectiveTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetEffectiveTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder() { + if (effectiveTimeBuilder_ != null) { + return effectiveTimeBuilder_.getMessageOrBuilder(); + } else { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEffectiveTimeFieldBuilder() { + if (effectiveTimeBuilder_ == null) { + effectiveTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEffectiveTime(), getParentForChildren(), isClean()); + effectiveTime_ = null; + } + return effectiveTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig) + private static final com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig(); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CustomerManagedEncryptionEnforcementConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() 
+ .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser + parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser + getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface CustomerSuppliedEncryptionEnforcementConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * Restriction mode for customer-supplied encryption for new objects
+       * within the bucket. Valid values are: `NotRestricted` and
+       * `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * customer-supplied encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using
+       * customer-supplied encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return Whether the restrictionMode field is set. + */ + boolean hasRestrictionMode(); + + /** + * + * + *
+       * Restriction mode for customer-supplied encryption for new objects
+       * within the bucket. Valid values are: `NotRestricted` and
+       * `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * customer-supplied encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using
+       * customer-supplied encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return The restrictionMode. + */ + java.lang.String getRestrictionMode(); + + /** + * + * + *
+       * Restriction mode for customer-supplied encryption for new objects
+       * within the bucket. Valid values are: `NotRestricted` and
+       * `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * customer-supplied encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using
+       * customer-supplied encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return The bytes for restrictionMode. + */ + com.google.protobuf.ByteString getRestrictionModeBytes(); + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return Whether the effectiveTime field is set. + */ + boolean hasEffectiveTime(); + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return The effectiveTime. + */ + com.google.protobuf.Timestamp getEffectiveTime(); + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder(); + } + + /** + * + * + *
+     * Customer Supplied Encryption (CSEK) enforcement config of a bucket.
+     * 
+ * + * Protobuf type {@code + * google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig} + */ + public static final class CustomerSuppliedEncryptionEnforcementConfig + extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig) + CustomerSuppliedEncryptionEnforcementConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CustomerSuppliedEncryptionEnforcementConfig"); + } + + // Use CustomerSuppliedEncryptionEnforcementConfig.newBuilder() to construct. + private CustomerSuppliedEncryptionEnforcementConfig( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CustomerSuppliedEncryptionEnforcementConfig() { + restrictionMode_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_CustomerSuppliedEncryptionEnforcementConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_CustomerSuppliedEncryptionEnforcementConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + .class, + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + .Builder.class); + } + + private int bitField0_; + public static final int RESTRICTION_MODE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object 
restrictionMode_ = ""; + + /** + * + * + *
+       * Restriction mode for customer-supplied encryption for new objects
+       * within the bucket. Valid values are: `NotRestricted` and
+       * `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * customer-supplied encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using
+       * customer-supplied encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return Whether the restrictionMode field is set. + */ + @java.lang.Override + public boolean hasRestrictionMode() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Restriction mode for customer-supplied encryption for new objects
+       * within the bucket. Valid values are: `NotRestricted` and
+       * `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * customer-supplied encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using
+       * customer-supplied encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return The restrictionMode. + */ + @java.lang.Override + public java.lang.String getRestrictionMode() { + java.lang.Object ref = restrictionMode_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + restrictionMode_ = s; + return s; + } + } + + /** + * + * + *
+       * Restriction mode for customer-supplied encryption for new objects
+       * within the bucket. Valid values are: `NotRestricted` and
+       * `FullyRestricted`.
+       * If `NotRestricted` or unset, creation of new objects with
+       * customer-supplied encryption is allowed.
+       * If `FullyRestricted`, new objects can't be created using
+       * customer-supplied encryption.
+       * 
+ * + * optional string restriction_mode = 3; + * + * @return The bytes for restrictionMode. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRestrictionModeBytes() { + java.lang.Object ref = restrictionMode_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + restrictionMode_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EFFECTIVE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp effectiveTime_; + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return Whether the effectiveTime field is set. + */ + @java.lang.Override + public boolean hasEffectiveTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return The effectiveTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEffectiveTime() { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + + /** + * + * + *
+       * Time from which the config was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder() { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getEffectiveTime()); + } + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, restrictionMode_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getEffectiveTime()); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, restrictionMode_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig other = + (com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig) + obj; + + if (hasRestrictionMode() != other.hasRestrictionMode()) return false; + if (hasRestrictionMode()) { + 
if (!getRestrictionMode().equals(other.getRestrictionMode())) return false; + } + if (hasEffectiveTime() != other.hasEffectiveTime()) return false; + if (hasEffectiveTime()) { + if (!getEffectiveTime().equals(other.getEffectiveTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRestrictionMode()) { + hash = (37 * hash) + RESTRICTION_MODE_FIELD_NUMBER; + hash = (53 * hash) + getRestrictionMode().hashCode(); + } + if (hasEffectiveTime()) { + hash = (37 * hash) + EFFECTIVE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEffectiveTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption + 
.CustomerSuppliedEncryptionEnforcementConfig + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+       * Customer Supplied Encryption (CSEK) enforcement config of a bucket.
+       * 
+ * + * Protobuf type {@code + * google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig) + com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_CustomerSuppliedEncryptionEnforcementConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_CustomerSuppliedEncryptionEnforcementConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig.class, + com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig.Builder.class); + } + + // Construct using + // com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetEffectiveTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + restrictionMode_ = ""; + effectiveTime_ = null; + if (effectiveTimeBuilder_ != null) { + effectiveTimeBuilder_.dispose(); + effectiveTimeBuilder_ = null; + } + return this; + } + + 
@java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_CustomerSuppliedEncryptionEnforcementConfig_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + build() { + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + buildPartial() { + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + result = + new com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.restrictionMode_ = restrictionMode_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.effectiveTime_ = + effectiveTimeBuilder_ == null ? 
effectiveTime_ : effectiveTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig) { + return mergeFrom( + (com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + other) { + if (other + == com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + .getDefaultInstance()) return this; + if (other.hasRestrictionMode()) { + restrictionMode_ = other.restrictionMode_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasEffectiveTime()) { + mergeEffectiveTime(other.getEffectiveTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + input.readMessage( + internalGetEffectiveTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + restrictionMode_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } 
// switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object restrictionMode_ = ""; + + /** + * + * + *
+         * Restriction mode for customer-supplied encryption for new objects
+         * within the bucket. Valid values are: `NotRestricted` and
+         * `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * customer-supplied encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using
+         * customer-supplied encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @return Whether the restrictionMode field is set. + */ + public boolean hasRestrictionMode() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+         * Restriction mode for customer-supplied encryption for new objects
+         * within the bucket. Valid values are: `NotRestricted` and
+         * `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * customer-supplied encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using
+         * customer-supplied encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @return The restrictionMode. + */ + public java.lang.String getRestrictionMode() { + java.lang.Object ref = restrictionMode_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + restrictionMode_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+         * Restriction mode for customer-supplied encryption for new objects
+         * within the bucket. Valid values are: `NotRestricted` and
+         * `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * customer-supplied encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using
+         * customer-supplied encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @return The bytes for restrictionMode. + */ + public com.google.protobuf.ByteString getRestrictionModeBytes() { + java.lang.Object ref = restrictionMode_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + restrictionMode_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+         * Restriction mode for customer-supplied encryption for new objects
+         * within the bucket. Valid values are: `NotRestricted` and
+         * `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * customer-supplied encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using
+         * customer-supplied encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @param value The restrictionMode to set. + * @return This builder for chaining. + */ + public Builder setRestrictionMode(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + restrictionMode_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Restriction mode for customer-supplied encryption for new objects
+         * within the bucket. Valid values are: `NotRestricted` and
+         * `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * customer-supplied encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using
+         * customer-supplied encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @return This builder for chaining. + */ + public Builder clearRestrictionMode() { + restrictionMode_ = getDefaultInstance().getRestrictionMode(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+         * Restriction mode for customer-supplied encryption for new objects
+         * within the bucket. Valid values are: `NotRestricted` and
+         * `FullyRestricted`.
+         * If `NotRestricted` or unset, creation of new objects with
+         * customer-supplied encryption is allowed.
+         * If `FullyRestricted`, new objects can't be created using
+         * customer-supplied encryption.
+         * 
+ * + * optional string restriction_mode = 3; + * + * @param value The bytes for restrictionMode to set. + * @return This builder for chaining. + */ + public Builder setRestrictionModeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + restrictionMode_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp effectiveTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + effectiveTimeBuilder_; + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return Whether the effectiveTime field is set. + */ + public boolean hasEffectiveTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return The effectiveTime. + */ + public com.google.protobuf.Timestamp getEffectiveTime() { + if (effectiveTimeBuilder_ == null) { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } else { + return effectiveTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder setEffectiveTime(com.google.protobuf.Timestamp value) { + if (effectiveTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + effectiveTime_ = value; + } else { + effectiveTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder setEffectiveTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (effectiveTimeBuilder_ == null) { + effectiveTime_ = builderForValue.build(); + } else { + effectiveTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder mergeEffectiveTime(com.google.protobuf.Timestamp value) { + if (effectiveTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && effectiveTime_ != null + && effectiveTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEffectiveTimeBuilder().mergeFrom(value); + } else { + effectiveTime_ = value; + } + } else { + effectiveTimeBuilder_.mergeFrom(value); + } + if (effectiveTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder clearEffectiveTime() { + bitField0_ = (bitField0_ & ~0x00000002); + effectiveTime_ = null; + if (effectiveTimeBuilder_ != null) { + effectiveTimeBuilder_.dispose(); + effectiveTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getEffectiveTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetEffectiveTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder() { + if (effectiveTimeBuilder_ != null) { + return effectiveTimeBuilder_.getMessageOrBuilder(); + } else { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + } + + /** + * + * + *
+         * Time from which the config was effective. This is service-provided.
+         * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEffectiveTimeFieldBuilder() { + if (effectiveTimeBuilder_ == null) { + effectiveTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEffectiveTime(), getParentForChildren(), isClean()); + effectiveTime_ = null; + } + return effectiveTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig) + private static final com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig(); + } + + public static com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = + new com.google.protobuf.AbstractParser< + CustomerSuppliedEncryptionEnforcementConfig>() { + @java.lang.Override + public CustomerSuppliedEncryptionEnforcementConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch 
(com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser + parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser + getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int DEFAULT_KMS_KEY_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object defaultKmsKey_ = ""; + + /** + * + * + *
+     * Optional. The name of the Cloud KMS key that is used to encrypt objects
+     * inserted into this bucket, if no encryption method is specified.
+     * 
+ * + * + * string default_kms_key = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The defaultKmsKey. + */ + @java.lang.Override + public java.lang.String getDefaultKmsKey() { + java.lang.Object ref = defaultKmsKey_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + defaultKmsKey_ = s; + return s; + } + } + + /** + * + * + *
+     * Optional. The name of the Cloud KMS key that is used to encrypt objects
+     * inserted into this bucket, if no encryption method is specified.
+     * 
+ * + * + * string default_kms_key = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for defaultKmsKey. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDefaultKmsKeyBytes() { + java.lang.Object ref = defaultKmsKey_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + defaultKmsKey_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int GOOGLE_MANAGED_ENCRYPTION_ENFORCEMENT_CONFIG_FIELD_NUMBER = 2; + private com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + googleManagedEncryptionEnforcementConfig_; + + /** + * + * + *
+     * Optional. If omitted, then new objects with GMEK encryption-type are
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the googleManagedEncryptionEnforcementConfig field is set. + */ + @java.lang.Override + public boolean hasGoogleManagedEncryptionEnforcementConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Optional. If omitted, then new objects with GMEK encryption-type are
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The googleManagedEncryptionEnforcementConfig. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + getGoogleManagedEncryptionEnforcementConfig() { + return googleManagedEncryptionEnforcementConfig_ == null + ? com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .getDefaultInstance() + : googleManagedEncryptionEnforcementConfig_; + } + + /** + * + * + *
+     * Optional. If omitted, then new objects with GMEK encryption-type are
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfigOrBuilder + getGoogleManagedEncryptionEnforcementConfigOrBuilder() { + return googleManagedEncryptionEnforcementConfig_ == null + ? com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .getDefaultInstance() + : googleManagedEncryptionEnforcementConfig_; + } + + public static final int CUSTOMER_MANAGED_ENCRYPTION_ENFORCEMENT_CONFIG_FIELD_NUMBER = 3; + private com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + customerManagedEncryptionEnforcementConfig_; + + /** + * + * + *
+     * Optional. If omitted, then new objects with CMEK encryption-type are
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customerManagedEncryptionEnforcementConfig field is set. + */ + @java.lang.Override + public boolean hasCustomerManagedEncryptionEnforcementConfig() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Optional. If omitted, then new objects with CMEK encryption-type are
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customerManagedEncryptionEnforcementConfig. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + getCustomerManagedEncryptionEnforcementConfig() { + return customerManagedEncryptionEnforcementConfig_ == null + ? com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .getDefaultInstance() + : customerManagedEncryptionEnforcementConfig_; + } + + /** + * + * + *
+     * Optional. If omitted, then new objects with CMEK encryption-type are
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfigOrBuilder + getCustomerManagedEncryptionEnforcementConfigOrBuilder() { + return customerManagedEncryptionEnforcementConfig_ == null + ? com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .getDefaultInstance() + : customerManagedEncryptionEnforcementConfig_; + } + + public static final int CUSTOMER_SUPPLIED_ENCRYPTION_ENFORCEMENT_CONFIG_FIELD_NUMBER = 4; + private com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + customerSuppliedEncryptionEnforcementConfig_; + + /** + * + * + *
+     * Optional. If omitted, then new objects with CSEK encryption-type are
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customerSuppliedEncryptionEnforcementConfig field is set. + */ + @java.lang.Override + public boolean hasCustomerSuppliedEncryptionEnforcementConfig() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Optional. If omitted, then new objects with CSEK encryption-type are
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customerSuppliedEncryptionEnforcementConfig. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + getCustomerSuppliedEncryptionEnforcementConfig() { + return customerSuppliedEncryptionEnforcementConfig_ == null + ? com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + .getDefaultInstance() + : customerSuppliedEncryptionEnforcementConfig_; + } + + /** + * + * + *
+     * Optional. If omitted, then new objects with CSEK encryption-type are
+     * allowed. If set, then new objects created in this bucket must comply with
+     * enforcement config. Changing this has no effect on existing objects; it
+     * applies to new objects only.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfigOrBuilder + getCustomerSuppliedEncryptionEnforcementConfigOrBuilder() { + return customerSuppliedEncryptionEnforcementConfig_ == null + ? com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + .getDefaultInstance() + : customerSuppliedEncryptionEnforcementConfig_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(defaultKmsKey_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, defaultKmsKey_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getGoogleManagedEncryptionEnforcementConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getCustomerManagedEncryptionEnforcementConfig()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(4, getCustomerSuppliedEncryptionEnforcementConfig()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(defaultKmsKey_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, defaultKmsKey_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += + 
com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, getGoogleManagedEncryptionEnforcementConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, getCustomerManagedEncryptionEnforcementConfig()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, getCustomerSuppliedEncryptionEnforcementConfig()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.Encryption)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.Encryption other = (com.google.storage.v2.Bucket.Encryption) obj; + + if (!getDefaultKmsKey().equals(other.getDefaultKmsKey())) return false; + if (hasGoogleManagedEncryptionEnforcementConfig() + != other.hasGoogleManagedEncryptionEnforcementConfig()) return false; + if (hasGoogleManagedEncryptionEnforcementConfig()) { + if (!getGoogleManagedEncryptionEnforcementConfig() + .equals(other.getGoogleManagedEncryptionEnforcementConfig())) return false; + } + if (hasCustomerManagedEncryptionEnforcementConfig() + != other.hasCustomerManagedEncryptionEnforcementConfig()) return false; + if (hasCustomerManagedEncryptionEnforcementConfig()) { + if (!getCustomerManagedEncryptionEnforcementConfig() + .equals(other.getCustomerManagedEncryptionEnforcementConfig())) return false; + } + if (hasCustomerSuppliedEncryptionEnforcementConfig() + != other.hasCustomerSuppliedEncryptionEnforcementConfig()) return false; + if (hasCustomerSuppliedEncryptionEnforcementConfig()) { + if (!getCustomerSuppliedEncryptionEnforcementConfig() + .equals(other.getCustomerSuppliedEncryptionEnforcementConfig())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } 
+ + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DEFAULT_KMS_KEY_FIELD_NUMBER; + hash = (53 * hash) + getDefaultKmsKey().hashCode(); + if (hasGoogleManagedEncryptionEnforcementConfig()) { + hash = (37 * hash) + GOOGLE_MANAGED_ENCRYPTION_ENFORCEMENT_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getGoogleManagedEncryptionEnforcementConfig().hashCode(); + } + if (hasCustomerManagedEncryptionEnforcementConfig()) { + hash = (37 * hash) + CUSTOMER_MANAGED_ENCRYPTION_ENFORCEMENT_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getCustomerManagedEncryptionEnforcementConfig().hashCode(); + } + if (hasCustomerSuppliedEncryptionEnforcementConfig()) { + hash = (37 * hash) + CUSTOMER_SUPPLIED_ENCRYPTION_ENFORCEMENT_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getCustomerSuppliedEncryptionEnforcementConfig().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.Encryption parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Encryption parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Encryption parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Encryption parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Encryption parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Encryption parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Encryption parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Encryption parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.Encryption prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Encryption properties of a bucket.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Encryption} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Encryption) + com.google.storage.v2.Bucket.EncryptionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Encryption.class, + com.google.storage.v2.Bucket.Encryption.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.Encryption.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetGoogleManagedEncryptionEnforcementConfigFieldBuilder(); + internalGetCustomerManagedEncryptionEnforcementConfigFieldBuilder(); + internalGetCustomerSuppliedEncryptionEnforcementConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + defaultKmsKey_ = ""; + googleManagedEncryptionEnforcementConfig_ = null; + if (googleManagedEncryptionEnforcementConfigBuilder_ != null) { + googleManagedEncryptionEnforcementConfigBuilder_.dispose(); + googleManagedEncryptionEnforcementConfigBuilder_ = null; + } + customerManagedEncryptionEnforcementConfig_ = null; + if (customerManagedEncryptionEnforcementConfigBuilder_ != null) { + 
customerManagedEncryptionEnforcementConfigBuilder_.dispose(); + customerManagedEncryptionEnforcementConfigBuilder_ = null; + } + customerSuppliedEncryptionEnforcementConfig_ = null; + if (customerSuppliedEncryptionEnforcementConfigBuilder_ != null) { + customerSuppliedEncryptionEnforcementConfigBuilder_.dispose(); + customerSuppliedEncryptionEnforcementConfigBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Encryption_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Encryption.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption build() { + com.google.storage.v2.Bucket.Encryption result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption buildPartial() { + com.google.storage.v2.Bucket.Encryption result = + new com.google.storage.v2.Bucket.Encryption(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.Encryption result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.defaultKmsKey_ = defaultKmsKey_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.googleManagedEncryptionEnforcementConfig_ = + googleManagedEncryptionEnforcementConfigBuilder_ == null + ? 
googleManagedEncryptionEnforcementConfig_ + : googleManagedEncryptionEnforcementConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.customerManagedEncryptionEnforcementConfig_ = + customerManagedEncryptionEnforcementConfigBuilder_ == null + ? customerManagedEncryptionEnforcementConfig_ + : customerManagedEncryptionEnforcementConfigBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.customerSuppliedEncryptionEnforcementConfig_ = + customerSuppliedEncryptionEnforcementConfigBuilder_ == null + ? customerSuppliedEncryptionEnforcementConfig_ + : customerSuppliedEncryptionEnforcementConfigBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.Encryption) { + return mergeFrom((com.google.storage.v2.Bucket.Encryption) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.Encryption other) { + if (other == com.google.storage.v2.Bucket.Encryption.getDefaultInstance()) return this; + if (!other.getDefaultKmsKey().isEmpty()) { + defaultKmsKey_ = other.defaultKmsKey_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasGoogleManagedEncryptionEnforcementConfig()) { + mergeGoogleManagedEncryptionEnforcementConfig( + other.getGoogleManagedEncryptionEnforcementConfig()); + } + if (other.hasCustomerManagedEncryptionEnforcementConfig()) { + mergeCustomerManagedEncryptionEnforcementConfig( + other.getCustomerManagedEncryptionEnforcementConfig()); + } + if (other.hasCustomerSuppliedEncryptionEnforcementConfig()) { + mergeCustomerSuppliedEncryptionEnforcementConfig( + other.getCustomerSuppliedEncryptionEnforcementConfig()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + 
} + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + defaultKmsKey_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetGoogleManagedEncryptionEnforcementConfigFieldBuilder() + .getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetCustomerManagedEncryptionEnforcementConfigFieldBuilder() + .getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetCustomerSuppliedEncryptionEnforcementConfigFieldBuilder() + .getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object defaultKmsKey_ = ""; + + /** + * + * + *
+       * Optional. The name of the Cloud KMS key that is used to encrypt objects
+       * inserted into this bucket, if no encryption method is specified.
+       * 
+ * + * + * string default_kms_key = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The defaultKmsKey. + */ + public java.lang.String getDefaultKmsKey() { + java.lang.Object ref = defaultKmsKey_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + defaultKmsKey_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Optional. The name of the Cloud KMS key that is used to encrypt objects
+       * inserted into this bucket, if no encryption method is specified.
+       * 
+ * + * + * string default_kms_key = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for defaultKmsKey. + */ + public com.google.protobuf.ByteString getDefaultKmsKeyBytes() { + java.lang.Object ref = defaultKmsKey_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + defaultKmsKey_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Optional. The name of the Cloud KMS key that is used to encrypt objects
+       * inserted into this bucket, if no encryption method is specified.
+       * 
+ * + * + * string default_kms_key = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The defaultKmsKey to set. + * @return This builder for chaining. + */ + public Builder setDefaultKmsKey(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + defaultKmsKey_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The name of the Cloud KMS key that is used to encrypt objects
+       * inserted into this bucket, if no encryption method is specified.
+       * 
+ * + * + * string default_kms_key = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearDefaultKmsKey() { + defaultKmsKey_ = getDefaultInstance().getDefaultKmsKey(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The name of the Cloud KMS key that is used to encrypt objects
+       * inserted into this bucket, if no encryption method is specified.
+       * 
+ * + * + * string default_kms_key = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for defaultKmsKey to set. + * @return This builder for chaining. + */ + public Builder setDefaultKmsKeyBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + defaultKmsKey_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + googleManagedEncryptionEnforcementConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig, + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .Builder, + com.google.storage.v2.Bucket.Encryption + .GoogleManagedEncryptionEnforcementConfigOrBuilder> + googleManagedEncryptionEnforcementConfigBuilder_; + + /** + * + * + *
+       * Optional. If omitted, then new objects with GMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the googleManagedEncryptionEnforcementConfig field is set. + */ + public boolean hasGoogleManagedEncryptionEnforcementConfig() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with GMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The googleManagedEncryptionEnforcementConfig. + */ + public com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + getGoogleManagedEncryptionEnforcementConfig() { + if (googleManagedEncryptionEnforcementConfigBuilder_ == null) { + return googleManagedEncryptionEnforcementConfig_ == null + ? com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .getDefaultInstance() + : googleManagedEncryptionEnforcementConfig_; + } else { + return googleManagedEncryptionEnforcementConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with GMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setGoogleManagedEncryptionEnforcementConfig( + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig value) { + if (googleManagedEncryptionEnforcementConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + googleManagedEncryptionEnforcementConfig_ = value; + } else { + googleManagedEncryptionEnforcementConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with GMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setGoogleManagedEncryptionEnforcementConfig( + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig.Builder + builderForValue) { + if (googleManagedEncryptionEnforcementConfigBuilder_ == null) { + googleManagedEncryptionEnforcementConfig_ = builderForValue.build(); + } else { + googleManagedEncryptionEnforcementConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with GMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeGoogleManagedEncryptionEnforcementConfig( + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig value) { + if (googleManagedEncryptionEnforcementConfigBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && googleManagedEncryptionEnforcementConfig_ != null + && googleManagedEncryptionEnforcementConfig_ + != com.google.storage.v2.Bucket.Encryption + .GoogleManagedEncryptionEnforcementConfig.getDefaultInstance()) { + getGoogleManagedEncryptionEnforcementConfigBuilder().mergeFrom(value); + } else { + googleManagedEncryptionEnforcementConfig_ = value; + } + } else { + googleManagedEncryptionEnforcementConfigBuilder_.mergeFrom(value); + } + if (googleManagedEncryptionEnforcementConfig_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with GMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearGoogleManagedEncryptionEnforcementConfig() { + bitField0_ = (bitField0_ & ~0x00000002); + googleManagedEncryptionEnforcementConfig_ = null; + if (googleManagedEncryptionEnforcementConfigBuilder_ != null) { + googleManagedEncryptionEnforcementConfigBuilder_.dispose(); + googleManagedEncryptionEnforcementConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with GMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .Builder + getGoogleManagedEncryptionEnforcementConfigBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetGoogleManagedEncryptionEnforcementConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with GMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Encryption + .GoogleManagedEncryptionEnforcementConfigOrBuilder + getGoogleManagedEncryptionEnforcementConfigOrBuilder() { + if (googleManagedEncryptionEnforcementConfigBuilder_ != null) { + return googleManagedEncryptionEnforcementConfigBuilder_.getMessageOrBuilder(); + } else { + return googleManagedEncryptionEnforcementConfig_ == null + ? com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .getDefaultInstance() + : googleManagedEncryptionEnforcementConfig_; + } + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with GMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig google_managed_encryption_enforcement_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig, + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .Builder, + com.google.storage.v2.Bucket.Encryption + .GoogleManagedEncryptionEnforcementConfigOrBuilder> + internalGetGoogleManagedEncryptionEnforcementConfigFieldBuilder() { + if (googleManagedEncryptionEnforcementConfigBuilder_ == null) { + googleManagedEncryptionEnforcementConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig, + com.google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig + .Builder, + com.google.storage.v2.Bucket.Encryption + .GoogleManagedEncryptionEnforcementConfigOrBuilder>( + getGoogleManagedEncryptionEnforcementConfig(), getParentForChildren(), isClean()); + googleManagedEncryptionEnforcementConfig_ = null; + } + return googleManagedEncryptionEnforcementConfigBuilder_; + } + + private com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + customerManagedEncryptionEnforcementConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig, + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .Builder, + com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfigOrBuilder> + customerManagedEncryptionEnforcementConfigBuilder_; + + /** + * + * + *
+       * Optional. If omitted, then new objects with CMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customerManagedEncryptionEnforcementConfig field is set. + */ + public boolean hasCustomerManagedEncryptionEnforcementConfig() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customerManagedEncryptionEnforcementConfig. + */ + public com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + getCustomerManagedEncryptionEnforcementConfig() { + if (customerManagedEncryptionEnforcementConfigBuilder_ == null) { + return customerManagedEncryptionEnforcementConfig_ == null + ? com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .getDefaultInstance() + : customerManagedEncryptionEnforcementConfig_; + } else { + return customerManagedEncryptionEnforcementConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCustomerManagedEncryptionEnforcementConfig( + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + value) { + if (customerManagedEncryptionEnforcementConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + customerManagedEncryptionEnforcementConfig_ = value; + } else { + customerManagedEncryptionEnforcementConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCustomerManagedEncryptionEnforcementConfig( + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig.Builder + builderForValue) { + if (customerManagedEncryptionEnforcementConfigBuilder_ == null) { + customerManagedEncryptionEnforcementConfig_ = builderForValue.build(); + } else { + customerManagedEncryptionEnforcementConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCustomerManagedEncryptionEnforcementConfig( + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + value) { + if (customerManagedEncryptionEnforcementConfigBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && customerManagedEncryptionEnforcementConfig_ != null + && customerManagedEncryptionEnforcementConfig_ + != com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig.getDefaultInstance()) { + getCustomerManagedEncryptionEnforcementConfigBuilder().mergeFrom(value); + } else { + customerManagedEncryptionEnforcementConfig_ = value; + } + } else { + customerManagedEncryptionEnforcementConfigBuilder_.mergeFrom(value); + } + if (customerManagedEncryptionEnforcementConfig_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCustomerManagedEncryptionEnforcementConfig() { + bitField0_ = (bitField0_ & ~0x00000004); + customerManagedEncryptionEnforcementConfig_ = null; + if (customerManagedEncryptionEnforcementConfigBuilder_ != null) { + customerManagedEncryptionEnforcementConfigBuilder_.dispose(); + customerManagedEncryptionEnforcementConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .Builder + getCustomerManagedEncryptionEnforcementConfigBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCustomerManagedEncryptionEnforcementConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfigOrBuilder + getCustomerManagedEncryptionEnforcementConfigOrBuilder() { + if (customerManagedEncryptionEnforcementConfigBuilder_ != null) { + return customerManagedEncryptionEnforcementConfigBuilder_.getMessageOrBuilder(); + } else { + return customerManagedEncryptionEnforcementConfig_ == null + ? com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .getDefaultInstance() + : customerManagedEncryptionEnforcementConfig_; + } + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CMEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig customer_managed_encryption_enforcement_config = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig, + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .Builder, + com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfigOrBuilder> + internalGetCustomerManagedEncryptionEnforcementConfigFieldBuilder() { + if (customerManagedEncryptionEnforcementConfigBuilder_ == null) { + customerManagedEncryptionEnforcementConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfig, + com.google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig + .Builder, + com.google.storage.v2.Bucket.Encryption + .CustomerManagedEncryptionEnforcementConfigOrBuilder>( + getCustomerManagedEncryptionEnforcementConfig(), + getParentForChildren(), + isClean()); + customerManagedEncryptionEnforcementConfig_ = null; + } + return customerManagedEncryptionEnforcementConfigBuilder_; + } + + private com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + customerSuppliedEncryptionEnforcementConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig, + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + .Builder, + com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfigOrBuilder> + customerSuppliedEncryptionEnforcementConfigBuilder_; + + /** + * + * + *
+       * Optional. If omitted, then new objects with CSEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customerSuppliedEncryptionEnforcementConfig field is set. + */ + public boolean hasCustomerSuppliedEncryptionEnforcementConfig() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CSEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customerSuppliedEncryptionEnforcementConfig. + */ + public com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + getCustomerSuppliedEncryptionEnforcementConfig() { + if (customerSuppliedEncryptionEnforcementConfigBuilder_ == null) { + return customerSuppliedEncryptionEnforcementConfig_ == null + ? com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + .getDefaultInstance() + : customerSuppliedEncryptionEnforcementConfig_; + } else { + return customerSuppliedEncryptionEnforcementConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CSEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCustomerSuppliedEncryptionEnforcementConfig( + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + value) { + if (customerSuppliedEncryptionEnforcementConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + customerSuppliedEncryptionEnforcementConfig_ = value; + } else { + customerSuppliedEncryptionEnforcementConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CSEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCustomerSuppliedEncryptionEnforcementConfig( + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + .Builder + builderForValue) { + if (customerSuppliedEncryptionEnforcementConfigBuilder_ == null) { + customerSuppliedEncryptionEnforcementConfig_ = builderForValue.build(); + } else { + customerSuppliedEncryptionEnforcementConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CSEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCustomerSuppliedEncryptionEnforcementConfig( + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + value) { + if (customerSuppliedEncryptionEnforcementConfigBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && customerSuppliedEncryptionEnforcementConfig_ != null + && customerSuppliedEncryptionEnforcementConfig_ + != com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig.getDefaultInstance()) { + getCustomerSuppliedEncryptionEnforcementConfigBuilder().mergeFrom(value); + } else { + customerSuppliedEncryptionEnforcementConfig_ = value; + } + } else { + customerSuppliedEncryptionEnforcementConfigBuilder_.mergeFrom(value); + } + if (customerSuppliedEncryptionEnforcementConfig_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CSEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCustomerSuppliedEncryptionEnforcementConfig() { + bitField0_ = (bitField0_ & ~0x00000008); + customerSuppliedEncryptionEnforcementConfig_ = null; + if (customerSuppliedEncryptionEnforcementConfigBuilder_ != null) { + customerSuppliedEncryptionEnforcementConfigBuilder_.dispose(); + customerSuppliedEncryptionEnforcementConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CSEK encryption-type are
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + .Builder + getCustomerSuppliedEncryptionEnforcementConfigBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetCustomerSuppliedEncryptionEnforcementConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CSEK encryption-type is
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfigOrBuilder + getCustomerSuppliedEncryptionEnforcementConfigOrBuilder() { + if (customerSuppliedEncryptionEnforcementConfigBuilder_ != null) { + return customerSuppliedEncryptionEnforcementConfigBuilder_.getMessageOrBuilder(); + } else { + return customerSuppliedEncryptionEnforcementConfig_ == null + ? com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + .getDefaultInstance() + : customerSuppliedEncryptionEnforcementConfig_; + } + } + + /** + * + * + *
+       * Optional. If omitted, then new objects with CSEK encryption-type is
+       * allowed. If set, then new objects created in this bucket must comply with
+       * enforcement config. Changing this has no effect on existing objects; it
+       * applies to new objects only.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig customer_supplied_encryption_enforcement_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig, + com.google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig + .Builder, + com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfigOrBuilder> + internalGetCustomerSuppliedEncryptionEnforcementConfigFieldBuilder() { + if (customerSuppliedEncryptionEnforcementConfigBuilder_ == null) { + customerSuppliedEncryptionEnforcementConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig, + com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfig.Builder, + com.google.storage.v2.Bucket.Encryption + .CustomerSuppliedEncryptionEnforcementConfigOrBuilder>( + getCustomerSuppliedEncryptionEnforcementConfig(), + getParentForChildren(), + isClean()); + customerSuppliedEncryptionEnforcementConfig_ = null; + } + return customerSuppliedEncryptionEnforcementConfigBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Encryption) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Encryption) + private static final com.google.storage.v2.Bucket.Encryption DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.Encryption(); + } + + public static com.google.storage.v2.Bucket.Encryption getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Encryption parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface IamConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.IamConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. Bucket restriction options currently enforced on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the uniformBucketLevelAccess field is set. + */ + boolean hasUniformBucketLevelAccess(); + + /** + * + * + *
+     * Optional. Bucket restriction options currently enforced on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The uniformBucketLevelAccess. + */ + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess getUniformBucketLevelAccess(); + + /** + * + * + *
+     * Optional. Bucket restriction options currently enforced on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccessOrBuilder + getUniformBucketLevelAccessOrBuilder(); + + /** + * + * + *
+     * Optional. Whether IAM enforces public access prevention. Valid values are
+     * `enforced` or `inherited`.
+     * 
+ * + * string public_access_prevention = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The publicAccessPrevention. + */ + java.lang.String getPublicAccessPrevention(); + + /** + * + * + *
+     * Optional. Whether IAM enforces public access prevention. Valid values are
+     * `enforced` or `inherited`.
+     * 
+ * + * string public_access_prevention = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for publicAccessPrevention. + */ + com.google.protobuf.ByteString getPublicAccessPreventionBytes(); + } + + /** + * + * + *
+   * Bucket restriction options.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.IamConfig} + */ + public static final class IamConfig extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.IamConfig) + IamConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "IamConfig"); + } + + // Use IamConfig.newBuilder() to construct. + private IamConfig(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private IamConfig() { + publicAccessPrevention_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IamConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IamConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.IamConfig.class, + com.google.storage.v2.Bucket.IamConfig.Builder.class); + } + + public interface UniformBucketLevelAccessOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * Optional. If set, access checks only use bucket-level IAM policies or
+       * above.
+       * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enabled. + */ + boolean getEnabled(); + + /** + * + * + *
+       * Optional. The deadline time for changing
+       * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+       * `false`. Mutable until the specified deadline is reached, but not
+       * afterward.
+       * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the lockTime field is set. + */ + boolean hasLockTime(); + + /** + * + * + *
+       * Optional. The deadline time for changing
+       * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+       * `false`. Mutable until the specified deadline is reached, but not
+       * afterward.
+       * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lockTime. + */ + com.google.protobuf.Timestamp getLockTime(); + + /** + * + * + *
+       * Optional. The deadline time for changing
+       * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+       * `false`. Mutable until the specified deadline is reached, but not
+       * afterward.
+       * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.TimestampOrBuilder getLockTimeOrBuilder(); + } + + /** + * + * + *
+     * Settings for Uniform Bucket level access.
+     * See https://cloud.google.com/storage/docs/uniform-bucket-level-access.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess} + */ + public static final class UniformBucketLevelAccess extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess) + UniformBucketLevelAccessOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UniformBucketLevelAccess"); + } + + // Use UniformBucketLevelAccess.newBuilder() to construct. + private UniformBucketLevelAccess(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UniformBucketLevelAccess() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IamConfig_UniformBucketLevelAccess_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IamConfig_UniformBucketLevelAccess_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.class, + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.Builder.class); + } + + private int bitField0_; + public static final int ENABLED_FIELD_NUMBER = 1; + private boolean enabled_ = false; + + /** + * + * + *
+       * Optional. If set, access checks only use bucket-level IAM policies or
+       * above.
+       * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enabled. + */ + @java.lang.Override + public boolean getEnabled() { + return enabled_; + } + + public static final int LOCK_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp lockTime_; + + /** + * + * + *
+       * Optional. The deadline time for changing
+       * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+       * `false`. Mutable until the specified deadline is reached, but not
+       * afterward.
+       * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the lockTime field is set. + */ + @java.lang.Override + public boolean hasLockTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Optional. The deadline time for changing
+       * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+       * `false`. Mutable until the specified deadline is reached, but not
+       * afterward.
+       * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lockTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getLockTime() { + return lockTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : lockTime_; + } + + /** + * + * + *
+       * Optional. The deadline time for changing
+       * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+       * `false`. Mutable until the specified deadline is reached, but not
+       * afterward.
+       * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getLockTimeOrBuilder() { + return lockTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : lockTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (enabled_ != false) { + output.writeBool(1, enabled_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getLockTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (enabled_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, enabled_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getLockTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess other = + (com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess) obj; + + if (getEnabled() != other.getEnabled()) return false; + if (hasLockTime() != other.hasLockTime()) return false; + if (hasLockTime()) { + if (!getLockTime().equals(other.getLockTime())) return false; + } + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENABLED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnabled()); + if (hasLockTime()) { + hash = (37 * hash) + LOCK_TIME_FIELD_NUMBER; + hash = (53 * hash) + getLockTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess parseFrom( + byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { 
+ return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+       * Settings for Uniform Bucket level access.
+       * See https://cloud.google.com/storage/docs/uniform-bucket-level-access.
+       * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess) + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccessOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IamConfig_UniformBucketLevelAccess_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IamConfig_UniformBucketLevelAccess_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.class, + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.Builder.class); + } + + // Construct using + // com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetLockTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + enabled_ = false; + lockTime_ = null; + if (lockTimeBuilder_ != null) { + lockTimeBuilder_.dispose(); + lockTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + 
.internal_static_google_storage_v2_Bucket_IamConfig_UniformBucketLevelAccess_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess build() { + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess buildPartial() { + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess result = + new com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.enabled_ = enabled_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.lockTime_ = lockTimeBuilder_ == null ? 
lockTime_ : lockTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess) { + return mergeFrom( + (com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess other) { + if (other + == com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + .getDefaultInstance()) return this; + if (other.getEnabled() != false) { + setEnabled(other.getEnabled()); + } + if (other.hasLockTime()) { + mergeLockTime(other.getLockTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + enabled_ = input.readBool(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + input.readMessage( + internalGetLockTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } 
+ + private int bitField0_; + + private boolean enabled_; + + /** + * + * + *
+         * Optional. If set, access checks only use bucket-level IAM policies or
+         * above.
+         * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enabled. + */ + @java.lang.Override + public boolean getEnabled() { + return enabled_; + } + + /** + * + * + *
+         * Optional. If set, access checks only use bucket-level IAM policies or
+         * above.
+         * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The enabled to set. + * @return This builder for chaining. + */ + public Builder setEnabled(boolean value) { + + enabled_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. If set, access checks only use bucket-level IAM policies or
+         * above.
+         * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEnabled() { + bitField0_ = (bitField0_ & ~0x00000001); + enabled_ = false; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp lockTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + lockTimeBuilder_; + + /** + * + * + *
+         * Optional. The deadline time for changing
+         * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+         * `false`. Mutable until the specified deadline is reached, but not
+         * afterward.
+         * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the lockTime field is set. + */ + public boolean hasLockTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+         * Optional. The deadline time for changing
+         * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+         * `false`. Mutable until the specified deadline is reached, but not
+         * afterward.
+         * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lockTime. + */ + public com.google.protobuf.Timestamp getLockTime() { + if (lockTimeBuilder_ == null) { + return lockTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : lockTime_; + } else { + return lockTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+         * Optional. The deadline time for changing
+         * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+         * `false`. Mutable until the specified deadline is reached, but not
+         * afterward.
+         * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setLockTime(com.google.protobuf.Timestamp value) { + if (lockTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + lockTime_ = value; + } else { + lockTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The deadline time for changing
+         * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+         * `false`. Mutable until the specified deadline is reached, but not
+         * afterward.
+         * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setLockTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (lockTimeBuilder_ == null) { + lockTime_ = builderForValue.build(); + } else { + lockTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The deadline time for changing
+         * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+         * `false`. Mutable until the specified deadline is reached, but not
+         * afterward.
+         * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeLockTime(com.google.protobuf.Timestamp value) { + if (lockTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && lockTime_ != null + && lockTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getLockTimeBuilder().mergeFrom(value); + } else { + lockTime_ = value; + } + } else { + lockTimeBuilder_.mergeFrom(value); + } + if (lockTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+         * Optional. The deadline time for changing
+         * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+         * `false`. Mutable until the specified deadline is reached, but not
+         * afterward.
+         * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearLockTime() { + bitField0_ = (bitField0_ & ~0x00000002); + lockTime_ = null; + if (lockTimeBuilder_ != null) { + lockTimeBuilder_.dispose(); + lockTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The deadline time for changing
+         * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+         * `false`. Mutable until the specified deadline is reached, but not
+         * afterward.
+         * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Timestamp.Builder getLockTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetLockTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+         * Optional. The deadline time for changing
+         * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+         * `false`. Mutable until the specified deadline is reached, but not
+         * afterward.
+         * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.TimestampOrBuilder getLockTimeOrBuilder() { + if (lockTimeBuilder_ != null) { + return lockTimeBuilder_.getMessageOrBuilder(); + } else { + return lockTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : lockTime_; + } + } + + /** + * + * + *
+         * Optional. The deadline time for changing
+         * `iam_config.uniform_bucket_level_access.enabled` from `true` to
+         * `false`. Mutable until the specified deadline is reached, but not
+         * afterward.
+         * 
+ * + * .google.protobuf.Timestamp lock_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetLockTimeFieldBuilder() { + if (lockTimeBuilder_ == null) { + lockTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getLockTime(), getParentForChildren(), isClean()); + lockTime_ = null; + } + return lockTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess) + private static final com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess(); + } + + public static com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UniformBucketLevelAccess parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int UNIFORM_BUCKET_LEVEL_ACCESS_FIELD_NUMBER = 1; + private com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + uniformBucketLevelAccess_; + + /** + * + * + *
+     * Optional. Bucket restriction options currently enforced on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the uniformBucketLevelAccess field is set. + */ + @java.lang.Override + public boolean hasUniformBucketLevelAccess() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Optional. Bucket restriction options currently enforced on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The uniformBucketLevelAccess. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + getUniformBucketLevelAccess() { + return uniformBucketLevelAccess_ == null + ? com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.getDefaultInstance() + : uniformBucketLevelAccess_; + } + + /** + * + * + *
+     * Optional. Bucket restriction options currently enforced on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccessOrBuilder + getUniformBucketLevelAccessOrBuilder() { + return uniformBucketLevelAccess_ == null + ? com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.getDefaultInstance() + : uniformBucketLevelAccess_; + } + + public static final int PUBLIC_ACCESS_PREVENTION_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object publicAccessPrevention_ = ""; + + /** + * + * + *
+     * Optional. Whether IAM enforces public access prevention. Valid values are
+     * `enforced` or `inherited`.
+     * 
+ * + * string public_access_prevention = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The publicAccessPrevention. + */ + @java.lang.Override + public java.lang.String getPublicAccessPrevention() { + java.lang.Object ref = publicAccessPrevention_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + publicAccessPrevention_ = s; + return s; + } + } + + /** + * + * + *
+     * Optional. Whether IAM enforces public access prevention. Valid values are
+     * `enforced` or `inherited`.
+     * 
+ * + * string public_access_prevention = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for publicAccessPrevention. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPublicAccessPreventionBytes() { + java.lang.Object ref = publicAccessPrevention_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + publicAccessPrevention_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getUniformBucketLevelAccess()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(publicAccessPrevention_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, publicAccessPrevention_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, getUniformBucketLevelAccess()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(publicAccessPrevention_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, publicAccessPrevention_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.storage.v2.Bucket.IamConfig)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.IamConfig other = (com.google.storage.v2.Bucket.IamConfig) obj; + + if (hasUniformBucketLevelAccess() != other.hasUniformBucketLevelAccess()) return false; + if (hasUniformBucketLevelAccess()) { + if (!getUniformBucketLevelAccess().equals(other.getUniformBucketLevelAccess())) + return false; + } + if (!getPublicAccessPrevention().equals(other.getPublicAccessPrevention())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasUniformBucketLevelAccess()) { + hash = (37 * hash) + UNIFORM_BUCKET_LEVEL_ACCESS_FIELD_NUMBER; + hash = (53 * hash) + getUniformBucketLevelAccess().hashCode(); + } + hash = (37 * hash) + PUBLIC_ACCESS_PREVENTION_FIELD_NUMBER; + hash = (53 * hash) + getPublicAccessPrevention().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.IamConfig parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IamConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IamConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IamConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IamConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IamConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IamConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IamConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IamConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IamConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IamConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IamConfig parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.IamConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Bucket restriction options.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.IamConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.IamConfig) + com.google.storage.v2.Bucket.IamConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IamConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IamConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.IamConfig.class, + com.google.storage.v2.Bucket.IamConfig.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.IamConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetUniformBucketLevelAccessFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + uniformBucketLevelAccess_ = null; + if (uniformBucketLevelAccessBuilder_ != null) { + uniformBucketLevelAccessBuilder_.dispose(); + uniformBucketLevelAccessBuilder_ = null; + } + publicAccessPrevention_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IamConfig_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IamConfig getDefaultInstanceForType() { + return 
com.google.storage.v2.Bucket.IamConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IamConfig build() { + com.google.storage.v2.Bucket.IamConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IamConfig buildPartial() { + com.google.storage.v2.Bucket.IamConfig result = + new com.google.storage.v2.Bucket.IamConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.IamConfig result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.uniformBucketLevelAccess_ = + uniformBucketLevelAccessBuilder_ == null + ? uniformBucketLevelAccess_ + : uniformBucketLevelAccessBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.publicAccessPrevention_ = publicAccessPrevention_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.IamConfig) { + return mergeFrom((com.google.storage.v2.Bucket.IamConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.IamConfig other) { + if (other == com.google.storage.v2.Bucket.IamConfig.getDefaultInstance()) return this; + if (other.hasUniformBucketLevelAccess()) { + mergeUniformBucketLevelAccess(other.getUniformBucketLevelAccess()); + } + if (!other.getPublicAccessPrevention().isEmpty()) { + publicAccessPrevention_ = other.publicAccessPrevention_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean 
isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetUniformBucketLevelAccessFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 26: + { + publicAccessPrevention_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + uniformBucketLevelAccess_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess, + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.Builder, + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccessOrBuilder> + uniformBucketLevelAccessBuilder_; + + /** + * + * + *
+       * Optional. Bucket restriction options currently enforced on the bucket.
+       * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the uniformBucketLevelAccess field is set. + */ + public boolean hasUniformBucketLevelAccess() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Optional. Bucket restriction options currently enforced on the bucket.
+       * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The uniformBucketLevelAccess. + */ + public com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + getUniformBucketLevelAccess() { + if (uniformBucketLevelAccessBuilder_ == null) { + return uniformBucketLevelAccess_ == null + ? com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.getDefaultInstance() + : uniformBucketLevelAccess_; + } else { + return uniformBucketLevelAccessBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Optional. Bucket restriction options currently enforced on the bucket.
+       * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUniformBucketLevelAccess( + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess value) { + if (uniformBucketLevelAccessBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + uniformBucketLevelAccess_ = value; + } else { + uniformBucketLevelAccessBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Bucket restriction options currently enforced on the bucket.
+       * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUniformBucketLevelAccess( + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.Builder builderForValue) { + if (uniformBucketLevelAccessBuilder_ == null) { + uniformBucketLevelAccess_ = builderForValue.build(); + } else { + uniformBucketLevelAccessBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Bucket restriction options currently enforced on the bucket.
+       * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeUniformBucketLevelAccess( + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess value) { + if (uniformBucketLevelAccessBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && uniformBucketLevelAccess_ != null + && uniformBucketLevelAccess_ + != com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + .getDefaultInstance()) { + getUniformBucketLevelAccessBuilder().mergeFrom(value); + } else { + uniformBucketLevelAccess_ = value; + } + } else { + uniformBucketLevelAccessBuilder_.mergeFrom(value); + } + if (uniformBucketLevelAccess_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Optional. Bucket restriction options currently enforced on the bucket.
+       * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearUniformBucketLevelAccess() { + bitField0_ = (bitField0_ & ~0x00000001); + uniformBucketLevelAccess_ = null; + if (uniformBucketLevelAccessBuilder_ != null) { + uniformBucketLevelAccessBuilder_.dispose(); + uniformBucketLevelAccessBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Bucket restriction options currently enforced on the bucket.
+       * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.Builder + getUniformBucketLevelAccessBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetUniformBucketLevelAccessFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Optional. Bucket restriction options currently enforced on the bucket.
+       * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccessOrBuilder + getUniformBucketLevelAccessOrBuilder() { + if (uniformBucketLevelAccessBuilder_ != null) { + return uniformBucketLevelAccessBuilder_.getMessageOrBuilder(); + } else { + return uniformBucketLevelAccess_ == null + ? com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.getDefaultInstance() + : uniformBucketLevelAccess_; + } + } + + /** + * + * + *
+       * Optional. Bucket restriction options currently enforced on the bucket.
+       * 
+ * + * + * .google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess uniform_bucket_level_access = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess, + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.Builder, + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccessOrBuilder> + internalGetUniformBucketLevelAccessFieldBuilder() { + if (uniformBucketLevelAccessBuilder_ == null) { + uniformBucketLevelAccessBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess, + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.Builder, + com.google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccessOrBuilder>( + getUniformBucketLevelAccess(), getParentForChildren(), isClean()); + uniformBucketLevelAccess_ = null; + } + return uniformBucketLevelAccessBuilder_; + } + + private java.lang.Object publicAccessPrevention_ = ""; + + /** + * + * + *
+       * Optional. Whether IAM enforces public access prevention. Valid values are
+       * `enforced` or `inherited`.
+       * 
+ * + * string public_access_prevention = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The publicAccessPrevention. + */ + public java.lang.String getPublicAccessPrevention() { + java.lang.Object ref = publicAccessPrevention_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + publicAccessPrevention_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Optional. Whether IAM enforces public access prevention. Valid values are
+       * `enforced` or `inherited`.
+       * 
+ * + * string public_access_prevention = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for publicAccessPrevention. + */ + public com.google.protobuf.ByteString getPublicAccessPreventionBytes() { + java.lang.Object ref = publicAccessPrevention_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + publicAccessPrevention_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Optional. Whether IAM enforces public access prevention. Valid values are
+       * `enforced` or `inherited`.
+       * 
+ * + * string public_access_prevention = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The publicAccessPrevention to set. + * @return This builder for chaining. + */ + public Builder setPublicAccessPrevention(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + publicAccessPrevention_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Whether IAM enforces public access prevention. Valid values are
+       * `enforced` or `inherited`.
+       * 
+ * + * string public_access_prevention = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPublicAccessPrevention() { + publicAccessPrevention_ = getDefaultInstance().getPublicAccessPrevention(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Whether IAM enforces public access prevention. Valid values are
+       * `enforced` or `inherited`.
+       * 
+ * + * string public_access_prevention = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for publicAccessPrevention to set. + * @return This builder for chaining. + */ + public Builder setPublicAccessPreventionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + publicAccessPrevention_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.IamConfig) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.IamConfig) + private static final com.google.storage.v2.Bucket.IamConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.IamConfig(); + } + + public static com.google.storage.v2.Bucket.IamConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public IamConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + 
} + + @java.lang.Override + public com.google.storage.v2.Bucket.IamConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface LifecycleOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Lifecycle) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. A lifecycle management rule, which is made of an action to take
+     * and the condition under which the action is taken.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getRuleList(); + + /** + * + * + *
+     * Optional. A lifecycle management rule, which is made of an action to take
+     * and the condition under which the action is taken.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.Lifecycle.Rule getRule(int index); + + /** + * + * + *
+     * Optional. A lifecycle management rule, which is made of an action to take
+     * and the condition under which the action is taken.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getRuleCount(); + + /** + * + * + *
+     * Optional. A lifecycle management rule, which is made of an action to take
+     * and the condition under which the action is taken.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getRuleOrBuilderList(); + + /** + * + * + *
+     * Optional. A lifecycle management rule, which is made of an action to take
+     * and the condition under which the action is taken.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.Lifecycle.RuleOrBuilder getRuleOrBuilder(int index); + } + + /** + * + * + *
+   * Lifecycle properties of a bucket.
+   * For more information, see [Object Lifecycle
+   * Management](https://cloud.google.com/storage/docs/lifecycle).
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Lifecycle} + */ + public static final class Lifecycle extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Lifecycle) + LifecycleOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Lifecycle"); + } + + // Use Lifecycle.newBuilder() to construct. + private Lifecycle(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Lifecycle() { + rule_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Lifecycle.class, + com.google.storage.v2.Bucket.Lifecycle.Builder.class); + } + + public interface RuleOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Lifecycle.Rule) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * Optional. The action to take.
+       * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the action field is set. + */ + boolean hasAction(); + + /** + * + * + *
+       * Optional. The action to take.
+       * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The action. + */ + com.google.storage.v2.Bucket.Lifecycle.Rule.Action getAction(); + + /** + * + * + *
+       * Optional. The action to take.
+       * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.Lifecycle.Rule.ActionOrBuilder getActionOrBuilder(); + + /** + * + * + *
+       * Optional. The condition under which the action is taken.
+       * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the condition field is set. + */ + boolean hasCondition(); + + /** + * + * + *
+       * Optional. The condition under which the action is taken.
+       * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The condition. + */ + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition getCondition(); + + /** + * + * + *
+       * Optional. The condition under which the action is taken.
+       * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.Lifecycle.Rule.ConditionOrBuilder getConditionOrBuilder(); + } + + /** + * + * + *
+     * A lifecycle Rule, combining an action to take on an object and a
+     * condition which triggers that action.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Lifecycle.Rule} + */ + public static final class Rule extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Lifecycle.Rule) + RuleOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Rule"); + } + + // Use Rule.newBuilder() to construct. + private Rule(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Rule() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Lifecycle.Rule.class, + com.google.storage.v2.Bucket.Lifecycle.Rule.Builder.class); + } + + public interface ActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Lifecycle.Rule.Action) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+         * Optional. Type of the action. Currently, only `Delete`,
+         * `SetStorageClass`, and `AbortIncompleteMultipartUpload` are
+         * supported.
+         * 
+ * + * string type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The type. + */ + java.lang.String getType(); + + /** + * + * + *
+         * Optional. Type of the action. Currently, only `Delete`,
+         * `SetStorageClass`, and `AbortIncompleteMultipartUpload` are
+         * supported.
+         * 
+ * + * string type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for type. + */ + com.google.protobuf.ByteString getTypeBytes(); + + /** + * + * + *
+         * Optional. Target storage class. Required iff the type of the action
+         * is SetStorageClass.
+         * 
+ * + * string storage_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The storageClass. + */ + java.lang.String getStorageClass(); + + /** + * + * + *
+         * Optional. Target storage class. Required iff the type of the action
+         * is SetStorageClass.
+         * 
+ * + * string storage_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for storageClass. + */ + com.google.protobuf.ByteString getStorageClassBytes(); + } + + /** + * + * + *
+       * An action to take on an object.
+       * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Lifecycle.Rule.Action} + */ + public static final class Action extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Lifecycle.Rule.Action) + ActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Action"); + } + + // Use Action.newBuilder() to construct. + private Action(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Action() { + type_ = ""; + storageClass_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Action_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Action_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Lifecycle.Rule.Action.class, + com.google.storage.v2.Bucket.Lifecycle.Rule.Action.Builder.class); + } + + public static final int TYPE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object type_ = ""; + + /** + * + * + *
+         * Optional. Type of the action. Currently, only `Delete`,
+         * `SetStorageClass`, and `AbortIncompleteMultipartUpload` are
+         * supported.
+         * 
+ * + * string type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The type. + */ + @java.lang.Override + public java.lang.String getType() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } + } + + /** + * + * + *
+         * Optional. Type of the action. Currently, only `Delete`,
+         * `SetStorageClass`, and `AbortIncompleteMultipartUpload` are
+         * supported.
+         * 
+ * + * string type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for type. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STORAGE_CLASS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object storageClass_ = ""; + + /** + * + * + *
+         * Optional. Target storage class. Required iff the type of the action
+         * is SetStorageClass.
+         * 
+ * + * string storage_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The storageClass. + */ + @java.lang.Override + public java.lang.String getStorageClass() { + java.lang.Object ref = storageClass_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + storageClass_ = s; + return s; + } + } + + /** + * + * + *
+         * Optional. Target storage class. Required iff the type of the action
+         * is SetStorageClass.
+         * 
+ * + * string storage_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for storageClass. + */ + @java.lang.Override + public com.google.protobuf.ByteString getStorageClassBytes() { + java.lang.Object ref = storageClass_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + storageClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(type_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, type_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(storageClass_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, storageClass_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(type_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, type_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(storageClass_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, storageClass_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.Lifecycle.Rule.Action)) { + return 
super.equals(obj); + } + com.google.storage.v2.Bucket.Lifecycle.Rule.Action other = + (com.google.storage.v2.Bucket.Lifecycle.Rule.Action) obj; + + if (!getType().equals(other.getType())) return false; + if (!getStorageClass().equals(other.getStorageClass())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType().hashCode(); + hash = (37 * hash) + STORAGE_CLASS_FIELD_NUMBER; + hash = (53 * hash) + getStorageClass().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Action parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Action parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Action parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Action parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Action parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException 
{ + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Action parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Action parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Action parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Action parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Action parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Action parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Action parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + 
@java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.v2.Bucket.Lifecycle.Rule.Action prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+         * An action to take on an object.
+         * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Lifecycle.Rule.Action} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Lifecycle.Rule.Action) + com.google.storage.v2.Bucket.Lifecycle.Rule.ActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Action_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Action_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Lifecycle.Rule.Action.class, + com.google.storage.v2.Bucket.Lifecycle.Rule.Action.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.Lifecycle.Rule.Action.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + type_ = ""; + storageClass_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Action_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule.Action getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Lifecycle.Rule.Action.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule.Action build() { + com.google.storage.v2.Bucket.Lifecycle.Rule.Action result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule.Action buildPartial() { + com.google.storage.v2.Bucket.Lifecycle.Rule.Action result = + new com.google.storage.v2.Bucket.Lifecycle.Rule.Action(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.Lifecycle.Rule.Action result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.type_ = type_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.storageClass_ = storageClass_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.Lifecycle.Rule.Action) { + return mergeFrom((com.google.storage.v2.Bucket.Lifecycle.Rule.Action) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.Lifecycle.Rule.Action other) { + if (other == com.google.storage.v2.Bucket.Lifecycle.Rule.Action.getDefaultInstance()) + return this; + if (!other.getType().isEmpty()) { + type_ = other.type_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getStorageClass().isEmpty()) { + storageClass_ = other.storageClass_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 
10: + { + type_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + storageClass_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object type_ = ""; + + /** + * + * + *
+           * Optional. Type of the action. Currently, only `Delete`,
+           * `SetStorageClass`, and `AbortIncompleteMultipartUpload` are
+           * supported.
+           * 
+ * + * string type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The type. + */ + public java.lang.String getType() { + java.lang.Object ref = type_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+           * Optional. Type of the action. Currently, only `Delete`,
+           * `SetStorageClass`, and `AbortIncompleteMultipartUpload` are
+           * supported.
+           * 
+ * + * string type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for type. + */ + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+           * Optional. Type of the action. Currently, only `Delete`,
+           * `SetStorageClass`, and `AbortIncompleteMultipartUpload` are
+           * supported.
+           * 
+ * + * string type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + type_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. Type of the action. Currently, only `Delete`,
+           * `SetStorageClass`, and `AbortIncompleteMultipartUpload` are
+           * supported.
+           * 
+ * + * string type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearType() { + type_ = getDefaultInstance().getType(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. Type of the action. Currently, only `Delete`,
+           * `SetStorageClass`, and `AbortIncompleteMultipartUpload` are
+           * supported.
+           * 
+ * + * string type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for type to set. + * @return This builder for chaining. + */ + public Builder setTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + type_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object storageClass_ = ""; + + /** + * + * + *
+           * Optional. Target storage class. Required iff the type of the action
+           * is SetStorageClass.
+           * 
+ * + * string storage_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The storageClass. + */ + public java.lang.String getStorageClass() { + java.lang.Object ref = storageClass_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + storageClass_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+           * Optional. Target storage class. Required iff the type of the action
+           * is SetStorageClass.
+           * 
+ * + * string storage_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for storageClass. + */ + public com.google.protobuf.ByteString getStorageClassBytes() { + java.lang.Object ref = storageClass_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + storageClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+           * Optional. Target storage class. Required iff the type of the action
+           * is SetStorageClass.
+           * 
+ * + * string storage_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The storageClass to set. + * @return This builder for chaining. + */ + public Builder setStorageClass(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + storageClass_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. Target storage class. Required iff the type of the action
+           * is SetStorageClass.
+           * 
+ * + * string storage_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearStorageClass() { + storageClass_ = getDefaultInstance().getStorageClass(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. Target storage class. Required iff the type of the action
+           * is SetStorageClass.
+           * 
+ * + * string storage_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for storageClass to set. + * @return This builder for chaining. + */ + public Builder setStorageClassBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + storageClass_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Lifecycle.Rule.Action) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Lifecycle.Rule.Action) + private static final com.google.storage.v2.Bucket.Lifecycle.Rule.Action DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.Lifecycle.Rule.Action(); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Action getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Action parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + 
return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule.Action getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ConditionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Lifecycle.Rule.Condition) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+         * Age of an object (in days). This condition is satisfied when an
+         * object reaches the specified age.
+         * A value of 0 indicates that all objects immediately match this
+         * condition.
+         * 
+ * + * optional int32 age_days = 1; + * + * @return Whether the ageDays field is set. + */ + boolean hasAgeDays(); + + /** + * + * + *
+         * Age of an object (in days). This condition is satisfied when an
+         * object reaches the specified age.
+         * A value of 0 indicates that all objects immediately match this
+         * condition.
+         * 
+ * + * optional int32 age_days = 1; + * + * @return The ageDays. + */ + int getAgeDays(); + + /** + * + * + *
+         * Optional. This condition is satisfied when an object is created
+         * before midnight of the specified date in UTC.
+         * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the createdBefore field is set. + */ + boolean hasCreatedBefore(); + + /** + * + * + *
+         * Optional. This condition is satisfied when an object is created
+         * before midnight of the specified date in UTC.
+         * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The createdBefore. + */ + com.google.type.Date getCreatedBefore(); + + /** + * + * + *
+         * Optional. This condition is satisfied when an object is created
+         * before midnight of the specified date in UTC.
+         * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.type.DateOrBuilder getCreatedBeforeOrBuilder(); + + /** + * + * + *
+         * Relevant only for versioned objects. If the value is
+         * `true`, this condition matches live objects; if the value
+         * is `false`, it matches archived objects.
+         * 
+ * + * optional bool is_live = 3; + * + * @return Whether the isLive field is set. + */ + boolean hasIsLive(); + + /** + * + * + *
+         * Relevant only for versioned objects. If the value is
+         * `true`, this condition matches live objects; if the value
+         * is `false`, it matches archived objects.
+         * 
+ * + * optional bool is_live = 3; + * + * @return The isLive. + */ + boolean getIsLive(); + + /** + * + * + *
+         * Relevant only for versioned objects. If the value is N, this
+         * condition is satisfied when there are at least N versions (including
+         * the live version) newer than this version of the object.
+         * 
+ * + * optional int32 num_newer_versions = 4; + * + * @return Whether the numNewerVersions field is set. + */ + boolean hasNumNewerVersions(); + + /** + * + * + *
+         * Relevant only for versioned objects. If the value is N, this
+         * condition is satisfied when there are at least N versions (including
+         * the live version) newer than this version of the object.
+         * 
+ * + * optional int32 num_newer_versions = 4; + * + * @return The numNewerVersions. + */ + int getNumNewerVersions(); + + /** + * + * + *
+         * Optional. Objects having any of the storage classes specified by this
+         * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+         * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+         * `DURABLE_REDUCED_AVAILABILITY`.
+         * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the matchesStorageClass. + */ + java.util.List getMatchesStorageClassList(); + + /** + * + * + *
+         * Optional. Objects having any of the storage classes specified by this
+         * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+         * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+         * `DURABLE_REDUCED_AVAILABILITY`.
+         * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of matchesStorageClass. + */ + int getMatchesStorageClassCount(); + + /** + * + * + *
+         * Optional. Objects having any of the storage classes specified by this
+         * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+         * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+         * `DURABLE_REDUCED_AVAILABILITY`.
+         * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The matchesStorageClass at the given index. + */ + java.lang.String getMatchesStorageClass(int index); + + /** + * + * + *
+         * Optional. Objects having any of the storage classes specified by this
+         * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+         * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+         * `DURABLE_REDUCED_AVAILABILITY`.
+         * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the matchesStorageClass at the given index. + */ + com.google.protobuf.ByteString getMatchesStorageClassBytes(int index); + + /** + * + * + *
+         * Number of days that have elapsed since the custom timestamp set on an
+         * object.
+         * The value of the field must be a nonnegative integer.
+         * 
+ * + * optional int32 days_since_custom_time = 7; + * + * @return Whether the daysSinceCustomTime field is set. + */ + boolean hasDaysSinceCustomTime(); + + /** + * + * + *
+         * Number of days that have elapsed since the custom timestamp set on an
+         * object.
+         * The value of the field must be a nonnegative integer.
+         * 
+ * + * optional int32 days_since_custom_time = 7; + * + * @return The daysSinceCustomTime. + */ + int getDaysSinceCustomTime(); + + /** + * + * + *
+         * Optional. An object matches this condition if the custom timestamp
+         * set on the object is before the specified date in UTC.
+         * 
+ * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customTimeBefore field is set. + */ + boolean hasCustomTimeBefore(); + + /** + * + * + *
+         * Optional. An object matches this condition if the custom timestamp
+         * set on the object is before the specified date in UTC.
+         * 
+ * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customTimeBefore. + */ + com.google.type.Date getCustomTimeBefore(); + + /** + * + * + *
+         * Optional. An object matches this condition if the custom timestamp
+         * set on the object is before the specified date in UTC.
+         * 
+ * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.type.DateOrBuilder getCustomTimeBeforeOrBuilder(); + + /** + * + * + *
+         * This condition is relevant only for versioned objects. An object
+         * version satisfies this condition only if these many days have been
+         * passed since it became noncurrent. The value of the field must be a
+         * nonnegative integer. If it's zero, the object version becomes
+         * eligible for Lifecycle action as soon as it becomes noncurrent.
+         * 
+ * + * optional int32 days_since_noncurrent_time = 9; + * + * @return Whether the daysSinceNoncurrentTime field is set. + */ + boolean hasDaysSinceNoncurrentTime(); + + /** + * + * + *
+         * This condition is relevant only for versioned objects. An object
+         * version satisfies this condition only if these many days have been
+         * passed since it became noncurrent. The value of the field must be a
+         * nonnegative integer. If it's zero, the object version becomes
+         * eligible for Lifecycle action as soon as it becomes noncurrent.
+         * 
+ * + * optional int32 days_since_noncurrent_time = 9; + * + * @return The daysSinceNoncurrentTime. + */ + int getDaysSinceNoncurrentTime(); + + /** + * + * + *
+         * Optional. This condition is relevant only for versioned objects. An
+         * object version satisfies this condition only if it became noncurrent
+         * before the specified date in UTC.
+         * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the noncurrentTimeBefore field is set. + */ + boolean hasNoncurrentTimeBefore(); + + /** + * + * + *
+         * Optional. This condition is relevant only for versioned objects. An
+         * object version satisfies this condition only if it became noncurrent
+         * before the specified date in UTC.
+         * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The noncurrentTimeBefore. + */ + com.google.type.Date getNoncurrentTimeBefore(); + + /** + * + * + *
+         * Optional. This condition is relevant only for versioned objects. An
+         * object version satisfies this condition only if it became noncurrent
+         * before the specified date in UTC.
+         * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.type.DateOrBuilder getNoncurrentTimeBeforeOrBuilder(); + + /** + * + * + *
+         * Optional. List of object name prefixes. If any prefix exactly matches
+         * the beginning of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the matchesPrefix. + */ + java.util.List getMatchesPrefixList(); + + /** + * + * + *
+         * Optional. List of object name prefixes. If any prefix exactly matches
+         * the beginning of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of matchesPrefix. + */ + int getMatchesPrefixCount(); + + /** + * + * + *
+         * Optional. List of object name prefixes. If any prefix exactly matches
+         * the beginning of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The matchesPrefix at the given index. + */ + java.lang.String getMatchesPrefix(int index); + + /** + * + * + *
+         * Optional. List of object name prefixes. If any prefix exactly matches
+         * the beginning of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the matchesPrefix at the given index. + */ + com.google.protobuf.ByteString getMatchesPrefixBytes(int index); + + /** + * + * + *
+         * Optional. List of object name suffixes. If any suffix exactly matches
+         * the end of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the matchesSuffix. + */ + java.util.List getMatchesSuffixList(); + + /** + * + * + *
+         * Optional. List of object name suffixes. If any suffix exactly matches
+         * the end of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of matchesSuffix. + */ + int getMatchesSuffixCount(); + + /** + * + * + *
+         * Optional. List of object name suffixes. If any suffix exactly matches
+         * the end of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The matchesSuffix at the given index. + */ + java.lang.String getMatchesSuffix(int index); + + /** + * + * + *
+         * Optional. List of object name suffixes. If any suffix exactly matches
+         * the end of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the matchesSuffix at the given index. + */ + com.google.protobuf.ByteString getMatchesSuffixBytes(int index); + } + + /** + * + * + *
+       * A condition of an object which triggers some action.
+       * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Lifecycle.Rule.Condition} + */ + public static final class Condition extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Lifecycle.Rule.Condition) + ConditionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Condition"); + } + + // Use Condition.newBuilder() to construct. + private Condition(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Condition() { + matchesStorageClass_ = com.google.protobuf.LazyStringArrayList.emptyList(); + matchesPrefix_ = com.google.protobuf.LazyStringArrayList.emptyList(); + matchesSuffix_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Condition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Condition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.class, + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.Builder.class); + } + + private int bitField0_; + public static final int AGE_DAYS_FIELD_NUMBER = 1; + private int ageDays_ = 0; + + /** + * + * + *
+         * Age of an object (in days). This condition is satisfied when an
+         * object reaches the specified age.
+         * A value of 0 indicates that all objects immediately match this
+         * condition.
+         * 
+ * + * optional int32 age_days = 1; + * + * @return Whether the ageDays field is set. + */ + @java.lang.Override + public boolean hasAgeDays() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+         * Age of an object (in days). This condition is satisfied when an
+         * object reaches the specified age.
+         * A value of 0 indicates that all objects immediately match this
+         * condition.
+         * 
+ * + * optional int32 age_days = 1; + * + * @return The ageDays. + */ + @java.lang.Override + public int getAgeDays() { + return ageDays_; + } + + public static final int CREATED_BEFORE_FIELD_NUMBER = 2; + private com.google.type.Date createdBefore_; + + /** + * + * + *
+         * Optional. This condition is satisfied when an object is created
+         * before midnight of the specified date in UTC.
+         * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the createdBefore field is set. + */ + @java.lang.Override + public boolean hasCreatedBefore() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+         * Optional. This condition is satisfied when an object is created
+         * before midnight of the specified date in UTC.
+         * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The createdBefore. + */ + @java.lang.Override + public com.google.type.Date getCreatedBefore() { + return createdBefore_ == null + ? com.google.type.Date.getDefaultInstance() + : createdBefore_; + } + + /** + * + * + *
+         * Optional. This condition is satisfied when an object is created
+         * before midnight of the specified date in UTC.
+         * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.type.DateOrBuilder getCreatedBeforeOrBuilder() { + return createdBefore_ == null + ? com.google.type.Date.getDefaultInstance() + : createdBefore_; + } + + public static final int IS_LIVE_FIELD_NUMBER = 3; + private boolean isLive_ = false; + + /** + * + * + *
+         * Relevant only for versioned objects. If the value is
+         * `true`, this condition matches live objects; if the value
+         * is `false`, it matches archived objects.
+         * 
+ * + * optional bool is_live = 3; + * + * @return Whether the isLive field is set. + */ + @java.lang.Override + public boolean hasIsLive() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+         * Relevant only for versioned objects. If the value is
+         * `true`, this condition matches live objects; if the value
+         * is `false`, it matches archived objects.
+         * 
+ * + * optional bool is_live = 3; + * + * @return The isLive. + */ + @java.lang.Override + public boolean getIsLive() { + return isLive_; + } + + public static final int NUM_NEWER_VERSIONS_FIELD_NUMBER = 4; + private int numNewerVersions_ = 0; + + /** + * + * + *
+         * Relevant only for versioned objects. If the value is N, this
+         * condition is satisfied when there are at least N versions (including
+         * the live version) newer than this version of the object.
+         * 
+ * + * optional int32 num_newer_versions = 4; + * + * @return Whether the numNewerVersions field is set. + */ + @java.lang.Override + public boolean hasNumNewerVersions() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+         * Relevant only for versioned objects. If the value is N, this
+         * condition is satisfied when there are at least N versions (including
+         * the live version) newer than this version of the object.
+         * 
+ * + * optional int32 num_newer_versions = 4; + * + * @return The numNewerVersions. + */ + @java.lang.Override + public int getNumNewerVersions() { + return numNewerVersions_; + } + + public static final int MATCHES_STORAGE_CLASS_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList matchesStorageClass_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+         * Optional. Objects having any of the storage classes specified by this
+         * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+         * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+         * `DURABLE_REDUCED_AVAILABILITY`.
+         * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the matchesStorageClass. + */ + public com.google.protobuf.ProtocolStringList getMatchesStorageClassList() { + return matchesStorageClass_; + } + + /** + * + * + *
+         * Optional. Objects having any of the storage classes specified by this
+         * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+         * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+         * `DURABLE_REDUCED_AVAILABILITY`.
+         * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of matchesStorageClass. + */ + public int getMatchesStorageClassCount() { + return matchesStorageClass_.size(); + } + + /** + * + * + *
+         * Optional. Objects having any of the storage classes specified by this
+         * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+         * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+         * `DURABLE_REDUCED_AVAILABILITY`.
+         * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The matchesStorageClass at the given index. + */ + public java.lang.String getMatchesStorageClass(int index) { + return matchesStorageClass_.get(index); + } + + /** + * + * + *
+         * Optional. Objects having any of the storage classes specified by this
+         * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+         * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+         * `DURABLE_REDUCED_AVAILABILITY`.
+         * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the matchesStorageClass at the given index. + */ + public com.google.protobuf.ByteString getMatchesStorageClassBytes(int index) { + return matchesStorageClass_.getByteString(index); + } + + public static final int DAYS_SINCE_CUSTOM_TIME_FIELD_NUMBER = 7; + private int daysSinceCustomTime_ = 0; + + /** + * + * + *
+         * Number of days that have elapsed since the custom timestamp set on an
+         * object.
+         * The value of the field must be a nonnegative integer.
+         * 
+ * + * optional int32 days_since_custom_time = 7; + * + * @return Whether the daysSinceCustomTime field is set. + */ + @java.lang.Override + public boolean hasDaysSinceCustomTime() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+         * Number of days that have elapsed since the custom timestamp set on an
+         * object.
+         * The value of the field must be a nonnegative integer.
+         * 
+ * + * optional int32 days_since_custom_time = 7; + * + * @return The daysSinceCustomTime. + */ + @java.lang.Override + public int getDaysSinceCustomTime() { + return daysSinceCustomTime_; + } + + public static final int CUSTOM_TIME_BEFORE_FIELD_NUMBER = 8; + private com.google.type.Date customTimeBefore_; + + /** + * + * + *
+         * Optional. An object matches this condition if the custom timestamp
+         * set on the object is before the specified date in UTC.
+         * 
+ * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customTimeBefore field is set. + */ + @java.lang.Override + public boolean hasCustomTimeBefore() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+         * Optional. An object matches this condition if the custom timestamp
+         * set on the object is before the specified date in UTC.
+         * 
+ * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customTimeBefore. + */ + @java.lang.Override + public com.google.type.Date getCustomTimeBefore() { + return customTimeBefore_ == null + ? com.google.type.Date.getDefaultInstance() + : customTimeBefore_; + } + + /** + * + * + *
+         * Optional. An object matches this condition if the custom timestamp
+         * set on the object is before the specified date in UTC.
+         * 
+ * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.type.DateOrBuilder getCustomTimeBeforeOrBuilder() { + return customTimeBefore_ == null + ? com.google.type.Date.getDefaultInstance() + : customTimeBefore_; + } + + public static final int DAYS_SINCE_NONCURRENT_TIME_FIELD_NUMBER = 9; + private int daysSinceNoncurrentTime_ = 0; + + /** + * + * + *
+         * This condition is relevant only for versioned objects. An object
+         * version satisfies this condition only if these many days have been
+         * passed since it became noncurrent. The value of the field must be a
+         * nonnegative integer. If it's zero, the object version becomes
+         * eligible for Lifecycle action as soon as it becomes noncurrent.
+         * 
+ * + * optional int32 days_since_noncurrent_time = 9; + * + * @return Whether the daysSinceNoncurrentTime field is set. + */ + @java.lang.Override + public boolean hasDaysSinceNoncurrentTime() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+         * This condition is relevant only for versioned objects. An object
+         * version satisfies this condition only if these many days have been
+         * passed since it became noncurrent. The value of the field must be a
+         * nonnegative integer. If it's zero, the object version becomes
+         * eligible for Lifecycle action as soon as it becomes noncurrent.
+         * 
+ * + * optional int32 days_since_noncurrent_time = 9; + * + * @return The daysSinceNoncurrentTime. + */ + @java.lang.Override + public int getDaysSinceNoncurrentTime() { + return daysSinceNoncurrentTime_; + } + + public static final int NONCURRENT_TIME_BEFORE_FIELD_NUMBER = 10; + private com.google.type.Date noncurrentTimeBefore_; + + /** + * + * + *
+         * Optional. This condition is relevant only for versioned objects. An
+         * object version satisfies this condition only if it became noncurrent
+         * before the specified date in UTC.
+         * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the noncurrentTimeBefore field is set. + */ + @java.lang.Override + public boolean hasNoncurrentTimeBefore() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+         * Optional. This condition is relevant only for versioned objects. An
+         * object version satisfies this condition only if it became noncurrent
+         * before the specified date in UTC.
+         * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The noncurrentTimeBefore. + */ + @java.lang.Override + public com.google.type.Date getNoncurrentTimeBefore() { + return noncurrentTimeBefore_ == null + ? com.google.type.Date.getDefaultInstance() + : noncurrentTimeBefore_; + } + + /** + * + * + *
+         * Optional. This condition is relevant only for versioned objects. An
+         * object version satisfies this condition only if it became noncurrent
+         * before the specified date in UTC.
+         * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.type.DateOrBuilder getNoncurrentTimeBeforeOrBuilder() { + return noncurrentTimeBefore_ == null + ? com.google.type.Date.getDefaultInstance() + : noncurrentTimeBefore_; + } + + public static final int MATCHES_PREFIX_FIELD_NUMBER = 11; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList matchesPrefix_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+         * Optional. List of object name prefixes. If any prefix exactly matches
+         * the beginning of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the matchesPrefix. + */ + public com.google.protobuf.ProtocolStringList getMatchesPrefixList() { + return matchesPrefix_; + } + + /** + * + * + *
+         * Optional. List of object name prefixes. If any prefix exactly matches
+         * the beginning of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of matchesPrefix. + */ + public int getMatchesPrefixCount() { + return matchesPrefix_.size(); + } + + /** + * + * + *
+         * Optional. List of object name prefixes. If any prefix exactly matches
+         * the beginning of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The matchesPrefix at the given index. + */ + public java.lang.String getMatchesPrefix(int index) { + return matchesPrefix_.get(index); + } + + /** + * + * + *
+         * Optional. List of object name prefixes. If any prefix exactly matches
+         * the beginning of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the matchesPrefix at the given index. + */ + public com.google.protobuf.ByteString getMatchesPrefixBytes(int index) { + return matchesPrefix_.getByteString(index); + } + + public static final int MATCHES_SUFFIX_FIELD_NUMBER = 12; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList matchesSuffix_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+         * Optional. List of object name suffixes. If any suffix exactly matches
+         * the end of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the matchesSuffix. + */ + public com.google.protobuf.ProtocolStringList getMatchesSuffixList() { + return matchesSuffix_; + } + + /** + * + * + *
+         * Optional. List of object name suffixes. If any suffix exactly matches
+         * the end of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of matchesSuffix. + */ + public int getMatchesSuffixCount() { + return matchesSuffix_.size(); + } + + /** + * + * + *
+         * Optional. List of object name suffixes. If any suffix exactly matches
+         * the end of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The matchesSuffix at the given index. + */ + public java.lang.String getMatchesSuffix(int index) { + return matchesSuffix_.get(index); + } + + /** + * + * + *
+         * Optional. List of object name suffixes. If any suffix exactly matches
+         * the end of the object name, the condition evaluates to true.
+         * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the matchesSuffix at the given index. + */ + public com.google.protobuf.ByteString getMatchesSuffixBytes(int index) { + return matchesSuffix_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt32(1, ageDays_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getCreatedBefore()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeBool(3, isLive_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeInt32(4, numNewerVersions_); + } + for (int i = 0; i < matchesStorageClass_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString( + output, 5, matchesStorageClass_.getRaw(i)); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeInt32(7, daysSinceCustomTime_); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeMessage(8, getCustomTimeBefore()); + } + if (((bitField0_ & 0x00000040) != 0)) { + output.writeInt32(9, daysSinceNoncurrentTime_); + } + if (((bitField0_ & 0x00000080) != 0)) { + output.writeMessage(10, getNoncurrentTimeBefore()); + } + for (int i = 0; i < matchesPrefix_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 11, matchesPrefix_.getRaw(i)); + } + for (int i = 0; i < matchesSuffix_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 12, matchesSuffix_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int 
getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, ageDays_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getCreatedBefore()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, isLive_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, numNewerVersions_); + } + { + int dataSize = 0; + for (int i = 0; i < matchesStorageClass_.size(); i++) { + dataSize += computeStringSizeNoTag(matchesStorageClass_.getRaw(i)); + } + size += dataSize; + size += 1 * getMatchesStorageClassList().size(); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(7, daysSinceCustomTime_); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(8, getCustomTimeBefore()); + } + if (((bitField0_ & 0x00000040) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size(9, daysSinceNoncurrentTime_); + } + if (((bitField0_ & 0x00000080) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 10, getNoncurrentTimeBefore()); + } + { + int dataSize = 0; + for (int i = 0; i < matchesPrefix_.size(); i++) { + dataSize += computeStringSizeNoTag(matchesPrefix_.getRaw(i)); + } + size += dataSize; + size += 1 * getMatchesPrefixList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < matchesSuffix_.size(); i++) { + dataSize += computeStringSizeNoTag(matchesSuffix_.getRaw(i)); + } + size += dataSize; + size += 1 * getMatchesSuffixList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final 
java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.Lifecycle.Rule.Condition)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition other = + (com.google.storage.v2.Bucket.Lifecycle.Rule.Condition) obj; + + if (hasAgeDays() != other.hasAgeDays()) return false; + if (hasAgeDays()) { + if (getAgeDays() != other.getAgeDays()) return false; + } + if (hasCreatedBefore() != other.hasCreatedBefore()) return false; + if (hasCreatedBefore()) { + if (!getCreatedBefore().equals(other.getCreatedBefore())) return false; + } + if (hasIsLive() != other.hasIsLive()) return false; + if (hasIsLive()) { + if (getIsLive() != other.getIsLive()) return false; + } + if (hasNumNewerVersions() != other.hasNumNewerVersions()) return false; + if (hasNumNewerVersions()) { + if (getNumNewerVersions() != other.getNumNewerVersions()) return false; + } + if (!getMatchesStorageClassList().equals(other.getMatchesStorageClassList())) + return false; + if (hasDaysSinceCustomTime() != other.hasDaysSinceCustomTime()) return false; + if (hasDaysSinceCustomTime()) { + if (getDaysSinceCustomTime() != other.getDaysSinceCustomTime()) return false; + } + if (hasCustomTimeBefore() != other.hasCustomTimeBefore()) return false; + if (hasCustomTimeBefore()) { + if (!getCustomTimeBefore().equals(other.getCustomTimeBefore())) return false; + } + if (hasDaysSinceNoncurrentTime() != other.hasDaysSinceNoncurrentTime()) return false; + if (hasDaysSinceNoncurrentTime()) { + if (getDaysSinceNoncurrentTime() != other.getDaysSinceNoncurrentTime()) return false; + } + if (hasNoncurrentTimeBefore() != other.hasNoncurrentTimeBefore()) return false; + if (hasNoncurrentTimeBefore()) { + if (!getNoncurrentTimeBefore().equals(other.getNoncurrentTimeBefore())) return false; + } + if (!getMatchesPrefixList().equals(other.getMatchesPrefixList())) return false; + if (!getMatchesSuffixList().equals(other.getMatchesSuffixList())) 
return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasAgeDays()) { + hash = (37 * hash) + AGE_DAYS_FIELD_NUMBER; + hash = (53 * hash) + getAgeDays(); + } + if (hasCreatedBefore()) { + hash = (37 * hash) + CREATED_BEFORE_FIELD_NUMBER; + hash = (53 * hash) + getCreatedBefore().hashCode(); + } + if (hasIsLive()) { + hash = (37 * hash) + IS_LIVE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIsLive()); + } + if (hasNumNewerVersions()) { + hash = (37 * hash) + NUM_NEWER_VERSIONS_FIELD_NUMBER; + hash = (53 * hash) + getNumNewerVersions(); + } + if (getMatchesStorageClassCount() > 0) { + hash = (37 * hash) + MATCHES_STORAGE_CLASS_FIELD_NUMBER; + hash = (53 * hash) + getMatchesStorageClassList().hashCode(); + } + if (hasDaysSinceCustomTime()) { + hash = (37 * hash) + DAYS_SINCE_CUSTOM_TIME_FIELD_NUMBER; + hash = (53 * hash) + getDaysSinceCustomTime(); + } + if (hasCustomTimeBefore()) { + hash = (37 * hash) + CUSTOM_TIME_BEFORE_FIELD_NUMBER; + hash = (53 * hash) + getCustomTimeBefore().hashCode(); + } + if (hasDaysSinceNoncurrentTime()) { + hash = (37 * hash) + DAYS_SINCE_NONCURRENT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getDaysSinceNoncurrentTime(); + } + if (hasNoncurrentTimeBefore()) { + hash = (37 * hash) + NONCURRENT_TIME_BEFORE_FIELD_NUMBER; + hash = (53 * hash) + getNoncurrentTimeBefore().hashCode(); + } + if (getMatchesPrefixCount() > 0) { + hash = (37 * hash) + MATCHES_PREFIX_FIELD_NUMBER; + hash = (53 * hash) + getMatchesPrefixList().hashCode(); + } + if (getMatchesSuffixCount() > 0) { + hash = (37 * hash) + MATCHES_SUFFIX_FIELD_NUMBER; + hash = (53 * hash) + getMatchesSuffixList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return 
hash; + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Condition parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Condition parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Condition parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Condition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Condition parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Condition parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Condition parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Condition parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Condition parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Condition parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Condition parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Condition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+         * A condition of an object which triggers some action.
+         * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Lifecycle.Rule.Condition} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Lifecycle.Rule.Condition) + com.google.storage.v2.Bucket.Lifecycle.Rule.ConditionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Condition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Condition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.class, + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCreatedBeforeFieldBuilder(); + internalGetCustomTimeBeforeFieldBuilder(); + internalGetNoncurrentTimeBeforeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + ageDays_ = 0; + createdBefore_ = null; + if (createdBeforeBuilder_ != null) { + createdBeforeBuilder_.dispose(); + createdBeforeBuilder_ = null; + } + isLive_ = false; + numNewerVersions_ = 0; + matchesStorageClass_ = com.google.protobuf.LazyStringArrayList.emptyList(); + daysSinceCustomTime_ = 0; + customTimeBefore_ = null; + if 
(customTimeBeforeBuilder_ != null) { + customTimeBeforeBuilder_.dispose(); + customTimeBeforeBuilder_ = null; + } + daysSinceNoncurrentTime_ = 0; + noncurrentTimeBefore_ = null; + if (noncurrentTimeBeforeBuilder_ != null) { + noncurrentTimeBeforeBuilder_.dispose(); + noncurrentTimeBeforeBuilder_ = null; + } + matchesPrefix_ = com.google.protobuf.LazyStringArrayList.emptyList(); + matchesSuffix_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Condition_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule.Condition getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule.Condition build() { + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule.Condition buildPartial() { + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition result = + new com.google.storage.v2.Bucket.Lifecycle.Rule.Condition(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.Lifecycle.Rule.Condition result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.ageDays_ = ageDays_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.createdBefore_ = + createdBeforeBuilder_ == null ? 
createdBefore_ : createdBeforeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.isLive_ = isLive_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.numNewerVersions_ = numNewerVersions_; + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + matchesStorageClass_.makeImmutable(); + result.matchesStorageClass_ = matchesStorageClass_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.daysSinceCustomTime_ = daysSinceCustomTime_; + to_bitField0_ |= 0x00000010; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.customTimeBefore_ = + customTimeBeforeBuilder_ == null + ? customTimeBefore_ + : customTimeBeforeBuilder_.build(); + to_bitField0_ |= 0x00000020; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.daysSinceNoncurrentTime_ = daysSinceNoncurrentTime_; + to_bitField0_ |= 0x00000040; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.noncurrentTimeBefore_ = + noncurrentTimeBeforeBuilder_ == null + ? 
noncurrentTimeBefore_ + : noncurrentTimeBeforeBuilder_.build(); + to_bitField0_ |= 0x00000080; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + matchesPrefix_.makeImmutable(); + result.matchesPrefix_ = matchesPrefix_; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + matchesSuffix_.makeImmutable(); + result.matchesSuffix_ = matchesSuffix_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.Lifecycle.Rule.Condition) { + return mergeFrom((com.google.storage.v2.Bucket.Lifecycle.Rule.Condition) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.Lifecycle.Rule.Condition other) { + if (other == com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.getDefaultInstance()) + return this; + if (other.hasAgeDays()) { + setAgeDays(other.getAgeDays()); + } + if (other.hasCreatedBefore()) { + mergeCreatedBefore(other.getCreatedBefore()); + } + if (other.hasIsLive()) { + setIsLive(other.getIsLive()); + } + if (other.hasNumNewerVersions()) { + setNumNewerVersions(other.getNumNewerVersions()); + } + if (!other.matchesStorageClass_.isEmpty()) { + if (matchesStorageClass_.isEmpty()) { + matchesStorageClass_ = other.matchesStorageClass_; + bitField0_ |= 0x00000010; + } else { + ensureMatchesStorageClassIsMutable(); + matchesStorageClass_.addAll(other.matchesStorageClass_); + } + onChanged(); + } + if (other.hasDaysSinceCustomTime()) { + setDaysSinceCustomTime(other.getDaysSinceCustomTime()); + } + if (other.hasCustomTimeBefore()) { + mergeCustomTimeBefore(other.getCustomTimeBefore()); + } + if (other.hasDaysSinceNoncurrentTime()) { + setDaysSinceNoncurrentTime(other.getDaysSinceNoncurrentTime()); + } + if (other.hasNoncurrentTimeBefore()) { + mergeNoncurrentTimeBefore(other.getNoncurrentTimeBefore()); + } + if (!other.matchesPrefix_.isEmpty()) { + if 
(matchesPrefix_.isEmpty()) { + matchesPrefix_ = other.matchesPrefix_; + bitField0_ |= 0x00000200; + } else { + ensureMatchesPrefixIsMutable(); + matchesPrefix_.addAll(other.matchesPrefix_); + } + onChanged(); + } + if (!other.matchesSuffix_.isEmpty()) { + if (matchesSuffix_.isEmpty()) { + matchesSuffix_ = other.matchesSuffix_; + bitField0_ |= 0x00000400; + } else { + ensureMatchesSuffixIsMutable(); + matchesSuffix_.addAll(other.matchesSuffix_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + ageDays_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + input.readMessage( + internalGetCreatedBeforeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + isLive_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: + { + numNewerVersions_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 42: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureMatchesStorageClassIsMutable(); + matchesStorageClass_.add(s); + break; + } // case 42 + case 56: + { + daysSinceCustomTime_ = input.readInt32(); + bitField0_ |= 0x00000020; + break; + } // case 56 + case 66: + { + input.readMessage( + internalGetCustomTimeBeforeFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 66 + case 72: + { + daysSinceNoncurrentTime_ 
= input.readInt32(); + bitField0_ |= 0x00000080; + break; + } // case 72 + case 82: + { + input.readMessage( + internalGetNoncurrentTimeBeforeFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000100; + break; + } // case 82 + case 90: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureMatchesPrefixIsMutable(); + matchesPrefix_.add(s); + break; + } // case 90 + case 98: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureMatchesSuffixIsMutable(); + matchesSuffix_.add(s); + break; + } // case 98 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int ageDays_; + + /** + * + * + *
+           * Age of an object (in days). This condition is satisfied when an
+           * object reaches the specified age.
+           * A value of 0 indicates that all objects immediately match this
+           * condition.
+           * 
+ * + * optional int32 age_days = 1; + * + * @return Whether the ageDays field is set. + */ + @java.lang.Override + public boolean hasAgeDays() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+           * Age of an object (in days). This condition is satisfied when an
+           * object reaches the specified age.
+           * A value of 0 indicates that all objects immediately match this
+           * condition.
+           * 
+ * + * optional int32 age_days = 1; + * + * @return The ageDays. + */ + @java.lang.Override + public int getAgeDays() { + return ageDays_; + } + + /** + * + * + *
+           * Age of an object (in days). This condition is satisfied when an
+           * object reaches the specified age.
+           * A value of 0 indicates that all objects immediately match this
+           * condition.
+           * 
+ * + * optional int32 age_days = 1; + * + * @param value The ageDays to set. + * @return This builder for chaining. + */ + public Builder setAgeDays(int value) { + + ageDays_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+           * Age of an object (in days). This condition is satisfied when an
+           * object reaches the specified age.
+           * A value of 0 indicates that all objects immediately match this
+           * condition.
+           * 
+ * + * optional int32 age_days = 1; + * + * @return This builder for chaining. + */ + public Builder clearAgeDays() { + bitField0_ = (bitField0_ & ~0x00000001); + ageDays_ = 0; + onChanged(); + return this; + } + + private com.google.type.Date createdBefore_; + private com.google.protobuf.SingleFieldBuilder< + com.google.type.Date, com.google.type.Date.Builder, com.google.type.DateOrBuilder> + createdBeforeBuilder_; + + /** + * + * + *
+           * Optional. This condition is satisfied when an object is created
+           * before midnight of the specified date in UTC.
+           * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the createdBefore field is set. + */ + public boolean hasCreatedBefore() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+           * Optional. This condition is satisfied when an object is created
+           * before midnight of the specified date in UTC.
+           * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The createdBefore. + */ + public com.google.type.Date getCreatedBefore() { + if (createdBeforeBuilder_ == null) { + return createdBefore_ == null + ? com.google.type.Date.getDefaultInstance() + : createdBefore_; + } else { + return createdBeforeBuilder_.getMessage(); + } + } + + /** + * + * + *
+           * Optional. This condition is satisfied when an object is created
+           * before midnight of the specified date in UTC.
+           * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCreatedBefore(com.google.type.Date value) { + if (createdBeforeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createdBefore_ = value; + } else { + createdBeforeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. This condition is satisfied when an object is created
+           * before midnight of the specified date in UTC.
+           * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCreatedBefore(com.google.type.Date.Builder builderForValue) { + if (createdBeforeBuilder_ == null) { + createdBefore_ = builderForValue.build(); + } else { + createdBeforeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. This condition is satisfied when an object is created
+           * before midnight of the specified date in UTC.
+           * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCreatedBefore(com.google.type.Date value) { + if (createdBeforeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && createdBefore_ != null + && createdBefore_ != com.google.type.Date.getDefaultInstance()) { + getCreatedBeforeBuilder().mergeFrom(value); + } else { + createdBefore_ = value; + } + } else { + createdBeforeBuilder_.mergeFrom(value); + } + if (createdBefore_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+           * Optional. This condition is satisfied when an object is created
+           * before midnight of the specified date in UTC.
+           * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCreatedBefore() { + bitField0_ = (bitField0_ & ~0x00000002); + createdBefore_ = null; + if (createdBeforeBuilder_ != null) { + createdBeforeBuilder_.dispose(); + createdBeforeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. This condition is satisfied when an object is created
+           * before midnight of the specified date in UTC.
+           * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.type.Date.Builder getCreatedBeforeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetCreatedBeforeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+           * Optional. This condition is satisfied when an object is created
+           * before midnight of the specified date in UTC.
+           * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.type.DateOrBuilder getCreatedBeforeOrBuilder() { + if (createdBeforeBuilder_ != null) { + return createdBeforeBuilder_.getMessageOrBuilder(); + } else { + return createdBefore_ == null + ? com.google.type.Date.getDefaultInstance() + : createdBefore_; + } + } + + /** + * + * + *
+           * Optional. This condition is satisfied when an object is created
+           * before midnight of the specified date in UTC.
+           * 
+ * + * .google.type.Date created_before = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.type.Date, com.google.type.Date.Builder, com.google.type.DateOrBuilder> + internalGetCreatedBeforeFieldBuilder() { + if (createdBeforeBuilder_ == null) { + createdBeforeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.type.Date, + com.google.type.Date.Builder, + com.google.type.DateOrBuilder>( + getCreatedBefore(), getParentForChildren(), isClean()); + createdBefore_ = null; + } + return createdBeforeBuilder_; + } + + private boolean isLive_; + + /** + * + * + *
+           * Relevant only for versioned objects. If the value is
+           * `true`, this condition matches live objects; if the value
+           * is `false`, it matches archived objects.
+           * 
+ * + * optional bool is_live = 3; + * + * @return Whether the isLive field is set. + */ + @java.lang.Override + public boolean hasIsLive() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+           * Relevant only for versioned objects. If the value is
+           * `true`, this condition matches live objects; if the value
+           * is `false`, it matches archived objects.
+           * 
+ * + * optional bool is_live = 3; + * + * @return The isLive. + */ + @java.lang.Override + public boolean getIsLive() { + return isLive_; + } + + /** + * + * + *
+           * Relevant only for versioned objects. If the value is
+           * `true`, this condition matches live objects; if the value
+           * is `false`, it matches archived objects.
+           * 
+ * + * optional bool is_live = 3; + * + * @param value The isLive to set. + * @return This builder for chaining. + */ + public Builder setIsLive(boolean value) { + + isLive_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+           * Relevant only for versioned objects. If the value is
+           * `true`, this condition matches live objects; if the value
+           * is `false`, it matches archived objects.
+           * 
+ * + * optional bool is_live = 3; + * + * @return This builder for chaining. + */ + public Builder clearIsLive() { + bitField0_ = (bitField0_ & ~0x00000004); + isLive_ = false; + onChanged(); + return this; + } + + private int numNewerVersions_; + + /** + * + * + *
+           * Relevant only for versioned objects. If the value is N, this
+           * condition is satisfied when there are at least N versions (including
+           * the live version) newer than this version of the object.
+           * 
+ * + * optional int32 num_newer_versions = 4; + * + * @return Whether the numNewerVersions field is set. + */ + @java.lang.Override + public boolean hasNumNewerVersions() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+           * Relevant only for versioned objects. If the value is N, this
+           * condition is satisfied when there are at least N versions (including
+           * the live version) newer than this version of the object.
+           * 
+ * + * optional int32 num_newer_versions = 4; + * + * @return The numNewerVersions. + */ + @java.lang.Override + public int getNumNewerVersions() { + return numNewerVersions_; + } + + /** + * + * + *
+           * Relevant only for versioned objects. If the value is N, this
+           * condition is satisfied when there are at least N versions (including
+           * the live version) newer than this version of the object.
+           * 
+ * + * optional int32 num_newer_versions = 4; + * + * @param value The numNewerVersions to set. + * @return This builder for chaining. + */ + public Builder setNumNewerVersions(int value) { + + numNewerVersions_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+           * Relevant only for versioned objects. If the value is N, this
+           * condition is satisfied when there are at least N versions (including
+           * the live version) newer than this version of the object.
+           * 
+ * + * optional int32 num_newer_versions = 4; + * + * @return This builder for chaining. + */ + public Builder clearNumNewerVersions() { + bitField0_ = (bitField0_ & ~0x00000008); + numNewerVersions_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList matchesStorageClass_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureMatchesStorageClassIsMutable() { + if (!matchesStorageClass_.isModifiable()) { + matchesStorageClass_ = + new com.google.protobuf.LazyStringArrayList(matchesStorageClass_); + } + bitField0_ |= 0x00000010; + } + + /** + * + * + *
+           * Optional. Objects having any of the storage classes specified by this
+           * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+           * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+           * `DURABLE_REDUCED_AVAILABILITY`.
+           * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the matchesStorageClass. + */ + public com.google.protobuf.ProtocolStringList getMatchesStorageClassList() { + matchesStorageClass_.makeImmutable(); + return matchesStorageClass_; + } + + /** + * + * + *
+           * Optional. Objects having any of the storage classes specified by this
+           * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+           * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+           * `DURABLE_REDUCED_AVAILABILITY`.
+           * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of matchesStorageClass. + */ + public int getMatchesStorageClassCount() { + return matchesStorageClass_.size(); + } + + /** + * + * + *
+           * Optional. Objects having any of the storage classes specified by this
+           * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+           * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+           * `DURABLE_REDUCED_AVAILABILITY`.
+           * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The matchesStorageClass at the given index. + */ + public java.lang.String getMatchesStorageClass(int index) { + return matchesStorageClass_.get(index); + } + + /** + * + * + *
+           * Optional. Objects having any of the storage classes specified by this
+           * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+           * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+           * `DURABLE_REDUCED_AVAILABILITY`.
+           * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the matchesStorageClass at the given index. + */ + public com.google.protobuf.ByteString getMatchesStorageClassBytes(int index) { + return matchesStorageClass_.getByteString(index); + } + + /** + * + * + *
+           * Optional. Objects having any of the storage classes specified by this
+           * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+           * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+           * `DURABLE_REDUCED_AVAILABILITY`.
+           * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index to set the value at. + * @param value The matchesStorageClass to set. + * @return This builder for chaining. + */ + public Builder setMatchesStorageClass(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMatchesStorageClassIsMutable(); + matchesStorageClass_.set(index, value); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. Objects having any of the storage classes specified by this
+           * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+           * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+           * `DURABLE_REDUCED_AVAILABILITY`.
+           * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The matchesStorageClass to add. + * @return This builder for chaining. + */ + public Builder addMatchesStorageClass(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMatchesStorageClassIsMutable(); + matchesStorageClass_.add(value); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. Objects having any of the storage classes specified by this
+           * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+           * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+           * `DURABLE_REDUCED_AVAILABILITY`.
+           * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param values The matchesStorageClass to add. + * @return This builder for chaining. + */ + public Builder addAllMatchesStorageClass(java.lang.Iterable values) { + ensureMatchesStorageClassIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, matchesStorageClass_); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. Objects having any of the storage classes specified by this
+           * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+           * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+           * `DURABLE_REDUCED_AVAILABILITY`.
+           * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearMatchesStorageClass() { + matchesStorageClass_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + ; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. Objects having any of the storage classes specified by this
+           * condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+           * `NEARLINE`, `COLDLINE`, `STANDARD`, and
+           * `DURABLE_REDUCED_AVAILABILITY`.
+           * 
+ * + * + * repeated string matches_storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The bytes of the matchesStorageClass to add. + * @return This builder for chaining. + */ + public Builder addMatchesStorageClassBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureMatchesStorageClassIsMutable(); + matchesStorageClass_.add(value); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private int daysSinceCustomTime_; + + /** + * + * + *
+           * Number of days that have elapsed since the custom timestamp set on an
+           * object.
+           * The value of the field must be a nonnegative integer.
+           * 
+ * + * optional int32 days_since_custom_time = 7; + * + * @return Whether the daysSinceCustomTime field is set. + */ + @java.lang.Override + public boolean hasDaysSinceCustomTime() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+           * Number of days that have elapsed since the custom timestamp set on an
+           * object.
+           * The value of the field must be a nonnegative integer.
+           * 
+ * + * optional int32 days_since_custom_time = 7; + * + * @return The daysSinceCustomTime. + */ + @java.lang.Override + public int getDaysSinceCustomTime() { + return daysSinceCustomTime_; + } + + /** + * + * + *
+           * Number of days that have elapsed since the custom timestamp set on an
+           * object.
+           * The value of the field must be a nonnegative integer.
+           * 
+ * + * optional int32 days_since_custom_time = 7; + * + * @param value The daysSinceCustomTime to set. + * @return This builder for chaining. + */ + public Builder setDaysSinceCustomTime(int value) { + + daysSinceCustomTime_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+           * Number of days that have elapsed since the custom timestamp set on an
+           * object.
+           * The value of the field must be a nonnegative integer.
+           * 
+ * + * optional int32 days_since_custom_time = 7; + * + * @return This builder for chaining. + */ + public Builder clearDaysSinceCustomTime() { + bitField0_ = (bitField0_ & ~0x00000020); + daysSinceCustomTime_ = 0; + onChanged(); + return this; + } + + private com.google.type.Date customTimeBefore_; + private com.google.protobuf.SingleFieldBuilder< + com.google.type.Date, com.google.type.Date.Builder, com.google.type.DateOrBuilder> + customTimeBeforeBuilder_; + + /** + * + * + *
+           * Optional. An object matches this condition if the custom timestamp
+           * set on the object is before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customTimeBefore field is set. + */ + public boolean hasCustomTimeBefore() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+           * Optional. An object matches this condition if the custom timestamp
+           * set on the object is before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customTimeBefore. + */ + public com.google.type.Date getCustomTimeBefore() { + if (customTimeBeforeBuilder_ == null) { + return customTimeBefore_ == null + ? com.google.type.Date.getDefaultInstance() + : customTimeBefore_; + } else { + return customTimeBeforeBuilder_.getMessage(); + } + } + + /** + * + * + *
+           * Optional. An object matches this condition if the custom timestamp
+           * set on the object is before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCustomTimeBefore(com.google.type.Date value) { + if (customTimeBeforeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + customTimeBefore_ = value; + } else { + customTimeBeforeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. An object matches this condition if the custom timestamp
+           * set on the object is before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCustomTimeBefore(com.google.type.Date.Builder builderForValue) { + if (customTimeBeforeBuilder_ == null) { + customTimeBefore_ = builderForValue.build(); + } else { + customTimeBeforeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. An object matches this condition if the custom timestamp
+           * set on the object is before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCustomTimeBefore(com.google.type.Date value) { + if (customTimeBeforeBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && customTimeBefore_ != null + && customTimeBefore_ != com.google.type.Date.getDefaultInstance()) { + getCustomTimeBeforeBuilder().mergeFrom(value); + } else { + customTimeBefore_ = value; + } + } else { + customTimeBeforeBuilder_.mergeFrom(value); + } + if (customTimeBefore_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
+           * Optional. An object matches this condition if the custom timestamp
+           * set on the object is before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCustomTimeBefore() { + bitField0_ = (bitField0_ & ~0x00000040); + customTimeBefore_ = null; + if (customTimeBeforeBuilder_ != null) { + customTimeBeforeBuilder_.dispose(); + customTimeBeforeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. An object matches this condition if the custom timestamp
+           * set on the object is before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.type.Date.Builder getCustomTimeBeforeBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return internalGetCustomTimeBeforeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+           * Optional. An object matches this condition if the custom timestamp
+           * set on the object is before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.type.DateOrBuilder getCustomTimeBeforeOrBuilder() { + if (customTimeBeforeBuilder_ != null) { + return customTimeBeforeBuilder_.getMessageOrBuilder(); + } else { + return customTimeBefore_ == null + ? com.google.type.Date.getDefaultInstance() + : customTimeBefore_; + } + } + + /** + * + * + *
+           * Optional. An object matches this condition if the custom timestamp
+           * set on the object is before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date custom_time_before = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.type.Date, com.google.type.Date.Builder, com.google.type.DateOrBuilder> + internalGetCustomTimeBeforeFieldBuilder() { + if (customTimeBeforeBuilder_ == null) { + customTimeBeforeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.type.Date, + com.google.type.Date.Builder, + com.google.type.DateOrBuilder>( + getCustomTimeBefore(), getParentForChildren(), isClean()); + customTimeBefore_ = null; + } + return customTimeBeforeBuilder_; + } + + private int daysSinceNoncurrentTime_; + + /** + * + * + *
+           * This condition is relevant only for versioned objects. An object
+           * version satisfies this condition only if these many days have been
+           * passed since it became noncurrent. The value of the field must be a
+           * nonnegative integer. If it's zero, the object version becomes
+           * eligible for Lifecycle action as soon as it becomes noncurrent.
+           * 
+ * + * optional int32 days_since_noncurrent_time = 9; + * + * @return Whether the daysSinceNoncurrentTime field is set. + */ + @java.lang.Override + public boolean hasDaysSinceNoncurrentTime() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+           * This condition is relevant only for versioned objects. An object
+           * version satisfies this condition only if these many days have been
+           * passed since it became noncurrent. The value of the field must be a
+           * nonnegative integer. If it's zero, the object version becomes
+           * eligible for Lifecycle action as soon as it becomes noncurrent.
+           * 
+ * + * optional int32 days_since_noncurrent_time = 9; + * + * @return The daysSinceNoncurrentTime. + */ + @java.lang.Override + public int getDaysSinceNoncurrentTime() { + return daysSinceNoncurrentTime_; + } + + /** + * + * + *
+           * This condition is relevant only for versioned objects. An object
+           * version satisfies this condition only if these many days have been
+           * passed since it became noncurrent. The value of the field must be a
+           * nonnegative integer. If it's zero, the object version becomes
+           * eligible for Lifecycle action as soon as it becomes noncurrent.
+           * 
+ * + * optional int32 days_since_noncurrent_time = 9; + * + * @param value The daysSinceNoncurrentTime to set. + * @return This builder for chaining. + */ + public Builder setDaysSinceNoncurrentTime(int value) { + + daysSinceNoncurrentTime_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+           * This condition is relevant only for versioned objects. An object
+           * version satisfies this condition only if these many days have been
+           * passed since it became noncurrent. The value of the field must be a
+           * nonnegative integer. If it's zero, the object version becomes
+           * eligible for Lifecycle action as soon as it becomes noncurrent.
+           * 
+ * + * optional int32 days_since_noncurrent_time = 9; + * + * @return This builder for chaining. + */ + public Builder clearDaysSinceNoncurrentTime() { + bitField0_ = (bitField0_ & ~0x00000080); + daysSinceNoncurrentTime_ = 0; + onChanged(); + return this; + } + + private com.google.type.Date noncurrentTimeBefore_; + private com.google.protobuf.SingleFieldBuilder< + com.google.type.Date, com.google.type.Date.Builder, com.google.type.DateOrBuilder> + noncurrentTimeBeforeBuilder_; + + /** + * + * + *
+           * Optional. This condition is relevant only for versioned objects. An
+           * object version satisfies this condition only if it became noncurrent
+           * before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the noncurrentTimeBefore field is set. + */ + public boolean hasNoncurrentTimeBefore() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
+           * Optional. This condition is relevant only for versioned objects. An
+           * object version satisfies this condition only if it became noncurrent
+           * before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The noncurrentTimeBefore. + */ + public com.google.type.Date getNoncurrentTimeBefore() { + if (noncurrentTimeBeforeBuilder_ == null) { + return noncurrentTimeBefore_ == null + ? com.google.type.Date.getDefaultInstance() + : noncurrentTimeBefore_; + } else { + return noncurrentTimeBeforeBuilder_.getMessage(); + } + } + + /** + * + * + *
+           * Optional. This condition is relevant only for versioned objects. An
+           * object version satisfies this condition only if it became noncurrent
+           * before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setNoncurrentTimeBefore(com.google.type.Date value) { + if (noncurrentTimeBeforeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + noncurrentTimeBefore_ = value; + } else { + noncurrentTimeBeforeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. This condition is relevant only for versioned objects. An
+           * object version satisfies this condition only if it became noncurrent
+           * before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setNoncurrentTimeBefore(com.google.type.Date.Builder builderForValue) { + if (noncurrentTimeBeforeBuilder_ == null) { + noncurrentTimeBefore_ = builderForValue.build(); + } else { + noncurrentTimeBeforeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. This condition is relevant only for versioned objects. An
+           * object version satisfies this condition only if it became noncurrent
+           * before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeNoncurrentTimeBefore(com.google.type.Date value) { + if (noncurrentTimeBeforeBuilder_ == null) { + if (((bitField0_ & 0x00000100) != 0) + && noncurrentTimeBefore_ != null + && noncurrentTimeBefore_ != com.google.type.Date.getDefaultInstance()) { + getNoncurrentTimeBeforeBuilder().mergeFrom(value); + } else { + noncurrentTimeBefore_ = value; + } + } else { + noncurrentTimeBeforeBuilder_.mergeFrom(value); + } + if (noncurrentTimeBefore_ != null) { + bitField0_ |= 0x00000100; + onChanged(); + } + return this; + } + + /** + * + * + *
+           * Optional. This condition is relevant only for versioned objects. An
+           * object version satisfies this condition only if it became noncurrent
+           * before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearNoncurrentTimeBefore() { + bitField0_ = (bitField0_ & ~0x00000100); + noncurrentTimeBefore_ = null; + if (noncurrentTimeBeforeBuilder_ != null) { + noncurrentTimeBeforeBuilder_.dispose(); + noncurrentTimeBeforeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. This condition is relevant only for versioned objects. An
+           * object version satisfies this condition only if it became noncurrent
+           * before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.type.Date.Builder getNoncurrentTimeBeforeBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return internalGetNoncurrentTimeBeforeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+           * Optional. This condition is relevant only for versioned objects. An
+           * object version satisfies this condition only if it became noncurrent
+           * before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.type.DateOrBuilder getNoncurrentTimeBeforeOrBuilder() { + if (noncurrentTimeBeforeBuilder_ != null) { + return noncurrentTimeBeforeBuilder_.getMessageOrBuilder(); + } else { + return noncurrentTimeBefore_ == null + ? com.google.type.Date.getDefaultInstance() + : noncurrentTimeBefore_; + } + } + + /** + * + * + *
+           * Optional. This condition is relevant only for versioned objects. An
+           * object version satisfies this condition only if it became noncurrent
+           * before the specified date in UTC.
+           * 
+ * + * + * .google.type.Date noncurrent_time_before = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.type.Date, com.google.type.Date.Builder, com.google.type.DateOrBuilder> + internalGetNoncurrentTimeBeforeFieldBuilder() { + if (noncurrentTimeBeforeBuilder_ == null) { + noncurrentTimeBeforeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.type.Date, + com.google.type.Date.Builder, + com.google.type.DateOrBuilder>( + getNoncurrentTimeBefore(), getParentForChildren(), isClean()); + noncurrentTimeBefore_ = null; + } + return noncurrentTimeBeforeBuilder_; + } + + private com.google.protobuf.LazyStringArrayList matchesPrefix_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureMatchesPrefixIsMutable() { + if (!matchesPrefix_.isModifiable()) { + matchesPrefix_ = new com.google.protobuf.LazyStringArrayList(matchesPrefix_); + } + bitField0_ |= 0x00000200; + } + + /** + * + * + *
+           * Optional. List of object name prefixes. If any prefix exactly matches
+           * the beginning of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the matchesPrefix. + */ + public com.google.protobuf.ProtocolStringList getMatchesPrefixList() { + matchesPrefix_.makeImmutable(); + return matchesPrefix_; + } + + /** + * + * + *
+           * Optional. List of object name prefixes. If any prefix exactly matches
+           * the beginning of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of matchesPrefix. + */ + public int getMatchesPrefixCount() { + return matchesPrefix_.size(); + } + + /** + * + * + *
+           * Optional. List of object name prefixes. If any prefix exactly matches
+           * the beginning of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The matchesPrefix at the given index. + */ + public java.lang.String getMatchesPrefix(int index) { + return matchesPrefix_.get(index); + } + + /** + * + * + *
+           * Optional. List of object name prefixes. If any prefix exactly matches
+           * the beginning of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the matchesPrefix at the given index. + */ + public com.google.protobuf.ByteString getMatchesPrefixBytes(int index) { + return matchesPrefix_.getByteString(index); + } + + /** + * + * + *
+           * Optional. List of object name prefixes. If any prefix exactly matches
+           * the beginning of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index to set the value at. + * @param value The matchesPrefix to set. + * @return This builder for chaining. + */ + public Builder setMatchesPrefix(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMatchesPrefixIsMutable(); + matchesPrefix_.set(index, value); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. List of object name prefixes. If any prefix exactly matches
+           * the beginning of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The matchesPrefix to add. + * @return This builder for chaining. + */ + public Builder addMatchesPrefix(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMatchesPrefixIsMutable(); + matchesPrefix_.add(value); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. List of object name prefixes. If any prefix exactly matches
+           * the beginning of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param values The matchesPrefix to add. + * @return This builder for chaining. + */ + public Builder addAllMatchesPrefix(java.lang.Iterable values) { + ensureMatchesPrefixIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, matchesPrefix_); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. List of object name prefixes. If any prefix exactly matches
+           * the beginning of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearMatchesPrefix() { + matchesPrefix_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + ; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. List of object name prefixes. If any prefix exactly matches
+           * the beginning of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_prefix = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The bytes of the matchesPrefix to add. + * @return This builder for chaining. + */ + public Builder addMatchesPrefixBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureMatchesPrefixIsMutable(); + matchesPrefix_.add(value); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList matchesSuffix_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureMatchesSuffixIsMutable() { + if (!matchesSuffix_.isModifiable()) { + matchesSuffix_ = new com.google.protobuf.LazyStringArrayList(matchesSuffix_); + } + bitField0_ |= 0x00000400; + } + + /** + * + * + *
+           * Optional. List of object name suffixes. If any suffix exactly matches
+           * the end of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the matchesSuffix. + */ + public com.google.protobuf.ProtocolStringList getMatchesSuffixList() { + matchesSuffix_.makeImmutable(); + return matchesSuffix_; + } + + /** + * + * + *
+           * Optional. List of object name suffixes. If any suffix exactly matches
+           * the end of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of matchesSuffix. + */ + public int getMatchesSuffixCount() { + return matchesSuffix_.size(); + } + + /** + * + * + *
+           * Optional. List of object name suffixes. If any suffix exactly matches
+           * the end of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The matchesSuffix at the given index. + */ + public java.lang.String getMatchesSuffix(int index) { + return matchesSuffix_.get(index); + } + + /** + * + * + *
+           * Optional. List of object name suffixes. If any suffix exactly matches
+           * the end of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the matchesSuffix at the given index. + */ + public com.google.protobuf.ByteString getMatchesSuffixBytes(int index) { + return matchesSuffix_.getByteString(index); + } + + /** + * + * + *
+           * Optional. List of object name suffixes. If any suffix exactly matches
+           * the end of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index to set the value at. + * @param value The matchesSuffix to set. + * @return This builder for chaining. + */ + public Builder setMatchesSuffix(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMatchesSuffixIsMutable(); + matchesSuffix_.set(index, value); + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. List of object name suffixes. If any suffix exactly matches
+           * the end of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The matchesSuffix to add. + * @return This builder for chaining. + */ + public Builder addMatchesSuffix(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMatchesSuffixIsMutable(); + matchesSuffix_.add(value); + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. List of object name suffixes. If any suffix exactly matches
+           * the end of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param values The matchesSuffix to add. + * @return This builder for chaining. + */ + public Builder addAllMatchesSuffix(java.lang.Iterable values) { + ensureMatchesSuffixIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, matchesSuffix_); + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. List of object name suffixes. If any suffix exactly matches
+           * the end of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearMatchesSuffix() { + matchesSuffix_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000400); + ; + onChanged(); + return this; + } + + /** + * + * + *
+           * Optional. List of object name suffixes. If any suffix exactly matches
+           * the end of the object name, the condition evaluates to true.
+           * 
+ * + * repeated string matches_suffix = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The bytes of the matchesSuffix to add. + * @return This builder for chaining. + */ + public Builder addMatchesSuffixBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureMatchesSuffixIsMutable(); + matchesSuffix_.add(value); + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Lifecycle.Rule.Condition) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Lifecycle.Rule.Condition) + private static final com.google.storage.v2.Bucket.Lifecycle.Rule.Condition DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.Lifecycle.Rule.Condition(); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule.Condition getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Condition parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule.Condition getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int ACTION_FIELD_NUMBER = 1; + private com.google.storage.v2.Bucket.Lifecycle.Rule.Action action_; + + /** + * + * + *
+       * Optional. The action to take.
+       * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the action field is set. + */ + @java.lang.Override + public boolean hasAction() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Optional. The action to take.
+       * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The action. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule.Action getAction() { + return action_ == null + ? com.google.storage.v2.Bucket.Lifecycle.Rule.Action.getDefaultInstance() + : action_; + } + + /** + * + * + *
+       * Optional. The action to take.
+       * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule.ActionOrBuilder getActionOrBuilder() { + return action_ == null + ? com.google.storage.v2.Bucket.Lifecycle.Rule.Action.getDefaultInstance() + : action_; + } + + public static final int CONDITION_FIELD_NUMBER = 2; + private com.google.storage.v2.Bucket.Lifecycle.Rule.Condition condition_; + + /** + * + * + *
+       * Optional. The condition under which the action is taken.
+       * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the condition field is set. + */ + @java.lang.Override + public boolean hasCondition() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Optional. The condition under which the action is taken.
+       * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The condition. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule.Condition getCondition() { + return condition_ == null + ? com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.getDefaultInstance() + : condition_; + } + + /** + * + * + *
+       * Optional. The condition under which the action is taken.
+       * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule.ConditionOrBuilder + getConditionOrBuilder() { + return condition_ == null + ? com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.getDefaultInstance() + : condition_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getAction()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getCondition()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getAction()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getCondition()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.Lifecycle.Rule)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.Lifecycle.Rule other = + (com.google.storage.v2.Bucket.Lifecycle.Rule) obj; + + if (hasAction() != other.hasAction()) return false; + if (hasAction()) { + if (!getAction().equals(other.getAction())) return false; + } + if (hasCondition() != other.hasCondition()) 
return false; + if (hasCondition()) { + if (!getCondition().equals(other.getCondition())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasAction()) { + hash = (37 * hash) + ACTION_FIELD_NUMBER; + hash = (53 * hash) + getAction().hashCode(); + } + if (hasCondition()) { + hash = (37 * hash) + CONDITION_FIELD_NUMBER; + hash = (53 * hash) + getCondition().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule parseFrom( + byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public 
static Builder newBuilder(com.google.storage.v2.Bucket.Lifecycle.Rule prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+       * A lifecycle Rule, combining an action to take on an object and a
+       * condition which triggers that action.
+       * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Lifecycle.Rule} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Lifecycle.Rule) + com.google.storage.v2.Bucket.Lifecycle.RuleOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Lifecycle.Rule.class, + com.google.storage.v2.Bucket.Lifecycle.Rule.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.Lifecycle.Rule.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetActionFieldBuilder(); + internalGetConditionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + action_ = null; + if (actionBuilder_ != null) { + actionBuilder_.dispose(); + actionBuilder_ = null; + } + condition_ = null; + if (conditionBuilder_ != null) { + conditionBuilder_.dispose(); + conditionBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_Rule_descriptor; + } + + @java.lang.Override + 
public com.google.storage.v2.Bucket.Lifecycle.Rule getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Lifecycle.Rule.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule build() { + com.google.storage.v2.Bucket.Lifecycle.Rule result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule buildPartial() { + com.google.storage.v2.Bucket.Lifecycle.Rule result = + new com.google.storage.v2.Bucket.Lifecycle.Rule(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.Lifecycle.Rule result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.action_ = actionBuilder_ == null ? action_ : actionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.condition_ = conditionBuilder_ == null ? 
condition_ : conditionBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.Lifecycle.Rule) { + return mergeFrom((com.google.storage.v2.Bucket.Lifecycle.Rule) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.Lifecycle.Rule other) { + if (other == com.google.storage.v2.Bucket.Lifecycle.Rule.getDefaultInstance()) + return this; + if (other.hasAction()) { + mergeAction(other.getAction()); + } + if (other.hasCondition()) { + mergeCondition(other.getCondition()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetActionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetConditionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + 
private com.google.storage.v2.Bucket.Lifecycle.Rule.Action action_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Lifecycle.Rule.Action, + com.google.storage.v2.Bucket.Lifecycle.Rule.Action.Builder, + com.google.storage.v2.Bucket.Lifecycle.Rule.ActionOrBuilder> + actionBuilder_; + + /** + * + * + *
+         * Optional. The action to take.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the action field is set. + */ + public boolean hasAction() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+         * Optional. The action to take.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The action. + */ + public com.google.storage.v2.Bucket.Lifecycle.Rule.Action getAction() { + if (actionBuilder_ == null) { + return action_ == null + ? com.google.storage.v2.Bucket.Lifecycle.Rule.Action.getDefaultInstance() + : action_; + } else { + return actionBuilder_.getMessage(); + } + } + + /** + * + * + *
+         * Optional. The action to take.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAction(com.google.storage.v2.Bucket.Lifecycle.Rule.Action value) { + if (actionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + } else { + actionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The action to take.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAction( + com.google.storage.v2.Bucket.Lifecycle.Rule.Action.Builder builderForValue) { + if (actionBuilder_ == null) { + action_ = builderForValue.build(); + } else { + actionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The action to take.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeAction(com.google.storage.v2.Bucket.Lifecycle.Rule.Action value) { + if (actionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && action_ != null + && action_ + != com.google.storage.v2.Bucket.Lifecycle.Rule.Action.getDefaultInstance()) { + getActionBuilder().mergeFrom(value); + } else { + action_ = value; + } + } else { + actionBuilder_.mergeFrom(value); + } + if (action_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+         * Optional. The action to take.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearAction() { + bitField0_ = (bitField0_ & ~0x00000001); + action_ = null; + if (actionBuilder_ != null) { + actionBuilder_.dispose(); + actionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The action to take.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Lifecycle.Rule.Action.Builder getActionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetActionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+         * Optional. The action to take.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Lifecycle.Rule.ActionOrBuilder getActionOrBuilder() { + if (actionBuilder_ != null) { + return actionBuilder_.getMessageOrBuilder(); + } else { + return action_ == null + ? com.google.storage.v2.Bucket.Lifecycle.Rule.Action.getDefaultInstance() + : action_; + } + } + + /** + * + * + *
+         * Optional. The action to take.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Action action = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Lifecycle.Rule.Action, + com.google.storage.v2.Bucket.Lifecycle.Rule.Action.Builder, + com.google.storage.v2.Bucket.Lifecycle.Rule.ActionOrBuilder> + internalGetActionFieldBuilder() { + if (actionBuilder_ == null) { + actionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Lifecycle.Rule.Action, + com.google.storage.v2.Bucket.Lifecycle.Rule.Action.Builder, + com.google.storage.v2.Bucket.Lifecycle.Rule.ActionOrBuilder>( + getAction(), getParentForChildren(), isClean()); + action_ = null; + } + return actionBuilder_; + } + + private com.google.storage.v2.Bucket.Lifecycle.Rule.Condition condition_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition, + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.Builder, + com.google.storage.v2.Bucket.Lifecycle.Rule.ConditionOrBuilder> + conditionBuilder_; + + /** + * + * + *
+         * Optional. The condition under which the action is taken.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the condition field is set. + */ + public boolean hasCondition() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+         * Optional. The condition under which the action is taken.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The condition. + */ + public com.google.storage.v2.Bucket.Lifecycle.Rule.Condition getCondition() { + if (conditionBuilder_ == null) { + return condition_ == null + ? com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.getDefaultInstance() + : condition_; + } else { + return conditionBuilder_.getMessage(); + } + } + + /** + * + * + *
+         * Optional. The condition under which the action is taken.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCondition(com.google.storage.v2.Bucket.Lifecycle.Rule.Condition value) { + if (conditionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + condition_ = value; + } else { + conditionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The condition under which the action is taken.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCondition( + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.Builder builderForValue) { + if (conditionBuilder_ == null) { + condition_ = builderForValue.build(); + } else { + conditionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The condition under which the action is taken.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCondition(com.google.storage.v2.Bucket.Lifecycle.Rule.Condition value) { + if (conditionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && condition_ != null + && condition_ + != com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.getDefaultInstance()) { + getConditionBuilder().mergeFrom(value); + } else { + condition_ = value; + } + } else { + conditionBuilder_.mergeFrom(value); + } + if (condition_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+         * Optional. The condition under which the action is taken.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCondition() { + bitField0_ = (bitField0_ & ~0x00000002); + condition_ = null; + if (conditionBuilder_ != null) { + conditionBuilder_.dispose(); + conditionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The condition under which the action is taken.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.Builder getConditionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetConditionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+         * Optional. The condition under which the action is taken.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Lifecycle.Rule.ConditionOrBuilder + getConditionOrBuilder() { + if (conditionBuilder_ != null) { + return conditionBuilder_.getMessageOrBuilder(); + } else { + return condition_ == null + ? com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.getDefaultInstance() + : condition_; + } + } + + /** + * + * + *
+         * Optional. The condition under which the action is taken.
+         * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle.Rule.Condition condition = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition, + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.Builder, + com.google.storage.v2.Bucket.Lifecycle.Rule.ConditionOrBuilder> + internalGetConditionFieldBuilder() { + if (conditionBuilder_ == null) { + conditionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition, + com.google.storage.v2.Bucket.Lifecycle.Rule.Condition.Builder, + com.google.storage.v2.Bucket.Lifecycle.Rule.ConditionOrBuilder>( + getCondition(), getParentForChildren(), isClean()); + condition_ = null; + } + return conditionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Lifecycle.Rule) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Lifecycle.Rule) + private static final com.google.storage.v2.Bucket.Lifecycle.Rule DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.Lifecycle.Rule(); + } + + public static com.google.storage.v2.Bucket.Lifecycle.Rule getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Rule parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch 
(java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public static final int RULE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List rule_; + + /** + * + * + *
+     * Optional. A lifecycle management rule, which is made of an action to take
+     * and the condition under which the action is taken.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List getRuleList() { + return rule_; + } + + /** + * + * + *
+     * Optional. A lifecycle management rule, which is made of an action to take
+     * and the condition under which the action is taken.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getRuleOrBuilderList() { + return rule_; + } + + /** + * + * + *
+     * Optional. A lifecycle management rule, which is made of an action to take
+     * and the condition under which the action is taken.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getRuleCount() { + return rule_.size(); + } + + /** + * + * + *
+     * Optional. A lifecycle management rule, which is made of an action to take
+     * and the condition under which the action is taken.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.Rule getRule(int index) { + return rule_.get(index); + } + + /** + * + * + *
+     * Optional. A lifecycle management rule, which is made of an action to take
+     * and the condition under which the action is taken.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle.RuleOrBuilder getRuleOrBuilder(int index) { + return rule_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < rule_.size(); i++) { + output.writeMessage(1, rule_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < rule_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, rule_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.Lifecycle)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.Lifecycle other = (com.google.storage.v2.Bucket.Lifecycle) obj; + + if (!getRuleList().equals(other.getRuleList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getRuleCount() > 0) { + hash = (37 * hash) + RULE_FIELD_NUMBER; + hash = (53 * hash) + getRuleList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + 
memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.Lifecycle parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Lifecycle parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Lifecycle parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Lifecycle parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Lifecycle parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.storage.v2.Bucket.Lifecycle parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Lifecycle parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Lifecycle parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Lifecycle parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.Lifecycle prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Lifecycle properties of a bucket.
+     * For more information, see [Object Lifecycle
+     * Management](https://cloud.google.com/storage/docs/lifecycle).
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Lifecycle} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Lifecycle) + com.google.storage.v2.Bucket.LifecycleOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Lifecycle.class, + com.google.storage.v2.Bucket.Lifecycle.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.Lifecycle.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (ruleBuilder_ == null) { + rule_ = java.util.Collections.emptyList(); + } else { + rule_ = null; + ruleBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Lifecycle_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Lifecycle.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle build() { + com.google.storage.v2.Bucket.Lifecycle result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } 
+ return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle buildPartial() { + com.google.storage.v2.Bucket.Lifecycle result = + new com.google.storage.v2.Bucket.Lifecycle(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.storage.v2.Bucket.Lifecycle result) { + if (ruleBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + rule_ = java.util.Collections.unmodifiableList(rule_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.rule_ = rule_; + } else { + result.rule_ = ruleBuilder_.build(); + } + } + + private void buildPartial0(com.google.storage.v2.Bucket.Lifecycle result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.Lifecycle) { + return mergeFrom((com.google.storage.v2.Bucket.Lifecycle) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.Lifecycle other) { + if (other == com.google.storage.v2.Bucket.Lifecycle.getDefaultInstance()) return this; + if (ruleBuilder_ == null) { + if (!other.rule_.isEmpty()) { + if (rule_.isEmpty()) { + rule_ = other.rule_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRuleIsMutable(); + rule_.addAll(other.rule_); + } + onChanged(); + } + } else { + if (!other.rule_.isEmpty()) { + if (ruleBuilder_.isEmpty()) { + ruleBuilder_.dispose(); + ruleBuilder_ = null; + rule_ = other.rule_; + bitField0_ = (bitField0_ & ~0x00000001); + ruleBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetRuleFieldBuilder() + : null; + } else { + ruleBuilder_.addAllMessages(other.rule_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.storage.v2.Bucket.Lifecycle.Rule m = + input.readMessage( + com.google.storage.v2.Bucket.Lifecycle.Rule.parser(), extensionRegistry); + if (ruleBuilder_ == null) { + ensureRuleIsMutable(); + rule_.add(m); + } else { + ruleBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List rule_ = + java.util.Collections.emptyList(); + + private void ensureRuleIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + rule_ = new java.util.ArrayList(rule_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Bucket.Lifecycle.Rule, + com.google.storage.v2.Bucket.Lifecycle.Rule.Builder, + com.google.storage.v2.Bucket.Lifecycle.RuleOrBuilder> + ruleBuilder_; + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getRuleList() { + if (ruleBuilder_ == null) { + return java.util.Collections.unmodifiableList(rule_); + } else { + return ruleBuilder_.getMessageList(); + } + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getRuleCount() { + if (ruleBuilder_ == null) { + return rule_.size(); + } else { + return ruleBuilder_.getCount(); + } + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Lifecycle.Rule getRule(int index) { + if (ruleBuilder_ == null) { + return rule_.get(index); + } else { + return ruleBuilder_.getMessage(index); + } + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRule(int index, com.google.storage.v2.Bucket.Lifecycle.Rule value) { + if (ruleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRuleIsMutable(); + rule_.set(index, value); + onChanged(); + } else { + ruleBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRule( + int index, com.google.storage.v2.Bucket.Lifecycle.Rule.Builder builderForValue) { + if (ruleBuilder_ == null) { + ensureRuleIsMutable(); + rule_.set(index, builderForValue.build()); + onChanged(); + } else { + ruleBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addRule(com.google.storage.v2.Bucket.Lifecycle.Rule value) { + if (ruleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRuleIsMutable(); + rule_.add(value); + onChanged(); + } else { + ruleBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addRule(int index, com.google.storage.v2.Bucket.Lifecycle.Rule value) { + if (ruleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRuleIsMutable(); + rule_.add(index, value); + onChanged(); + } else { + ruleBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addRule(com.google.storage.v2.Bucket.Lifecycle.Rule.Builder builderForValue) { + if (ruleBuilder_ == null) { + ensureRuleIsMutable(); + rule_.add(builderForValue.build()); + onChanged(); + } else { + ruleBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addRule( + int index, com.google.storage.v2.Bucket.Lifecycle.Rule.Builder builderForValue) { + if (ruleBuilder_ == null) { + ensureRuleIsMutable(); + rule_.add(index, builderForValue.build()); + onChanged(); + } else { + ruleBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllRule( + java.lang.Iterable values) { + if (ruleBuilder_ == null) { + ensureRuleIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, rule_); + onChanged(); + } else { + ruleBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRule() { + if (ruleBuilder_ == null) { + rule_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + ruleBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeRule(int index) { + if (ruleBuilder_ == null) { + ensureRuleIsMutable(); + rule_.remove(index); + onChanged(); + } else { + ruleBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Lifecycle.Rule.Builder getRuleBuilder(int index) { + return internalGetRuleFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Lifecycle.RuleOrBuilder getRuleOrBuilder(int index) { + if (ruleBuilder_ == null) { + return rule_.get(index); + } else { + return ruleBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getRuleOrBuilderList() { + if (ruleBuilder_ != null) { + return ruleBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(rule_); + } + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Lifecycle.Rule.Builder addRuleBuilder() { + return internalGetRuleFieldBuilder() + .addBuilder(com.google.storage.v2.Bucket.Lifecycle.Rule.getDefaultInstance()); + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Lifecycle.Rule.Builder addRuleBuilder(int index) { + return internalGetRuleFieldBuilder() + .addBuilder(index, com.google.storage.v2.Bucket.Lifecycle.Rule.getDefaultInstance()); + } + + /** + * + * + *
+       * Optional. A lifecycle management rule, which is made of an action to take
+       * and the condition under which the action is taken.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.Lifecycle.Rule rule = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getRuleBuilderList() { + return internalGetRuleFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Bucket.Lifecycle.Rule, + com.google.storage.v2.Bucket.Lifecycle.Rule.Builder, + com.google.storage.v2.Bucket.Lifecycle.RuleOrBuilder> + internalGetRuleFieldBuilder() { + if (ruleBuilder_ == null) { + ruleBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Bucket.Lifecycle.Rule, + com.google.storage.v2.Bucket.Lifecycle.Rule.Builder, + com.google.storage.v2.Bucket.Lifecycle.RuleOrBuilder>( + rule_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + rule_ = null; + } + return ruleBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Lifecycle) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Lifecycle) + private static final com.google.storage.v2.Bucket.Lifecycle DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.Lifecycle(); + } + + public static com.google.storage.v2.Bucket.Lifecycle getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Lifecycle parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + 
.setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface LoggingOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Logging) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. The destination bucket where the current bucket's logs should
+     * be placed, using path format (like `projects/123456/buckets/foo`).
+     * 
+ * + * string log_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The logBucket. + */ + java.lang.String getLogBucket(); + + /** + * + * + *
+     * Optional. The destination bucket where the current bucket's logs should
+     * be placed, using path format (like `projects/123456/buckets/foo`).
+     * 
+ * + * string log_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for logBucket. + */ + com.google.protobuf.ByteString getLogBucketBytes(); + + /** + * + * + *
+     * Optional. A prefix for log object names.
+     * 
+ * + * string log_object_prefix = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The logObjectPrefix. + */ + java.lang.String getLogObjectPrefix(); + + /** + * + * + *
+     * Optional. A prefix for log object names.
+     * 
+ * + * string log_object_prefix = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for logObjectPrefix. + */ + com.google.protobuf.ByteString getLogObjectPrefixBytes(); + } + + /** + * + * + *
+   * Logging-related properties of a bucket.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Logging} + */ + public static final class Logging extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Logging) + LoggingOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Logging"); + } + + // Use Logging.newBuilder() to construct. + private Logging(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Logging() { + logBucket_ = ""; + logObjectPrefix_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Logging_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Logging_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Logging.class, + com.google.storage.v2.Bucket.Logging.Builder.class); + } + + public static final int LOG_BUCKET_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object logBucket_ = ""; + + /** + * + * + *
+     * Optional. The destination bucket where the current bucket's logs should
+     * be placed, using path format (like `projects/123456/buckets/foo`).
+     * 
+ * + * string log_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The logBucket. + */ + @java.lang.Override + public java.lang.String getLogBucket() { + java.lang.Object ref = logBucket_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + logBucket_ = s; + return s; + } + } + + /** + * + * + *
+     * Optional. The destination bucket where the current bucket's logs should
+     * be placed, using path format (like `projects/123456/buckets/foo`).
+     * 
+ * + * string log_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for logBucket. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLogBucketBytes() { + java.lang.Object ref = logBucket_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + logBucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LOG_OBJECT_PREFIX_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object logObjectPrefix_ = ""; + + /** + * + * + *
+     * Optional. A prefix for log object names.
+     * 
+ * + * string log_object_prefix = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The logObjectPrefix. + */ + @java.lang.Override + public java.lang.String getLogObjectPrefix() { + java.lang.Object ref = logObjectPrefix_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + logObjectPrefix_ = s; + return s; + } + } + + /** + * + * + *
+     * Optional. A prefix for log object names.
+     * 
+ * + * string log_object_prefix = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for logObjectPrefix. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLogObjectPrefixBytes() { + java.lang.Object ref = logObjectPrefix_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + logObjectPrefix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(logBucket_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, logBucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(logObjectPrefix_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, logObjectPrefix_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(logBucket_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, logBucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(logObjectPrefix_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, logObjectPrefix_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.storage.v2.Bucket.Logging)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.Logging other = (com.google.storage.v2.Bucket.Logging) obj; + + if (!getLogBucket().equals(other.getLogBucket())) return false; + if (!getLogObjectPrefix().equals(other.getLogObjectPrefix())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + LOG_BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getLogBucket().hashCode(); + hash = (37 * hash) + LOG_OBJECT_PREFIX_FIELD_NUMBER; + hash = (53 * hash) + getLogObjectPrefix().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.Logging parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Logging parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Logging parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Logging parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Logging parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Logging parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Logging parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Logging parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Logging parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Logging parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Logging parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Logging parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder 
newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.Logging prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Logging-related properties of a bucket.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Logging} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Logging) + com.google.storage.v2.Bucket.LoggingOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Logging_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Logging_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Logging.class, + com.google.storage.v2.Bucket.Logging.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.Logging.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + logBucket_ = ""; + logObjectPrefix_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Logging_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Logging getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Logging.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Logging build() { + com.google.storage.v2.Bucket.Logging result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Logging buildPartial() { + com.google.storage.v2.Bucket.Logging result 
= + new com.google.storage.v2.Bucket.Logging(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.Logging result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.logBucket_ = logBucket_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.logObjectPrefix_ = logObjectPrefix_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.Logging) { + return mergeFrom((com.google.storage.v2.Bucket.Logging) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.Logging other) { + if (other == com.google.storage.v2.Bucket.Logging.getDefaultInstance()) return this; + if (!other.getLogBucket().isEmpty()) { + logBucket_ = other.logBucket_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getLogObjectPrefix().isEmpty()) { + logObjectPrefix_ = other.logObjectPrefix_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + logBucket_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + logObjectPrefix_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object logBucket_ = ""; + + /** + * + * + *
+       * Optional. The destination bucket where the current bucket's logs should
+       * be placed, using path format (like `projects/123456/buckets/foo`).
+       * 
+ * + * string log_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The logBucket. + */ + public java.lang.String getLogBucket() { + java.lang.Object ref = logBucket_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + logBucket_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Optional. The destination bucket where the current bucket's logs should
+       * be placed, using path format (like `projects/123456/buckets/foo`).
+       * 
+ * + * string log_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for logBucket. + */ + public com.google.protobuf.ByteString getLogBucketBytes() { + java.lang.Object ref = logBucket_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + logBucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Optional. The destination bucket where the current bucket's logs should
+       * be placed, using path format (like `projects/123456/buckets/foo`).
+       * 
+ * + * string log_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The logBucket to set. + * @return This builder for chaining. + */ + public Builder setLogBucket(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + logBucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The destination bucket where the current bucket's logs should
+       * be placed, using path format (like `projects/123456/buckets/foo`).
+       * 
+ * + * string log_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLogBucket() { + logBucket_ = getDefaultInstance().getLogBucket(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The destination bucket where the current bucket's logs should
+       * be placed, using path format (like `projects/123456/buckets/foo`).
+       * 
+ * + * string log_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for logBucket to set. + * @return This builder for chaining. + */ + public Builder setLogBucketBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + logBucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object logObjectPrefix_ = ""; + + /** + * + * + *
+       * Optional. A prefix for log object names.
+       * 
+ * + * string log_object_prefix = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The logObjectPrefix. + */ + public java.lang.String getLogObjectPrefix() { + java.lang.Object ref = logObjectPrefix_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + logObjectPrefix_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Optional. A prefix for log object names.
+       * 
+ * + * string log_object_prefix = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for logObjectPrefix. + */ + public com.google.protobuf.ByteString getLogObjectPrefixBytes() { + java.lang.Object ref = logObjectPrefix_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + logObjectPrefix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Optional. A prefix for log object names.
+       * 
+ * + * string log_object_prefix = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The logObjectPrefix to set. + * @return This builder for chaining. + */ + public Builder setLogObjectPrefix(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + logObjectPrefix_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. A prefix for log object names.
+       * 
+ * + * string log_object_prefix = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLogObjectPrefix() { + logObjectPrefix_ = getDefaultInstance().getLogObjectPrefix(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. A prefix for log object names.
+       * 
+ * + * string log_object_prefix = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for logObjectPrefix to set. + * @return This builder for chaining. + */ + public Builder setLogObjectPrefixBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + logObjectPrefix_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Logging) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Logging) + private static final com.google.storage.v2.Bucket.Logging DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.Logging(); + } + + public static com.google.storage.v2.Bucket.Logging getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Logging parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.storage.v2.Bucket.Logging getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ObjectRetentionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.ObjectRetention) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. Output only. If true, object retention is enabled for the
+     * bucket.
+     * 
+ * + * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enabled. + */ + boolean getEnabled(); + } + + /** + * + * + *
+   * Object Retention related properties of a bucket.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.ObjectRetention} + */ + public static final class ObjectRetention extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.ObjectRetention) + ObjectRetentionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ObjectRetention"); + } + + // Use ObjectRetention.newBuilder() to construct. + private ObjectRetention(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ObjectRetention() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_ObjectRetention_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_ObjectRetention_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.ObjectRetention.class, + com.google.storage.v2.Bucket.ObjectRetention.Builder.class); + } + + public static final int ENABLED_FIELD_NUMBER = 1; + private boolean enabled_ = false; + + /** + * + * + *
+     * Optional. Output only. If true, object retention is enabled for the
+     * bucket.
+     * 
+ * + * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enabled. + */ + @java.lang.Override + public boolean getEnabled() { + return enabled_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (enabled_ != false) { + output.writeBool(1, enabled_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (enabled_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, enabled_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.ObjectRetention)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.ObjectRetention other = + (com.google.storage.v2.Bucket.ObjectRetention) obj; + + if (getEnabled() != other.getEnabled()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENABLED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnabled()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.storage.v2.Bucket.ObjectRetention parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.ObjectRetention parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.ObjectRetention parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.ObjectRetention parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.ObjectRetention parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.ObjectRetention parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.ObjectRetention parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.ObjectRetention parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.storage.v2.Bucket.ObjectRetention parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.ObjectRetention parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.ObjectRetention parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.ObjectRetention parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.ObjectRetention prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Object Retention related properties of a bucket.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.ObjectRetention} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.ObjectRetention) + com.google.storage.v2.Bucket.ObjectRetentionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_ObjectRetention_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_ObjectRetention_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.ObjectRetention.class, + com.google.storage.v2.Bucket.ObjectRetention.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.ObjectRetention.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + enabled_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_ObjectRetention_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.ObjectRetention getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.ObjectRetention.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.ObjectRetention build() { + com.google.storage.v2.Bucket.ObjectRetention result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public 
com.google.storage.v2.Bucket.ObjectRetention buildPartial() { + com.google.storage.v2.Bucket.ObjectRetention result = + new com.google.storage.v2.Bucket.ObjectRetention(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.ObjectRetention result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.enabled_ = enabled_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.ObjectRetention) { + return mergeFrom((com.google.storage.v2.Bucket.ObjectRetention) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.ObjectRetention other) { + if (other == com.google.storage.v2.Bucket.ObjectRetention.getDefaultInstance()) return this; + if (other.getEnabled() != false) { + setEnabled(other.getEnabled()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + enabled_ = input.readBool(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + 
onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private boolean enabled_; + + /** + * + * + *
+       * Optional. Output only. If true, object retention is enabled for the
+       * bucket.
+       * 
+ * + * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enabled. + */ + @java.lang.Override + public boolean getEnabled() { + return enabled_; + } + + /** + * + * + *
+       * Optional. Output only. If true, object retention is enabled for the
+       * bucket.
+       * 
+ * + * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enabled to set. + * @return This builder for chaining. + */ + public Builder setEnabled(boolean value) { + + enabled_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Output only. If true, object retention is enabled for the
+       * bucket.
+       * 
+ * + * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearEnabled() { + bitField0_ = (bitField0_ & ~0x00000001); + enabled_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.ObjectRetention) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.ObjectRetention) + private static final com.google.storage.v2.Bucket.ObjectRetention DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.ObjectRetention(); + } + + public static com.google.storage.v2.Bucket.ObjectRetention getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ObjectRetention parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.ObjectRetention getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } 
+ } + + public interface RetentionPolicyOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.RetentionPolicy) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. Server-determined value that indicates the time from which
+     * policy was enforced and effective.
+     * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the effectiveTime field is set. + */ + boolean hasEffectiveTime(); + + /** + * + * + *
+     * Optional. Server-determined value that indicates the time from which
+     * policy was enforced and effective.
+     * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The effectiveTime. + */ + com.google.protobuf.Timestamp getEffectiveTime(); + + /** + * + * + *
+     * Optional. Server-determined value that indicates the time from which
+     * policy was enforced and effective.
+     * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder(); + + /** + * + * + *
+     * Optional. Once locked, an object retention policy cannot be modified.
+     * 
+ * + * bool is_locked = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The isLocked. + */ + boolean getIsLocked(); + + /** + * + * + *
+     * Optional. The duration that objects need to be retained. Retention
+     * duration must be greater than zero and less than 100 years. Note that
+     * enforcement of retention periods less than a day is not guaranteed. Such
+     * periods should only be used for testing purposes. Any `nanos` value
+     * specified is rounded down to the nearest second.
+     * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionDuration field is set. + */ + boolean hasRetentionDuration(); + + /** + * + * + *
+     * Optional. The duration that objects need to be retained. Retention
+     * duration must be greater than zero and less than 100 years. Note that
+     * enforcement of retention periods less than a day is not guaranteed. Such
+     * periods should only be used for testing purposes. Any `nanos` value
+     * specified is rounded down to the nearest second.
+     * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionDuration. + */ + com.google.protobuf.Duration getRetentionDuration(); + + /** + * + * + *
+     * Optional. The duration that objects need to be retained. Retention
+     * duration must be greater than zero and less than 100 years. Note that
+     * enforcement of retention periods less than a day is not guaranteed. Such
+     * periods should only be used for testing purposes. Any `nanos` value
+     * specified is rounded down to the nearest second.
+     * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.DurationOrBuilder getRetentionDurationOrBuilder(); + } + + /** + * + * + *
+   * Retention policy properties of a bucket.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.RetentionPolicy} + */ + public static final class RetentionPolicy extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.RetentionPolicy) + RetentionPolicyOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RetentionPolicy"); + } + + // Use RetentionPolicy.newBuilder() to construct. + private RetentionPolicy(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RetentionPolicy() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_RetentionPolicy_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_RetentionPolicy_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.RetentionPolicy.class, + com.google.storage.v2.Bucket.RetentionPolicy.Builder.class); + } + + private int bitField0_; + public static final int EFFECTIVE_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp effectiveTime_; + + /** + * + * + *
+     * Optional. Server-determined value that indicates the time from which
+     * policy was enforced and effective.
+     * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the effectiveTime field is set. + */ + @java.lang.Override + public boolean hasEffectiveTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Optional. Server-determined value that indicates the time from which
+     * policy was enforced and effective.
+     * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The effectiveTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEffectiveTime() { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + + /** + * + * + *
+     * Optional. Server-determined value that indicates the time from which
+     * policy was enforced and effective.
+     * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder() { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + + public static final int IS_LOCKED_FIELD_NUMBER = 2; + private boolean isLocked_ = false; + + /** + * + * + *
+     * Optional. Once locked, an object retention policy cannot be modified.
+     * 
+ * + * bool is_locked = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The isLocked. + */ + @java.lang.Override + public boolean getIsLocked() { + return isLocked_; + } + + public static final int RETENTION_DURATION_FIELD_NUMBER = 4; + private com.google.protobuf.Duration retentionDuration_; + + /** + * + * + *
+     * Optional. The duration that objects need to be retained. Retention
+     * duration must be greater than zero and less than 100 years. Note that
+     * enforcement of retention periods less than a day is not guaranteed. Such
+     * periods should only be used for testing purposes. Any `nanos` value
+     * specified is rounded down to the nearest second.
+     * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionDuration field is set. + */ + @java.lang.Override + public boolean hasRetentionDuration() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Optional. The duration that objects need to be retained. Retention
+     * duration must be greater than zero and less than 100 years. Note that
+     * enforcement of retention periods less than a day is not guaranteed. Such
+     * periods should only be used for testing purposes. Any `nanos` value
+     * specified is rounded down to the nearest second.
+     * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionDuration. + */ + @java.lang.Override + public com.google.protobuf.Duration getRetentionDuration() { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } + + /** + * + * + *
+     * Optional. The duration that objects need to be retained. Retention
+     * duration must be greater than zero and less than 100 years. Note that
+     * enforcement of retention periods less than a day is not guaranteed. Such
+     * periods should only be used for testing purposes. Any `nanos` value
+     * specified is rounded down to the nearest second.
+     * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getRetentionDurationOrBuilder() { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getEffectiveTime()); + } + if (isLocked_ != false) { + output.writeBool(2, isLocked_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getRetentionDuration()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getEffectiveTime()); + } + if (isLocked_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, isLocked_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getRetentionDuration()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.RetentionPolicy)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.RetentionPolicy other = + (com.google.storage.v2.Bucket.RetentionPolicy) obj; + + if 
(hasEffectiveTime() != other.hasEffectiveTime()) return false; + if (hasEffectiveTime()) { + if (!getEffectiveTime().equals(other.getEffectiveTime())) return false; + } + if (getIsLocked() != other.getIsLocked()) return false; + if (hasRetentionDuration() != other.hasRetentionDuration()) return false; + if (hasRetentionDuration()) { + if (!getRetentionDuration().equals(other.getRetentionDuration())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasEffectiveTime()) { + hash = (37 * hash) + EFFECTIVE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEffectiveTime().hashCode(); + } + hash = (37 * hash) + IS_LOCKED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIsLocked()); + if (hasRetentionDuration()) { + hash = (37 * hash) + RETENTION_DURATION_FIELD_NUMBER; + hash = (53 * hash) + getRetentionDuration().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.RetentionPolicy parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.RetentionPolicy parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.RetentionPolicy parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.RetentionPolicy parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.RetentionPolicy parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.RetentionPolicy parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.RetentionPolicy parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.RetentionPolicy parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.RetentionPolicy parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.RetentionPolicy parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.RetentionPolicy parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public 
static com.google.storage.v2.Bucket.RetentionPolicy parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.RetentionPolicy prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Retention policy properties of a bucket.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.RetentionPolicy} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.RetentionPolicy) + com.google.storage.v2.Bucket.RetentionPolicyOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_RetentionPolicy_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_RetentionPolicy_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.RetentionPolicy.class, + com.google.storage.v2.Bucket.RetentionPolicy.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.RetentionPolicy.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetEffectiveTimeFieldBuilder(); + internalGetRetentionDurationFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + effectiveTime_ = null; + if (effectiveTimeBuilder_ != null) { + effectiveTimeBuilder_.dispose(); + effectiveTimeBuilder_ = null; + } + isLocked_ = false; + retentionDuration_ = null; + if (retentionDurationBuilder_ != null) { + retentionDurationBuilder_.dispose(); + retentionDurationBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + 
.internal_static_google_storage_v2_Bucket_RetentionPolicy_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.RetentionPolicy getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.RetentionPolicy.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.RetentionPolicy build() { + com.google.storage.v2.Bucket.RetentionPolicy result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.RetentionPolicy buildPartial() { + com.google.storage.v2.Bucket.RetentionPolicy result = + new com.google.storage.v2.Bucket.RetentionPolicy(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.RetentionPolicy result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.effectiveTime_ = + effectiveTimeBuilder_ == null ? effectiveTime_ : effectiveTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.isLocked_ = isLocked_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.retentionDuration_ = + retentionDurationBuilder_ == null + ? 
retentionDuration_ + : retentionDurationBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.RetentionPolicy) { + return mergeFrom((com.google.storage.v2.Bucket.RetentionPolicy) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.RetentionPolicy other) { + if (other == com.google.storage.v2.Bucket.RetentionPolicy.getDefaultInstance()) return this; + if (other.hasEffectiveTime()) { + mergeEffectiveTime(other.getEffectiveTime()); + } + if (other.getIsLocked() != false) { + setIsLocked(other.getIsLocked()); + } + if (other.hasRetentionDuration()) { + mergeRetentionDuration(other.getRetentionDuration()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetEffectiveTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + isLocked_ = input.readBool(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 34: + { + input.readMessage( + internalGetRetentionDurationFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + 
break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp effectiveTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + effectiveTimeBuilder_; + + /** + * + * + *
+       * Optional. Server-determined value that indicates the time from which
+       * policy was enforced and effective.
+       * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the effectiveTime field is set. + */ + public boolean hasEffectiveTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Optional. Server-determined value that indicates the time from which
+       * policy was enforced and effective.
+       * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The effectiveTime. + */ + public com.google.protobuf.Timestamp getEffectiveTime() { + if (effectiveTimeBuilder_ == null) { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } else { + return effectiveTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Optional. Server-determined value that indicates the time from which
+       * policy was enforced and effective.
+       * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEffectiveTime(com.google.protobuf.Timestamp value) { + if (effectiveTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + effectiveTime_ = value; + } else { + effectiveTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Server-determined value that indicates the time from which
+       * policy was enforced and effective.
+       * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEffectiveTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (effectiveTimeBuilder_ == null) { + effectiveTime_ = builderForValue.build(); + } else { + effectiveTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Server-determined value that indicates the time from which
+       * policy was enforced and effective.
+       * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeEffectiveTime(com.google.protobuf.Timestamp value) { + if (effectiveTimeBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && effectiveTime_ != null + && effectiveTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEffectiveTimeBuilder().mergeFrom(value); + } else { + effectiveTime_ = value; + } + } else { + effectiveTimeBuilder_.mergeFrom(value); + } + if (effectiveTime_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Optional. Server-determined value that indicates the time from which
+       * policy was enforced and effective.
+       * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearEffectiveTime() { + bitField0_ = (bitField0_ & ~0x00000001); + effectiveTime_ = null; + if (effectiveTimeBuilder_ != null) { + effectiveTimeBuilder_.dispose(); + effectiveTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Server-determined value that indicates the time from which
+       * policy was enforced and effective.
+       * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Timestamp.Builder getEffectiveTimeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetEffectiveTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Optional. Server-determined value that indicates the time from which
+       * policy was enforced and effective.
+       * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder() { + if (effectiveTimeBuilder_ != null) { + return effectiveTimeBuilder_.getMessageOrBuilder(); + } else { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + } + + /** + * + * + *
+       * Optional. Server-determined value that indicates the time from which
+       * policy was enforced and effective.
+       * 
+ * + * + * .google.protobuf.Timestamp effective_time = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEffectiveTimeFieldBuilder() { + if (effectiveTimeBuilder_ == null) { + effectiveTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEffectiveTime(), getParentForChildren(), isClean()); + effectiveTime_ = null; + } + return effectiveTimeBuilder_; + } + + private boolean isLocked_; + + /** + * + * + *
+       * Optional. Once locked, an object retention policy cannot be modified.
+       * 
+ * + * bool is_locked = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The isLocked. + */ + @java.lang.Override + public boolean getIsLocked() { + return isLocked_; + } + + /** + * + * + *
+       * Optional. Once locked, an object retention policy cannot be modified.
+       * 
+ * + * bool is_locked = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The isLocked to set. + * @return This builder for chaining. + */ + public Builder setIsLocked(boolean value) { + + isLocked_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Once locked, an object retention policy cannot be modified.
+       * 
+ * + * bool is_locked = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearIsLocked() { + bitField0_ = (bitField0_ & ~0x00000002); + isLocked_ = false; + onChanged(); + return this; + } + + private com.google.protobuf.Duration retentionDuration_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + retentionDurationBuilder_; + + /** + * + * + *
+       * Optional. The duration that objects need to be retained. Retention
+       * duration must be greater than zero and less than 100 years. Note that
+       * enforcement of retention periods less than a day is not guaranteed. Such
+       * periods should only be used for testing purposes. Any `nanos` value
+       * specified is rounded down to the nearest second.
+       * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionDuration field is set. + */ + public boolean hasRetentionDuration() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+       * Optional. The duration that objects need to be retained. Retention
+       * duration must be greater than zero and less than 100 years. Note that
+       * enforcement of retention periods less than a day is not guaranteed. Such
+       * periods should only be used for testing purposes. Any `nanos` value
+       * specified is rounded down to the nearest second.
+       * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionDuration. + */ + public com.google.protobuf.Duration getRetentionDuration() { + if (retentionDurationBuilder_ == null) { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } else { + return retentionDurationBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Optional. The duration that objects need to be retained. Retention
+       * duration must be greater than zero and less than 100 years. Note that
+       * enforcement of retention periods less than a day is not guaranteed. Such
+       * periods should only be used for testing purposes. Any `nanos` value
+       * specified is rounded down to the nearest second.
+       * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetentionDuration(com.google.protobuf.Duration value) { + if (retentionDurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + retentionDuration_ = value; + } else { + retentionDurationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The duration that objects need to be retained. Retention
+       * duration must be greater than zero and less than 100 years. Note that
+       * enforcement of retention periods less than a day is not guaranteed. Such
+       * periods should only be used for testing purposes. Any `nanos` value
+       * specified is rounded down to the nearest second.
+       * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetentionDuration(com.google.protobuf.Duration.Builder builderForValue) { + if (retentionDurationBuilder_ == null) { + retentionDuration_ = builderForValue.build(); + } else { + retentionDurationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The duration that objects need to be retained. Retention
+       * duration must be greater than zero and less than 100 years. Note that
+       * enforcement of retention periods less than a day is not guaranteed. Such
+       * periods should only be used for testing purposes. Any `nanos` value
+       * specified is rounded down to the nearest second.
+       * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeRetentionDuration(com.google.protobuf.Duration value) { + if (retentionDurationBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && retentionDuration_ != null + && retentionDuration_ != com.google.protobuf.Duration.getDefaultInstance()) { + getRetentionDurationBuilder().mergeFrom(value); + } else { + retentionDuration_ = value; + } + } else { + retentionDurationBuilder_.mergeFrom(value); + } + if (retentionDuration_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Optional. The duration that objects need to be retained. Retention
+       * duration must be greater than zero and less than 100 years. Note that
+       * enforcement of retention periods less than a day is not guaranteed. Such
+       * periods should only be used for testing purposes. Any `nanos` value
+       * specified is rounded down to the nearest second.
+       * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRetentionDuration() { + bitField0_ = (bitField0_ & ~0x00000004); + retentionDuration_ = null; + if (retentionDurationBuilder_ != null) { + retentionDurationBuilder_.dispose(); + retentionDurationBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The duration that objects need to be retained. Retention
+       * duration must be greater than zero and less than 100 years. Note that
+       * enforcement of retention periods less than a day is not guaranteed. Such
+       * periods should only be used for testing purposes. Any `nanos` value
+       * specified is rounded down to the nearest second.
+       * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Duration.Builder getRetentionDurationBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetRetentionDurationFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Optional. The duration that objects need to be retained. Retention
+       * duration must be greater than zero and less than 100 years. Note that
+       * enforcement of retention periods less than a day is not guaranteed. Such
+       * periods should only be used for testing purposes. Any `nanos` value
+       * specified is rounded down to the nearest second.
+       * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.DurationOrBuilder getRetentionDurationOrBuilder() { + if (retentionDurationBuilder_ != null) { + return retentionDurationBuilder_.getMessageOrBuilder(); + } else { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } + } + + /** + * + * + *
+       * Optional. The duration that objects need to be retained. Retention
+       * duration must be greater than zero and less than 100 years. Note that
+       * enforcement of retention periods less than a day is not guaranteed. Such
+       * periods should only be used for testing purposes. Any `nanos` value
+       * specified is rounded down to the nearest second.
+       * 
+ * + * + * .google.protobuf.Duration retention_duration = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + internalGetRetentionDurationFieldBuilder() { + if (retentionDurationBuilder_ == null) { + retentionDurationBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + getRetentionDuration(), getParentForChildren(), isClean()); + retentionDuration_ = null; + } + return retentionDurationBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.RetentionPolicy) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.RetentionPolicy) + private static final com.google.storage.v2.Bucket.RetentionPolicy DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.RetentionPolicy(); + } + + public static com.google.storage.v2.Bucket.RetentionPolicy getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RetentionPolicy parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.RetentionPolicy getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface SoftDeletePolicyOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.SoftDeletePolicy) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The period of time that soft-deleted objects in the bucket must be
+     * retained and cannot be permanently deleted. The duration must be greater
+     * than or equal to 7 days and less than 1 year.
+     * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + * + * @return Whether the retentionDuration field is set. + */ + boolean hasRetentionDuration(); + + /** + * + * + *
+     * The period of time that soft-deleted objects in the bucket must be
+     * retained and cannot be permanently deleted. The duration must be greater
+     * than or equal to 7 days and less than 1 year.
+     * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + * + * @return The retentionDuration. + */ + com.google.protobuf.Duration getRetentionDuration(); + + /** + * + * + *
+     * The period of time that soft-deleted objects in the bucket must be
+     * retained and cannot be permanently deleted. The duration must be greater
+     * than or equal to 7 days and less than 1 year.
+     * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + */ + com.google.protobuf.DurationOrBuilder getRetentionDurationOrBuilder(); + + /** + * + * + *
+     * Time from which the policy was effective. This is service-provided.
+     * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return Whether the effectiveTime field is set. + */ + boolean hasEffectiveTime(); + + /** + * + * + *
+     * Time from which the policy was effective. This is service-provided.
+     * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return The effectiveTime. + */ + com.google.protobuf.Timestamp getEffectiveTime(); + + /** + * + * + *
+     * Time from which the policy was effective. This is service-provided.
+     * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder(); + } + + /** + * + * + *
+   * Soft delete policy properties of a bucket.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.SoftDeletePolicy} + */ + public static final class SoftDeletePolicy extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.SoftDeletePolicy) + SoftDeletePolicyOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SoftDeletePolicy"); + } + + // Use SoftDeletePolicy.newBuilder() to construct. + private SoftDeletePolicy(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SoftDeletePolicy() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_SoftDeletePolicy_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_SoftDeletePolicy_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.SoftDeletePolicy.class, + com.google.storage.v2.Bucket.SoftDeletePolicy.Builder.class); + } + + private int bitField0_; + public static final int RETENTION_DURATION_FIELD_NUMBER = 1; + private com.google.protobuf.Duration retentionDuration_; + + /** + * + * + *
+     * The period of time that soft-deleted objects in the bucket must be
+     * retained and cannot be permanently deleted. The duration must be greater
+     * than or equal to 7 days and less than 1 year.
+     * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + * + * @return Whether the retentionDuration field is set. + */ + @java.lang.Override + public boolean hasRetentionDuration() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * The period of time that soft-deleted objects in the bucket must be
+     * retained and cannot be permanently deleted. The duration must be greater
+     * than or equal to 7 days and less than 1 year.
+     * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + * + * @return The retentionDuration. + */ + @java.lang.Override + public com.google.protobuf.Duration getRetentionDuration() { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } + + /** + * + * + *
+     * The period of time that soft-deleted objects in the bucket must be
+     * retained and cannot be permanently deleted. The duration must be greater
+     * than or equal to 7 days and less than 1 year.
+     * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getRetentionDurationOrBuilder() { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } + + public static final int EFFECTIVE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp effectiveTime_; + + /** + * + * + *
+     * Time from which the policy was effective. This is service-provided.
+     * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return Whether the effectiveTime field is set. + */ + @java.lang.Override + public boolean hasEffectiveTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Time from which the policy was effective. This is service-provided.
+     * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return The effectiveTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEffectiveTime() { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + + /** + * + * + *
+     * Time from which the policy was effective. This is service-provided.
+     * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder() { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getRetentionDuration()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getEffectiveTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getRetentionDuration()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getEffectiveTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.SoftDeletePolicy)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.SoftDeletePolicy other = + (com.google.storage.v2.Bucket.SoftDeletePolicy) obj; + + if (hasRetentionDuration() != other.hasRetentionDuration()) return false; + if (hasRetentionDuration()) { + if (!getRetentionDuration().equals(other.getRetentionDuration())) return false; + } + if (hasEffectiveTime() != 
other.hasEffectiveTime()) return false; + if (hasEffectiveTime()) { + if (!getEffectiveTime().equals(other.getEffectiveTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRetentionDuration()) { + hash = (37 * hash) + RETENTION_DURATION_FIELD_NUMBER; + hash = (53 * hash) + getRetentionDuration().hashCode(); + } + if (hasEffectiveTime()) { + hash = (37 * hash) + EFFECTIVE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEffectiveTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.SoftDeletePolicy parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.SoftDeletePolicy parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.SoftDeletePolicy parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.SoftDeletePolicy parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.SoftDeletePolicy parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.storage.v2.Bucket.SoftDeletePolicy parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.SoftDeletePolicy parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.SoftDeletePolicy parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.SoftDeletePolicy parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.SoftDeletePolicy parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.SoftDeletePolicy parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.SoftDeletePolicy parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public 
static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.SoftDeletePolicy prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Soft delete policy properties of a bucket.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.SoftDeletePolicy} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.SoftDeletePolicy) + com.google.storage.v2.Bucket.SoftDeletePolicyOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_SoftDeletePolicy_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_SoftDeletePolicy_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.SoftDeletePolicy.class, + com.google.storage.v2.Bucket.SoftDeletePolicy.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.SoftDeletePolicy.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetRetentionDurationFieldBuilder(); + internalGetEffectiveTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + retentionDuration_ = null; + if (retentionDurationBuilder_ != null) { + retentionDurationBuilder_.dispose(); + retentionDurationBuilder_ = null; + } + effectiveTime_ = null; + if (effectiveTimeBuilder_ != null) { + effectiveTimeBuilder_.dispose(); + effectiveTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + 
.internal_static_google_storage_v2_Bucket_SoftDeletePolicy_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.SoftDeletePolicy getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.SoftDeletePolicy.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.SoftDeletePolicy build() { + com.google.storage.v2.Bucket.SoftDeletePolicy result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.SoftDeletePolicy buildPartial() { + com.google.storage.v2.Bucket.SoftDeletePolicy result = + new com.google.storage.v2.Bucket.SoftDeletePolicy(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.SoftDeletePolicy result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.retentionDuration_ = + retentionDurationBuilder_ == null + ? retentionDuration_ + : retentionDurationBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.effectiveTime_ = + effectiveTimeBuilder_ == null ? 
effectiveTime_ : effectiveTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.SoftDeletePolicy) { + return mergeFrom((com.google.storage.v2.Bucket.SoftDeletePolicy) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.SoftDeletePolicy other) { + if (other == com.google.storage.v2.Bucket.SoftDeletePolicy.getDefaultInstance()) + return this; + if (other.hasRetentionDuration()) { + mergeRetentionDuration(other.getRetentionDuration()); + } + if (other.hasEffectiveTime()) { + mergeEffectiveTime(other.getEffectiveTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetRetentionDurationFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetEffectiveTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + 
onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Duration retentionDuration_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + retentionDurationBuilder_; + + /** + * + * + *
+       * The period of time that soft-deleted objects in the bucket must be
+       * retained and cannot be permanently deleted. The duration must be greater
+       * than or equal to 7 days and less than 1 year.
+       * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + * + * @return Whether the retentionDuration field is set. + */ + public boolean hasRetentionDuration() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * The period of time that soft-deleted objects in the bucket must be
+       * retained and cannot be permanently deleted. The duration must be greater
+       * than or equal to 7 days and less than 1 year.
+       * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + * + * @return The retentionDuration. + */ + public com.google.protobuf.Duration getRetentionDuration() { + if (retentionDurationBuilder_ == null) { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } else { + return retentionDurationBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * The period of time that soft-deleted objects in the bucket must be
+       * retained and cannot be permanently deleted. The duration must be greater
+       * than or equal to 7 days and less than 1 year.
+       * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + */ + public Builder setRetentionDuration(com.google.protobuf.Duration value) { + if (retentionDurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + retentionDuration_ = value; + } else { + retentionDurationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The period of time that soft-deleted objects in the bucket must be
+       * retained and cannot be permanently deleted. The duration must be greater
+       * than or equal to 7 days and less than 1 year.
+       * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + */ + public Builder setRetentionDuration(com.google.protobuf.Duration.Builder builderForValue) { + if (retentionDurationBuilder_ == null) { + retentionDuration_ = builderForValue.build(); + } else { + retentionDurationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The period of time that soft-deleted objects in the bucket must be
+       * retained and cannot be permanently deleted. The duration must be greater
+       * than or equal to 7 days and less than 1 year.
+       * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + */ + public Builder mergeRetentionDuration(com.google.protobuf.Duration value) { + if (retentionDurationBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && retentionDuration_ != null + && retentionDuration_ != com.google.protobuf.Duration.getDefaultInstance()) { + getRetentionDurationBuilder().mergeFrom(value); + } else { + retentionDuration_ = value; + } + } else { + retentionDurationBuilder_.mergeFrom(value); + } + if (retentionDuration_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * The period of time that soft-deleted objects in the bucket must be
+       * retained and cannot be permanently deleted. The duration must be greater
+       * than or equal to 7 days and less than 1 year.
+       * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + */ + public Builder clearRetentionDuration() { + bitField0_ = (bitField0_ & ~0x00000001); + retentionDuration_ = null; + if (retentionDurationBuilder_ != null) { + retentionDurationBuilder_.dispose(); + retentionDurationBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * The period of time that soft-deleted objects in the bucket must be
+       * retained and cannot be permanently deleted. The duration must be greater
+       * than or equal to 7 days and less than 1 year.
+       * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + */ + public com.google.protobuf.Duration.Builder getRetentionDurationBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetRetentionDurationFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * The period of time that soft-deleted objects in the bucket must be
+       * retained and cannot be permanently deleted. The duration must be greater
+       * than or equal to 7 days and less than 1 year.
+       * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + */ + public com.google.protobuf.DurationOrBuilder getRetentionDurationOrBuilder() { + if (retentionDurationBuilder_ != null) { + return retentionDurationBuilder_.getMessageOrBuilder(); + } else { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } + } + + /** + * + * + *
+       * The period of time that soft-deleted objects in the bucket must be
+       * retained and cannot be permanently deleted. The duration must be greater
+       * than or equal to 7 days and less than 1 year.
+       * 
+ * + * optional .google.protobuf.Duration retention_duration = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + internalGetRetentionDurationFieldBuilder() { + if (retentionDurationBuilder_ == null) { + retentionDurationBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + getRetentionDuration(), getParentForChildren(), isClean()); + retentionDuration_ = null; + } + return retentionDurationBuilder_; + } + + private com.google.protobuf.Timestamp effectiveTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + effectiveTimeBuilder_; + + /** + * + * + *
+       * Time from which the policy was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return Whether the effectiveTime field is set. + */ + public boolean hasEffectiveTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Time from which the policy was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + * + * @return The effectiveTime. + */ + public com.google.protobuf.Timestamp getEffectiveTime() { + if (effectiveTimeBuilder_ == null) { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } else { + return effectiveTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Time from which the policy was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder setEffectiveTime(com.google.protobuf.Timestamp value) { + if (effectiveTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + effectiveTime_ = value; + } else { + effectiveTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Time from which the policy was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder setEffectiveTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (effectiveTimeBuilder_ == null) { + effectiveTime_ = builderForValue.build(); + } else { + effectiveTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Time from which the policy was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder mergeEffectiveTime(com.google.protobuf.Timestamp value) { + if (effectiveTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && effectiveTime_ != null + && effectiveTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEffectiveTimeBuilder().mergeFrom(value); + } else { + effectiveTime_ = value; + } + } else { + effectiveTimeBuilder_.mergeFrom(value); + } + if (effectiveTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Time from which the policy was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public Builder clearEffectiveTime() { + bitField0_ = (bitField0_ & ~0x00000002); + effectiveTime_ = null; + if (effectiveTimeBuilder_ != null) { + effectiveTimeBuilder_.dispose(); + effectiveTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Time from which the policy was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getEffectiveTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetEffectiveTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Time from which the policy was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getEffectiveTimeOrBuilder() { + if (effectiveTimeBuilder_ != null) { + return effectiveTimeBuilder_.getMessageOrBuilder(); + } else { + return effectiveTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : effectiveTime_; + } + } + + /** + * + * + *
+       * Time from which the policy was effective. This is service-provided.
+       * 
+ * + * optional .google.protobuf.Timestamp effective_time = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEffectiveTimeFieldBuilder() { + if (effectiveTimeBuilder_ == null) { + effectiveTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEffectiveTime(), getParentForChildren(), isClean()); + effectiveTime_ = null; + } + return effectiveTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.SoftDeletePolicy) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.SoftDeletePolicy) + private static final com.google.storage.v2.Bucket.SoftDeletePolicy DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.SoftDeletePolicy(); + } + + public static com.google.storage.v2.Bucket.SoftDeletePolicy getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SoftDeletePolicy parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.SoftDeletePolicy getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface VersioningOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Versioning) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. While set to true, versioning is fully enabled for this bucket.
+     * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enabled. + */ + boolean getEnabled(); + } + + /** + * + * + *
+   * Properties of a bucket related to versioning.
+   * For more information about Cloud Storage versioning, see [Object
+   * versioning](https://cloud.google.com/storage/docs/object-versioning).
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Versioning} + */ + public static final class Versioning extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Versioning) + VersioningOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Versioning"); + } + + // Use Versioning.newBuilder() to construct. + private Versioning(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Versioning() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Versioning_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Versioning_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Versioning.class, + com.google.storage.v2.Bucket.Versioning.Builder.class); + } + + public static final int ENABLED_FIELD_NUMBER = 1; + private boolean enabled_ = false; + + /** + * + * + *
+     * Optional. While set to true, versioning is fully enabled for this bucket.
+     * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enabled. + */ + @java.lang.Override + public boolean getEnabled() { + return enabled_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (enabled_ != false) { + output.writeBool(1, enabled_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (enabled_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, enabled_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.Versioning)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.Versioning other = (com.google.storage.v2.Bucket.Versioning) obj; + + if (getEnabled() != other.getEnabled()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENABLED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnabled()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.Versioning parseFrom(java.nio.ByteBuffer data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Versioning parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Versioning parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Versioning parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Versioning parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Versioning parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Versioning parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Versioning parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Versioning parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Versioning parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Versioning parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Versioning parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.Versioning prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Properties of a bucket related to versioning.
+     * For more information about Cloud Storage versioning, see [Object
+     * versioning](https://cloud.google.com/storage/docs/object-versioning).
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Versioning} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Versioning) + com.google.storage.v2.Bucket.VersioningOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Versioning_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Versioning_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Versioning.class, + com.google.storage.v2.Bucket.Versioning.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.Versioning.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + enabled_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Versioning_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Versioning getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Versioning.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Versioning build() { + com.google.storage.v2.Bucket.Versioning result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Versioning buildPartial() { + 
com.google.storage.v2.Bucket.Versioning result = + new com.google.storage.v2.Bucket.Versioning(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.Versioning result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.enabled_ = enabled_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.Versioning) { + return mergeFrom((com.google.storage.v2.Bucket.Versioning) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.Versioning other) { + if (other == com.google.storage.v2.Bucket.Versioning.getDefaultInstance()) return this; + if (other.getEnabled() != false) { + setEnabled(other.getEnabled()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + enabled_ = input.readBool(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private boolean enabled_; 
+ + /** + * + * + *
+       * Optional. While set to true, versioning is fully enabled for this bucket.
+       * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enabled. + */ + @java.lang.Override + public boolean getEnabled() { + return enabled_; + } + + /** + * + * + *
+       * Optional. While set to true, versioning is fully enabled for this bucket.
+       * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The enabled to set. + * @return This builder for chaining. + */ + public Builder setEnabled(boolean value) { + + enabled_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. While set to true, versioning is fully enabled for this bucket.
+       * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEnabled() { + bitField0_ = (bitField0_ & ~0x00000001); + enabled_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Versioning) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Versioning) + private static final com.google.storage.v2.Bucket.Versioning DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.Versioning(); + } + + public static com.google.storage.v2.Bucket.Versioning getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Versioning parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Versioning getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface WebsiteOrBuilder + extends + // 
@@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Website) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. If the requested object path is missing, the service ensures
+     * the path has a trailing '/', appends this suffix, and attempts to retrieve
+     * the resulting object. This allows the creation of `index.html` objects to
+     * represent directory pages.
+     * 
+ * + * string main_page_suffix = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The mainPageSuffix. + */ + java.lang.String getMainPageSuffix(); + + /** + * + * + *
+     * Optional. If the requested object path is missing, the service ensures
+     * the path has a trailing '/', appends this suffix, and attempts to retrieve
+     * the resulting object. This allows the creation of `index.html` objects to
+     * represent directory pages.
+     * 
+ * + * string main_page_suffix = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for mainPageSuffix. + */ + com.google.protobuf.ByteString getMainPageSuffixBytes(); + + /** + * + * + *
+     * Optional. If the requested object path is missing, and any
+     * `mainPageSuffix` object is missing, if applicable, the service
+     * returns the named object from this bucket as the content for a
+     * [404 Not Found](https://tools.ietf.org/html/rfc7231#section-6.5.4)
+     * result.
+     * 
+ * + * string not_found_page = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The notFoundPage. + */ + java.lang.String getNotFoundPage(); + + /** + * + * + *
+     * Optional. If the requested object path is missing, and any
+     * `mainPageSuffix` object is missing, if applicable, the service
+     * returns the named object from this bucket as the content for a
+     * [404 Not Found](https://tools.ietf.org/html/rfc7231#section-6.5.4)
+     * result.
+     * 
+ * + * string not_found_page = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for notFoundPage. + */ + com.google.protobuf.ByteString getNotFoundPageBytes(); + } + + /** + * + * + *
+   * Properties of a bucket related to accessing the contents as a static
+   * website. For details, see [hosting a static website using Cloud
+   * Storage](https://cloud.google.com/storage/docs/hosting-static-website).
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Website} + */ + public static final class Website extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Website) + WebsiteOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Website"); + } + + // Use Website.newBuilder() to construct. + private Website(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Website() { + mainPageSuffix_ = ""; + notFoundPage_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Website_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Website_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Website.class, + com.google.storage.v2.Bucket.Website.Builder.class); + } + + public static final int MAIN_PAGE_SUFFIX_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object mainPageSuffix_ = ""; + + /** + * + * + *
+     * Optional. If the requested object path is missing, the service ensures
+     * the path has a trailing '/', appends this suffix, and attempts to retrieve
+     * the resulting object. This allows the creation of `index.html` objects to
+     * represent directory pages.
+     * 
+ * + * string main_page_suffix = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The mainPageSuffix. + */ + @java.lang.Override + public java.lang.String getMainPageSuffix() { + java.lang.Object ref = mainPageSuffix_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + mainPageSuffix_ = s; + return s; + } + } + + /** + * + * + *
+     * Optional. If the requested object path is missing, the service ensures
+     * the path has a trailing '/', appends this suffix, and attempts to retrieve
+     * the resulting object. This allows the creation of `index.html` objects to
+     * represent directory pages.
+     * 
+ * + * string main_page_suffix = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for mainPageSuffix. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMainPageSuffixBytes() { + java.lang.Object ref = mainPageSuffix_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + mainPageSuffix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int NOT_FOUND_PAGE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object notFoundPage_ = ""; + + /** + * + * + *
+     * Optional. If the requested object path is missing, and any
+     * `mainPageSuffix` object is missing, if applicable, the service
+     * returns the named object from this bucket as the content for a
+     * [404 Not Found](https://tools.ietf.org/html/rfc7231#section-6.5.4)
+     * result.
+     * 
+ * + * string not_found_page = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The notFoundPage. + */ + @java.lang.Override + public java.lang.String getNotFoundPage() { + java.lang.Object ref = notFoundPage_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + notFoundPage_ = s; + return s; + } + } + + /** + * + * + *
+     * Optional. If the requested object path is missing, and any
+     * `mainPageSuffix` object is missing, if applicable, the service
+     * returns the named object from this bucket as the content for a
+     * [404 Not Found](https://tools.ietf.org/html/rfc7231#section-6.5.4)
+     * result.
+     * 
+ * + * string not_found_page = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for notFoundPage. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNotFoundPageBytes() { + java.lang.Object ref = notFoundPage_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + notFoundPage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(mainPageSuffix_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, mainPageSuffix_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(notFoundPage_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, notFoundPage_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(mainPageSuffix_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, mainPageSuffix_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(notFoundPage_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, notFoundPage_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.storage.v2.Bucket.Website)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.Website other = (com.google.storage.v2.Bucket.Website) obj; + + if (!getMainPageSuffix().equals(other.getMainPageSuffix())) return false; + if (!getNotFoundPage().equals(other.getNotFoundPage())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + MAIN_PAGE_SUFFIX_FIELD_NUMBER; + hash = (53 * hash) + getMainPageSuffix().hashCode(); + hash = (37 * hash) + NOT_FOUND_PAGE_FIELD_NUMBER; + hash = (53 * hash) + getNotFoundPage().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.Website parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Website parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Website parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Website parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Website parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Website parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Website parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Website parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Website parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Website parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Website parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Website parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static 
Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.Website prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Properties of a bucket related to accessing the contents as a static
+     * website. For details, see [hosting a static website using Cloud
+     * Storage](https://cloud.google.com/storage/docs/hosting-static-website).
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Website} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Website) + com.google.storage.v2.Bucket.WebsiteOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Website_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Website_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Website.class, + com.google.storage.v2.Bucket.Website.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.Website.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + mainPageSuffix_ = ""; + notFoundPage_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Website_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Website getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Website.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Website build() { + com.google.storage.v2.Bucket.Website result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Website buildPartial() { + com.google.storage.v2.Bucket.Website 
result = + new com.google.storage.v2.Bucket.Website(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.Website result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.mainPageSuffix_ = mainPageSuffix_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.notFoundPage_ = notFoundPage_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.Website) { + return mergeFrom((com.google.storage.v2.Bucket.Website) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.Website other) { + if (other == com.google.storage.v2.Bucket.Website.getDefaultInstance()) return this; + if (!other.getMainPageSuffix().isEmpty()) { + mainPageSuffix_ = other.mainPageSuffix_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getNotFoundPage().isEmpty()) { + notFoundPage_ = other.notFoundPage_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + mainPageSuffix_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + notFoundPage_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object mainPageSuffix_ = ""; + + /** + * + * + *
+       * Optional. If the requested object path is missing, the service ensures
+       * the path has a trailing '/', appends this suffix, and attempts to retrieve
+       * the resulting object. This allows the creation of `index.html` objects to
+       * represent directory pages.
+       * 
+ * + * string main_page_suffix = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The mainPageSuffix. + */ + public java.lang.String getMainPageSuffix() { + java.lang.Object ref = mainPageSuffix_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + mainPageSuffix_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Optional. If the requested object path is missing, the service ensures
+       * the path has a trailing '/', appends this suffix, and attempts to retrieve
+       * the resulting object. This allows the creation of `index.html` objects to
+       * represent directory pages.
+       * 
+ * + * string main_page_suffix = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for mainPageSuffix. + */ + public com.google.protobuf.ByteString getMainPageSuffixBytes() { + java.lang.Object ref = mainPageSuffix_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + mainPageSuffix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Optional. If the requested object path is missing, the service ensures
+       * the path has a trailing '/', appends this suffix, and attempts to retrieve
+       * the resulting object. This allows the creation of `index.html` objects to
+       * represent directory pages.
+       * 
+ * + * string main_page_suffix = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The mainPageSuffix to set. + * @return This builder for chaining. + */ + public Builder setMainPageSuffix(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + mainPageSuffix_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. If the requested object path is missing, the service ensures
+       * the path has a trailing '/', appends this suffix, and attempts to retrieve
+       * the resulting object. This allows the creation of `index.html` objects to
+       * represent directory pages.
+       * 
+ * + * string main_page_suffix = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearMainPageSuffix() { + mainPageSuffix_ = getDefaultInstance().getMainPageSuffix(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. If the requested object path is missing, the service ensures
+       * the path has a trailing '/', appends this suffix, and attempts to retrieve
+       * the resulting object. This allows the creation of `index.html` objects to
+       * represent directory pages.
+       * 
+ * + * string main_page_suffix = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for mainPageSuffix to set. + * @return This builder for chaining. + */ + public Builder setMainPageSuffixBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + mainPageSuffix_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object notFoundPage_ = ""; + + /** + * + * + *
+       * Optional. If the requested object path is missing, and any
+       * `mainPageSuffix` object is missing, if applicable, the service
+       * returns the named object from this bucket as the content for a
+       * [404 Not Found](https://tools.ietf.org/html/rfc7231#section-6.5.4)
+       * result.
+       * 
+ * + * string not_found_page = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The notFoundPage. + */ + public java.lang.String getNotFoundPage() { + java.lang.Object ref = notFoundPage_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + notFoundPage_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Optional. If the requested object path is missing, and any
+       * `mainPageSuffix` object is missing, if applicable, the service
+       * returns the named object from this bucket as the content for a
+       * [404 Not Found](https://tools.ietf.org/html/rfc7231#section-6.5.4)
+       * result.
+       * 
+ * + * string not_found_page = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for notFoundPage. + */ + public com.google.protobuf.ByteString getNotFoundPageBytes() { + java.lang.Object ref = notFoundPage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + notFoundPage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Optional. If the requested object path is missing, and any
+       * `mainPageSuffix` object is missing, if applicable, the service
+       * returns the named object from this bucket as the content for a
+       * [404 Not Found](https://tools.ietf.org/html/rfc7231#section-6.5.4)
+       * result.
+       * 
+ * + * string not_found_page = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The notFoundPage to set. + * @return This builder for chaining. + */ + public Builder setNotFoundPage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + notFoundPage_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. If the requested object path is missing, and any
+       * `mainPageSuffix` object is missing, if applicable, the service
+       * returns the named object from this bucket as the content for a
+       * [404 Not Found](https://tools.ietf.org/html/rfc7231#section-6.5.4)
+       * result.
+       * 
+ * + * string not_found_page = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearNotFoundPage() { + notFoundPage_ = getDefaultInstance().getNotFoundPage(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. If the requested object path is missing, and any
+       * `mainPageSuffix` object is missing, if applicable, the service
+       * returns the named object from this bucket as the content for a
+       * [404 Not Found](https://tools.ietf.org/html/rfc7231#section-6.5.4)
+       * result.
+       * 
+ * + * string not_found_page = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for notFoundPage to set. + * @return This builder for chaining. + */ + public Builder setNotFoundPageBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + notFoundPage_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Website) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Website) + private static final com.google.storage.v2.Bucket.Website DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.Website(); + } + + public static com.google.storage.v2.Bucket.Website getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Website parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.storage.v2.Bucket.Website getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface CustomPlacementConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.CustomPlacementConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the dataLocations. + */ + java.util.List getDataLocationsList(); + + /** + * + * + *
+     * Optional. List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of dataLocations. + */ + int getDataLocationsCount(); + + /** + * + * + *
+     * Optional. List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The dataLocations at the given index. + */ + java.lang.String getDataLocations(int index); + + /** + * + * + *
+     * Optional. List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the dataLocations at the given index. + */ + com.google.protobuf.ByteString getDataLocationsBytes(int index); + } + + /** + * + * + *
+   * Configuration for [configurable dual-
+   * regions](https://cloud.google.com/storage/docs/locations#configurable). It
+   * should specify precisely two eligible regions within the same multi-region.
+   * For details, see
+   * [locations](https://cloud.google.com/storage/docs/locations).
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.CustomPlacementConfig} + */ + public static final class CustomPlacementConfig extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.CustomPlacementConfig) + CustomPlacementConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CustomPlacementConfig"); + } + + // Use CustomPlacementConfig.newBuilder() to construct. + private CustomPlacementConfig(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CustomPlacementConfig() { + dataLocations_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_CustomPlacementConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_CustomPlacementConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.CustomPlacementConfig.class, + com.google.storage.v2.Bucket.CustomPlacementConfig.Builder.class); + } + + public static final int DATA_LOCATIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList dataLocations_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+     * Optional. List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the dataLocations. + */ + public com.google.protobuf.ProtocolStringList getDataLocationsList() { + return dataLocations_; + } + + /** + * + * + *
+     * Optional. List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of dataLocations. + */ + public int getDataLocationsCount() { + return dataLocations_.size(); + } + + /** + * + * + *
+     * Optional. List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The dataLocations at the given index. + */ + public java.lang.String getDataLocations(int index) { + return dataLocations_.get(index); + } + + /** + * + * + *
+     * Optional. List of locations to use for data placement.
+     * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the dataLocations at the given index. + */ + public com.google.protobuf.ByteString getDataLocationsBytes(int index) { + return dataLocations_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < dataLocations_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, dataLocations_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < dataLocations_.size(); i++) { + dataSize += computeStringSizeNoTag(dataLocations_.getRaw(i)); + } + size += dataSize; + size += 1 * getDataLocationsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.CustomPlacementConfig)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.CustomPlacementConfig other = + (com.google.storage.v2.Bucket.CustomPlacementConfig) obj; + + if (!getDataLocationsList().equals(other.getDataLocationsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + 
} + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getDataLocationsCount() > 0) { + hash = (37 * hash) + DATA_LOCATIONS_FIELD_NUMBER; + hash = (53 * hash) + getDataLocationsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.CustomPlacementConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.CustomPlacementConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.CustomPlacementConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.CustomPlacementConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.CustomPlacementConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.CustomPlacementConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.CustomPlacementConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.CustomPlacementConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.CustomPlacementConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.CustomPlacementConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.CustomPlacementConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.CustomPlacementConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.CustomPlacementConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Configuration for [configurable dual-
+     * regions](https://cloud.google.com/storage/docs/locations#configurable). It
+     * should specify precisely two eligible regions within the same multi-region.
+     * For details, see
+     * [locations](https://cloud.google.com/storage/docs/locations).
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.CustomPlacementConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.CustomPlacementConfig) + com.google.storage.v2.Bucket.CustomPlacementConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_CustomPlacementConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_CustomPlacementConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.CustomPlacementConfig.class, + com.google.storage.v2.Bucket.CustomPlacementConfig.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.CustomPlacementConfig.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + dataLocations_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_CustomPlacementConfig_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.CustomPlacementConfig getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.CustomPlacementConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.CustomPlacementConfig build() { + com.google.storage.v2.Bucket.CustomPlacementConfig result = buildPartial(); + if (!result.isInitialized()) 
{ + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.CustomPlacementConfig buildPartial() { + com.google.storage.v2.Bucket.CustomPlacementConfig result = + new com.google.storage.v2.Bucket.CustomPlacementConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.CustomPlacementConfig result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + dataLocations_.makeImmutable(); + result.dataLocations_ = dataLocations_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.CustomPlacementConfig) { + return mergeFrom((com.google.storage.v2.Bucket.CustomPlacementConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.CustomPlacementConfig other) { + if (other == com.google.storage.v2.Bucket.CustomPlacementConfig.getDefaultInstance()) + return this; + if (!other.dataLocations_.isEmpty()) { + if (dataLocations_.isEmpty()) { + dataLocations_ = other.dataLocations_; + bitField0_ |= 0x00000001; + } else { + ensureDataLocationsIsMutable(); + dataLocations_.addAll(other.dataLocations_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { 
+ java.lang.String s = input.readStringRequireUtf8(); + ensureDataLocationsIsMutable(); + dataLocations_.add(s); + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList dataLocations_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureDataLocationsIsMutable() { + if (!dataLocations_.isModifiable()) { + dataLocations_ = new com.google.protobuf.LazyStringArrayList(dataLocations_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+       * Optional. List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the dataLocations. + */ + public com.google.protobuf.ProtocolStringList getDataLocationsList() { + dataLocations_.makeImmutable(); + return dataLocations_; + } + + /** + * + * + *
+       * Optional. List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of dataLocations. + */ + public int getDataLocationsCount() { + return dataLocations_.size(); + } + + /** + * + * + *
+       * Optional. List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The dataLocations at the given index. + */ + public java.lang.String getDataLocations(int index) { + return dataLocations_.get(index); + } + + /** + * + * + *
+       * Optional. List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the dataLocations at the given index. + */ + public com.google.protobuf.ByteString getDataLocationsBytes(int index) { + return dataLocations_.getByteString(index); + } + + /** + * + * + *
+       * Optional. List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The dataLocations to set. + * @return This builder for chaining. + */ + public Builder setDataLocations(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureDataLocationsIsMutable(); + dataLocations_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The dataLocations to add. + * @return This builder for chaining. + */ + public Builder addDataLocations(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureDataLocationsIsMutable(); + dataLocations_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The dataLocations to add. + * @return This builder for chaining. + */ + public Builder addAllDataLocations(java.lang.Iterable values) { + ensureDataLocationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, dataLocations_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDataLocations() { + dataLocations_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. List of locations to use for data placement.
+       * 
+ * + * repeated string data_locations = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the dataLocations to add. + * @return This builder for chaining. + */ + public Builder addDataLocationsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureDataLocationsIsMutable(); + dataLocations_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.CustomPlacementConfig) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.CustomPlacementConfig) + private static final com.google.storage.v2.Bucket.CustomPlacementConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.CustomPlacementConfig(); + } + + public static com.google.storage.v2.Bucket.CustomPlacementConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CustomPlacementConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.CustomPlacementConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface AutoclassOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.Autoclass) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. Enables Autoclass.
+     * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enabled. + */ + boolean getEnabled(); + + /** + * + * + *
+     * Output only. Latest instant at which the `enabled` field was set to true
+     * after being disabled/unconfigured or set to false after being enabled. If
+     * Autoclass is enabled when the bucket is created, the value of the
+     * `toggle_time` field is set to the bucket `create_time`.
+     * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the toggleTime field is set. + */ + boolean hasToggleTime(); + + /** + * + * + *
+     * Output only. Latest instant at which the `enabled` field was set to true
+     * after being disabled/unconfigured or set to false after being enabled. If
+     * Autoclass is enabled when the bucket is created, the value of the
+     * `toggle_time` field is set to the bucket `create_time`.
+     * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The toggleTime. + */ + com.google.protobuf.Timestamp getToggleTime(); + + /** + * + * + *
+     * Output only. Latest instant at which the `enabled` field was set to true
+     * after being disabled/unconfigured or set to false after being enabled. If
+     * Autoclass is enabled when the bucket is created, the value of the
+     * `toggle_time` field is set to the bucket `create_time`.
+     * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getToggleTimeOrBuilder(); + + /** + * + * + *
+     * An object in an Autoclass bucket eventually cools down to the
+     * terminal storage class if there is no access to the object.
+     * The only valid values are NEARLINE and ARCHIVE.
+     * 
+ * + * optional string terminal_storage_class = 3; + * + * @return Whether the terminalStorageClass field is set. + */ + boolean hasTerminalStorageClass(); + + /** + * + * + *
+     * An object in an Autoclass bucket eventually cools down to the
+     * terminal storage class if there is no access to the object.
+     * The only valid values are NEARLINE and ARCHIVE.
+     * 
+ * + * optional string terminal_storage_class = 3; + * + * @return The terminalStorageClass. + */ + java.lang.String getTerminalStorageClass(); + + /** + * + * + *
+     * An object in an Autoclass bucket eventually cools down to the
+     * terminal storage class if there is no access to the object.
+     * The only valid values are NEARLINE and ARCHIVE.
+     * 
+ * + * optional string terminal_storage_class = 3; + * + * @return The bytes for terminalStorageClass. + */ + com.google.protobuf.ByteString getTerminalStorageClassBytes(); + + /** + * + * + *
+     * Output only. Latest instant at which the autoclass terminal storage class
+     * was updated.
+     * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the terminalStorageClassUpdateTime field is set. + */ + boolean hasTerminalStorageClassUpdateTime(); + + /** + * + * + *
+     * Output only. Latest instant at which the autoclass terminal storage class
+     * was updated.
+     * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The terminalStorageClassUpdateTime. + */ + com.google.protobuf.Timestamp getTerminalStorageClassUpdateTime(); + + /** + * + * + *
+     * Output only. Latest instant at which the autoclass terminal storage class
+     * was updated.
+     * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getTerminalStorageClassUpdateTimeOrBuilder(); + } + + /** + * + * + *
+   * Configuration for a bucket's Autoclass feature.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Autoclass} + */ + public static final class Autoclass extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.Autoclass) + AutoclassOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Autoclass"); + } + + // Use Autoclass.newBuilder() to construct. + private Autoclass(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Autoclass() { + terminalStorageClass_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Autoclass_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Autoclass_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Autoclass.class, + com.google.storage.v2.Bucket.Autoclass.Builder.class); + } + + private int bitField0_; + public static final int ENABLED_FIELD_NUMBER = 1; + private boolean enabled_ = false; + + /** + * + * + *
+     * Optional. Enables Autoclass.
+     * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enabled. + */ + @java.lang.Override + public boolean getEnabled() { + return enabled_; + } + + public static final int TOGGLE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp toggleTime_; + + /** + * + * + *
+     * Output only. Latest instant at which the `enabled` field was set to true
+     * after being disabled/unconfigured or set to false after being enabled. If
+     * Autoclass is enabled when the bucket is created, the value of the
+     * `toggle_time` field is set to the bucket `create_time`.
+     * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the toggleTime field is set. + */ + @java.lang.Override + public boolean hasToggleTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Output only. Latest instant at which the `enabled` field was set to true
+     * after being disabled/unconfigured or set to false after being enabled. If
+     * Autoclass is enabled when the bucket is created, the value of the
+     * `toggle_time` field is set to the bucket `create_time`.
+     * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The toggleTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getToggleTime() { + return toggleTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : toggleTime_; + } + + /** + * + * + *
+     * Output only. Latest instant at which the `enabled` field was set to true
+     * after being disabled/unconfigured or set to false after being enabled. If
+     * Autoclass is enabled when the bucket is created, the value of the
+     * `toggle_time` field is set to the bucket `create_time`.
+     * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getToggleTimeOrBuilder() { + return toggleTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : toggleTime_; + } + + public static final int TERMINAL_STORAGE_CLASS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object terminalStorageClass_ = ""; + + /** + * + * + *
+     * An object in an Autoclass bucket eventually cools down to the
+     * terminal storage class if there is no access to the object.
+     * The only valid values are NEARLINE and ARCHIVE.
+     * 
+ * + * optional string terminal_storage_class = 3; + * + * @return Whether the terminalStorageClass field is set. + */ + @java.lang.Override + public boolean hasTerminalStorageClass() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * An object in an Autoclass bucket eventually cools down to the
+     * terminal storage class if there is no access to the object.
+     * The only valid values are NEARLINE and ARCHIVE.
+     * 
+ * + * optional string terminal_storage_class = 3; + * + * @return The terminalStorageClass. + */ + @java.lang.Override + public java.lang.String getTerminalStorageClass() { + java.lang.Object ref = terminalStorageClass_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + terminalStorageClass_ = s; + return s; + } + } + + /** + * + * + *
+     * An object in an Autoclass bucket eventually cools down to the
+     * terminal storage class if there is no access to the object.
+     * The only valid values are NEARLINE and ARCHIVE.
+     * 
+ * + * optional string terminal_storage_class = 3; + * + * @return The bytes for terminalStorageClass. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTerminalStorageClassBytes() { + java.lang.Object ref = terminalStorageClass_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + terminalStorageClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TERMINAL_STORAGE_CLASS_UPDATE_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp terminalStorageClassUpdateTime_; + + /** + * + * + *
+     * Output only. Latest instant at which the autoclass terminal storage class
+     * was updated.
+     * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the terminalStorageClassUpdateTime field is set. + */ + @java.lang.Override + public boolean hasTerminalStorageClassUpdateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Output only. Latest instant at which the autoclass terminal storage class
+     * was updated.
+     * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The terminalStorageClassUpdateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getTerminalStorageClassUpdateTime() { + return terminalStorageClassUpdateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : terminalStorageClassUpdateTime_; + } + + /** + * + * + *
+     * Output only. Latest instant at which the autoclass terminal storage class
+     * was updated.
+     * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getTerminalStorageClassUpdateTimeOrBuilder() { + return terminalStorageClassUpdateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : terminalStorageClassUpdateTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (enabled_ != false) { + output.writeBool(1, enabled_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getToggleTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, terminalStorageClass_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(4, getTerminalStorageClassUpdateTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (enabled_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, enabled_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getToggleTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, terminalStorageClass_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, getTerminalStorageClassUpdateTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return 
size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.Autoclass)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.Autoclass other = (com.google.storage.v2.Bucket.Autoclass) obj; + + if (getEnabled() != other.getEnabled()) return false; + if (hasToggleTime() != other.hasToggleTime()) return false; + if (hasToggleTime()) { + if (!getToggleTime().equals(other.getToggleTime())) return false; + } + if (hasTerminalStorageClass() != other.hasTerminalStorageClass()) return false; + if (hasTerminalStorageClass()) { + if (!getTerminalStorageClass().equals(other.getTerminalStorageClass())) return false; + } + if (hasTerminalStorageClassUpdateTime() != other.hasTerminalStorageClassUpdateTime()) + return false; + if (hasTerminalStorageClassUpdateTime()) { + if (!getTerminalStorageClassUpdateTime().equals(other.getTerminalStorageClassUpdateTime())) + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENABLED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnabled()); + if (hasToggleTime()) { + hash = (37 * hash) + TOGGLE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getToggleTime().hashCode(); + } + if (hasTerminalStorageClass()) { + hash = (37 * hash) + TERMINAL_STORAGE_CLASS_FIELD_NUMBER; + hash = (53 * hash) + getTerminalStorageClass().hashCode(); + } + if (hasTerminalStorageClassUpdateTime()) { + hash = (37 * hash) + TERMINAL_STORAGE_CLASS_UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getTerminalStorageClassUpdateTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public 
static com.google.storage.v2.Bucket.Autoclass parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Autoclass parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Autoclass parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Autoclass parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Autoclass parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.Autoclass parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Autoclass parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Autoclass parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Autoclass parseDelimitedFrom( + java.io.InputStream 
input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Autoclass parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.Autoclass parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.Autoclass parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.Autoclass prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Configuration for a bucket's Autoclass feature.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.Autoclass} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.Autoclass) + com.google.storage.v2.Bucket.AutoclassOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Autoclass_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Autoclass_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.Autoclass.class, + com.google.storage.v2.Bucket.Autoclass.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.Autoclass.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetToggleTimeFieldBuilder(); + internalGetTerminalStorageClassUpdateTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + enabled_ = false; + toggleTime_ = null; + if (toggleTimeBuilder_ != null) { + toggleTimeBuilder_.dispose(); + toggleTimeBuilder_ = null; + } + terminalStorageClass_ = ""; + terminalStorageClassUpdateTime_ = null; + if (terminalStorageClassUpdateTimeBuilder_ != null) { + terminalStorageClassUpdateTimeBuilder_.dispose(); + terminalStorageClassUpdateTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_Autoclass_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Autoclass getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.Autoclass.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Autoclass build() { + com.google.storage.v2.Bucket.Autoclass result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Autoclass buildPartial() { + com.google.storage.v2.Bucket.Autoclass result = + new com.google.storage.v2.Bucket.Autoclass(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.Autoclass result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.enabled_ = enabled_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.toggleTime_ = + toggleTimeBuilder_ == null ? toggleTime_ : toggleTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.terminalStorageClass_ = terminalStorageClass_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.terminalStorageClassUpdateTime_ = + terminalStorageClassUpdateTimeBuilder_ == null + ? 
terminalStorageClassUpdateTime_ + : terminalStorageClassUpdateTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.Autoclass) { + return mergeFrom((com.google.storage.v2.Bucket.Autoclass) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.Autoclass other) { + if (other == com.google.storage.v2.Bucket.Autoclass.getDefaultInstance()) return this; + if (other.getEnabled() != false) { + setEnabled(other.getEnabled()); + } + if (other.hasToggleTime()) { + mergeToggleTime(other.getToggleTime()); + } + if (other.hasTerminalStorageClass()) { + terminalStorageClass_ = other.terminalStorageClass_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasTerminalStorageClassUpdateTime()) { + mergeTerminalStorageClassUpdateTime(other.getTerminalStorageClassUpdateTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + enabled_ = input.readBool(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + input.readMessage( + internalGetToggleTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + terminalStorageClass_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + 
case 34: + { + input.readMessage( + internalGetTerminalStorageClassUpdateTimeFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private boolean enabled_; + + /** + * + * + *
+       * Optional. Enables Autoclass.
+       * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enabled. + */ + @java.lang.Override + public boolean getEnabled() { + return enabled_; + } + + /** + * + * + *
+       * Optional. Enables Autoclass.
+       * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The enabled to set. + * @return This builder for chaining. + */ + public Builder setEnabled(boolean value) { + + enabled_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Enables Autoclass.
+       * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEnabled() { + bitField0_ = (bitField0_ & ~0x00000001); + enabled_ = false; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp toggleTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + toggleTimeBuilder_; + + /** + * + * + *
+       * Output only. Latest instant at which the `enabled` field was set to true
+       * after being disabled/unconfigured or set to false after being enabled. If
+       * Autoclass is enabled when the bucket is created, the value of the
+       * `toggle_time` field is set to the bucket `create_time`.
+       * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the toggleTime field is set. + */ + public boolean hasToggleTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Output only. Latest instant at which the `enabled` field was set to true
+       * after being disabled/unconfigured or set to false after being enabled. If
+       * Autoclass is enabled when the bucket is created, the value of the
+       * `toggle_time` field is set to the bucket `create_time`.
+       * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The toggleTime. + */ + public com.google.protobuf.Timestamp getToggleTime() { + if (toggleTimeBuilder_ == null) { + return toggleTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : toggleTime_; + } else { + return toggleTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Output only. Latest instant at which the `enabled` field was set to true
+       * after being disabled/unconfigured or set to false after being enabled. If
+       * Autoclass is enabled when the bucket is created, the value of the
+       * `toggle_time` field is set to the bucket `create_time`.
+       * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setToggleTime(com.google.protobuf.Timestamp value) { + if (toggleTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + toggleTime_ = value; + } else { + toggleTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. Latest instant at which the `enabled` field was set to true
+       * after being disabled/unconfigured or set to false after being enabled. If
+       * Autoclass is enabled when the bucket is created, the value of the
+       * `toggle_time` field is set to the bucket `create_time`.
+       * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setToggleTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (toggleTimeBuilder_ == null) { + toggleTime_ = builderForValue.build(); + } else { + toggleTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. Latest instant at which the `enabled` field was set to true
+       * after being disabled/unconfigured or set to false after being enabled. If
+       * Autoclass is enabled when the bucket is created, the value of the
+       * `toggle_time` field is set to the bucket `create_time`.
+       * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeToggleTime(com.google.protobuf.Timestamp value) { + if (toggleTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && toggleTime_ != null + && toggleTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getToggleTimeBuilder().mergeFrom(value); + } else { + toggleTime_ = value; + } + } else { + toggleTimeBuilder_.mergeFrom(value); + } + if (toggleTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Output only. Latest instant at which the `enabled` field was set to true
+       * after being disabled/unconfigured or set to false after being enabled. If
+       * Autoclass is enabled when the bucket is created, the value of the
+       * `toggle_time` field is set to the bucket `create_time`.
+       * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearToggleTime() { + bitField0_ = (bitField0_ & ~0x00000002); + toggleTime_ = null; + if (toggleTimeBuilder_ != null) { + toggleTimeBuilder_.dispose(); + toggleTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. Latest instant at which the `enabled` field was set to true
+       * after being disabled/unconfigured or set to false after being enabled. If
+       * Autoclass is enabled when the bucket is created, the value of the
+       * `toggle_time` field is set to the bucket `create_time`.
+       * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getToggleTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetToggleTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Output only. Latest instant at which the `enabled` field was set to true
+       * after being disabled/unconfigured or set to false after being enabled. If
+       * Autoclass is enabled when the bucket is created, the value of the
+       * `toggle_time` field is set to the bucket `create_time`.
+       * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getToggleTimeOrBuilder() { + if (toggleTimeBuilder_ != null) { + return toggleTimeBuilder_.getMessageOrBuilder(); + } else { + return toggleTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : toggleTime_; + } + } + + /** + * + * + *
+       * Output only. Latest instant at which the `enabled` field was set to true
+       * after being disabled/unconfigured or set to false after being enabled. If
+       * Autoclass is enabled when the bucket is created, the value of the
+       * `toggle_time` field is set to the bucket `create_time`.
+       * 
+ * + * + * .google.protobuf.Timestamp toggle_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetToggleTimeFieldBuilder() { + if (toggleTimeBuilder_ == null) { + toggleTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getToggleTime(), getParentForChildren(), isClean()); + toggleTime_ = null; + } + return toggleTimeBuilder_; + } + + private java.lang.Object terminalStorageClass_ = ""; + + /** + * + * + *
+       * An object in an Autoclass bucket eventually cools down to the
+       * terminal storage class if there is no access to the object.
+       * The only valid values are NEARLINE and ARCHIVE.
+       * 
+ * + * optional string terminal_storage_class = 3; + * + * @return Whether the terminalStorageClass field is set. + */ + public boolean hasTerminalStorageClass() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+       * An object in an Autoclass bucket eventually cools down to the
+       * terminal storage class if there is no access to the object.
+       * The only valid values are NEARLINE and ARCHIVE.
+       * 
+ * + * optional string terminal_storage_class = 3; + * + * @return The terminalStorageClass. + */ + public java.lang.String getTerminalStorageClass() { + java.lang.Object ref = terminalStorageClass_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + terminalStorageClass_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * An object in an Autoclass bucket eventually cools down to the
+       * terminal storage class if there is no access to the object.
+       * The only valid values are NEARLINE and ARCHIVE.
+       * 
+ * + * optional string terminal_storage_class = 3; + * + * @return The bytes for terminalStorageClass. + */ + public com.google.protobuf.ByteString getTerminalStorageClassBytes() { + java.lang.Object ref = terminalStorageClass_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + terminalStorageClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * An object in an Autoclass bucket eventually cools down to the
+       * terminal storage class if there is no access to the object.
+       * The only valid values are NEARLINE and ARCHIVE.
+       * 
+ * + * optional string terminal_storage_class = 3; + * + * @param value The terminalStorageClass to set. + * @return This builder for chaining. + */ + public Builder setTerminalStorageClass(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + terminalStorageClass_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * An object in an Autoclass bucket eventually cools down to the
+       * terminal storage class if there is no access to the object.
+       * The only valid values are NEARLINE and ARCHIVE.
+       * 
+ * + * optional string terminal_storage_class = 3; + * + * @return This builder for chaining. + */ + public Builder clearTerminalStorageClass() { + terminalStorageClass_ = getDefaultInstance().getTerminalStorageClass(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+       * An object in an Autoclass bucket eventually cools down to the
+       * terminal storage class if there is no access to the object.
+       * The only valid values are NEARLINE and ARCHIVE.
+       * 
+ * + * optional string terminal_storage_class = 3; + * + * @param value The bytes for terminalStorageClass to set. + * @return This builder for chaining. + */ + public Builder setTerminalStorageClassBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + terminalStorageClass_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp terminalStorageClassUpdateTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + terminalStorageClassUpdateTimeBuilder_; + + /** + * + * + *
+       * Output only. Latest instant at which the autoclass terminal storage class
+       * was updated.
+       * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the terminalStorageClassUpdateTime field is set. + */ + public boolean hasTerminalStorageClassUpdateTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+       * Output only. Latest instant at which the autoclass terminal storage class
+       * was updated.
+       * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The terminalStorageClassUpdateTime. + */ + public com.google.protobuf.Timestamp getTerminalStorageClassUpdateTime() { + if (terminalStorageClassUpdateTimeBuilder_ == null) { + return terminalStorageClassUpdateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : terminalStorageClassUpdateTime_; + } else { + return terminalStorageClassUpdateTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Output only. Latest instant at which the autoclass terminal storage class
+       * was updated.
+       * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setTerminalStorageClassUpdateTime(com.google.protobuf.Timestamp value) { + if (terminalStorageClassUpdateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + terminalStorageClassUpdateTime_ = value; + } else { + terminalStorageClassUpdateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. Latest instant at which the autoclass terminal storage class
+       * was updated.
+       * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setTerminalStorageClassUpdateTime( + com.google.protobuf.Timestamp.Builder builderForValue) { + if (terminalStorageClassUpdateTimeBuilder_ == null) { + terminalStorageClassUpdateTime_ = builderForValue.build(); + } else { + terminalStorageClassUpdateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. Latest instant at which the autoclass terminal storage class
+       * was updated.
+       * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeTerminalStorageClassUpdateTime(com.google.protobuf.Timestamp value) { + if (terminalStorageClassUpdateTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && terminalStorageClassUpdateTime_ != null + && terminalStorageClassUpdateTime_ + != com.google.protobuf.Timestamp.getDefaultInstance()) { + getTerminalStorageClassUpdateTimeBuilder().mergeFrom(value); + } else { + terminalStorageClassUpdateTime_ = value; + } + } else { + terminalStorageClassUpdateTimeBuilder_.mergeFrom(value); + } + if (terminalStorageClassUpdateTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Output only. Latest instant at which the autoclass terminal storage class
+       * was updated.
+       * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearTerminalStorageClassUpdateTime() { + bitField0_ = (bitField0_ & ~0x00000008); + terminalStorageClassUpdateTime_ = null; + if (terminalStorageClassUpdateTimeBuilder_ != null) { + terminalStorageClassUpdateTimeBuilder_.dispose(); + terminalStorageClassUpdateTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Output only. Latest instant at which the autoclass terminal storage class
+       * was updated.
+       * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getTerminalStorageClassUpdateTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetTerminalStorageClassUpdateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Output only. Latest instant at which the autoclass terminal storage class
+       * was updated.
+       * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getTerminalStorageClassUpdateTimeOrBuilder() { + if (terminalStorageClassUpdateTimeBuilder_ != null) { + return terminalStorageClassUpdateTimeBuilder_.getMessageOrBuilder(); + } else { + return terminalStorageClassUpdateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : terminalStorageClassUpdateTime_; + } + } + + /** + * + * + *
+       * Output only. Latest instant at which the autoclass terminal storage class
+       * was updated.
+       * 
+ * + * + * optional .google.protobuf.Timestamp terminal_storage_class_update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetTerminalStorageClassUpdateTimeFieldBuilder() { + if (terminalStorageClassUpdateTimeBuilder_ == null) { + terminalStorageClassUpdateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getTerminalStorageClassUpdateTime(), getParentForChildren(), isClean()); + terminalStorageClassUpdateTime_ = null; + } + return terminalStorageClassUpdateTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.Autoclass) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.Autoclass) + private static final com.google.storage.v2.Bucket.Autoclass DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.Autoclass(); + } + + public static com.google.storage.v2.Bucket.Autoclass getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Autoclass parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw 
new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.Autoclass getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface IpFilterOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.IpFilter) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The state of the IP filter configuration. Valid values are `Enabled` and
+     * `Disabled`. When set to `Enabled`, IP filtering rules are applied to a
+     * bucket and all incoming requests to the bucket are evaluated against
+     * these rules. When set to `Disabled`, IP filtering rules are not applied
+     * to a bucket.
+     * 
+ * + * optional string mode = 1; + * + * @return Whether the mode field is set. + */ + boolean hasMode(); + + /** + * + * + *
+     * The state of the IP filter configuration. Valid values are `Enabled` and
+     * `Disabled`. When set to `Enabled`, IP filtering rules are applied to a
+     * bucket and all incoming requests to the bucket are evaluated against
+     * these rules. When set to `Disabled`, IP filtering rules are not applied
+     * to a bucket.
+     * 
+ * + * optional string mode = 1; + * + * @return The mode. + */ + java.lang.String getMode(); + + /** + * + * + *
+     * The state of the IP filter configuration. Valid values are `Enabled` and
+     * `Disabled`. When set to `Enabled`, IP filtering rules are applied to a
+     * bucket and all incoming requests to the bucket are evaluated against
+     * these rules. When set to `Disabled`, IP filtering rules are not applied
+     * to a bucket.
+     * 
+ * + * optional string mode = 1; + * + * @return The bytes for mode. + */ + com.google.protobuf.ByteString getModeBytes(); + + /** + * + * + *
+     * Public IPs allowed to operate or access the bucket.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + * + * @return Whether the publicNetworkSource field is set. + */ + boolean hasPublicNetworkSource(); + + /** + * + * + *
+     * Public IPs allowed to operate or access the bucket.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + * + * @return The publicNetworkSource. + */ + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource getPublicNetworkSource(); + + /** + * + * + *
+     * Public IPs allowed to operate or access the bucket.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + */ + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSourceOrBuilder + getPublicNetworkSourceOrBuilder(); + + /** + * + * + *
+     * Optional. The list of network sources that are allowed to access
+     * operations on the bucket or the underlying objects.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getVpcNetworkSourcesList(); + + /** + * + * + *
+     * Optional. The list of network sources that are allowed to access
+     * operations on the bucket or the underlying objects.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource getVpcNetworkSources(int index); + + /** + * + * + *
+     * Optional. The list of network sources that are allowed to access
+     * operations on the bucket or the underlying objects.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getVpcNetworkSourcesCount(); + + /** + * + * + *
+     * Optional. The list of network sources that are allowed to access
+     * operations on the bucket or the underlying objects.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getVpcNetworkSourcesOrBuilderList(); + + /** + * + * + *
+     * Optional. The list of network sources that are allowed to access
+     * operations on the bucket or the underlying objects.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSourceOrBuilder getVpcNetworkSourcesOrBuilder( + int index); + + /** + * + * + *
+     * Optional. Whether or not to allow VPCs from orgs different than the
+     * bucket's parent org to access the bucket. When set to true, validations
+     * on the existence of the VPCs won't be performed. If set to false, each
+     * VPC network source is checked to belong to the same org as the bucket as
+     * well as validated for existence.
+     * 
+ * + * bool allow_cross_org_vpcs = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The allowCrossOrgVpcs. + */ + boolean getAllowCrossOrgVpcs(); + + /** + * + * + *
+     * Whether or not to allow all P4SA access to the bucket. When set to true,
+     * IP filter config validation doesn't apply.
+     * 
+ * + * optional bool allow_all_service_agent_access = 5; + * + * @return Whether the allowAllServiceAgentAccess field is set. + */ + boolean hasAllowAllServiceAgentAccess(); + + /** + * + * + *
+     * Whether or not to allow all P4SA access to the bucket. When set to true,
+     * IP filter config validation doesn't apply.
+     * 
+ * + * optional bool allow_all_service_agent_access = 5; + * + * @return The allowAllServiceAgentAccess. + */ + boolean getAllowAllServiceAgentAccess(); + } + + /** + * + * + *
+   * The [bucket IP
+   * filtering](https://cloud.google.com/storage/docs/ip-filtering-overview)
+   * configuration. Specifies the network sources that can access the bucket, as
+   * well as its underlying objects.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.IpFilter} + */ + public static final class IpFilter extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.IpFilter) + IpFilterOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "IpFilter"); + } + + // Use IpFilter.newBuilder() to construct. + private IpFilter(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private IpFilter() { + mode_ = ""; + vpcNetworkSources_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.IpFilter.class, + com.google.storage.v2.Bucket.IpFilter.Builder.class); + } + + public interface PublicNetworkSourceOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.IpFilter.PublicNetworkSource) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+       * operate or access the bucket and its underlying objects.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the allowedIpCidrRanges. + */ + java.util.List getAllowedIpCidrRangesList(); + + /** + * + * + *
+       * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+       * operate or access the bucket and its underlying objects.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of allowedIpCidrRanges. + */ + int getAllowedIpCidrRangesCount(); + + /** + * + * + *
+       * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+       * operate or access the bucket and its underlying objects.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The allowedIpCidrRanges at the given index. + */ + java.lang.String getAllowedIpCidrRanges(int index); + + /** + * + * + *
+       * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+       * operate or access the bucket and its underlying objects.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the allowedIpCidrRanges at the given index. + */ + com.google.protobuf.ByteString getAllowedIpCidrRangesBytes(int index); + } + + /** + * + * + *
+     * The public network IP address ranges that can access the bucket and its
+     * data.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.IpFilter.PublicNetworkSource} + */ + public static final class PublicNetworkSource extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.IpFilter.PublicNetworkSource) + PublicNetworkSourceOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PublicNetworkSource"); + } + + // Use PublicNetworkSource.newBuilder() to construct. + private PublicNetworkSource(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PublicNetworkSource() { + allowedIpCidrRanges_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_PublicNetworkSource_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_PublicNetworkSource_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.class, + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.Builder.class); + } + + public static final int ALLOWED_IP_CIDR_RANGES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList allowedIpCidrRanges_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+       * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+       * operate or access the bucket and its underlying objects.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the allowedIpCidrRanges. + */ + public com.google.protobuf.ProtocolStringList getAllowedIpCidrRangesList() { + return allowedIpCidrRanges_; + } + + /** + * + * + *
+       * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+       * operate or access the bucket and its underlying objects.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of allowedIpCidrRanges. + */ + public int getAllowedIpCidrRangesCount() { + return allowedIpCidrRanges_.size(); + } + + /** + * + * + *
+       * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+       * operate or access the bucket and its underlying objects.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The allowedIpCidrRanges at the given index. + */ + public java.lang.String getAllowedIpCidrRanges(int index) { + return allowedIpCidrRanges_.get(index); + } + + /** + * + * + *
+       * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+       * operate or access the bucket and its underlying objects.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the allowedIpCidrRanges at the given index. + */ + public com.google.protobuf.ByteString getAllowedIpCidrRangesBytes(int index) { + return allowedIpCidrRanges_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < allowedIpCidrRanges_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString( + output, 1, allowedIpCidrRanges_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < allowedIpCidrRanges_.size(); i++) { + dataSize += computeStringSizeNoTag(allowedIpCidrRanges_.getRaw(i)); + } + size += dataSize; + size += 1 * getAllowedIpCidrRangesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource other = + (com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource) obj; + + if (!getAllowedIpCidrRangesList().equals(other.getAllowedIpCidrRangesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getAllowedIpCidrRangesCount() > 0) { + hash = (37 * hash) + ALLOWED_IP_CIDR_RANGES_FIELD_NUMBER; + hash = (53 * hash) + getAllowedIpCidrRangesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+       * The public network IP address ranges that can access the bucket and its
+       * data.
+       * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.IpFilter.PublicNetworkSource} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.IpFilter.PublicNetworkSource) + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSourceOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_PublicNetworkSource_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_PublicNetworkSource_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.class, + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + allowedIpCidrRanges_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_PublicNetworkSource_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource + getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource build() { + 
com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource buildPartial() { + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource result = + new com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + allowedIpCidrRanges_.makeImmutable(); + result.allowedIpCidrRanges_ = allowedIpCidrRanges_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource) { + return mergeFrom((com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource other) { + if (other + == com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.getDefaultInstance()) + return this; + if (!other.allowedIpCidrRanges_.isEmpty()) { + if (allowedIpCidrRanges_.isEmpty()) { + allowedIpCidrRanges_ = other.allowedIpCidrRanges_; + bitField0_ |= 0x00000001; + } else { + ensureAllowedIpCidrRangesIsMutable(); + allowedIpCidrRanges_.addAll(other.allowedIpCidrRanges_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureAllowedIpCidrRangesIsMutable(); + allowedIpCidrRanges_.add(s); + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList allowedIpCidrRanges_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureAllowedIpCidrRangesIsMutable() { + if (!allowedIpCidrRanges_.isModifiable()) { + allowedIpCidrRanges_ = + new com.google.protobuf.LazyStringArrayList(allowedIpCidrRanges_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+         * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+         * operate or access the bucket and its underlying objects.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the allowedIpCidrRanges. + */ + public com.google.protobuf.ProtocolStringList getAllowedIpCidrRangesList() { + allowedIpCidrRanges_.makeImmutable(); + return allowedIpCidrRanges_; + } + + /** + * + * + *
+         * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+         * operate or access the bucket and its underlying objects.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of allowedIpCidrRanges. + */ + public int getAllowedIpCidrRangesCount() { + return allowedIpCidrRanges_.size(); + } + + /** + * + * + *
+         * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+         * operate or access the bucket and its underlying objects.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The allowedIpCidrRanges at the given index. + */ + public java.lang.String getAllowedIpCidrRanges(int index) { + return allowedIpCidrRanges_.get(index); + } + + /** + * + * + *
+         * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+         * operate or access the bucket and its underlying objects.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the allowedIpCidrRanges at the given index. + */ + public com.google.protobuf.ByteString getAllowedIpCidrRangesBytes(int index) { + return allowedIpCidrRanges_.getByteString(index); + } + + /** + * + * + *
+         * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+         * operate or access the bucket and its underlying objects.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index to set the value at. + * @param value The allowedIpCidrRanges to set. + * @return This builder for chaining. + */ + public Builder setAllowedIpCidrRanges(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureAllowedIpCidrRangesIsMutable(); + allowedIpCidrRanges_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+         * operate or access the bucket and its underlying objects.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The allowedIpCidrRanges to add. + * @return This builder for chaining. + */ + public Builder addAllowedIpCidrRanges(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureAllowedIpCidrRangesIsMutable(); + allowedIpCidrRanges_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+         * operate or access the bucket and its underlying objects.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param values The allowedIpCidrRanges to add. + * @return This builder for chaining. + */ + public Builder addAllAllowedIpCidrRanges(java.lang.Iterable values) { + ensureAllowedIpCidrRangesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, allowedIpCidrRanges_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+         * operate or access the bucket and its underlying objects.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearAllowedIpCidrRanges() { + allowedIpCidrRanges_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to
+         * operate or access the bucket and its underlying objects.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The bytes of the allowedIpCidrRanges to add. + * @return This builder for chaining. + */ + public Builder addAllowedIpCidrRangesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureAllowedIpCidrRangesIsMutable(); + allowedIpCidrRanges_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.IpFilter.PublicNetworkSource) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.IpFilter.PublicNetworkSource) + private static final com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource(); + } + + public static com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PublicNetworkSource parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static 
com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface VpcNetworkSourceOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.IpFilter.VpcNetworkSource) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * Name of the network.
+       *
+       * Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME`
+       * 
+ * + * optional string network = 1; + * + * @return Whether the network field is set. + */ + boolean hasNetwork(); + + /** + * + * + *
+       * Name of the network.
+       *
+       * Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME`
+       * 
+ * + * optional string network = 1; + * + * @return The network. + */ + java.lang.String getNetwork(); + + /** + * + * + *
+       * Name of the network.
+       *
+       * Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME`
+       * 
+ * + * optional string network = 1; + * + * @return The bytes for network. + */ + com.google.protobuf.ByteString getNetworkBytes(); + + /** + * + * + *
+       * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+       * can access the bucket. In the CIDR IP address block, the specified IP
+       * address must be properly truncated, meaning all the host bits must be
+       * zero or else the input is considered malformed. For example,
+       * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+       * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the allowedIpCidrRanges. + */ + java.util.List getAllowedIpCidrRangesList(); + + /** + * + * + *
+       * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+       * can access the bucket. In the CIDR IP address block, the specified IP
+       * address must be properly truncated, meaning all the host bits must be
+       * zero or else the input is considered malformed. For example,
+       * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+       * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of allowedIpCidrRanges. + */ + int getAllowedIpCidrRangesCount(); + + /** + * + * + *
+       * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+       * can access the bucket. In the CIDR IP address block, the specified IP
+       * address must be properly truncated, meaning all the host bits must be
+       * zero or else the input is considered malformed. For example,
+       * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+       * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The allowedIpCidrRanges at the given index. + */ + java.lang.String getAllowedIpCidrRanges(int index); + + /** + * + * + *
+       * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+       * can access the bucket. In the CIDR IP address block, the specified IP
+       * address must be properly truncated, meaning all the host bits must be
+       * zero or else the input is considered malformed. For example,
+       * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+       * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the allowedIpCidrRanges at the given index. + */ + com.google.protobuf.ByteString getAllowedIpCidrRangesBytes(int index); + } + + /** + * + * + *
+     * The list of VPC networks that can access the bucket.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.IpFilter.VpcNetworkSource} + */ + public static final class VpcNetworkSource extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.IpFilter.VpcNetworkSource) + VpcNetworkSourceOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "VpcNetworkSource"); + } + + // Use VpcNetworkSource.newBuilder() to construct. + private VpcNetworkSource(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private VpcNetworkSource() { + network_ = ""; + allowedIpCidrRanges_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_VpcNetworkSource_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_VpcNetworkSource_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.class, + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.Builder.class); + } + + private int bitField0_; + public static final int NETWORK_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object network_ = ""; + + /** + * + * + *
+       * Name of the network.
+       *
+       * Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME`
+       * 
+ * + * optional string network = 1; + * + * @return Whether the network field is set. + */ + @java.lang.Override + public boolean hasNetwork() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Name of the network.
+       *
+       * Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME`
+       * 
+ * + * optional string network = 1; + * + * @return The network. + */ + @java.lang.Override + public java.lang.String getNetwork() { + java.lang.Object ref = network_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + network_ = s; + return s; + } + } + + /** + * + * + *
+       * Name of the network.
+       *
+       * Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME`
+       * 
+ * + * optional string network = 1; + * + * @return The bytes for network. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNetworkBytes() { + java.lang.Object ref = network_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + network_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ALLOWED_IP_CIDR_RANGES_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList allowedIpCidrRanges_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+       * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+       * can access the bucket. In the CIDR IP address block, the specified IP
+       * address must be properly truncated, meaning all the host bits must be
+       * zero or else the input is considered malformed. For example,
+       * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+       * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the allowedIpCidrRanges. + */ + public com.google.protobuf.ProtocolStringList getAllowedIpCidrRangesList() { + return allowedIpCidrRanges_; + } + + /** + * + * + *
+       * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+       * can access the bucket. In the CIDR IP address block, the specified IP
+       * address must be properly truncated, meaning all the host bits must be
+       * zero or else the input is considered malformed. For example,
+       * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+       * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of allowedIpCidrRanges. + */ + public int getAllowedIpCidrRangesCount() { + return allowedIpCidrRanges_.size(); + } + + /** + * + * + *
+       * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+       * can access the bucket. In the CIDR IP address block, the specified IP
+       * address must be properly truncated, meaning all the host bits must be
+       * zero or else the input is considered malformed. For example,
+       * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+       * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The allowedIpCidrRanges at the given index. + */ + public java.lang.String getAllowedIpCidrRanges(int index) { + return allowedIpCidrRanges_.get(index); + } + + /** + * + * + *
+       * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+       * can access the bucket. In the CIDR IP address block, the specified IP
+       * address must be properly truncated, meaning all the host bits must be
+       * zero or else the input is considered malformed. For example,
+       * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+       * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+       * 
+ * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the allowedIpCidrRanges at the given index. + */ + public com.google.protobuf.ByteString getAllowedIpCidrRangesBytes(int index) { + return allowedIpCidrRanges_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, network_); + } + for (int i = 0; i < allowedIpCidrRanges_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString( + output, 2, allowedIpCidrRanges_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, network_); + } + { + int dataSize = 0; + for (int i = 0; i < allowedIpCidrRanges_.size(); i++) { + dataSize += computeStringSizeNoTag(allowedIpCidrRanges_.getRaw(i)); + } + size += dataSize; + size += 1 * getAllowedIpCidrRangesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource other = + 
(com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource) obj; + + if (hasNetwork() != other.hasNetwork()) return false; + if (hasNetwork()) { + if (!getNetwork().equals(other.getNetwork())) return false; + } + if (!getAllowedIpCidrRangesList().equals(other.getAllowedIpCidrRangesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasNetwork()) { + hash = (37 * hash) + NETWORK_FIELD_NUMBER; + hash = (53 * hash) + getNetwork().hashCode(); + } + if (getAllowedIpCidrRangesCount() > 0) { + hash = (37 * hash) + ALLOWED_IP_CIDR_RANGES_FIELD_NUMBER; + hash = (53 * hash) + getAllowedIpCidrRangesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + 
public static com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+       * The list of VPC networks that can access the bucket.
+       * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.IpFilter.VpcNetworkSource} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.IpFilter.VpcNetworkSource) + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSourceOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_VpcNetworkSource_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_VpcNetworkSource_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.class, + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + network_ = ""; + allowedIpCidrRanges_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_VpcNetworkSource_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource build() { + 
com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource buildPartial() { + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource result = + new com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.network_ = network_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + allowedIpCidrRanges_.makeImmutable(); + result.allowedIpCidrRanges_ = allowedIpCidrRanges_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource) { + return mergeFrom((com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource other) { + if (other == com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.getDefaultInstance()) + return this; + if (other.hasNetwork()) { + network_ = other.network_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.allowedIpCidrRanges_.isEmpty()) { + if (allowedIpCidrRanges_.isEmpty()) { + allowedIpCidrRanges_ = other.allowedIpCidrRanges_; + bitField0_ |= 0x00000002; + } else { + ensureAllowedIpCidrRangesIsMutable(); + allowedIpCidrRanges_.addAll(other.allowedIpCidrRanges_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override 
+ public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + network_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureAllowedIpCidrRangesIsMutable(); + allowedIpCidrRanges_.add(s); + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object network_ = ""; + + /** + * + * + *
+         * Name of the network.
+         *
+         * Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME`
+         * 
+ * + * optional string network = 1; + * + * @return Whether the network field is set. + */ + public boolean hasNetwork() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+         * Name of the network.
+         *
+         * Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME`
+         * 
+ * + * optional string network = 1; + * + * @return The network. + */ + public java.lang.String getNetwork() { + java.lang.Object ref = network_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + network_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+         * Name of the network.
+         *
+         * Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME`
+         * 
+ * + * optional string network = 1; + * + * @return The bytes for network. + */ + public com.google.protobuf.ByteString getNetworkBytes() { + java.lang.Object ref = network_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + network_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+         * Name of the network.
+         *
+         * Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME`
+         * 
+ * + * optional string network = 1; + * + * @param value The network to set. + * @return This builder for chaining. + */ + public Builder setNetwork(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + network_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Name of the network.
+         *
+         * Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME`
+         * 
+ * + * optional string network = 1; + * + * @return This builder for chaining. + */ + public Builder clearNetwork() { + network_ = getDefaultInstance().getNetwork(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+         * Name of the network.
+         *
+         * Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME`
+         * 
+ * + * optional string network = 1; + * + * @param value The bytes for network to set. + * @return This builder for chaining. + */ + public Builder setNetworkBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + network_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList allowedIpCidrRanges_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureAllowedIpCidrRangesIsMutable() { + if (!allowedIpCidrRanges_.isModifiable()) { + allowedIpCidrRanges_ = + new com.google.protobuf.LazyStringArrayList(allowedIpCidrRanges_); + } + bitField0_ |= 0x00000002; + } + + /** + * + * + *
+         * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+         * can access the bucket. In the CIDR IP address block, the specified IP
+         * address must be properly truncated, meaning all the host bits must be
+         * zero or else the input is considered malformed. For example,
+         * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+         * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the allowedIpCidrRanges. + */ + public com.google.protobuf.ProtocolStringList getAllowedIpCidrRangesList() { + allowedIpCidrRanges_.makeImmutable(); + return allowedIpCidrRanges_; + } + + /** + * + * + *
+         * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+         * can access the bucket. In the CIDR IP address block, the specified IP
+         * address must be properly truncated, meaning all the host bits must be
+         * zero or else the input is considered malformed. For example,
+         * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+         * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of allowedIpCidrRanges. + */ + public int getAllowedIpCidrRangesCount() { + return allowedIpCidrRanges_.size(); + } + + /** + * + * + *
+         * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+         * can access the bucket. In the CIDR IP address block, the specified IP
+         * address must be properly truncated, meaning all the host bits must be
+         * zero or else the input is considered malformed. For example,
+         * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+         * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The allowedIpCidrRanges at the given index. + */ + public java.lang.String getAllowedIpCidrRanges(int index) { + return allowedIpCidrRanges_.get(index); + } + + /** + * + * + *
+         * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+         * can access the bucket. In the CIDR IP address block, the specified IP
+         * address must be properly truncated, meaning all the host bits must be
+         * zero or else the input is considered malformed. For example,
+         * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+         * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the allowedIpCidrRanges at the given index. + */ + public com.google.protobuf.ByteString getAllowedIpCidrRangesBytes(int index) { + return allowedIpCidrRanges_.getByteString(index); + } + + /** + * + * + *
+         * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+         * can access the bucket. In the CIDR IP address block, the specified IP
+         * address must be properly truncated, meaning all the host bits must be
+         * zero or else the input is considered malformed. For example,
+         * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+         * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index to set the value at. + * @param value The allowedIpCidrRanges to set. + * @return This builder for chaining. + */ + public Builder setAllowedIpCidrRanges(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureAllowedIpCidrRangesIsMutable(); + allowedIpCidrRanges_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+         * can access the bucket. In the CIDR IP address block, the specified IP
+         * address must be properly truncated, meaning all the host bits must be
+         * zero or else the input is considered malformed. For example,
+         * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+         * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The allowedIpCidrRanges to add. + * @return This builder for chaining. + */ + public Builder addAllowedIpCidrRanges(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureAllowedIpCidrRangesIsMutable(); + allowedIpCidrRanges_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+         * can access the bucket. In the CIDR IP address block, the specified IP
+         * address must be properly truncated, meaning all the host bits must be
+         * zero or else the input is considered malformed. For example,
+         * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+         * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param values The allowedIpCidrRanges to add. + * @return This builder for chaining. + */ + public Builder addAllAllowedIpCidrRanges(java.lang.Iterable values) { + ensureAllowedIpCidrRangesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, allowedIpCidrRanges_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+         * can access the bucket. In the CIDR IP address block, the specified IP
+         * address must be properly truncated, meaning all the host bits must be
+         * zero or else the input is considered malformed. For example,
+         * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+         * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearAllowedIpCidrRanges() { + allowedIpCidrRanges_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + + /** + * + * + *
+         * Optional. The list of public or private IPv4 and IPv6 CIDR ranges that
+         * can access the bucket. In the CIDR IP address block, the specified IP
+         * address must be properly truncated, meaning all the host bits must be
+         * zero or else the input is considered malformed. For example,
+         * `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for
+         * IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not.
+         * 
+ * + * + * repeated string allowed_ip_cidr_ranges = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The bytes of the allowedIpCidrRanges to add. + * @return This builder for chaining. + */ + public Builder addAllowedIpCidrRangesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureAllowedIpCidrRangesIsMutable(); + allowedIpCidrRanges_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.IpFilter.VpcNetworkSource) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.IpFilter.VpcNetworkSource) + private static final com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource(); + } + + public static com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public VpcNetworkSource parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static 
com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int MODE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object mode_ = ""; + + /** + * + * + *
+     * The state of the IP filter configuration. Valid values are `Enabled` and
+     * `Disabled`. When set to `Enabled`, IP filtering rules are applied to a
+     * bucket and all incoming requests to the bucket are evaluated against
+     * these rules. When set to `Disabled`, IP filtering rules are not applied
+     * to a bucket.
+     * 
+ * + * optional string mode = 1; + * + * @return Whether the mode field is set. + */ + @java.lang.Override + public boolean hasMode() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * The state of the IP filter configuration. Valid values are `Enabled` and
+     * `Disabled`. When set to `Enabled`, IP filtering rules are applied to a
+     * bucket and all incoming requests to the bucket are evaluated against
+     * these rules. When set to `Disabled`, IP filtering rules are not applied
+     * to a bucket.
+     * 
+ * + * optional string mode = 1; + * + * @return The mode. + */ + @java.lang.Override + public java.lang.String getMode() { + java.lang.Object ref = mode_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + mode_ = s; + return s; + } + } + + /** + * + * + *
+     * The state of the IP filter configuration. Valid values are `Enabled` and
+     * `Disabled`. When set to `Enabled`, IP filtering rules are applied to a
+     * bucket and all incoming requests to the bucket are evaluated against
+     * these rules. When set to `Disabled`, IP filtering rules are not applied
+     * to a bucket.
+     * 
+ * + * optional string mode = 1; + * + * @return The bytes for mode. + */ + @java.lang.Override + public com.google.protobuf.ByteString getModeBytes() { + java.lang.Object ref = mode_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + mode_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PUBLIC_NETWORK_SOURCE_FIELD_NUMBER = 2; + private com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource publicNetworkSource_; + + /** + * + * + *
+     * Public IPs allowed to operate or access the bucket.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + * + * @return Whether the publicNetworkSource field is set. + */ + @java.lang.Override + public boolean hasPublicNetworkSource() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Public IPs allowed to operate or access the bucket.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + * + * @return The publicNetworkSource. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource getPublicNetworkSource() { + return publicNetworkSource_ == null + ? com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.getDefaultInstance() + : publicNetworkSource_; + } + + /** + * + * + *
+     * Public IPs allowed to operate or access the bucket.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter.PublicNetworkSourceOrBuilder + getPublicNetworkSourceOrBuilder() { + return publicNetworkSource_ == null + ? com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.getDefaultInstance() + : publicNetworkSource_; + } + + public static final int VPC_NETWORK_SOURCES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List + vpcNetworkSources_; + + /** + * + * + *
+     * Optional. The list of network sources that are allowed to access
+     * operations on the bucket or the underlying objects.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getVpcNetworkSourcesList() { + return vpcNetworkSources_; + } + + /** + * + * + *
+     * Optional. The list of network sources that are allowed to access
+     * operations on the bucket or the underlying objects.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getVpcNetworkSourcesOrBuilderList() { + return vpcNetworkSources_; + } + + /** + * + * + *
+     * Optional. The list of network sources that are allowed to access
+     * operations on the bucket or the underlying objects.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getVpcNetworkSourcesCount() { + return vpcNetworkSources_.size(); + } + + /** + * + * + *
+     * Optional. The list of network sources that are allowed to access
+     * operations on the bucket or the underlying objects.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource getVpcNetworkSources(int index) { + return vpcNetworkSources_.get(index); + } + + /** + * + * + *
+     * Optional. The list of network sources that are allowed to access
+     * operations on the bucket or the underlying objects.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter.VpcNetworkSourceOrBuilder + getVpcNetworkSourcesOrBuilder(int index) { + return vpcNetworkSources_.get(index); + } + + public static final int ALLOW_CROSS_ORG_VPCS_FIELD_NUMBER = 4; + private boolean allowCrossOrgVpcs_ = false; + + /** + * + * + *
+     * Optional. Whether or not to allow VPCs from orgs different than the
+     * bucket's parent org to access the bucket. When set to true, validations
+     * on the existence of the VPCs won't be performed. If set to false, each
+     * VPC network source is checked to belong to the same org as the bucket as
+     * well as validated for existence.
+     * 
+ * + * bool allow_cross_org_vpcs = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The allowCrossOrgVpcs. + */ + @java.lang.Override + public boolean getAllowCrossOrgVpcs() { + return allowCrossOrgVpcs_; + } + + public static final int ALLOW_ALL_SERVICE_AGENT_ACCESS_FIELD_NUMBER = 5; + private boolean allowAllServiceAgentAccess_ = false; + + /** + * + * + *
+     * Whether or not to allow all P4SA access to the bucket. When set to true,
+     * IP filter config validation doesn't apply.
+     * 
+ * + * optional bool allow_all_service_agent_access = 5; + * + * @return Whether the allowAllServiceAgentAccess field is set. + */ + @java.lang.Override + public boolean hasAllowAllServiceAgentAccess() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Whether or not to allow all P4SA access to the bucket. When set to true,
+     * IP filter config validation doesn't apply.
+     * 
+ * + * optional bool allow_all_service_agent_access = 5; + * + * @return The allowAllServiceAgentAccess. + */ + @java.lang.Override + public boolean getAllowAllServiceAgentAccess() { + return allowAllServiceAgentAccess_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, mode_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getPublicNetworkSource()); + } + for (int i = 0; i < vpcNetworkSources_.size(); i++) { + output.writeMessage(3, vpcNetworkSources_.get(i)); + } + if (allowCrossOrgVpcs_ != false) { + output.writeBool(4, allowCrossOrgVpcs_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeBool(5, allowAllServiceAgentAccess_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, mode_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(2, getPublicNetworkSource()); + } + for (int i = 0; i < vpcNetworkSources_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(3, vpcNetworkSources_.get(i)); + } + if (allowCrossOrgVpcs_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(4, allowCrossOrgVpcs_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize(5, 
allowAllServiceAgentAccess_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.IpFilter)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.IpFilter other = (com.google.storage.v2.Bucket.IpFilter) obj; + + if (hasMode() != other.hasMode()) return false; + if (hasMode()) { + if (!getMode().equals(other.getMode())) return false; + } + if (hasPublicNetworkSource() != other.hasPublicNetworkSource()) return false; + if (hasPublicNetworkSource()) { + if (!getPublicNetworkSource().equals(other.getPublicNetworkSource())) return false; + } + if (!getVpcNetworkSourcesList().equals(other.getVpcNetworkSourcesList())) return false; + if (getAllowCrossOrgVpcs() != other.getAllowCrossOrgVpcs()) return false; + if (hasAllowAllServiceAgentAccess() != other.hasAllowAllServiceAgentAccess()) return false; + if (hasAllowAllServiceAgentAccess()) { + if (getAllowAllServiceAgentAccess() != other.getAllowAllServiceAgentAccess()) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasMode()) { + hash = (37 * hash) + MODE_FIELD_NUMBER; + hash = (53 * hash) + getMode().hashCode(); + } + if (hasPublicNetworkSource()) { + hash = (37 * hash) + PUBLIC_NETWORK_SOURCE_FIELD_NUMBER; + hash = (53 * hash) + getPublicNetworkSource().hashCode(); + } + if (getVpcNetworkSourcesCount() > 0) { + hash = (37 * hash) + VPC_NETWORK_SOURCES_FIELD_NUMBER; + hash = (53 * hash) + getVpcNetworkSourcesList().hashCode(); + } + hash = (37 * hash) + ALLOW_CROSS_ORG_VPCS_FIELD_NUMBER; + hash = (53 * hash) + 
com.google.protobuf.Internal.hashBoolean(getAllowCrossOrgVpcs()); + if (hasAllowAllServiceAgentAccess()) { + hash = (37 * hash) + ALLOW_ALL_SERVICE_AGENT_ACCESS_FIELD_NUMBER; + hash = + (53 * hash) + com.google.protobuf.Internal.hashBoolean(getAllowAllServiceAgentAccess()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.IpFilter parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IpFilter parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IpFilter parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IpFilter parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IpFilter parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.IpFilter parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IpFilter parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public 
static com.google.storage.v2.Bucket.IpFilter parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IpFilter parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IpFilter parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.IpFilter parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.IpFilter parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.IpFilter prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * The [bucket IP
+     * filtering](https://cloud.google.com/storage/docs/ip-filtering-overview)
+     * configuration. Specifies the network sources that can access the bucket, as
+     * well as its underlying objects.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.IpFilter} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.IpFilter) + com.google.storage.v2.Bucket.IpFilterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.IpFilter.class, + com.google.storage.v2.Bucket.IpFilter.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.IpFilter.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetPublicNetworkSourceFieldBuilder(); + internalGetVpcNetworkSourcesFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + mode_ = ""; + publicNetworkSource_ = null; + if (publicNetworkSourceBuilder_ != null) { + publicNetworkSourceBuilder_.dispose(); + publicNetworkSourceBuilder_ = null; + } + if (vpcNetworkSourcesBuilder_ == null) { + vpcNetworkSources_ = java.util.Collections.emptyList(); + } else { + vpcNetworkSources_ = null; + vpcNetworkSourcesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + allowCrossOrgVpcs_ = false; + allowAllServiceAgentAccess_ = false; + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_IpFilter_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.IpFilter.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter build() { + com.google.storage.v2.Bucket.IpFilter result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter buildPartial() { + com.google.storage.v2.Bucket.IpFilter result = + new com.google.storage.v2.Bucket.IpFilter(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.storage.v2.Bucket.IpFilter result) { + if (vpcNetworkSourcesBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + vpcNetworkSources_ = java.util.Collections.unmodifiableList(vpcNetworkSources_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.vpcNetworkSources_ = vpcNetworkSources_; + } else { + result.vpcNetworkSources_ = vpcNetworkSourcesBuilder_.build(); + } + } + + private void buildPartial0(com.google.storage.v2.Bucket.IpFilter result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.mode_ = mode_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.publicNetworkSource_ = + publicNetworkSourceBuilder_ == null + ? 
publicNetworkSource_ + : publicNetworkSourceBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.allowCrossOrgVpcs_ = allowCrossOrgVpcs_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.allowAllServiceAgentAccess_ = allowAllServiceAgentAccess_; + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.IpFilter) { + return mergeFrom((com.google.storage.v2.Bucket.IpFilter) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.IpFilter other) { + if (other == com.google.storage.v2.Bucket.IpFilter.getDefaultInstance()) return this; + if (other.hasMode()) { + mode_ = other.mode_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasPublicNetworkSource()) { + mergePublicNetworkSource(other.getPublicNetworkSource()); + } + if (vpcNetworkSourcesBuilder_ == null) { + if (!other.vpcNetworkSources_.isEmpty()) { + if (vpcNetworkSources_.isEmpty()) { + vpcNetworkSources_ = other.vpcNetworkSources_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureVpcNetworkSourcesIsMutable(); + vpcNetworkSources_.addAll(other.vpcNetworkSources_); + } + onChanged(); + } + } else { + if (!other.vpcNetworkSources_.isEmpty()) { + if (vpcNetworkSourcesBuilder_.isEmpty()) { + vpcNetworkSourcesBuilder_.dispose(); + vpcNetworkSourcesBuilder_ = null; + vpcNetworkSources_ = other.vpcNetworkSources_; + bitField0_ = (bitField0_ & ~0x00000004); + vpcNetworkSourcesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetVpcNetworkSourcesFieldBuilder() + : null; + } else { + vpcNetworkSourcesBuilder_.addAllMessages(other.vpcNetworkSources_); + } + } + } + if (other.getAllowCrossOrgVpcs() != false) { + setAllowCrossOrgVpcs(other.getAllowCrossOrgVpcs()); + } + if (other.hasAllowAllServiceAgentAccess()) { + setAllowAllServiceAgentAccess(other.getAllowAllServiceAgentAccess()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + mode_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetPublicNetworkSourceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource m = + input.readMessage( + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.parser(), + extensionRegistry); + if (vpcNetworkSourcesBuilder_ == null) { + ensureVpcNetworkSourcesIsMutable(); + vpcNetworkSources_.add(m); + } else { + vpcNetworkSourcesBuilder_.addMessage(m); + } + break; + } // case 26 + case 32: + { + allowCrossOrgVpcs_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 40: + { + allowAllServiceAgentAccess_ = input.readBool(); + bitField0_ |= 0x00000010; + break; + } // case 40 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) 
+ } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object mode_ = ""; + + /** + * + * + *
+       * The state of the IP filter configuration. Valid values are `Enabled` and
+       * `Disabled`. When set to `Enabled`, IP filtering rules are applied to a
+       * bucket and all incoming requests to the bucket are evaluated against
+       * these rules. When set to `Disabled`, IP filtering rules are not applied
+       * to a bucket.
+       * 
+ * + * optional string mode = 1; + * + * @return Whether the mode field is set. + */ + public boolean hasMode() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * The state of the IP filter configuration. Valid values are `Enabled` and
+       * `Disabled`. When set to `Enabled`, IP filtering rules are applied to a
+       * bucket and all incoming requests to the bucket are evaluated against
+       * these rules. When set to `Disabled`, IP filtering rules are not applied
+       * to a bucket.
+       * 
+ * + * optional string mode = 1; + * + * @return The mode. + */ + public java.lang.String getMode() { + java.lang.Object ref = mode_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + mode_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * The state of the IP filter configuration. Valid values are `Enabled` and
+       * `Disabled`. When set to `Enabled`, IP filtering rules are applied to a
+       * bucket and all incoming requests to the bucket are evaluated against
+       * these rules. When set to `Disabled`, IP filtering rules are not applied
+       * to a bucket.
+       * 
+ * + * optional string mode = 1; + * + * @return The bytes for mode. + */ + public com.google.protobuf.ByteString getModeBytes() { + java.lang.Object ref = mode_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + mode_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * The state of the IP filter configuration. Valid values are `Enabled` and
+       * `Disabled`. When set to `Enabled`, IP filtering rules are applied to a
+       * bucket and all incoming requests to the bucket are evaluated against
+       * these rules. When set to `Disabled`, IP filtering rules are not applied
+       * to a bucket.
+       * 
+ * + * optional string mode = 1; + * + * @param value The mode to set. + * @return This builder for chaining. + */ + public Builder setMode(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + mode_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The state of the IP filter configuration. Valid values are `Enabled` and
+       * `Disabled`. When set to `Enabled`, IP filtering rules are applied to a
+       * bucket and all incoming requests to the bucket are evaluated against
+       * these rules. When set to `Disabled`, IP filtering rules are not applied
+       * to a bucket.
+       * 
+ * + * optional string mode = 1; + * + * @return This builder for chaining. + */ + public Builder clearMode() { + mode_ = getDefaultInstance().getMode(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+       * The state of the IP filter configuration. Valid values are `Enabled` and
+       * `Disabled`. When set to `Enabled`, IP filtering rules are applied to a
+       * bucket and all incoming requests to the bucket are evaluated against
+       * these rules. When set to `Disabled`, IP filtering rules are not applied
+       * to a bucket.
+       * 
+ * + * optional string mode = 1; + * + * @param value The bytes for mode to set. + * @return This builder for chaining. + */ + public Builder setModeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + mode_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource publicNetworkSource_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource, + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.Builder, + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSourceOrBuilder> + publicNetworkSourceBuilder_; + + /** + * + * + *
+       * Public IPs allowed to operate or access the bucket.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + * + * @return Whether the publicNetworkSource field is set. + */ + public boolean hasPublicNetworkSource() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Public IPs allowed to operate or access the bucket.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + * + * @return The publicNetworkSource. + */ + public com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource getPublicNetworkSource() { + if (publicNetworkSourceBuilder_ == null) { + return publicNetworkSource_ == null + ? com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.getDefaultInstance() + : publicNetworkSource_; + } else { + return publicNetworkSourceBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Public IPs allowed to operate or access the bucket.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + */ + public Builder setPublicNetworkSource( + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource value) { + if (publicNetworkSourceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + publicNetworkSource_ = value; + } else { + publicNetworkSourceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Public IPs allowed to operate or access the bucket.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + */ + public Builder setPublicNetworkSource( + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.Builder builderForValue) { + if (publicNetworkSourceBuilder_ == null) { + publicNetworkSource_ = builderForValue.build(); + } else { + publicNetworkSourceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Public IPs allowed to operate or access the bucket.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + */ + public Builder mergePublicNetworkSource( + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource value) { + if (publicNetworkSourceBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && publicNetworkSource_ != null + && publicNetworkSource_ + != com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource + .getDefaultInstance()) { + getPublicNetworkSourceBuilder().mergeFrom(value); + } else { + publicNetworkSource_ = value; + } + } else { + publicNetworkSourceBuilder_.mergeFrom(value); + } + if (publicNetworkSource_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Public IPs allowed to operate or access the bucket.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + */ + public Builder clearPublicNetworkSource() { + bitField0_ = (bitField0_ & ~0x00000002); + publicNetworkSource_ = null; + if (publicNetworkSourceBuilder_ != null) { + publicNetworkSourceBuilder_.dispose(); + publicNetworkSourceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Public IPs allowed to operate or access the bucket.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + */ + public com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.Builder + getPublicNetworkSourceBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetPublicNetworkSourceFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Public IPs allowed to operate or access the bucket.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + */ + public com.google.storage.v2.Bucket.IpFilter.PublicNetworkSourceOrBuilder + getPublicNetworkSourceOrBuilder() { + if (publicNetworkSourceBuilder_ != null) { + return publicNetworkSourceBuilder_.getMessageOrBuilder(); + } else { + return publicNetworkSource_ == null + ? com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.getDefaultInstance() + : publicNetworkSource_; + } + } + + /** + * + * + *
+       * Public IPs allowed to operate or access the bucket.
+       * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter.PublicNetworkSource public_network_source = 2; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource, + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.Builder, + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSourceOrBuilder> + internalGetPublicNetworkSourceFieldBuilder() { + if (publicNetworkSourceBuilder_ == null) { + publicNetworkSourceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource, + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSource.Builder, + com.google.storage.v2.Bucket.IpFilter.PublicNetworkSourceOrBuilder>( + getPublicNetworkSource(), getParentForChildren(), isClean()); + publicNetworkSource_ = null; + } + return publicNetworkSourceBuilder_; + } + + private java.util.List + vpcNetworkSources_ = java.util.Collections.emptyList(); + + private void ensureVpcNetworkSourcesIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + vpcNetworkSources_ = + new java.util.ArrayList( + vpcNetworkSources_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource, + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.Builder, + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSourceOrBuilder> + vpcNetworkSourcesBuilder_; + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getVpcNetworkSourcesList() { + if (vpcNetworkSourcesBuilder_ == null) { + return java.util.Collections.unmodifiableList(vpcNetworkSources_); + } else { + return vpcNetworkSourcesBuilder_.getMessageList(); + } + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getVpcNetworkSourcesCount() { + if (vpcNetworkSourcesBuilder_ == null) { + return vpcNetworkSources_.size(); + } else { + return vpcNetworkSourcesBuilder_.getCount(); + } + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource getVpcNetworkSources( + int index) { + if (vpcNetworkSourcesBuilder_ == null) { + return vpcNetworkSources_.get(index); + } else { + return vpcNetworkSourcesBuilder_.getMessage(index); + } + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setVpcNetworkSources( + int index, com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource value) { + if (vpcNetworkSourcesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureVpcNetworkSourcesIsMutable(); + vpcNetworkSources_.set(index, value); + onChanged(); + } else { + vpcNetworkSourcesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setVpcNetworkSources( + int index, + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.Builder builderForValue) { + if (vpcNetworkSourcesBuilder_ == null) { + ensureVpcNetworkSourcesIsMutable(); + vpcNetworkSources_.set(index, builderForValue.build()); + onChanged(); + } else { + vpcNetworkSourcesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addVpcNetworkSources( + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource value) { + if (vpcNetworkSourcesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureVpcNetworkSourcesIsMutable(); + vpcNetworkSources_.add(value); + onChanged(); + } else { + vpcNetworkSourcesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addVpcNetworkSources( + int index, com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource value) { + if (vpcNetworkSourcesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureVpcNetworkSourcesIsMutable(); + vpcNetworkSources_.add(index, value); + onChanged(); + } else { + vpcNetworkSourcesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addVpcNetworkSources( + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.Builder builderForValue) { + if (vpcNetworkSourcesBuilder_ == null) { + ensureVpcNetworkSourcesIsMutable(); + vpcNetworkSources_.add(builderForValue.build()); + onChanged(); + } else { + vpcNetworkSourcesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addVpcNetworkSources( + int index, + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.Builder builderForValue) { + if (vpcNetworkSourcesBuilder_ == null) { + ensureVpcNetworkSourcesIsMutable(); + vpcNetworkSources_.add(index, builderForValue.build()); + onChanged(); + } else { + vpcNetworkSourcesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllVpcNetworkSources( + java.lang.Iterable + values) { + if (vpcNetworkSourcesBuilder_ == null) { + ensureVpcNetworkSourcesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, vpcNetworkSources_); + onChanged(); + } else { + vpcNetworkSourcesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearVpcNetworkSources() { + if (vpcNetworkSourcesBuilder_ == null) { + vpcNetworkSources_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + vpcNetworkSourcesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeVpcNetworkSources(int index) { + if (vpcNetworkSourcesBuilder_ == null) { + ensureVpcNetworkSourcesIsMutable(); + vpcNetworkSources_.remove(index); + onChanged(); + } else { + vpcNetworkSourcesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.Builder + getVpcNetworkSourcesBuilder(int index) { + return internalGetVpcNetworkSourcesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.IpFilter.VpcNetworkSourceOrBuilder + getVpcNetworkSourcesOrBuilder(int index) { + if (vpcNetworkSourcesBuilder_ == null) { + return vpcNetworkSources_.get(index); + } else { + return vpcNetworkSourcesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + ? extends com.google.storage.v2.Bucket.IpFilter.VpcNetworkSourceOrBuilder> + getVpcNetworkSourcesOrBuilderList() { + if (vpcNetworkSourcesBuilder_ != null) { + return vpcNetworkSourcesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(vpcNetworkSources_); + } + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.Builder + addVpcNetworkSourcesBuilder() { + return internalGetVpcNetworkSourcesFieldBuilder() + .addBuilder( + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.getDefaultInstance()); + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.Builder + addVpcNetworkSourcesBuilder(int index) { + return internalGetVpcNetworkSourcesFieldBuilder() + .addBuilder( + index, com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.getDefaultInstance()); + } + + /** + * + * + *
+       * Optional. The list of network sources that are allowed to access
+       * operations on the bucket or the underlying objects.
+       * 
+ * + * + * repeated .google.storage.v2.Bucket.IpFilter.VpcNetworkSource vpc_network_sources = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getVpcNetworkSourcesBuilderList() { + return internalGetVpcNetworkSourcesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource, + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.Builder, + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSourceOrBuilder> + internalGetVpcNetworkSourcesFieldBuilder() { + if (vpcNetworkSourcesBuilder_ == null) { + vpcNetworkSourcesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource, + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSource.Builder, + com.google.storage.v2.Bucket.IpFilter.VpcNetworkSourceOrBuilder>( + vpcNetworkSources_, + ((bitField0_ & 0x00000004) != 0), + getParentForChildren(), + isClean()); + vpcNetworkSources_ = null; + } + return vpcNetworkSourcesBuilder_; + } + + private boolean allowCrossOrgVpcs_; + + /** + * + * + *
+       * Optional. Whether or not to allow VPCs from orgs different than the
+       * bucket's parent org to access the bucket. When set to true, validations
+       * on the existence of the VPCs won't be performed. If set to false, each
+       * VPC network source is checked to belong to the same org as the bucket as
+       * well as validated for existence.
+       * 
+ * + * bool allow_cross_org_vpcs = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The allowCrossOrgVpcs. + */ + @java.lang.Override + public boolean getAllowCrossOrgVpcs() { + return allowCrossOrgVpcs_; + } + + /** + * + * + *
+       * Optional. Whether or not to allow VPCs from orgs different than the
+       * bucket's parent org to access the bucket. When set to true, validations
+       * on the existence of the VPCs won't be performed. If set to false, each
+       * VPC network source is checked to belong to the same org as the bucket as
+       * well as validated for existence.
+       * 
+ * + * bool allow_cross_org_vpcs = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The allowCrossOrgVpcs to set. + * @return This builder for chaining. + */ + public Builder setAllowCrossOrgVpcs(boolean value) { + + allowCrossOrgVpcs_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Whether or not to allow VPCs from orgs different than the
+       * bucket's parent org to access the bucket. When set to true, validations
+       * on the existence of the VPCs won't be performed. If set to false, each
+       * VPC network source is checked to belong to the same org as the bucket as
+       * well as validated for existence.
+       * 
+ * + * bool allow_cross_org_vpcs = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearAllowCrossOrgVpcs() { + bitField0_ = (bitField0_ & ~0x00000008); + allowCrossOrgVpcs_ = false; + onChanged(); + return this; + } + + private boolean allowAllServiceAgentAccess_; + + /** + * + * + *
+       * Whether or not to allow all P4SA access to the bucket. When set to true,
+       * IP filter config validation doesn't apply.
+       * 
+ * + * optional bool allow_all_service_agent_access = 5; + * + * @return Whether the allowAllServiceAgentAccess field is set. + */ + @java.lang.Override + public boolean hasAllowAllServiceAgentAccess() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+       * Whether or not to allow all P4SA access to the bucket. When set to true,
+       * IP filter config validation doesn't apply.
+       * 
+ * + * optional bool allow_all_service_agent_access = 5; + * + * @return The allowAllServiceAgentAccess. + */ + @java.lang.Override + public boolean getAllowAllServiceAgentAccess() { + return allowAllServiceAgentAccess_; + } + + /** + * + * + *
+       * Whether or not to allow all P4SA access to the bucket. When set to true,
+       * IP filter config validation doesn't apply.
+       * 
+ * + * optional bool allow_all_service_agent_access = 5; + * + * @param value The allowAllServiceAgentAccess to set. + * @return This builder for chaining. + */ + public Builder setAllowAllServiceAgentAccess(boolean value) { + + allowAllServiceAgentAccess_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+       * Whether or not to allow all P4SA access to the bucket. When set to true,
+       * IP filter config validation doesn't apply.
+       * 
+ * + * optional bool allow_all_service_agent_access = 5; + * + * @return This builder for chaining. + */ + public Builder clearAllowAllServiceAgentAccess() { + bitField0_ = (bitField0_ & ~0x00000010); + allowAllServiceAgentAccess_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.IpFilter) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.IpFilter) + private static final com.google.storage.v2.Bucket.IpFilter DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.IpFilter(); + } + + public static com.google.storage.v2.Bucket.IpFilter getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public IpFilter parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface HierarchicalNamespaceOrBuilder + extends + // 
@@protoc_insertion_point(interface_extends:google.storage.v2.Bucket.HierarchicalNamespace) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. Enables the hierarchical namespace feature.
+     * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enabled. + */ + boolean getEnabled(); + } + + /** + * + * + *
+   * Configuration for a bucket's hierarchical namespace feature.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.HierarchicalNamespace} + */ + public static final class HierarchicalNamespace extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Bucket.HierarchicalNamespace) + HierarchicalNamespaceOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "HierarchicalNamespace"); + } + + // Use HierarchicalNamespace.newBuilder() to construct. + private HierarchicalNamespace(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private HierarchicalNamespace() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_HierarchicalNamespace_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_HierarchicalNamespace_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.HierarchicalNamespace.class, + com.google.storage.v2.Bucket.HierarchicalNamespace.Builder.class); + } + + public static final int ENABLED_FIELD_NUMBER = 1; + private boolean enabled_ = false; + + /** + * + * + *
+     * Optional. Enables the hierarchical namespace feature.
+     * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enabled. + */ + @java.lang.Override + public boolean getEnabled() { + return enabled_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (enabled_ != false) { + output.writeBool(1, enabled_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (enabled_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, enabled_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket.HierarchicalNamespace)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket.HierarchicalNamespace other = + (com.google.storage.v2.Bucket.HierarchicalNamespace) obj; + + if (getEnabled() != other.getEnabled()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENABLED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnabled()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket.HierarchicalNamespace 
parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.HierarchicalNamespace parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.HierarchicalNamespace parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.HierarchicalNamespace parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.HierarchicalNamespace parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket.HierarchicalNamespace parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.HierarchicalNamespace parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.HierarchicalNamespace parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.storage.v2.Bucket.HierarchicalNamespace parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.HierarchicalNamespace parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket.HierarchicalNamespace parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket.HierarchicalNamespace parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket.HierarchicalNamespace prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Configuration for a bucket's hierarchical namespace feature.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Bucket.HierarchicalNamespace} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket.HierarchicalNamespace) + com.google.storage.v2.Bucket.HierarchicalNamespaceOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_HierarchicalNamespace_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_HierarchicalNamespace_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.HierarchicalNamespace.class, + com.google.storage.v2.Bucket.HierarchicalNamespace.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.HierarchicalNamespace.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + enabled_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_HierarchicalNamespace_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.HierarchicalNamespace getDefaultInstanceForType() { + return com.google.storage.v2.Bucket.HierarchicalNamespace.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket.HierarchicalNamespace build() { + com.google.storage.v2.Bucket.HierarchicalNamespace result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); 
+ } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.HierarchicalNamespace buildPartial() { + com.google.storage.v2.Bucket.HierarchicalNamespace result = + new com.google.storage.v2.Bucket.HierarchicalNamespace(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Bucket.HierarchicalNamespace result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.enabled_ = enabled_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket.HierarchicalNamespace) { + return mergeFrom((com.google.storage.v2.Bucket.HierarchicalNamespace) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket.HierarchicalNamespace other) { + if (other == com.google.storage.v2.Bucket.HierarchicalNamespace.getDefaultInstance()) + return this; + if (other.getEnabled() != false) { + setEnabled(other.getEnabled()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + enabled_ = input.readBool(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private boolean enabled_; + + /** + * + * + *
+       * Optional. Enables the hierarchical namespace feature.
+       * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enabled. + */ + @java.lang.Override + public boolean getEnabled() { + return enabled_; + } + + /** + * + * + *
+       * Optional. Enables the hierarchical namespace feature.
+       * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The enabled to set. + * @return This builder for chaining. + */ + public Builder setEnabled(boolean value) { + + enabled_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Enables the hierarchical namespace feature.
+       * 
+ * + * bool enabled = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEnabled() { + bitField0_ = (bitField0_ & ~0x00000001); + enabled_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket.HierarchicalNamespace) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket.HierarchicalNamespace) + private static final com.google.storage.v2.Bucket.HierarchicalNamespace DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket.HierarchicalNamespace(); + } + + public static com.google.storage.v2.Bucket.HierarchicalNamespace getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public HierarchicalNamespace parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket.HierarchicalNamespace getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + 
private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Identifier. The name of the bucket.
+   * Format: `projects/{project}/buckets/{bucket}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Identifier. The name of the bucket.
+   * Format: `projects/{project}/buckets/{bucket}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BUCKET_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object bucketId_ = ""; + + /** + * + * + *
+   * Output only. The user-chosen part of the bucket name. The `{bucket}`
+   * portion of the `name` field. For globally unique buckets, this is equal to
+   * the `bucket name` of other Cloud Storage APIs. Example: `pub`.
+   * 
+ * + * string bucket_id = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bucketId. + */ + @java.lang.Override + public java.lang.String getBucketId() { + java.lang.Object ref = bucketId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucketId_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The user-chosen part of the bucket name. The `{bucket}`
+   * portion of the `name` field. For globally unique buckets, this is equal to
+   * the `bucket name` of other Cloud Storage APIs. Example: `pub`.
+   * 
+ * + * string bucket_id = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for bucketId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBucketIdBytes() { + java.lang.Object ref = bucketId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucketId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ETAG_FIELD_NUMBER = 29; + + @SuppressWarnings("serial") + private volatile java.lang.Object etag_ = ""; + + /** + * + * + *
+   * The etag of the bucket.
+   * If included in the metadata of an `UpdateBucketRequest`, the operation is
+   * only performed if the `etag` matches that of the bucket.
+   * 
+ * + * string etag = 29; + * + * @return The etag. + */ + @java.lang.Override + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } + } + + /** + * + * + *
+   * The etag of the bucket.
+   * If included in the metadata of an `UpdateBucketRequest`, the operation is
+   * only performed if the `etag` matches that of the bucket.
+   * 
+ * + * string etag = 29; + * + * @return The bytes for etag. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object project_ = ""; + + /** + * + * + *
+   * Immutable. The project which owns this bucket, in the format of
+   * `projects/{projectIdentifier}`.
+   * `{projectIdentifier}` can be the project ID or project number.
+   * Output values are always in the project number format.
+   * 
+ * + * + * string project = 3 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The project. + */ + @java.lang.Override + public java.lang.String getProject() { + java.lang.Object ref = project_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + project_ = s; + return s; + } + } + + /** + * + * + *
+   * Immutable. The project which owns this bucket, in the format of
+   * `projects/{projectIdentifier}`.
+   * `{projectIdentifier}` can be the project ID or project number.
+   * Output values are always in the project number format.
+   * 
+ * + * + * string project = 3 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for project. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectBytes() { + java.lang.Object ref = project_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + project_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int METAGENERATION_FIELD_NUMBER = 4; + private long metageneration_ = 0L; + + /** + * + * + *
+   * Output only. The metadata generation of this bucket.
+   * 
+ * + * int64 metageneration = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The metageneration. + */ + @java.lang.Override + public long getMetageneration() { + return metageneration_; + } + + public static final int LOCATION_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object location_ = ""; + + /** + * + * + *
+   * Immutable. The location of the bucket. Object data for objects in the
+   * bucket resides in physical storage within this region.  Defaults to `US`.
+   * Attempting to update this field after the bucket is created results in an
+   * error.
+   * 
+ * + * string location = 5 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The location. + */ + @java.lang.Override + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } + } + + /** + * + * + *
+   * Immutable. The location of the bucket. Object data for objects in the
+   * bucket resides in physical storage within this region.  Defaults to `US`.
+   * Attempting to update this field after the bucket is created results in an
+   * error.
+   * 
+ * + * string location = 5 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The bytes for location. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LOCATION_TYPE_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object locationType_ = ""; + + /** + * + * + *
+   * Output only. The location type of the bucket (region, dual-region,
+   * multi-region, etc).
+   * 
+ * + * string location_type = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The locationType. + */ + @java.lang.Override + public java.lang.String getLocationType() { + java.lang.Object ref = locationType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationType_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The location type of the bucket (region, dual-region,
+   * multi-region, etc).
+   * 
+ * + * string location_type = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for locationType. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationTypeBytes() { + java.lang.Object ref = locationType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STORAGE_CLASS_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private volatile java.lang.Object storageClass_ = ""; + + /** + * + * + *
+   * Optional. The bucket's default storage class, used whenever no storageClass
+   * is specified for a newly-created object. This defines how objects in the
+   * bucket are stored and determines the SLA and the cost of storage.
+   * If this value is not specified when the bucket is created, it defaults
+   * to `STANDARD`. For more information, see [Storage
+   * classes](https://developers.google.com/storage/docs/storage-classes).
+   * 
+ * + * string storage_class = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The storageClass. + */ + @java.lang.Override + public java.lang.String getStorageClass() { + java.lang.Object ref = storageClass_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + storageClass_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The bucket's default storage class, used whenever no storageClass
+   * is specified for a newly-created object. This defines how objects in the
+   * bucket are stored and determines the SLA and the cost of storage.
+   * If this value is not specified when the bucket is created, it defaults
+   * to `STANDARD`. For more information, see [Storage
+   * classes](https://developers.google.com/storage/docs/storage-classes).
+   * 
+ * + * string storage_class = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for storageClass. + */ + @java.lang.Override + public com.google.protobuf.ByteString getStorageClassBytes() { + java.lang.Object ref = storageClass_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + storageClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int RPO_FIELD_NUMBER = 27; + + @SuppressWarnings("serial") + private volatile java.lang.Object rpo_ = ""; + + /** + * + * + *
+   * Optional. The recovery point objective for cross-region replication of the
+   * bucket. Applicable only for dual- and multi-region buckets. `DEFAULT` uses
+   * default replication. `ASYNC_TURBO` enables turbo replication, valid for
+   * dual-region buckets only. If rpo is not specified when the bucket is
+   * created, it defaults to `DEFAULT`. For more information, see [Turbo
+   * replication](https://cloud.google.com/storage/docs/availability-durability#turbo-replication).
+   * 
+ * + * string rpo = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The rpo. + */ + @java.lang.Override + public java.lang.String getRpo() { + java.lang.Object ref = rpo_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rpo_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The recovery point objective for cross-region replication of the
+   * bucket. Applicable only for dual- and multi-region buckets. `DEFAULT` uses
+   * default replication. `ASYNC_TURBO` enables turbo replication, valid for
+   * dual-region buckets only. If rpo is not specified when the bucket is
+   * created, it defaults to `DEFAULT`. For more information, see [Turbo
+   * replication](https://cloud.google.com/storage/docs/availability-durability#turbo-replication).
+   * 
+ * + * string rpo = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for rpo. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRpoBytes() { + java.lang.Object ref = rpo_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rpo_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ACL_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private java.util.List acl_; + + /** + * + * + *
+   * Optional. Access controls on the bucket.
+   * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+   * requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List getAclList() { + return acl_; + } + + /** + * + * + *
+   * Optional. Access controls on the bucket.
+   * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+   * requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getAclOrBuilderList() { + return acl_; + } + + /** + * + * + *
+   * Optional. Access controls on the bucket.
+   * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+   * requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getAclCount() { + return acl_.size(); + } + + /** + * + * + *
+   * Optional. Access controls on the bucket.
+   * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+   * requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.BucketAccessControl getAcl(int index) { + return acl_.get(index); + } + + /** + * + * + *
+   * Optional. Access controls on the bucket.
+   * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+   * requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.BucketAccessControlOrBuilder getAclOrBuilder(int index) { + return acl_.get(index); + } + + public static final int DEFAULT_OBJECT_ACL_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private java.util.List defaultObjectAcl_; + + /** + * + * + *
+   * Optional. Default access controls to apply to new objects when no ACL is
+   * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List getDefaultObjectAclList() { + return defaultObjectAcl_; + } + + /** + * + * + *
+   * Optional. Default access controls to apply to new objects when no ACL is
+   * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getDefaultObjectAclOrBuilderList() { + return defaultObjectAcl_; + } + + /** + * + * + *
+   * Optional. Default access controls to apply to new objects when no ACL is
+   * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getDefaultObjectAclCount() { + return defaultObjectAcl_.size(); + } + + /** + * + * + *
+   * Optional. Default access controls to apply to new objects when no ACL is
+   * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectAccessControl getDefaultObjectAcl(int index) { + return defaultObjectAcl_.get(index); + } + + /** + * + * + *
+   * Optional. Default access controls to apply to new objects when no ACL is
+   * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectAccessControlOrBuilder getDefaultObjectAclOrBuilder( + int index) { + return defaultObjectAcl_.get(index); + } + + public static final int LIFECYCLE_FIELD_NUMBER = 10; + private com.google.storage.v2.Bucket.Lifecycle lifecycle_; + + /** + * + * + *
+   * Optional. The bucket's lifecycle configuration. See [Lifecycle
+   * Management](https://developers.google.com/storage/docs/lifecycle) for more
+   * information.
+   * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the lifecycle field is set. + */ + @java.lang.Override + public boolean hasLifecycle() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. The bucket's lifecycle configuration. See [Lifecycle
+   * Management](https://developers.google.com/storage/docs/lifecycle) for more
+   * information.
+   * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lifecycle. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Lifecycle getLifecycle() { + return lifecycle_ == null + ? com.google.storage.v2.Bucket.Lifecycle.getDefaultInstance() + : lifecycle_; + } + + /** + * + * + *
+   * Optional. The bucket's lifecycle configuration. See [Lifecycle
+   * Management](https://developers.google.com/storage/docs/lifecycle) for more
+   * information.
+   * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.LifecycleOrBuilder getLifecycleOrBuilder() { + return lifecycle_ == null + ? com.google.storage.v2.Bucket.Lifecycle.getDefaultInstance() + : lifecycle_; + } + + public static final int CREATE_TIME_FIELD_NUMBER = 11; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
+   * Output only. The creation time of the bucket.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Output only. The creation time of the bucket.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
+   * Output only. The creation time of the bucket.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int CORS_FIELD_NUMBER = 12; + + @SuppressWarnings("serial") + private java.util.List cors_; + + /** + * + * + *
+   * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+   * configuration.
+   * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List getCorsList() { + return cors_; + } + + /** + * + * + *
+   * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+   * configuration.
+   * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getCorsOrBuilderList() { + return cors_; + } + + /** + * + * + *
+   * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+   * configuration.
+   * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getCorsCount() { + return cors_.size(); + } + + /** + * + * + *
+   * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+   * configuration.
+   * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Cors getCors(int index) { + return cors_.get(index); + } + + /** + * + * + *
+   * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+   * configuration.
+   * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.CorsOrBuilder getCorsOrBuilder(int index) { + return cors_.get(index); + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 13; + private com.google.protobuf.Timestamp updateTime_; + + /** + * + * + *
+   * Output only. The modification time of the bucket.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + @java.lang.Override + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Output only. The modification time of the bucket.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + /** + * + * + *
+   * Output only. The modification time of the bucket.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + public static final int DEFAULT_EVENT_BASED_HOLD_FIELD_NUMBER = 14; + private boolean defaultEventBasedHold_ = false; + + /** + * + * + *
+   * Optional. The default value for event-based hold on newly created objects
+   * in this bucket.  Event-based hold is a way to retain objects indefinitely
+   * until an event occurs, signified by the hold's release. After being
+   * released, such objects are subject to bucket-level retention (if any).  One
+   * sample use case of this flag is for banks to hold loan documents for at
+   * least 3 years after loan is paid in full. Here, bucket-level retention is 3
+   * years and the event is loan being paid in full. In this example, these
+   * objects are held intact for any number of years until the event has
+   * occurred (event-based hold on the object is released) and then 3 more years
+   * after that. That means retention duration of the objects begins from the
+   * moment event-based hold transitioned from true to false.  Objects under
+   * event-based hold cannot be deleted, overwritten or archived until the hold
+   * is removed.
+   * 
+ * + * bool default_event_based_hold = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The defaultEventBasedHold. + */ + @java.lang.Override + public boolean getDefaultEventBasedHold() { + return defaultEventBasedHold_; + } + + public static final int LABELS_FIELD_NUMBER = 15; + + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
+   * Optional. User-provided labels, in key/value pairs.
+   * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
+   * Optional. User-provided labels, in key/value pairs.
+   * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
+   * Optional. User-provided labels, in key/value pairs.
+   * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+   * Optional. User-provided labels, in key/value pairs.
+   * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int WEBSITE_FIELD_NUMBER = 16; + private com.google.storage.v2.Bucket.Website website_; + + /** + * + * + *
+   * Optional. The bucket's website config, controlling how the service behaves
+   * when accessing bucket contents as a web site. See the [Static website
+   * examples](https://cloud.google.com/storage/docs/static-website) for more
+   * information.
+   * 
+ * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the website field is set. + */ + @java.lang.Override + public boolean hasWebsite() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Optional. The bucket's website config, controlling how the service behaves
+   * when accessing bucket contents as a web site. See the [Static website
+   * examples](https://cloud.google.com/storage/docs/static-website) for more
+   * information.
+   * 
+ * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The website. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Website getWebsite() { + return website_ == null ? com.google.storage.v2.Bucket.Website.getDefaultInstance() : website_; + } + + /** + * + * + *
+   * Optional. The bucket's website config, controlling how the service behaves
+   * when accessing bucket contents as a web site. See the [Static website
+   * examples](https://cloud.google.com/storage/docs/static-website) for more
+   * information.
+   * 
+ * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.WebsiteOrBuilder getWebsiteOrBuilder() { + return website_ == null ? com.google.storage.v2.Bucket.Website.getDefaultInstance() : website_; + } + + public static final int VERSIONING_FIELD_NUMBER = 17; + private com.google.storage.v2.Bucket.Versioning versioning_; + + /** + * + * + *
+   * Optional. The bucket's versioning configuration.
+   * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the versioning field is set. + */ + @java.lang.Override + public boolean hasVersioning() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * Optional. The bucket's versioning configuration.
+   * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The versioning. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Versioning getVersioning() { + return versioning_ == null + ? com.google.storage.v2.Bucket.Versioning.getDefaultInstance() + : versioning_; + } + + /** + * + * + *
+   * Optional. The bucket's versioning configuration.
+   * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.VersioningOrBuilder getVersioningOrBuilder() { + return versioning_ == null + ? com.google.storage.v2.Bucket.Versioning.getDefaultInstance() + : versioning_; + } + + public static final int LOGGING_FIELD_NUMBER = 18; + private com.google.storage.v2.Bucket.Logging logging_; + + /** + * + * + *
+   * Optional. The bucket's logging config, which defines the destination bucket
+   * and name prefix (if any) for the current bucket's logs.
+   * 
+ * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the logging field is set. + */ + @java.lang.Override + public boolean hasLogging() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+   * Optional. The bucket's logging config, which defines the destination bucket
+   * and name prefix (if any) for the current bucket's logs.
+   * 
+ * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The logging. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Logging getLogging() { + return logging_ == null ? com.google.storage.v2.Bucket.Logging.getDefaultInstance() : logging_; + } + + /** + * + * + *
+   * Optional. The bucket's logging config, which defines the destination bucket
+   * and name prefix (if any) for the current bucket's logs.
+   * 
+ * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.LoggingOrBuilder getLoggingOrBuilder() { + return logging_ == null ? com.google.storage.v2.Bucket.Logging.getDefaultInstance() : logging_; + } + + public static final int OWNER_FIELD_NUMBER = 19; + private com.google.storage.v2.Owner owner_; + + /** + * + * + *
+   * Output only. The owner of the bucket. This is always the project team's
+   * owner group.
+   * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return Whether the owner field is set. + */ + @java.lang.Override + public boolean hasOwner() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+   * Output only. The owner of the bucket. This is always the project team's
+   * owner group.
+   * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The owner. + */ + @java.lang.Override + public com.google.storage.v2.Owner getOwner() { + return owner_ == null ? com.google.storage.v2.Owner.getDefaultInstance() : owner_; + } + + /** + * + * + *
+   * Output only. The owner of the bucket. This is always the project team's
+   * owner group.
+   * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + @java.lang.Override + public com.google.storage.v2.OwnerOrBuilder getOwnerOrBuilder() { + return owner_ == null ? com.google.storage.v2.Owner.getDefaultInstance() : owner_; + } + + public static final int ENCRYPTION_FIELD_NUMBER = 20; + private com.google.storage.v2.Bucket.Encryption encryption_; + + /** + * + * + *
+   * Optional. Encryption config for a bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryption field is set. + */ + @java.lang.Override + public boolean hasEncryption() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+   * Optional. Encryption config for a bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryption. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Encryption getEncryption() { + return encryption_ == null + ? com.google.storage.v2.Bucket.Encryption.getDefaultInstance() + : encryption_; + } + + /** + * + * + *
+   * Optional. Encryption config for a bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.EncryptionOrBuilder getEncryptionOrBuilder() { + return encryption_ == null + ? com.google.storage.v2.Bucket.Encryption.getDefaultInstance() + : encryption_; + } + + public static final int BILLING_FIELD_NUMBER = 21; + private com.google.storage.v2.Bucket.Billing billing_; + + /** + * + * + *
+   * Optional. The bucket's billing configuration.
+   * 
+ * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the billing field is set. + */ + @java.lang.Override + public boolean hasBilling() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
+   * Optional. The bucket's billing configuration.
+   * 
+ * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The billing. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Billing getBilling() { + return billing_ == null ? com.google.storage.v2.Bucket.Billing.getDefaultInstance() : billing_; + } + + /** + * + * + *
+   * Optional. The bucket's billing configuration.
+   * 
+ * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.BillingOrBuilder getBillingOrBuilder() { + return billing_ == null ? com.google.storage.v2.Bucket.Billing.getDefaultInstance() : billing_; + } + + public static final int RETENTION_POLICY_FIELD_NUMBER = 22; + private com.google.storage.v2.Bucket.RetentionPolicy retentionPolicy_; + + /** + * + * + *
+   * Optional. The bucket's retention policy. The retention policy enforces a
+   * minimum retention time for all objects contained in the bucket, based on
+   * their creation time. Any attempt to overwrite or delete objects younger
+   * than the retention period results in a `PERMISSION_DENIED` error.  An
+   * unlocked retention policy can be modified or removed from the bucket via a
+   * storage.buckets.update operation. A locked retention policy cannot be
+   * removed or shortened in duration for the lifetime of the bucket.
+   * Attempting to remove or decrease period of a locked retention policy
+   * results in a `PERMISSION_DENIED` error.
+   * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionPolicy field is set. + */ + @java.lang.Override + public boolean hasRetentionPolicy() { + return ((bitField0_ & 0x00000200) != 0); + } + + /** + * + * + *
+   * Optional. The bucket's retention policy. The retention policy enforces a
+   * minimum retention time for all objects contained in the bucket, based on
+   * their creation time. Any attempt to overwrite or delete objects younger
+   * than the retention period results in a `PERMISSION_DENIED` error.  An
+   * unlocked retention policy can be modified or removed from the bucket via a
+   * storage.buckets.update operation. A locked retention policy cannot be
+   * removed or shortened in duration for the lifetime of the bucket.
+   * Attempting to remove or decrease period of a locked retention policy
+   * results in a `PERMISSION_DENIED` error.
+   * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionPolicy. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.RetentionPolicy getRetentionPolicy() { + return retentionPolicy_ == null + ? com.google.storage.v2.Bucket.RetentionPolicy.getDefaultInstance() + : retentionPolicy_; + } + + /** + * + * + *
+   * Optional. The bucket's retention policy. The retention policy enforces a
+   * minimum retention time for all objects contained in the bucket, based on
+   * their creation time. Any attempt to overwrite or delete objects younger
+   * than the retention period results in a `PERMISSION_DENIED` error.  An
+   * unlocked retention policy can be modified or removed from the bucket via a
+   * storage.buckets.update operation. A locked retention policy cannot be
+   * removed or shortened in duration for the lifetime of the bucket.
+   * Attempting to remove or decrease period of a locked retention policy
+   * results in a `PERMISSION_DENIED` error.
+   * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.RetentionPolicyOrBuilder getRetentionPolicyOrBuilder() { + return retentionPolicy_ == null + ? com.google.storage.v2.Bucket.RetentionPolicy.getDefaultInstance() + : retentionPolicy_; + } + + public static final int IAM_CONFIG_FIELD_NUMBER = 23; + private com.google.storage.v2.Bucket.IamConfig iamConfig_; + + /** + * + * + *
+   * Optional. The bucket's IAM configuration.
+   * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the iamConfig field is set. + */ + @java.lang.Override + public boolean hasIamConfig() { + return ((bitField0_ & 0x00000400) != 0); + } + + /** + * + * + *
+   * Optional. The bucket's IAM configuration.
+   * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The iamConfig. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.IamConfig getIamConfig() { + return iamConfig_ == null + ? com.google.storage.v2.Bucket.IamConfig.getDefaultInstance() + : iamConfig_; + } + + /** + * + * + *
+   * Optional. The bucket's IAM configuration.
+   * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.IamConfigOrBuilder getIamConfigOrBuilder() { + return iamConfig_ == null + ? com.google.storage.v2.Bucket.IamConfig.getDefaultInstance() + : iamConfig_; + } + + public static final int SATISFIES_PZS_FIELD_NUMBER = 25; + private boolean satisfiesPzs_ = false; + + /** + * + * + *
+   * Optional. Reserved for future use.
+   * 
+ * + * bool satisfies_pzs = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The satisfiesPzs. + */ + @java.lang.Override + public boolean getSatisfiesPzs() { + return satisfiesPzs_; + } + + public static final int CUSTOM_PLACEMENT_CONFIG_FIELD_NUMBER = 26; + private com.google.storage.v2.Bucket.CustomPlacementConfig customPlacementConfig_; + + /** + * + * + *
+   * Optional. Configuration that, if present, specifies the data placement for
+   * a [configurable
+   * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+   * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customPlacementConfig field is set. + */ + @java.lang.Override + public boolean hasCustomPlacementConfig() { + return ((bitField0_ & 0x00000800) != 0); + } + + /** + * + * + *
+   * Optional. Configuration that, if present, specifies the data placement for
+   * a [configurable
+   * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+   * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customPlacementConfig. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.CustomPlacementConfig getCustomPlacementConfig() { + return customPlacementConfig_ == null + ? com.google.storage.v2.Bucket.CustomPlacementConfig.getDefaultInstance() + : customPlacementConfig_; + } + + /** + * + * + *
+   * Optional. Configuration that, if present, specifies the data placement for
+   * a [configurable
+   * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+   * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.CustomPlacementConfigOrBuilder + getCustomPlacementConfigOrBuilder() { + return customPlacementConfig_ == null + ? com.google.storage.v2.Bucket.CustomPlacementConfig.getDefaultInstance() + : customPlacementConfig_; + } + + public static final int AUTOCLASS_FIELD_NUMBER = 28; + private com.google.storage.v2.Bucket.Autoclass autoclass_; + + /** + * + * + *
+   * Optional. The bucket's Autoclass configuration. If there is no
+   * configuration, the Autoclass feature is disabled and has no effect on the
+   * bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoclass field is set. + */ + @java.lang.Override + public boolean hasAutoclass() { + return ((bitField0_ & 0x00001000) != 0); + } + + /** + * + * + *
+   * Optional. The bucket's Autoclass configuration. If there is no
+   * configuration, the Autoclass feature is disabled and has no effect on the
+   * bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoclass. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.Autoclass getAutoclass() { + return autoclass_ == null + ? com.google.storage.v2.Bucket.Autoclass.getDefaultInstance() + : autoclass_; + } + + /** + * + * + *
+   * Optional. The bucket's Autoclass configuration. If there is no
+   * configuration, the Autoclass feature is disabled and has no effect on the
+   * bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.AutoclassOrBuilder getAutoclassOrBuilder() { + return autoclass_ == null + ? com.google.storage.v2.Bucket.Autoclass.getDefaultInstance() + : autoclass_; + } + + public static final int HIERARCHICAL_NAMESPACE_FIELD_NUMBER = 32; + private com.google.storage.v2.Bucket.HierarchicalNamespace hierarchicalNamespace_; + + /** + * + * + *
+   * Optional. The bucket's hierarchical namespace configuration. If there is no
+   * configuration, the hierarchical namespace feature is disabled and has
+   * no effect on the bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the hierarchicalNamespace field is set. + */ + @java.lang.Override + public boolean hasHierarchicalNamespace() { + return ((bitField0_ & 0x00002000) != 0); + } + + /** + * + * + *
+   * Optional. The bucket's hierarchical namespace configuration. If there is no
+   * configuration, the hierarchical namespace feature is disabled and has
+   * no effect on the bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The hierarchicalNamespace. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.HierarchicalNamespace getHierarchicalNamespace() { + return hierarchicalNamespace_ == null + ? com.google.storage.v2.Bucket.HierarchicalNamespace.getDefaultInstance() + : hierarchicalNamespace_; + } + + /** + * + * + *
+   * Optional. The bucket's hierarchical namespace configuration. If there is no
+   * configuration, the hierarchical namespace feature is disabled and has
+   * no effect on the bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.HierarchicalNamespaceOrBuilder + getHierarchicalNamespaceOrBuilder() { + return hierarchicalNamespace_ == null + ? com.google.storage.v2.Bucket.HierarchicalNamespace.getDefaultInstance() + : hierarchicalNamespace_; + } + + public static final int SOFT_DELETE_POLICY_FIELD_NUMBER = 31; + private com.google.storage.v2.Bucket.SoftDeletePolicy softDeletePolicy_; + + /** + * + * + *
+   * Optional. The bucket's soft delete policy. The soft delete policy prevents
+   * soft-deleted objects from being permanently deleted.
+   * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the softDeletePolicy field is set. + */ + @java.lang.Override + public boolean hasSoftDeletePolicy() { + return ((bitField0_ & 0x00004000) != 0); + } + + /** + * + * + *
+   * Optional. The bucket's soft delete policy. The soft delete policy prevents
+   * soft-deleted objects from being permanently deleted.
+   * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The softDeletePolicy. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.SoftDeletePolicy getSoftDeletePolicy() { + return softDeletePolicy_ == null + ? com.google.storage.v2.Bucket.SoftDeletePolicy.getDefaultInstance() + : softDeletePolicy_; + } + + /** + * + * + *
+   * Optional. The bucket's soft delete policy. The soft delete policy prevents
+   * soft-deleted objects from being permanently deleted.
+   * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.SoftDeletePolicyOrBuilder getSoftDeletePolicyOrBuilder() { + return softDeletePolicy_ == null + ? com.google.storage.v2.Bucket.SoftDeletePolicy.getDefaultInstance() + : softDeletePolicy_; + } + + public static final int OBJECT_RETENTION_FIELD_NUMBER = 33; + private com.google.storage.v2.Bucket.ObjectRetention objectRetention_; + + /** + * + * + *
+   * Optional. The bucket's object retention configuration. Must be enabled
+   * before objects in the bucket might have retention configured.
+   * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectRetention field is set. + */ + @java.lang.Override + public boolean hasObjectRetention() { + return ((bitField0_ & 0x00008000) != 0); + } + + /** + * + * + *
+   * Optional. The bucket's object retention configuration. Must be enabled
+   * before objects in the bucket might have retention configured.
+   * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectRetention. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.ObjectRetention getObjectRetention() { + return objectRetention_ == null + ? com.google.storage.v2.Bucket.ObjectRetention.getDefaultInstance() + : objectRetention_; + } + + /** + * + * + *
+   * Optional. The bucket's object retention configuration. Must be enabled
+   * before objects in the bucket might have retention configured.
+   * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.ObjectRetentionOrBuilder getObjectRetentionOrBuilder() { + return objectRetention_ == null + ? com.google.storage.v2.Bucket.ObjectRetention.getDefaultInstance() + : objectRetention_; + } + + public static final int IP_FILTER_FIELD_NUMBER = 38; + private com.google.storage.v2.Bucket.IpFilter ipFilter_; + + /** + * + * + *
+   * Optional. The bucket's IP filter configuration.
+   * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ipFilter field is set. + */ + @java.lang.Override + public boolean hasIpFilter() { + return ((bitField0_ & 0x00010000) != 0); + } + + /** + * + * + *
+   * Optional. The bucket's IP filter configuration.
+   * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ipFilter. + */ + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilter getIpFilter() { + return ipFilter_ == null + ? com.google.storage.v2.Bucket.IpFilter.getDefaultInstance() + : ipFilter_; + } + + /** + * + * + *
+   * Optional. The bucket's IP filter configuration.
+   * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Bucket.IpFilterOrBuilder getIpFilterOrBuilder() { + return ipFilter_ == null + ? com.google.storage.v2.Bucket.IpFilter.getDefaultInstance() + : ipFilter_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucketId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, bucketId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(project_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, project_); + } + if (metageneration_ != 0L) { + output.writeInt64(4, metageneration_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, location_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(locationType_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, locationType_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(storageClass_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 7, storageClass_); + } + for (int i = 0; i < acl_.size(); i++) { + output.writeMessage(8, acl_.get(i)); + } + for (int i = 0; i < defaultObjectAcl_.size(); i++) { + output.writeMessage(9, defaultObjectAcl_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(10, 
getLifecycle()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(11, getCreateTime()); + } + for (int i = 0; i < cors_.size(); i++) { + output.writeMessage(12, cors_.get(i)); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(13, getUpdateTime()); + } + if (defaultEventBasedHold_ != false) { + output.writeBool(14, defaultEventBasedHold_); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetLabels(), LabelsDefaultEntryHolder.defaultEntry, 15); + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(16, getWebsite()); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeMessage(17, getVersioning()); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeMessage(18, getLogging()); + } + if (((bitField0_ & 0x00000040) != 0)) { + output.writeMessage(19, getOwner()); + } + if (((bitField0_ & 0x00000080) != 0)) { + output.writeMessage(20, getEncryption()); + } + if (((bitField0_ & 0x00000100) != 0)) { + output.writeMessage(21, getBilling()); + } + if (((bitField0_ & 0x00000200) != 0)) { + output.writeMessage(22, getRetentionPolicy()); + } + if (((bitField0_ & 0x00000400) != 0)) { + output.writeMessage(23, getIamConfig()); + } + if (satisfiesPzs_ != false) { + output.writeBool(25, satisfiesPzs_); + } + if (((bitField0_ & 0x00000800) != 0)) { + output.writeMessage(26, getCustomPlacementConfig()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(rpo_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 27, rpo_); + } + if (((bitField0_ & 0x00001000) != 0)) { + output.writeMessage(28, getAutoclass()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 29, etag_); + } + if (((bitField0_ & 0x00004000) != 0)) { + output.writeMessage(31, getSoftDeletePolicy()); + } + if (((bitField0_ & 0x00002000) != 0)) { + output.writeMessage(32, getHierarchicalNamespace()); + } + if (((bitField0_ & 
0x00008000) != 0)) { + output.writeMessage(33, getObjectRetention()); + } + if (((bitField0_ & 0x00010000) != 0)) { + output.writeMessage(38, getIpFilter()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucketId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, bucketId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(project_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, project_); + } + if (metageneration_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, metageneration_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, location_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(locationType_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, locationType_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(storageClass_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(7, storageClass_); + } + for (int i = 0; i < acl_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, acl_.get(i)); + } + for (int i = 0; i < defaultObjectAcl_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, defaultObjectAcl_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(10, getLifecycle()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(11, getCreateTime()); + } + for (int i = 0; i < cors_.size(); i++) { + 
size += com.google.protobuf.CodedOutputStream.computeMessageSize(12, cors_.get(i)); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(13, getUpdateTime()); + } + if (defaultEventBasedHold_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(14, defaultEventBasedHold_); + } + for (java.util.Map.Entry entry : + internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry labels__ = + LabelsDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(15, labels__); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(16, getWebsite()); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(17, getVersioning()); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(18, getLogging()); + } + if (((bitField0_ & 0x00000040) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(19, getOwner()); + } + if (((bitField0_ & 0x00000080) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(20, getEncryption()); + } + if (((bitField0_ & 0x00000100) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(21, getBilling()); + } + if (((bitField0_ & 0x00000200) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(22, getRetentionPolicy()); + } + if (((bitField0_ & 0x00000400) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(23, getIamConfig()); + } + if (satisfiesPzs_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(25, satisfiesPzs_); + } + if (((bitField0_ & 0x00000800) != 0)) { + size += + 
com.google.protobuf.CodedOutputStream.computeMessageSize(26, getCustomPlacementConfig()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(rpo_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(27, rpo_); + } + if (((bitField0_ & 0x00001000) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(28, getAutoclass()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(29, etag_); + } + if (((bitField0_ & 0x00004000) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(31, getSoftDeletePolicy()); + } + if (((bitField0_ & 0x00002000) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(32, getHierarchicalNamespace()); + } + if (((bitField0_ & 0x00008000) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(33, getObjectRetention()); + } + if (((bitField0_ & 0x00010000) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(38, getIpFilter()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Bucket)) { + return super.equals(obj); + } + com.google.storage.v2.Bucket other = (com.google.storage.v2.Bucket) obj; + + if (!getName().equals(other.getName())) return false; + if (!getBucketId().equals(other.getBucketId())) return false; + if (!getEtag().equals(other.getEtag())) return false; + if (!getProject().equals(other.getProject())) return false; + if (getMetageneration() != other.getMetageneration()) return false; + if (!getLocation().equals(other.getLocation())) return false; + if (!getLocationType().equals(other.getLocationType())) return false; + if (!getStorageClass().equals(other.getStorageClass())) return false; + if 
(!getRpo().equals(other.getRpo())) return false; + if (!getAclList().equals(other.getAclList())) return false; + if (!getDefaultObjectAclList().equals(other.getDefaultObjectAclList())) return false; + if (hasLifecycle() != other.hasLifecycle()) return false; + if (hasLifecycle()) { + if (!getLifecycle().equals(other.getLifecycle())) return false; + } + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (!getCorsList().equals(other.getCorsList())) return false; + if (hasUpdateTime() != other.hasUpdateTime()) return false; + if (hasUpdateTime()) { + if (!getUpdateTime().equals(other.getUpdateTime())) return false; + } + if (getDefaultEventBasedHold() != other.getDefaultEventBasedHold()) return false; + if (!internalGetLabels().equals(other.internalGetLabels())) return false; + if (hasWebsite() != other.hasWebsite()) return false; + if (hasWebsite()) { + if (!getWebsite().equals(other.getWebsite())) return false; + } + if (hasVersioning() != other.hasVersioning()) return false; + if (hasVersioning()) { + if (!getVersioning().equals(other.getVersioning())) return false; + } + if (hasLogging() != other.hasLogging()) return false; + if (hasLogging()) { + if (!getLogging().equals(other.getLogging())) return false; + } + if (hasOwner() != other.hasOwner()) return false; + if (hasOwner()) { + if (!getOwner().equals(other.getOwner())) return false; + } + if (hasEncryption() != other.hasEncryption()) return false; + if (hasEncryption()) { + if (!getEncryption().equals(other.getEncryption())) return false; + } + if (hasBilling() != other.hasBilling()) return false; + if (hasBilling()) { + if (!getBilling().equals(other.getBilling())) return false; + } + if (hasRetentionPolicy() != other.hasRetentionPolicy()) return false; + if (hasRetentionPolicy()) { + if (!getRetentionPolicy().equals(other.getRetentionPolicy())) return false; + } + if (hasIamConfig() != 
other.hasIamConfig()) return false; + if (hasIamConfig()) { + if (!getIamConfig().equals(other.getIamConfig())) return false; + } + if (getSatisfiesPzs() != other.getSatisfiesPzs()) return false; + if (hasCustomPlacementConfig() != other.hasCustomPlacementConfig()) return false; + if (hasCustomPlacementConfig()) { + if (!getCustomPlacementConfig().equals(other.getCustomPlacementConfig())) return false; + } + if (hasAutoclass() != other.hasAutoclass()) return false; + if (hasAutoclass()) { + if (!getAutoclass().equals(other.getAutoclass())) return false; + } + if (hasHierarchicalNamespace() != other.hasHierarchicalNamespace()) return false; + if (hasHierarchicalNamespace()) { + if (!getHierarchicalNamespace().equals(other.getHierarchicalNamespace())) return false; + } + if (hasSoftDeletePolicy() != other.hasSoftDeletePolicy()) return false; + if (hasSoftDeletePolicy()) { + if (!getSoftDeletePolicy().equals(other.getSoftDeletePolicy())) return false; + } + if (hasObjectRetention() != other.hasObjectRetention()) return false; + if (hasObjectRetention()) { + if (!getObjectRetention().equals(other.getObjectRetention())) return false; + } + if (hasIpFilter() != other.hasIpFilter()) return false; + if (hasIpFilter()) { + if (!getIpFilter().equals(other.getIpFilter())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + BUCKET_ID_FIELD_NUMBER; + hash = (53 * hash) + getBucketId().hashCode(); + hash = (37 * hash) + ETAG_FIELD_NUMBER; + hash = (53 * hash) + getEtag().hashCode(); + hash = (37 * hash) + PROJECT_FIELD_NUMBER; + hash = (53 * hash) + getProject().hashCode(); + hash = (37 * hash) + METAGENERATION_FIELD_NUMBER; + hash = 
(53 * hash) + com.google.protobuf.Internal.hashLong(getMetageneration()); + hash = (37 * hash) + LOCATION_FIELD_NUMBER; + hash = (53 * hash) + getLocation().hashCode(); + hash = (37 * hash) + LOCATION_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getLocationType().hashCode(); + hash = (37 * hash) + STORAGE_CLASS_FIELD_NUMBER; + hash = (53 * hash) + getStorageClass().hashCode(); + hash = (37 * hash) + RPO_FIELD_NUMBER; + hash = (53 * hash) + getRpo().hashCode(); + if (getAclCount() > 0) { + hash = (37 * hash) + ACL_FIELD_NUMBER; + hash = (53 * hash) + getAclList().hashCode(); + } + if (getDefaultObjectAclCount() > 0) { + hash = (37 * hash) + DEFAULT_OBJECT_ACL_FIELD_NUMBER; + hash = (53 * hash) + getDefaultObjectAclList().hashCode(); + } + if (hasLifecycle()) { + hash = (37 * hash) + LIFECYCLE_FIELD_NUMBER; + hash = (53 * hash) + getLifecycle().hashCode(); + } + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (getCorsCount() > 0) { + hash = (37 * hash) + CORS_FIELD_NUMBER; + hash = (53 * hash) + getCorsList().hashCode(); + } + if (hasUpdateTime()) { + hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getUpdateTime().hashCode(); + } + hash = (37 * hash) + DEFAULT_EVENT_BASED_HOLD_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getDefaultEventBasedHold()); + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetLabels().hashCode(); + } + if (hasWebsite()) { + hash = (37 * hash) + WEBSITE_FIELD_NUMBER; + hash = (53 * hash) + getWebsite().hashCode(); + } + if (hasVersioning()) { + hash = (37 * hash) + VERSIONING_FIELD_NUMBER; + hash = (53 * hash) + getVersioning().hashCode(); + } + if (hasLogging()) { + hash = (37 * hash) + LOGGING_FIELD_NUMBER; + hash = (53 * hash) + getLogging().hashCode(); + } + if (hasOwner()) { + hash = (37 * hash) + OWNER_FIELD_NUMBER; + hash = (53 * 
hash) + getOwner().hashCode(); + } + if (hasEncryption()) { + hash = (37 * hash) + ENCRYPTION_FIELD_NUMBER; + hash = (53 * hash) + getEncryption().hashCode(); + } + if (hasBilling()) { + hash = (37 * hash) + BILLING_FIELD_NUMBER; + hash = (53 * hash) + getBilling().hashCode(); + } + if (hasRetentionPolicy()) { + hash = (37 * hash) + RETENTION_POLICY_FIELD_NUMBER; + hash = (53 * hash) + getRetentionPolicy().hashCode(); + } + if (hasIamConfig()) { + hash = (37 * hash) + IAM_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getIamConfig().hashCode(); + } + hash = (37 * hash) + SATISFIES_PZS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSatisfiesPzs()); + if (hasCustomPlacementConfig()) { + hash = (37 * hash) + CUSTOM_PLACEMENT_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getCustomPlacementConfig().hashCode(); + } + if (hasAutoclass()) { + hash = (37 * hash) + AUTOCLASS_FIELD_NUMBER; + hash = (53 * hash) + getAutoclass().hashCode(); + } + if (hasHierarchicalNamespace()) { + hash = (37 * hash) + HIERARCHICAL_NAMESPACE_FIELD_NUMBER; + hash = (53 * hash) + getHierarchicalNamespace().hashCode(); + } + if (hasSoftDeletePolicy()) { + hash = (37 * hash) + SOFT_DELETE_POLICY_FIELD_NUMBER; + hash = (53 * hash) + getSoftDeletePolicy().hashCode(); + } + if (hasObjectRetention()) { + hash = (37 * hash) + OBJECT_RETENTION_FIELD_NUMBER; + hash = (53 * hash) + getObjectRetention().hashCode(); + } + if (hasIpFilter()) { + hash = (37 * hash) + IP_FILTER_FIELD_NUMBER; + hash = (53 * hash) + getIpFilter().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Bucket parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Bucket parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Bucket parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Bucket parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, 
extensionRegistry); + } + + public static com.google.storage.v2.Bucket parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Bucket parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Bucket prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * A bucket.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Bucket} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Bucket) + com.google.storage.v2.BucketOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto.internal_static_google_storage_v2_Bucket_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 15: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 15: + return internalGetMutableLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Bucket_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Bucket.class, com.google.storage.v2.Bucket.Builder.class); + } + + // Construct using com.google.storage.v2.Bucket.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetAclFieldBuilder(); + internalGetDefaultObjectAclFieldBuilder(); + internalGetLifecycleFieldBuilder(); + internalGetCreateTimeFieldBuilder(); + internalGetCorsFieldBuilder(); + 
internalGetUpdateTimeFieldBuilder(); + internalGetWebsiteFieldBuilder(); + internalGetVersioningFieldBuilder(); + internalGetLoggingFieldBuilder(); + internalGetOwnerFieldBuilder(); + internalGetEncryptionFieldBuilder(); + internalGetBillingFieldBuilder(); + internalGetRetentionPolicyFieldBuilder(); + internalGetIamConfigFieldBuilder(); + internalGetCustomPlacementConfigFieldBuilder(); + internalGetAutoclassFieldBuilder(); + internalGetHierarchicalNamespaceFieldBuilder(); + internalGetSoftDeletePolicyFieldBuilder(); + internalGetObjectRetentionFieldBuilder(); + internalGetIpFilterFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + bucketId_ = ""; + etag_ = ""; + project_ = ""; + metageneration_ = 0L; + location_ = ""; + locationType_ = ""; + storageClass_ = ""; + rpo_ = ""; + if (aclBuilder_ == null) { + acl_ = java.util.Collections.emptyList(); + } else { + acl_ = null; + aclBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000200); + if (defaultObjectAclBuilder_ == null) { + defaultObjectAcl_ = java.util.Collections.emptyList(); + } else { + defaultObjectAcl_ = null; + defaultObjectAclBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000400); + lifecycle_ = null; + if (lifecycleBuilder_ != null) { + lifecycleBuilder_.dispose(); + lifecycleBuilder_ = null; + } + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + if (corsBuilder_ == null) { + cors_ = java.util.Collections.emptyList(); + } else { + cors_ = null; + corsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00002000); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + defaultEventBasedHold_ = false; + internalGetMutableLabels().clear(); + website_ = null; + if (websiteBuilder_ != null) { + websiteBuilder_.dispose(); + websiteBuilder_ = null; + } + versioning_ = null; + if 
(versioningBuilder_ != null) { + versioningBuilder_.dispose(); + versioningBuilder_ = null; + } + logging_ = null; + if (loggingBuilder_ != null) { + loggingBuilder_.dispose(); + loggingBuilder_ = null; + } + owner_ = null; + if (ownerBuilder_ != null) { + ownerBuilder_.dispose(); + ownerBuilder_ = null; + } + encryption_ = null; + if (encryptionBuilder_ != null) { + encryptionBuilder_.dispose(); + encryptionBuilder_ = null; + } + billing_ = null; + if (billingBuilder_ != null) { + billingBuilder_.dispose(); + billingBuilder_ = null; + } + retentionPolicy_ = null; + if (retentionPolicyBuilder_ != null) { + retentionPolicyBuilder_.dispose(); + retentionPolicyBuilder_ = null; + } + iamConfig_ = null; + if (iamConfigBuilder_ != null) { + iamConfigBuilder_.dispose(); + iamConfigBuilder_ = null; + } + satisfiesPzs_ = false; + customPlacementConfig_ = null; + if (customPlacementConfigBuilder_ != null) { + customPlacementConfigBuilder_.dispose(); + customPlacementConfigBuilder_ = null; + } + autoclass_ = null; + if (autoclassBuilder_ != null) { + autoclassBuilder_.dispose(); + autoclassBuilder_ = null; + } + hierarchicalNamespace_ = null; + if (hierarchicalNamespaceBuilder_ != null) { + hierarchicalNamespaceBuilder_.dispose(); + hierarchicalNamespaceBuilder_ = null; + } + softDeletePolicy_ = null; + if (softDeletePolicyBuilder_ != null) { + softDeletePolicyBuilder_.dispose(); + softDeletePolicyBuilder_ = null; + } + objectRetention_ = null; + if (objectRetentionBuilder_ != null) { + objectRetentionBuilder_.dispose(); + objectRetentionBuilder_ = null; + } + ipFilter_ = null; + if (ipFilterBuilder_ != null) { + ipFilterBuilder_.dispose(); + ipFilterBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto.internal_static_google_storage_v2_Bucket_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Bucket getDefaultInstanceForType() 
{ + return com.google.storage.v2.Bucket.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Bucket build() { + com.google.storage.v2.Bucket result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Bucket buildPartial() { + com.google.storage.v2.Bucket result = new com.google.storage.v2.Bucket(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.storage.v2.Bucket result) { + if (aclBuilder_ == null) { + if (((bitField0_ & 0x00000200) != 0)) { + acl_ = java.util.Collections.unmodifiableList(acl_); + bitField0_ = (bitField0_ & ~0x00000200); + } + result.acl_ = acl_; + } else { + result.acl_ = aclBuilder_.build(); + } + if (defaultObjectAclBuilder_ == null) { + if (((bitField0_ & 0x00000400) != 0)) { + defaultObjectAcl_ = java.util.Collections.unmodifiableList(defaultObjectAcl_); + bitField0_ = (bitField0_ & ~0x00000400); + } + result.defaultObjectAcl_ = defaultObjectAcl_; + } else { + result.defaultObjectAcl_ = defaultObjectAclBuilder_.build(); + } + if (corsBuilder_ == null) { + if (((bitField0_ & 0x00002000) != 0)) { + cors_ = java.util.Collections.unmodifiableList(cors_); + bitField0_ = (bitField0_ & ~0x00002000); + } + result.cors_ = cors_; + } else { + result.cors_ = corsBuilder_.build(); + } + } + + private void buildPartial0(com.google.storage.v2.Bucket result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.bucketId_ = bucketId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.etag_ = etag_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.project_ = project_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + 
result.metageneration_ = metageneration_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.location_ = location_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.locationType_ = locationType_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.storageClass_ = storageClass_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.rpo_ = rpo_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000800) != 0)) { + result.lifecycle_ = lifecycleBuilder_ == null ? lifecycle_ : lifecycleBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00004000) != 0)) { + result.updateTime_ = updateTimeBuilder_ == null ? updateTime_ : updateTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00008000) != 0)) { + result.defaultEventBasedHold_ = defaultEventBasedHold_; + } + if (((from_bitField0_ & 0x00010000) != 0)) { + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + } + if (((from_bitField0_ & 0x00020000) != 0)) { + result.website_ = websiteBuilder_ == null ? website_ : websiteBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00040000) != 0)) { + result.versioning_ = versioningBuilder_ == null ? versioning_ : versioningBuilder_.build(); + to_bitField0_ |= 0x00000010; + } + if (((from_bitField0_ & 0x00080000) != 0)) { + result.logging_ = loggingBuilder_ == null ? logging_ : loggingBuilder_.build(); + to_bitField0_ |= 0x00000020; + } + if (((from_bitField0_ & 0x00100000) != 0)) { + result.owner_ = ownerBuilder_ == null ? owner_ : ownerBuilder_.build(); + to_bitField0_ |= 0x00000040; + } + if (((from_bitField0_ & 0x00200000) != 0)) { + result.encryption_ = encryptionBuilder_ == null ? 
encryption_ : encryptionBuilder_.build(); + to_bitField0_ |= 0x00000080; + } + if (((from_bitField0_ & 0x00400000) != 0)) { + result.billing_ = billingBuilder_ == null ? billing_ : billingBuilder_.build(); + to_bitField0_ |= 0x00000100; + } + if (((from_bitField0_ & 0x00800000) != 0)) { + result.retentionPolicy_ = + retentionPolicyBuilder_ == null ? retentionPolicy_ : retentionPolicyBuilder_.build(); + to_bitField0_ |= 0x00000200; + } + if (((from_bitField0_ & 0x01000000) != 0)) { + result.iamConfig_ = iamConfigBuilder_ == null ? iamConfig_ : iamConfigBuilder_.build(); + to_bitField0_ |= 0x00000400; + } + if (((from_bitField0_ & 0x02000000) != 0)) { + result.satisfiesPzs_ = satisfiesPzs_; + } + if (((from_bitField0_ & 0x04000000) != 0)) { + result.customPlacementConfig_ = + customPlacementConfigBuilder_ == null + ? customPlacementConfig_ + : customPlacementConfigBuilder_.build(); + to_bitField0_ |= 0x00000800; + } + if (((from_bitField0_ & 0x08000000) != 0)) { + result.autoclass_ = autoclassBuilder_ == null ? autoclass_ : autoclassBuilder_.build(); + to_bitField0_ |= 0x00001000; + } + if (((from_bitField0_ & 0x10000000) != 0)) { + result.hierarchicalNamespace_ = + hierarchicalNamespaceBuilder_ == null + ? hierarchicalNamespace_ + : hierarchicalNamespaceBuilder_.build(); + to_bitField0_ |= 0x00002000; + } + if (((from_bitField0_ & 0x20000000) != 0)) { + result.softDeletePolicy_ = + softDeletePolicyBuilder_ == null ? softDeletePolicy_ : softDeletePolicyBuilder_.build(); + to_bitField0_ |= 0x00004000; + } + if (((from_bitField0_ & 0x40000000) != 0)) { + result.objectRetention_ = + objectRetentionBuilder_ == null ? objectRetention_ : objectRetentionBuilder_.build(); + to_bitField0_ |= 0x00008000; + } + if (((from_bitField0_ & 0x80000000) != 0)) { + result.ipFilter_ = ipFilterBuilder_ == null ? 
ipFilter_ : ipFilterBuilder_.build(); + to_bitField0_ |= 0x00010000; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Bucket) { + return mergeFrom((com.google.storage.v2.Bucket) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Bucket other) { + if (other == com.google.storage.v2.Bucket.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getBucketId().isEmpty()) { + bucketId_ = other.bucketId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getEtag().isEmpty()) { + etag_ = other.etag_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getProject().isEmpty()) { + project_ = other.project_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (other.getMetageneration() != 0L) { + setMetageneration(other.getMetageneration()); + } + if (!other.getLocation().isEmpty()) { + location_ = other.location_; + bitField0_ |= 0x00000020; + onChanged(); + } + if (!other.getLocationType().isEmpty()) { + locationType_ = other.locationType_; + bitField0_ |= 0x00000040; + onChanged(); + } + if (!other.getStorageClass().isEmpty()) { + storageClass_ = other.storageClass_; + bitField0_ |= 0x00000080; + onChanged(); + } + if (!other.getRpo().isEmpty()) { + rpo_ = other.rpo_; + bitField0_ |= 0x00000100; + onChanged(); + } + if (aclBuilder_ == null) { + if (!other.acl_.isEmpty()) { + if (acl_.isEmpty()) { + acl_ = other.acl_; + bitField0_ = (bitField0_ & ~0x00000200); + } else { + ensureAclIsMutable(); + acl_.addAll(other.acl_); + } + onChanged(); + } + } else { + if (!other.acl_.isEmpty()) { + if (aclBuilder_.isEmpty()) { + aclBuilder_.dispose(); + aclBuilder_ = null; + acl_ = other.acl_; + bitField0_ = (bitField0_ & ~0x00000200); + aclBuilder_ = + 
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetAclFieldBuilder() + : null; + } else { + aclBuilder_.addAllMessages(other.acl_); + } + } + } + if (defaultObjectAclBuilder_ == null) { + if (!other.defaultObjectAcl_.isEmpty()) { + if (defaultObjectAcl_.isEmpty()) { + defaultObjectAcl_ = other.defaultObjectAcl_; + bitField0_ = (bitField0_ & ~0x00000400); + } else { + ensureDefaultObjectAclIsMutable(); + defaultObjectAcl_.addAll(other.defaultObjectAcl_); + } + onChanged(); + } + } else { + if (!other.defaultObjectAcl_.isEmpty()) { + if (defaultObjectAclBuilder_.isEmpty()) { + defaultObjectAclBuilder_.dispose(); + defaultObjectAclBuilder_ = null; + defaultObjectAcl_ = other.defaultObjectAcl_; + bitField0_ = (bitField0_ & ~0x00000400); + defaultObjectAclBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetDefaultObjectAclFieldBuilder() + : null; + } else { + defaultObjectAclBuilder_.addAllMessages(other.defaultObjectAcl_); + } + } + } + if (other.hasLifecycle()) { + mergeLifecycle(other.getLifecycle()); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (corsBuilder_ == null) { + if (!other.cors_.isEmpty()) { + if (cors_.isEmpty()) { + cors_ = other.cors_; + bitField0_ = (bitField0_ & ~0x00002000); + } else { + ensureCorsIsMutable(); + cors_.addAll(other.cors_); + } + onChanged(); + } + } else { + if (!other.cors_.isEmpty()) { + if (corsBuilder_.isEmpty()) { + corsBuilder_.dispose(); + corsBuilder_ = null; + cors_ = other.cors_; + bitField0_ = (bitField0_ & ~0x00002000); + corsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetCorsFieldBuilder() + : null; + } else { + corsBuilder_.addAllMessages(other.cors_); + } + } + } + if (other.hasUpdateTime()) { + mergeUpdateTime(other.getUpdateTime()); + } + if (other.getDefaultEventBasedHold() != false) { + setDefaultEventBasedHold(other.getDefaultEventBasedHold()); + } + internalGetMutableLabels().mergeFrom(other.internalGetLabels()); + bitField0_ |= 0x00010000; + if (other.hasWebsite()) { + mergeWebsite(other.getWebsite()); + } + if (other.hasVersioning()) { + mergeVersioning(other.getVersioning()); + } + if (other.hasLogging()) { + mergeLogging(other.getLogging()); + } + if (other.hasOwner()) { + mergeOwner(other.getOwner()); + } + if (other.hasEncryption()) { + mergeEncryption(other.getEncryption()); + } + if (other.hasBilling()) { + mergeBilling(other.getBilling()); + } + if (other.hasRetentionPolicy()) { + mergeRetentionPolicy(other.getRetentionPolicy()); + } + if (other.hasIamConfig()) { + mergeIamConfig(other.getIamConfig()); + } + if (other.getSatisfiesPzs() != false) { + setSatisfiesPzs(other.getSatisfiesPzs()); + } + if (other.hasCustomPlacementConfig()) { + mergeCustomPlacementConfig(other.getCustomPlacementConfig()); + } + if (other.hasAutoclass()) { + mergeAutoclass(other.getAutoclass()); + } + if (other.hasHierarchicalNamespace()) { + mergeHierarchicalNamespace(other.getHierarchicalNamespace()); + } + if (other.hasSoftDeletePolicy()) { + mergeSoftDeletePolicy(other.getSoftDeletePolicy()); + } + if (other.hasObjectRetention()) { + mergeObjectRetention(other.getObjectRetention()); + } + if (other.hasIpFilter()) { + mergeIpFilter(other.getIpFilter()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if 
(extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + bucketId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + project_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 26 + case 32: + { + metageneration_ = input.readInt64(); + bitField0_ |= 0x00000010; + break; + } // case 32 + case 42: + { + location_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 42 + case 50: + { + locationType_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000040; + break; + } // case 50 + case 58: + { + storageClass_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000080; + break; + } // case 58 + case 66: + { + com.google.storage.v2.BucketAccessControl m = + input.readMessage( + com.google.storage.v2.BucketAccessControl.parser(), extensionRegistry); + if (aclBuilder_ == null) { + ensureAclIsMutable(); + acl_.add(m); + } else { + aclBuilder_.addMessage(m); + } + break; + } // case 66 + case 74: + { + com.google.storage.v2.ObjectAccessControl m = + input.readMessage( + com.google.storage.v2.ObjectAccessControl.parser(), extensionRegistry); + if (defaultObjectAclBuilder_ == null) { + ensureDefaultObjectAclIsMutable(); + defaultObjectAcl_.add(m); + } else { + defaultObjectAclBuilder_.addMessage(m); + } + break; + } // case 74 + case 82: + { + input.readMessage( + internalGetLifecycleFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000800; + break; + } // case 82 + case 90: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00001000; + break; + } // case 90 + case 98: + { + 
com.google.storage.v2.Bucket.Cors m = + input.readMessage( + com.google.storage.v2.Bucket.Cors.parser(), extensionRegistry); + if (corsBuilder_ == null) { + ensureCorsIsMutable(); + cors_.add(m); + } else { + corsBuilder_.addMessage(m); + } + break; + } // case 98 + case 106: + { + input.readMessage( + internalGetUpdateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00004000; + break; + } // case 106 + case 112: + { + defaultEventBasedHold_ = input.readBool(); + bitField0_ |= 0x00008000; + break; + } // case 112 + case 122: + { + com.google.protobuf.MapEntry labels__ = + input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableLabels() + .getMutableMap() + .put(labels__.getKey(), labels__.getValue()); + bitField0_ |= 0x00010000; + break; + } // case 122 + case 130: + { + input.readMessage(internalGetWebsiteFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00020000; + break; + } // case 130 + case 138: + { + input.readMessage( + internalGetVersioningFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00040000; + break; + } // case 138 + case 146: + { + input.readMessage(internalGetLoggingFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00080000; + break; + } // case 146 + case 154: + { + input.readMessage(internalGetOwnerFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00100000; + break; + } // case 154 + case 162: + { + input.readMessage( + internalGetEncryptionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00200000; + break; + } // case 162 + case 170: + { + input.readMessage(internalGetBillingFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00400000; + break; + } // case 170 + case 178: + { + input.readMessage( + internalGetRetentionPolicyFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00800000; + break; + } // case 178 + case 186: + { + 
input.readMessage( + internalGetIamConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x01000000; + break; + } // case 186 + case 200: + { + satisfiesPzs_ = input.readBool(); + bitField0_ |= 0x02000000; + break; + } // case 200 + case 210: + { + input.readMessage( + internalGetCustomPlacementConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x04000000; + break; + } // case 210 + case 218: + { + rpo_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000100; + break; + } // case 218 + case 226: + { + input.readMessage( + internalGetAutoclassFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x08000000; + break; + } // case 226 + case 234: + { + etag_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 234 + case 250: + { + input.readMessage( + internalGetSoftDeletePolicyFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x20000000; + break; + } // case 250 + case 258: + { + input.readMessage( + internalGetHierarchicalNamespaceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x10000000; + break; + } // case 258 + case 266: + { + input.readMessage( + internalGetObjectRetentionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x40000000; + break; + } // case 266 + case 306: + { + input.readMessage( + internalGetIpFilterFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x80000000; + break; + } // case 306 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Identifier. The name of the bucket.
+     * Format: `projects/{project}/buckets/{bucket}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Identifier. The name of the bucket.
+     * Format: `projects/{project}/buckets/{bucket}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Identifier. The name of the bucket.
+     * Format: `projects/{project}/buckets/{bucket}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Identifier. The name of the bucket.
+     * Format: `projects/{project}/buckets/{bucket}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Identifier. The name of the bucket.
+     * Format: `projects/{project}/buckets/{bucket}`
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object bucketId_ = ""; + + /** + * + * + *
+     * Output only. The user-chosen part of the bucket name. The `{bucket}`
+     * portion of the `name` field. For globally unique buckets, this is equal to
+     * the `bucket name` of other Cloud Storage APIs. Example: `pub`.
+     * 
+ * + * string bucket_id = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bucketId. + */ + public java.lang.String getBucketId() { + java.lang.Object ref = bucketId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucketId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. The user-chosen part of the bucket name. The `{bucket}`
+     * portion of the `name` field. For globally unique buckets, this is equal to
+     * the `bucket name` of other Cloud Storage APIs. Example: `pub`.
+     * 
+ * + * string bucket_id = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for bucketId. + */ + public com.google.protobuf.ByteString getBucketIdBytes() { + java.lang.Object ref = bucketId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucketId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. The user-chosen part of the bucket name. The `{bucket}`
+     * portion of the `name` field. For globally unique buckets, this is equal to
+     * the `bucket name` of other Cloud Storage APIs. Example: `pub`.
+     * 
+ * + * string bucket_id = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bucketId to set. + * @return This builder for chaining. + */ + public Builder setBucketId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bucketId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The user-chosen part of the bucket name. The `{bucket}`
+     * portion of the `name` field. For globally unique buckets, this is equal to
+     * the `bucket name` of other Cloud Storage APIs. Example: `pub`.
+     * 
+ * + * string bucket_id = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearBucketId() { + bucketId_ = getDefaultInstance().getBucketId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The user-chosen part of the bucket name. The `{bucket}`
+     * portion of the `name` field. For globally unique buckets, this is equal to
+     * the `bucket name` of other Cloud Storage APIs. Example: `pub`.
+     * 
+ * + * string bucket_id = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for bucketId to set. + * @return This builder for chaining. + */ + public Builder setBucketIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + bucketId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object etag_ = ""; + + /** + * + * + *
+     * The etag of the bucket.
+     * If included in the metadata of an `UpdateBucketRequest`, the operation is
+     * only performed if the `etag` matches that of the bucket.
+     * 
+ * + * string etag = 29; + * + * @return The etag. + */ + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The etag of the bucket.
+     * If included in the metadata of an `UpdateBucketRequest`, the operation is
+     * only performed if the `etag` matches that of the bucket.
+     * 
+ * + * string etag = 29; + * + * @return The bytes for etag. + */ + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The etag of the bucket.
+     * If included in the metadata of an `UpdateBucketRequest`, the operation is
+     * only performed if the `etag` matches that of the bucket.
+     * 
+ * + * string etag = 29; + * + * @param value The etag to set. + * @return This builder for chaining. + */ + public Builder setEtag(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + etag_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * The etag of the bucket.
+     * If included in the metadata of an `UpdateBucketRequest`, the operation is
+     * only performed if the `etag` matches that of the bucket.
+     * 
+ * + * string etag = 29; + * + * @return This builder for chaining. + */ + public Builder clearEtag() { + etag_ = getDefaultInstance().getEtag(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * The etag of the bucket.
+     * If included in the metadata of an `UpdateBucketRequest`, the operation is
+     * only performed if the `etag` matches that of the bucket.
+     * 
+ * + * string etag = 29; + * + * @param value The bytes for etag to set. + * @return This builder for chaining. + */ + public Builder setEtagBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + etag_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object project_ = ""; + + /** + * + * + *
+     * Immutable. The project which owns this bucket, in the format of
+     * `projects/{projectIdentifier}`.
+     * `{projectIdentifier}` can be the project ID or project number.
+     * Output values are always in the project number format.
+     * 
+ * + * + * string project = 3 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The project. + */ + public java.lang.String getProject() { + java.lang.Object ref = project_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + project_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Immutable. The project which owns this bucket, in the format of
+     * `projects/{projectIdentifier}`.
+     * `{projectIdentifier}` can be the project ID or project number.
+     * Output values are always in the project number format.
+     * 
+ * + * + * string project = 3 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for project. + */ + public com.google.protobuf.ByteString getProjectBytes() { + java.lang.Object ref = project_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + project_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Immutable. The project which owns this bucket, in the format of
+     * `projects/{projectIdentifier}`.
+     * `{projectIdentifier}` can be the project ID or project number.
+     * Output values are always in the project number format.
+     * 
+ * + * + * string project = 3 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @param value The project to set. + * @return This builder for chaining. + */ + public Builder setProject(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + project_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. The project which owns this bucket, in the format of
+     * `projects/{projectIdentifier}`.
+     * `{projectIdentifier}` can be the project ID or project number.
+     * Output values are always in the project number format.
+     * 
+ * + * + * string project = 3 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearProject() { + project_ = getDefaultInstance().getProject(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. The project which owns this bucket, in the format of
+     * `projects/{projectIdentifier}`.
+     * `{projectIdentifier}` can be the project ID or project number.
+     * Output values are always in the project number format.
+     * 
+ * + * + * string project = 3 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for project to set. + * @return This builder for chaining. + */ + public Builder setProjectBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + project_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private long metageneration_; + + /** + * + * + *
+     * Output only. The metadata generation of this bucket.
+     * 
+ * + * int64 metageneration = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The metageneration. + */ + @java.lang.Override + public long getMetageneration() { + return metageneration_; + } + + /** + * + * + *
+     * Output only. The metadata generation of this bucket.
+     * 
+ * + * int64 metageneration = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The metageneration to set. + * @return This builder for chaining. + */ + public Builder setMetageneration(long value) { + + metageneration_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The metadata generation of this bucket.
+     * 
+ * + * int64 metageneration = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearMetageneration() { + bitField0_ = (bitField0_ & ~0x00000010); + metageneration_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object location_ = ""; + + /** + * + * + *
+     * Immutable. The location of the bucket. Object data for objects in the
+     * bucket resides in physical storage within this region.  Defaults to `US`.
+     * Attempting to update this field after the bucket is created results in an
+     * error.
+     * 
+ * + * string location = 5 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The location. + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Immutable. The location of the bucket. Object data for objects in the
+     * bucket resides in physical storage within this region.  Defaults to `US`.
+     * Attempting to update this field after the bucket is created results in an
+     * error.
+     * 
+ * + * string location = 5 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The bytes for location. + */ + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Immutable. The location of the bucket. Object data for objects in the
+     * bucket resides in physical storage within this region.  Defaults to `US`.
+     * Attempting to update this field after the bucket is created results in an
+     * error.
+     * 
+ * + * string location = 5 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @param value The location to set. + * @return This builder for chaining. + */ + public Builder setLocation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + location_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. The location of the bucket. Object data for objects in the
+     * bucket resides in physical storage within this region.  Defaults to `US`.
+     * Attempting to update this field after the bucket is created results in an
+     * error.
+     * 
+ * + * string location = 5 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return This builder for chaining. + */ + public Builder clearLocation() { + location_ = getDefaultInstance().getLocation(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. The location of the bucket. Object data for objects in the
+     * bucket resides in physical storage within this region.  Defaults to `US`.
+     * Attempting to update this field after the bucket is created results in an
+     * error.
+     * 
+ * + * string location = 5 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @param value The bytes for location to set. + * @return This builder for chaining. + */ + public Builder setLocationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + location_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private java.lang.Object locationType_ = ""; + + /** + * + * + *
+     * Output only. The location type of the bucket (region, dual-region,
+     * multi-region, etc).
+     * 
+ * + * string location_type = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The locationType. + */ + public java.lang.String getLocationType() { + java.lang.Object ref = locationType_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. The location type of the bucket (region, dual-region,
+     * multi-region, etc).
+     * 
+ * + * string location_type = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for locationType. + */ + public com.google.protobuf.ByteString getLocationTypeBytes() { + java.lang.Object ref = locationType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. The location type of the bucket (region, dual-region,
+     * multi-region, etc).
+     * 
+ * + * string location_type = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The locationType to set. + * @return This builder for chaining. + */ + public Builder setLocationType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + locationType_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The location type of the bucket (region, dual-region,
+     * multi-region, etc).
+     * 
+ * + * string location_type = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearLocationType() { + locationType_ = getDefaultInstance().getLocationType(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The location type of the bucket (region, dual-region,
+     * multi-region, etc).
+     * 
+ * + * string location_type = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for locationType to set. + * @return This builder for chaining. + */ + public Builder setLocationTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + locationType_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + private java.lang.Object storageClass_ = ""; + + /** + * + * + *
+     * Optional. The bucket's default storage class, used whenever no storageClass
+     * is specified for a newly-created object. This defines how objects in the
+     * bucket are stored and determines the SLA and the cost of storage.
+     * If this value is not specified when the bucket is created, it defaults
+     * to `STANDARD`. For more information, see [Storage
+     * classes](https://developers.google.com/storage/docs/storage-classes).
+     * 
+ * + * string storage_class = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The storageClass. + */ + public java.lang.String getStorageClass() { + java.lang.Object ref = storageClass_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + storageClass_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The bucket's default storage class, used whenever no storageClass
+     * is specified for a newly-created object. This defines how objects in the
+     * bucket are stored and determines the SLA and the cost of storage.
+     * If this value is not specified when the bucket is created, it defaults
+     * to `STANDARD`. For more information, see [Storage
+     * classes](https://developers.google.com/storage/docs/storage-classes).
+     * 
+ * + * string storage_class = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for storageClass. + */ + public com.google.protobuf.ByteString getStorageClassBytes() { + java.lang.Object ref = storageClass_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + storageClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The bucket's default storage class, used whenever no storageClass
+     * is specified for a newly-created object. This defines how objects in the
+     * bucket are stored and determines the SLA and the cost of storage.
+     * If this value is not specified when the bucket is created, it defaults
+     * to `STANDARD`. For more information, see [Storage
+     * classes](https://developers.google.com/storage/docs/storage-classes).
+     * 
+ * + * string storage_class = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The storageClass to set. + * @return This builder for chaining. + */ + public Builder setStorageClass(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + storageClass_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's default storage class, used whenever no storageClass
+     * is specified for a newly-created object. This defines how objects in the
+     * bucket are stored and determines the SLA and the cost of storage.
+     * If this value is not specified when the bucket is created, it defaults
+     * to `STANDARD`. For more information, see [Storage
+     * classes](https://developers.google.com/storage/docs/storage-classes).
+     * 
+ * + * string storage_class = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearStorageClass() { + storageClass_ = getDefaultInstance().getStorageClass(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's default storage class, used whenever no storageClass
+     * is specified for a newly-created object. This defines how objects in the
+     * bucket are stored and determines the SLA and the cost of storage.
+     * If this value is not specified when the bucket is created, it defaults
+     * to `STANDARD`. For more information, see [Storage
+     * classes](https://developers.google.com/storage/docs/storage-classes).
+     * 
+ * + * string storage_class = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for storageClass to set. + * @return This builder for chaining. + */ + public Builder setStorageClassBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + storageClass_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + private java.lang.Object rpo_ = ""; + + /** + * + * + *
+     * Optional. The recovery point objective for cross-region replication of the
+     * bucket. Applicable only for dual- and multi-region buckets. `DEFAULT` uses
+     * default replication. `ASYNC_TURBO` enables turbo replication, valid for
+     * dual-region buckets only. If rpo is not specified when the bucket is
+     * created, it defaults to `DEFAULT`. For more information, see [Turbo
+     * replication](https://cloud.google.com/storage/docs/availability-durability#turbo-replication).
+     * 
+ * + * string rpo = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The rpo. + */ + public java.lang.String getRpo() { + java.lang.Object ref = rpo_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rpo_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The recovery point objective for cross-region replication of the
+     * bucket. Applicable only for dual- and multi-region buckets. `DEFAULT` uses
+     * default replication. `ASYNC_TURBO` enables turbo replication, valid for
+     * dual-region buckets only. If rpo is not specified when the bucket is
+     * created, it defaults to `DEFAULT`. For more information, see [Turbo
+     * replication](https://cloud.google.com/storage/docs/availability-durability#turbo-replication).
+     * 
+ * + * string rpo = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for rpo. + */ + public com.google.protobuf.ByteString getRpoBytes() { + java.lang.Object ref = rpo_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rpo_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The recovery point objective for cross-region replication of the
+     * bucket. Applicable only for dual- and multi-region buckets. `DEFAULT` uses
+     * default replication. `ASYNC_TURBO` enables turbo replication, valid for
+     * dual-region buckets only. If rpo is not specified when the bucket is
+     * created, it defaults to `DEFAULT`. For more information, see [Turbo
+     * replication](https://cloud.google.com/storage/docs/availability-durability#turbo-replication).
+     * 
+ * + * string rpo = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The rpo to set. + * @return This builder for chaining. + */ + public Builder setRpo(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + rpo_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The recovery point objective for cross-region replication of the
+     * bucket. Applicable only for dual- and multi-region buckets. `DEFAULT` uses
+     * default replication. `ASYNC_TURBO` enables turbo replication, valid for
+     * dual-region buckets only. If rpo is not specified when the bucket is
+     * created, it defaults to `DEFAULT`. For more information, see [Turbo
+     * replication](https://cloud.google.com/storage/docs/availability-durability#turbo-replication).
+     * 
+ * + * string rpo = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearRpo() { + rpo_ = getDefaultInstance().getRpo(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The recovery point objective for cross-region replication of the
+     * bucket. Applicable only for dual- and multi-region buckets. `DEFAULT` uses
+     * default replication. `ASYNC_TURBO` enables turbo replication, valid for
+     * dual-region buckets only. If rpo is not specified when the bucket is
+     * created, it defaults to `DEFAULT`. For more information, see [Turbo
+     * replication](https://cloud.google.com/storage/docs/availability-durability#turbo-replication).
+     * 
+ * + * string rpo = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for rpo to set. + * @return This builder for chaining. + */ + public Builder setRpoBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + rpo_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + private java.util.List acl_ = + java.util.Collections.emptyList(); + + private void ensureAclIsMutable() { + if (!((bitField0_ & 0x00000200) != 0)) { + acl_ = new java.util.ArrayList(acl_); + bitField0_ |= 0x00000200; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.BucketAccessControl, + com.google.storage.v2.BucketAccessControl.Builder, + com.google.storage.v2.BucketAccessControlOrBuilder> + aclBuilder_; + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl result in an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getAclList() { + if (aclBuilder_ == null) { + return java.util.Collections.unmodifiableList(acl_); + } else { + return aclBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl result in an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getAclCount() { + if (aclBuilder_ == null) { + return acl_.size(); + } else { + return aclBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl result in an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.BucketAccessControl getAcl(int index) { + if (aclBuilder_ == null) { + return acl_.get(index); + } else { + return aclBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl result in an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAcl(int index, com.google.storage.v2.BucketAccessControl value) { + if (aclBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAclIsMutable(); + acl_.set(index, value); + onChanged(); + } else { + aclBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl result in an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAcl( + int index, com.google.storage.v2.BucketAccessControl.Builder builderForValue) { + if (aclBuilder_ == null) { + ensureAclIsMutable(); + acl_.set(index, builderForValue.build()); + onChanged(); + } else { + aclBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl result in an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAcl(com.google.storage.v2.BucketAccessControl value) { + if (aclBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAclIsMutable(); + acl_.add(value); + onChanged(); + } else { + aclBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl result in an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAcl(int index, com.google.storage.v2.BucketAccessControl value) { + if (aclBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAclIsMutable(); + acl_.add(index, value); + onChanged(); + } else { + aclBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl result in an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAcl(com.google.storage.v2.BucketAccessControl.Builder builderForValue) { + if (aclBuilder_ == null) { + ensureAclIsMutable(); + acl_.add(builderForValue.build()); + onChanged(); + } else { + aclBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAcl( + int index, com.google.storage.v2.BucketAccessControl.Builder builderForValue) { + if (aclBuilder_ == null) { + ensureAclIsMutable(); + acl_.add(index, builderForValue.build()); + onChanged(); + } else { + aclBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllAcl( + java.lang.Iterable values) { + if (aclBuilder_ == null) { + ensureAclIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, acl_); + onChanged(); + } else { + aclBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearAcl() { + if (aclBuilder_ == null) { + acl_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + onChanged(); + } else { + aclBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeAcl(int index) { + if (aclBuilder_ == null) { + ensureAclIsMutable(); + acl_.remove(index); + onChanged(); + } else { + aclBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.BucketAccessControl.Builder getAclBuilder(int index) { + return internalGetAclFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.BucketAccessControlOrBuilder getAclOrBuilder(int index) { + if (aclBuilder_ == null) { + return acl_.get(index); + } else { + return aclBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getAclOrBuilderList() { + if (aclBuilder_ != null) { + return aclBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(acl_); + } + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.BucketAccessControl.Builder addAclBuilder() { + return internalGetAclFieldBuilder() + .addBuilder(com.google.storage.v2.BucketAccessControl.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.BucketAccessControl.Builder addAclBuilder(int index) { + return internalGetAclFieldBuilder() + .addBuilder(index, com.google.storage.v2.BucketAccessControl.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Access controls on the bucket.
+     * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+     * requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getAclBuilderList() { + return internalGetAclFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.BucketAccessControl, + com.google.storage.v2.BucketAccessControl.Builder, + com.google.storage.v2.BucketAccessControlOrBuilder> + internalGetAclFieldBuilder() { + if (aclBuilder_ == null) { + aclBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.BucketAccessControl, + com.google.storage.v2.BucketAccessControl.Builder, + com.google.storage.v2.BucketAccessControlOrBuilder>( + acl_, ((bitField0_ & 0x00000200) != 0), getParentForChildren(), isClean()); + acl_ = null; + } + return aclBuilder_; + } + + private java.util.List defaultObjectAcl_ = + java.util.Collections.emptyList(); + + private void ensureDefaultObjectAclIsMutable() { + if (!((bitField0_ & 0x00000400) != 0)) { + defaultObjectAcl_ = + new java.util.ArrayList(defaultObjectAcl_); + bitField0_ |= 0x00000400; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ObjectAccessControl, + com.google.storage.v2.ObjectAccessControl.Builder, + com.google.storage.v2.ObjectAccessControlOrBuilder> + defaultObjectAclBuilder_; + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getDefaultObjectAclList() { + if (defaultObjectAclBuilder_ == null) { + return java.util.Collections.unmodifiableList(defaultObjectAcl_); + } else { + return defaultObjectAclBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getDefaultObjectAclCount() { + if (defaultObjectAclBuilder_ == null) { + return defaultObjectAcl_.size(); + } else { + return defaultObjectAclBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectAccessControl getDefaultObjectAcl(int index) { + if (defaultObjectAclBuilder_ == null) { + return defaultObjectAcl_.get(index); + } else { + return defaultObjectAclBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setDefaultObjectAcl(int index, com.google.storage.v2.ObjectAccessControl value) { + if (defaultObjectAclBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDefaultObjectAclIsMutable(); + defaultObjectAcl_.set(index, value); + onChanged(); + } else { + defaultObjectAclBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setDefaultObjectAcl( + int index, com.google.storage.v2.ObjectAccessControl.Builder builderForValue) { + if (defaultObjectAclBuilder_ == null) { + ensureDefaultObjectAclIsMutable(); + defaultObjectAcl_.set(index, builderForValue.build()); + onChanged(); + } else { + defaultObjectAclBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addDefaultObjectAcl(com.google.storage.v2.ObjectAccessControl value) { + if (defaultObjectAclBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDefaultObjectAclIsMutable(); + defaultObjectAcl_.add(value); + onChanged(); + } else { + defaultObjectAclBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addDefaultObjectAcl(int index, com.google.storage.v2.ObjectAccessControl value) { + if (defaultObjectAclBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDefaultObjectAclIsMutable(); + defaultObjectAcl_.add(index, value); + onChanged(); + } else { + defaultObjectAclBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addDefaultObjectAcl( + com.google.storage.v2.ObjectAccessControl.Builder builderForValue) { + if (defaultObjectAclBuilder_ == null) { + ensureDefaultObjectAclIsMutable(); + defaultObjectAcl_.add(builderForValue.build()); + onChanged(); + } else { + defaultObjectAclBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addDefaultObjectAcl( + int index, com.google.storage.v2.ObjectAccessControl.Builder builderForValue) { + if (defaultObjectAclBuilder_ == null) { + ensureDefaultObjectAclIsMutable(); + defaultObjectAcl_.add(index, builderForValue.build()); + onChanged(); + } else { + defaultObjectAclBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllDefaultObjectAcl( + java.lang.Iterable values) { + if (defaultObjectAclBuilder_ == null) { + ensureDefaultObjectAclIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, defaultObjectAcl_); + onChanged(); + } else { + defaultObjectAclBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearDefaultObjectAcl() { + if (defaultObjectAclBuilder_ == null) { + defaultObjectAcl_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000400); + onChanged(); + } else { + defaultObjectAclBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeDefaultObjectAcl(int index) { + if (defaultObjectAclBuilder_ == null) { + ensureDefaultObjectAclIsMutable(); + defaultObjectAcl_.remove(index); + onChanged(); + } else { + defaultObjectAclBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectAccessControl.Builder getDefaultObjectAclBuilder(int index) { + return internalGetDefaultObjectAclFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectAccessControlOrBuilder getDefaultObjectAclOrBuilder( + int index) { + if (defaultObjectAclBuilder_ == null) { + return defaultObjectAcl_.get(index); + } else { + return defaultObjectAclBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getDefaultObjectAclOrBuilderList() { + if (defaultObjectAclBuilder_ != null) { + return defaultObjectAclBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(defaultObjectAcl_); + } + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectAccessControl.Builder addDefaultObjectAclBuilder() { + return internalGetDefaultObjectAclFieldBuilder() + .addBuilder(com.google.storage.v2.ObjectAccessControl.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectAccessControl.Builder addDefaultObjectAclBuilder(int index) { + return internalGetDefaultObjectAclFieldBuilder() + .addBuilder(index, com.google.storage.v2.ObjectAccessControl.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Default access controls to apply to new objects when no ACL is
+     * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getDefaultObjectAclBuilderList() { + return internalGetDefaultObjectAclFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ObjectAccessControl, + com.google.storage.v2.ObjectAccessControl.Builder, + com.google.storage.v2.ObjectAccessControlOrBuilder> + internalGetDefaultObjectAclFieldBuilder() { + if (defaultObjectAclBuilder_ == null) { + defaultObjectAclBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ObjectAccessControl, + com.google.storage.v2.ObjectAccessControl.Builder, + com.google.storage.v2.ObjectAccessControlOrBuilder>( + defaultObjectAcl_, + ((bitField0_ & 0x00000400) != 0), + getParentForChildren(), + isClean()); + defaultObjectAcl_ = null; + } + return defaultObjectAclBuilder_; + } + + private com.google.storage.v2.Bucket.Lifecycle lifecycle_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Lifecycle, + com.google.storage.v2.Bucket.Lifecycle.Builder, + com.google.storage.v2.Bucket.LifecycleOrBuilder> + lifecycleBuilder_; + + /** + * + * + *
+     * Optional. The bucket's lifecycle configuration. See [Lifecycle
+     * Management](https://developers.google.com/storage/docs/lifecycle) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the lifecycle field is set. + */ + public boolean hasLifecycle() { + return ((bitField0_ & 0x00000800) != 0); + } + + /** + * + * + *
+     * Optional. The bucket's lifecycle configuration. See [Lifecycle
+     * Management](https://developers.google.com/storage/docs/lifecycle) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lifecycle. + */ + public com.google.storage.v2.Bucket.Lifecycle getLifecycle() { + if (lifecycleBuilder_ == null) { + return lifecycle_ == null + ? com.google.storage.v2.Bucket.Lifecycle.getDefaultInstance() + : lifecycle_; + } else { + return lifecycleBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The bucket's lifecycle configuration. See [Lifecycle
+     * Management](https://developers.google.com/storage/docs/lifecycle) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setLifecycle(com.google.storage.v2.Bucket.Lifecycle value) { + if (lifecycleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + lifecycle_ = value; + } else { + lifecycleBuilder_.setMessage(value); + } + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's lifecycle configuration. See [Lifecycle
+     * Management](https://developers.google.com/storage/docs/lifecycle) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setLifecycle(com.google.storage.v2.Bucket.Lifecycle.Builder builderForValue) { + if (lifecycleBuilder_ == null) { + lifecycle_ = builderForValue.build(); + } else { + lifecycleBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's lifecycle configuration. See [Lifecycle
+     * Management](https://developers.google.com/storage/docs/lifecycle) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeLifecycle(com.google.storage.v2.Bucket.Lifecycle value) { + if (lifecycleBuilder_ == null) { + if (((bitField0_ & 0x00000800) != 0) + && lifecycle_ != null + && lifecycle_ != com.google.storage.v2.Bucket.Lifecycle.getDefaultInstance()) { + getLifecycleBuilder().mergeFrom(value); + } else { + lifecycle_ = value; + } + } else { + lifecycleBuilder_.mergeFrom(value); + } + if (lifecycle_ != null) { + bitField0_ |= 0x00000800; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's lifecycle configuration. See [Lifecycle
+     * Management](https://developers.google.com/storage/docs/lifecycle) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearLifecycle() { + bitField0_ = (bitField0_ & ~0x00000800); + lifecycle_ = null; + if (lifecycleBuilder_ != null) { + lifecycleBuilder_.dispose(); + lifecycleBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's lifecycle configuration. See [Lifecycle
+     * Management](https://developers.google.com/storage/docs/lifecycle) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Lifecycle.Builder getLifecycleBuilder() { + bitField0_ |= 0x00000800; + onChanged(); + return internalGetLifecycleFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The bucket's lifecycle configuration. See [Lifecycle
+     * Management](https://developers.google.com/storage/docs/lifecycle) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.LifecycleOrBuilder getLifecycleOrBuilder() { + if (lifecycleBuilder_ != null) { + return lifecycleBuilder_.getMessageOrBuilder(); + } else { + return lifecycle_ == null + ? com.google.storage.v2.Bucket.Lifecycle.getDefaultInstance() + : lifecycle_; + } + } + + /** + * + * + *
+     * Optional. The bucket's lifecycle configuration. See [Lifecycle
+     * Management](https://developers.google.com/storage/docs/lifecycle) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Lifecycle, + com.google.storage.v2.Bucket.Lifecycle.Builder, + com.google.storage.v2.Bucket.LifecycleOrBuilder> + internalGetLifecycleFieldBuilder() { + if (lifecycleBuilder_ == null) { + lifecycleBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Lifecycle, + com.google.storage.v2.Bucket.Lifecycle.Builder, + com.google.storage.v2.Bucket.LifecycleOrBuilder>( + getLifecycle(), getParentForChildren(), isClean()); + lifecycle_ = null; + } + return lifecycleBuilder_; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
+     * Output only. The creation time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00001000) != 0); + } + + /** + * + * + *
+     * Output only. The creation time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The creation time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00001000) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00001000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00001000); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00001000; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The creation time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
+     * Output only. The creation time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private java.util.List cors_ = + java.util.Collections.emptyList(); + + private void ensureCorsIsMutable() { + if (!((bitField0_ & 0x00002000) != 0)) { + cors_ = new java.util.ArrayList(cors_); + bitField0_ |= 0x00002000; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Bucket.Cors, + com.google.storage.v2.Bucket.Cors.Builder, + com.google.storage.v2.Bucket.CorsOrBuilder> + corsBuilder_; + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getCorsList() { + if (corsBuilder_ == null) { + return java.util.Collections.unmodifiableList(cors_); + } else { + return corsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getCorsCount() { + if (corsBuilder_ == null) { + return cors_.size(); + } else { + return corsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Cors getCors(int index) { + if (corsBuilder_ == null) { + return cors_.get(index); + } else { + return corsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCors(int index, com.google.storage.v2.Bucket.Cors value) { + if (corsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCorsIsMutable(); + cors_.set(index, value); + onChanged(); + } else { + corsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCors(int index, com.google.storage.v2.Bucket.Cors.Builder builderForValue) { + if (corsBuilder_ == null) { + ensureCorsIsMutable(); + cors_.set(index, builderForValue.build()); + onChanged(); + } else { + corsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addCors(com.google.storage.v2.Bucket.Cors value) { + if (corsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCorsIsMutable(); + cors_.add(value); + onChanged(); + } else { + corsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addCors(int index, com.google.storage.v2.Bucket.Cors value) { + if (corsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCorsIsMutable(); + cors_.add(index, value); + onChanged(); + } else { + corsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addCors(com.google.storage.v2.Bucket.Cors.Builder builderForValue) { + if (corsBuilder_ == null) { + ensureCorsIsMutable(); + cors_.add(builderForValue.build()); + onChanged(); + } else { + corsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addCors(int index, com.google.storage.v2.Bucket.Cors.Builder builderForValue) { + if (corsBuilder_ == null) { + ensureCorsIsMutable(); + cors_.add(index, builderForValue.build()); + onChanged(); + } else { + corsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllCors( + java.lang.Iterable values) { + if (corsBuilder_ == null) { + ensureCorsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, cors_); + onChanged(); + } else { + corsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCors() { + if (corsBuilder_ == null) { + cors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00002000); + onChanged(); + } else { + corsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeCors(int index) { + if (corsBuilder_ == null) { + ensureCorsIsMutable(); + cors_.remove(index); + onChanged(); + } else { + corsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Cors.Builder getCorsBuilder(int index) { + return internalGetCorsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.CorsOrBuilder getCorsOrBuilder(int index) { + if (corsBuilder_ == null) { + return cors_.get(index); + } else { + return corsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getCorsOrBuilderList() { + if (corsBuilder_ != null) { + return corsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(cors_); + } + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Cors.Builder addCorsBuilder() { + return internalGetCorsFieldBuilder() + .addBuilder(com.google.storage.v2.Bucket.Cors.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Cors.Builder addCorsBuilder(int index) { + return internalGetCorsFieldBuilder() + .addBuilder(index, com.google.storage.v2.Bucket.Cors.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+     * configuration.
+     * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getCorsBuilderList() { + return internalGetCorsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Bucket.Cors, + com.google.storage.v2.Bucket.Cors.Builder, + com.google.storage.v2.Bucket.CorsOrBuilder> + internalGetCorsFieldBuilder() { + if (corsBuilder_ == null) { + corsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Bucket.Cors, + com.google.storage.v2.Bucket.Cors.Builder, + com.google.storage.v2.Bucket.CorsOrBuilder>( + cors_, ((bitField0_ & 0x00002000) != 0), getParentForChildren(), isClean()); + cors_ = null; + } + return corsBuilder_; + } + + private com.google.protobuf.Timestamp updateTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateTimeBuilder_; + + /** + * + * + *
+     * Output only. The modification time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00004000) != 0); + } + + /** + * + * + *
+     * Output only. The modification time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The modification time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + } else { + updateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (((bitField0_ & 0x00004000) != 0) + && updateTime_ != null + && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateTimeBuilder().mergeFrom(value); + } else { + updateTime_ = value; + } + } else { + updateTimeBuilder_.mergeFrom(value); + } + if (updateTime_ != null) { + bitField0_ |= 0x00004000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateTime() { + bitField0_ = (bitField0_ & ~0x00004000); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + bitField0_ |= 0x00004000; + onChanged(); + return internalGetUpdateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The modification time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } + } + + /** + * + * + *
+     * Output only. The modification time of the bucket.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetUpdateTimeFieldBuilder() { + if (updateTimeBuilder_ == null) { + updateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpdateTime(), getParentForChildren(), isClean()); + updateTime_ = null; + } + return updateTimeBuilder_; + } + + private boolean defaultEventBasedHold_; + + /** + * + * + *
+     * Optional. The default value for event-based hold on newly created objects
+     * in this bucket.  Event-based hold is a way to retain objects indefinitely
+     * until an event occurs, signified by the hold's release. After being
+     * released, such objects are subject to bucket-level retention (if any).  One
+     * sample use case of this flag is for banks to hold loan documents for at
+     * least 3 years after loan is paid in full. Here, bucket-level retention is 3
+     * years and the event is loan being paid in full. In this example, these
+     * objects are held intact for any number of years until the event has
+     * occurred (event-based hold on the object is released) and then 3 more years
+     * after that. That means retention duration of the objects begins from the
+     * moment event-based hold transitioned from true to false.  Objects under
+     * event-based hold cannot be deleted, overwritten or archived until the hold
+     * is removed.
+     * 
+ * + * bool default_event_based_hold = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The defaultEventBasedHold. + */ + @java.lang.Override + public boolean getDefaultEventBasedHold() { + return defaultEventBasedHold_; + } + + /** + * + * + *
+     * Optional. The default value for event-based hold on newly created objects
+     * in this bucket.  Event-based hold is a way to retain objects indefinitely
+     * until an event occurs, signified by the hold's release. After being
+     * released, such objects are subject to bucket-level retention (if any).  One
+     * sample use case of this flag is for banks to hold loan documents for at
+     * least 3 years after loan is paid in full. Here, bucket-level retention is 3
+     * years and the event is loan being paid in full. In this example, these
+     * objects are held intact for any number of years until the event has
+     * occurred (event-based hold on the object is released) and then 3 more years
+     * after that. That means retention duration of the objects begins from the
+     * moment event-based hold transitioned from true to false.  Objects under
+     * event-based hold cannot be deleted, overwritten or archived until the hold
+     * is removed.
+     * 
+ * + * bool default_event_based_hold = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The defaultEventBasedHold to set. + * @return This builder for chaining. + */ + public Builder setDefaultEventBasedHold(boolean value) { + + defaultEventBasedHold_ = value; + bitField0_ |= 0x00008000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The default value for event-based hold on newly created objects
+     * in this bucket.  Event-based hold is a way to retain objects indefinitely
+     * until an event occurs, signified by the hold's release. After being
+     * released, such objects are subject to bucket-level retention (if any).  One
+     * sample use case of this flag is for banks to hold loan documents for at
+     * least 3 years after loan is paid in full. Here, bucket-level retention is 3
+     * years and the event is loan being paid in full. In this example, these
+     * objects are held intact for any number of years until the event has
+     * occurred (event-based hold on the object is released) and then 3 more years
+     * after that. That means retention duration of the objects begins from the
+     * moment event-based hold transitioned from true to false.  Objects under
+     * event-based hold cannot be deleted, overwritten or archived until the hold
+     * is removed.
+     * 
+ * + * bool default_event_based_hold = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDefaultEventBasedHold() { + bitField0_ = (bitField0_ & ~0x00008000); + defaultEventBasedHold_ = false; + onChanged(); + return this; + } + + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + private com.google.protobuf.MapField + internalGetMutableLabels() { + if (labels_ == null) { + labels_ = com.google.protobuf.MapField.newMapField(LabelsDefaultEntryHolder.defaultEntry); + } + if (!labels_.isMutable()) { + labels_ = labels_.copy(); + } + bitField0_ |= 0x00010000; + onChanged(); + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
+     * Optional. User-provided labels, in key/value pairs.
+     * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
+     * Optional. User-provided labels, in key/value pairs.
+     * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
+     * Optional. User-provided labels, in key/value pairs.
+     * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+     * Optional. User-provided labels, in key/value pairs.
+     * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + bitField0_ = (bitField0_ & ~0x00010000); + internalGetMutableLabels().getMutableMap().clear(); + return this; + } + + /** + * + * + *
+     * Optional. User-provided labels, in key/value pairs.
+     * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder removeLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableLabels().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableLabels() { + bitField0_ |= 0x00010000; + return internalGetMutableLabels().getMutableMap(); + } + + /** + * + * + *
+     * Optional. User-provided labels, in key/value pairs.
+     * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder putLabels(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableLabels().getMutableMap().put(key, value); + bitField0_ |= 0x00010000; + return this; + } + + /** + * + * + *
+     * Optional. User-provided labels, in key/value pairs.
+     * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder putAllLabels(java.util.Map values) { + internalGetMutableLabels().getMutableMap().putAll(values); + bitField0_ |= 0x00010000; + return this; + } + + private com.google.storage.v2.Bucket.Website website_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Website, + com.google.storage.v2.Bucket.Website.Builder, + com.google.storage.v2.Bucket.WebsiteOrBuilder> + websiteBuilder_; + + /** + * + * + *
+     * Optional. The bucket's website config, controlling how the service behaves
+     * when accessing bucket contents as a web site. See the [Static website
+     * examples](https://cloud.google.com/storage/docs/static-website) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the website field is set. + */ + public boolean hasWebsite() { + return ((bitField0_ & 0x00020000) != 0); + } + + /** + * + * + *
+     * Optional. The bucket's website config, controlling how the service behaves
+     * when accessing bucket contents as a web site. See the [Static website
+     * examples](https://cloud.google.com/storage/docs/static-website) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The website. + */ + public com.google.storage.v2.Bucket.Website getWebsite() { + if (websiteBuilder_ == null) { + return website_ == null + ? com.google.storage.v2.Bucket.Website.getDefaultInstance() + : website_; + } else { + return websiteBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The bucket's website config, controlling how the service behaves
+     * when accessing bucket contents as a web site. See the [Static website
+     * examples](https://cloud.google.com/storage/docs/static-website) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setWebsite(com.google.storage.v2.Bucket.Website value) { + if (websiteBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + website_ = value; + } else { + websiteBuilder_.setMessage(value); + } + bitField0_ |= 0x00020000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's website config, controlling how the service behaves
+     * when accessing bucket contents as a web site. See the [Static website
+     * examples](https://cloud.google.com/storage/docs/static-website) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setWebsite(com.google.storage.v2.Bucket.Website.Builder builderForValue) { + if (websiteBuilder_ == null) { + website_ = builderForValue.build(); + } else { + websiteBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00020000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's website config, controlling how the service behaves
+     * when accessing bucket contents as a web site. See the [Static website
+     * examples](https://cloud.google.com/storage/docs/static-website) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeWebsite(com.google.storage.v2.Bucket.Website value) { + if (websiteBuilder_ == null) { + if (((bitField0_ & 0x00020000) != 0) + && website_ != null + && website_ != com.google.storage.v2.Bucket.Website.getDefaultInstance()) { + getWebsiteBuilder().mergeFrom(value); + } else { + website_ = value; + } + } else { + websiteBuilder_.mergeFrom(value); + } + if (website_ != null) { + bitField0_ |= 0x00020000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's website config, controlling how the service behaves
+     * when accessing bucket contents as a web site. See the [Static website
+     * examples](https://cloud.google.com/storage/docs/static-website) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearWebsite() { + bitField0_ = (bitField0_ & ~0x00020000); + website_ = null; + if (websiteBuilder_ != null) { + websiteBuilder_.dispose(); + websiteBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's website config, controlling how the service behaves
+     * when accessing bucket contents as a web site. See the [Static website
+     * examples](https://cloud.google.com/storage/docs/static-website) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Website.Builder getWebsiteBuilder() { + bitField0_ |= 0x00020000; + onChanged(); + return internalGetWebsiteFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The bucket's website config, controlling how the service behaves
+     * when accessing bucket contents as a web site. See the [Static website
+     * examples](https://cloud.google.com/storage/docs/static-website) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.WebsiteOrBuilder getWebsiteOrBuilder() { + if (websiteBuilder_ != null) { + return websiteBuilder_.getMessageOrBuilder(); + } else { + return website_ == null + ? com.google.storage.v2.Bucket.Website.getDefaultInstance() + : website_; + } + } + + /** + * + * + *
+     * Optional. The bucket's website config, controlling how the service behaves
+     * when accessing bucket contents as a web site. See the [Static website
+     * examples](https://cloud.google.com/storage/docs/static-website) for more
+     * information.
+     * 
+ * + * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Website, + com.google.storage.v2.Bucket.Website.Builder, + com.google.storage.v2.Bucket.WebsiteOrBuilder> + internalGetWebsiteFieldBuilder() { + if (websiteBuilder_ == null) { + websiteBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Website, + com.google.storage.v2.Bucket.Website.Builder, + com.google.storage.v2.Bucket.WebsiteOrBuilder>( + getWebsite(), getParentForChildren(), isClean()); + website_ = null; + } + return websiteBuilder_; + } + + private com.google.storage.v2.Bucket.Versioning versioning_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Versioning, + com.google.storage.v2.Bucket.Versioning.Builder, + com.google.storage.v2.Bucket.VersioningOrBuilder> + versioningBuilder_; + + /** + * + * + *
+     * Optional. The bucket's versioning configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the versioning field is set. + */ + public boolean hasVersioning() { + return ((bitField0_ & 0x00040000) != 0); + } + + /** + * + * + *
+     * Optional. The bucket's versioning configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The versioning. + */ + public com.google.storage.v2.Bucket.Versioning getVersioning() { + if (versioningBuilder_ == null) { + return versioning_ == null + ? com.google.storage.v2.Bucket.Versioning.getDefaultInstance() + : versioning_; + } else { + return versioningBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The bucket's versioning configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setVersioning(com.google.storage.v2.Bucket.Versioning value) { + if (versioningBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + versioning_ = value; + } else { + versioningBuilder_.setMessage(value); + } + bitField0_ |= 0x00040000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's versioning configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setVersioning(com.google.storage.v2.Bucket.Versioning.Builder builderForValue) { + if (versioningBuilder_ == null) { + versioning_ = builderForValue.build(); + } else { + versioningBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00040000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's versioning configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeVersioning(com.google.storage.v2.Bucket.Versioning value) { + if (versioningBuilder_ == null) { + if (((bitField0_ & 0x00040000) != 0) + && versioning_ != null + && versioning_ != com.google.storage.v2.Bucket.Versioning.getDefaultInstance()) { + getVersioningBuilder().mergeFrom(value); + } else { + versioning_ = value; + } + } else { + versioningBuilder_.mergeFrom(value); + } + if (versioning_ != null) { + bitField0_ |= 0x00040000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's versioning configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearVersioning() { + bitField0_ = (bitField0_ & ~0x00040000); + versioning_ = null; + if (versioningBuilder_ != null) { + versioningBuilder_.dispose(); + versioningBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's versioning configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Versioning.Builder getVersioningBuilder() { + bitField0_ |= 0x00040000; + onChanged(); + return internalGetVersioningFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The bucket's versioning configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.VersioningOrBuilder getVersioningOrBuilder() { + if (versioningBuilder_ != null) { + return versioningBuilder_.getMessageOrBuilder(); + } else { + return versioning_ == null + ? com.google.storage.v2.Bucket.Versioning.getDefaultInstance() + : versioning_; + } + } + + /** + * + * + *
+     * Optional. The bucket's versioning configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Versioning, + com.google.storage.v2.Bucket.Versioning.Builder, + com.google.storage.v2.Bucket.VersioningOrBuilder> + internalGetVersioningFieldBuilder() { + if (versioningBuilder_ == null) { + versioningBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Versioning, + com.google.storage.v2.Bucket.Versioning.Builder, + com.google.storage.v2.Bucket.VersioningOrBuilder>( + getVersioning(), getParentForChildren(), isClean()); + versioning_ = null; + } + return versioningBuilder_; + } + + private com.google.storage.v2.Bucket.Logging logging_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Logging, + com.google.storage.v2.Bucket.Logging.Builder, + com.google.storage.v2.Bucket.LoggingOrBuilder> + loggingBuilder_; + + /** + * + * + *
+     * Optional. The bucket's logging config, which defines the destination bucket
+     * and name prefix (if any) for the current bucket's logs.
+     * 
+ * + * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the logging field is set. + */ + public boolean hasLogging() { + return ((bitField0_ & 0x00080000) != 0); + } + + /** + * + * + *
+     * Optional. The bucket's logging config, which defines the destination bucket
+     * and name prefix (if any) for the current bucket's logs.
+     * 
+ * + * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The logging. + */ + public com.google.storage.v2.Bucket.Logging getLogging() { + if (loggingBuilder_ == null) { + return logging_ == null + ? com.google.storage.v2.Bucket.Logging.getDefaultInstance() + : logging_; + } else { + return loggingBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The bucket's logging config, which defines the destination bucket
+     * and name prefix (if any) for the current bucket's logs.
+     * 
+ * + * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setLogging(com.google.storage.v2.Bucket.Logging value) { + if (loggingBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + logging_ = value; + } else { + loggingBuilder_.setMessage(value); + } + bitField0_ |= 0x00080000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's logging config, which defines the destination bucket
+     * and name prefix (if any) for the current bucket's logs.
+     * 
+ * + * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setLogging(com.google.storage.v2.Bucket.Logging.Builder builderForValue) { + if (loggingBuilder_ == null) { + logging_ = builderForValue.build(); + } else { + loggingBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00080000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's logging config, which defines the destination bucket
+     * and name prefix (if any) for the current bucket's logs.
+     * 
+ * + * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeLogging(com.google.storage.v2.Bucket.Logging value) { + if (loggingBuilder_ == null) { + if (((bitField0_ & 0x00080000) != 0) + && logging_ != null + && logging_ != com.google.storage.v2.Bucket.Logging.getDefaultInstance()) { + getLoggingBuilder().mergeFrom(value); + } else { + logging_ = value; + } + } else { + loggingBuilder_.mergeFrom(value); + } + if (logging_ != null) { + bitField0_ |= 0x00080000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's logging config, which defines the destination bucket
+     * and name prefix (if any) for the current bucket's logs.
+     * 
+ * + * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearLogging() { + bitField0_ = (bitField0_ & ~0x00080000); + logging_ = null; + if (loggingBuilder_ != null) { + loggingBuilder_.dispose(); + loggingBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's logging config, which defines the destination bucket
+     * and name prefix (if any) for the current bucket's logs.
+     * 
+ * + * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Logging.Builder getLoggingBuilder() { + bitField0_ |= 0x00080000; + onChanged(); + return internalGetLoggingFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The bucket's logging config, which defines the destination bucket
+     * and name prefix (if any) for the current bucket's logs.
+     * 
+ * + * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.LoggingOrBuilder getLoggingOrBuilder() { + if (loggingBuilder_ != null) { + return loggingBuilder_.getMessageOrBuilder(); + } else { + return logging_ == null + ? com.google.storage.v2.Bucket.Logging.getDefaultInstance() + : logging_; + } + } + + /** + * + * + *
+     * Optional. The bucket's logging config, which defines the destination bucket
+     * and name prefix (if any) for the current bucket's logs.
+     * 
+ * + * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Logging, + com.google.storage.v2.Bucket.Logging.Builder, + com.google.storage.v2.Bucket.LoggingOrBuilder> + internalGetLoggingFieldBuilder() { + if (loggingBuilder_ == null) { + loggingBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Logging, + com.google.storage.v2.Bucket.Logging.Builder, + com.google.storage.v2.Bucket.LoggingOrBuilder>( + getLogging(), getParentForChildren(), isClean()); + logging_ = null; + } + return loggingBuilder_; + } + + private com.google.storage.v2.Owner owner_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Owner, + com.google.storage.v2.Owner.Builder, + com.google.storage.v2.OwnerOrBuilder> + ownerBuilder_; + + /** + * + * + *
+     * Output only. The owner of the bucket. This is always the project team's
+     * owner group.
+     * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the owner field is set. + */ + public boolean hasOwner() { + return ((bitField0_ & 0x00100000) != 0); + } + + /** + * + * + *
+     * Output only. The owner of the bucket. This is always the project team's
+     * owner group.
+     * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The owner. + */ + public com.google.storage.v2.Owner getOwner() { + if (ownerBuilder_ == null) { + return owner_ == null ? com.google.storage.v2.Owner.getDefaultInstance() : owner_; + } else { + return ownerBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The owner of the bucket. This is always the project team's
+     * owner group.
+     * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setOwner(com.google.storage.v2.Owner value) { + if (ownerBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + owner_ = value; + } else { + ownerBuilder_.setMessage(value); + } + bitField0_ |= 0x00100000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The owner of the bucket. This is always the project team's
+     * owner group.
+     * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setOwner(com.google.storage.v2.Owner.Builder builderForValue) { + if (ownerBuilder_ == null) { + owner_ = builderForValue.build(); + } else { + ownerBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00100000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The owner of the bucket. This is always the project team's
+     * owner group.
+     * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeOwner(com.google.storage.v2.Owner value) { + if (ownerBuilder_ == null) { + if (((bitField0_ & 0x00100000) != 0) + && owner_ != null + && owner_ != com.google.storage.v2.Owner.getDefaultInstance()) { + getOwnerBuilder().mergeFrom(value); + } else { + owner_ = value; + } + } else { + ownerBuilder_.mergeFrom(value); + } + if (owner_ != null) { + bitField0_ |= 0x00100000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The owner of the bucket. This is always the project team's
+     * owner group.
+     * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearOwner() { + bitField0_ = (bitField0_ & ~0x00100000); + owner_ = null; + if (ownerBuilder_ != null) { + ownerBuilder_.dispose(); + ownerBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The owner of the bucket. This is always the project team's
+     * owner group.
+     * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.v2.Owner.Builder getOwnerBuilder() { + bitField0_ |= 0x00100000; + onChanged(); + return internalGetOwnerFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The owner of the bucket. This is always the project team's
+     * owner group.
+     * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.v2.OwnerOrBuilder getOwnerOrBuilder() { + if (ownerBuilder_ != null) { + return ownerBuilder_.getMessageOrBuilder(); + } else { + return owner_ == null ? com.google.storage.v2.Owner.getDefaultInstance() : owner_; + } + } + + /** + * + * + *
+     * Output only. The owner of the bucket. This is always the project team's
+     * owner group.
+     * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Owner, + com.google.storage.v2.Owner.Builder, + com.google.storage.v2.OwnerOrBuilder> + internalGetOwnerFieldBuilder() { + if (ownerBuilder_ == null) { + ownerBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Owner, + com.google.storage.v2.Owner.Builder, + com.google.storage.v2.OwnerOrBuilder>( + getOwner(), getParentForChildren(), isClean()); + owner_ = null; + } + return ownerBuilder_; + } + + private com.google.storage.v2.Bucket.Encryption encryption_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Encryption, + com.google.storage.v2.Bucket.Encryption.Builder, + com.google.storage.v2.Bucket.EncryptionOrBuilder> + encryptionBuilder_; + + /** + * + * + *
+     * Optional. Encryption config for a bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryption field is set. + */ + public boolean hasEncryption() { + return ((bitField0_ & 0x00200000) != 0); + } + + /** + * + * + *
+     * Optional. Encryption config for a bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryption. + */ + public com.google.storage.v2.Bucket.Encryption getEncryption() { + if (encryptionBuilder_ == null) { + return encryption_ == null + ? com.google.storage.v2.Bucket.Encryption.getDefaultInstance() + : encryption_; + } else { + return encryptionBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Encryption config for a bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryption(com.google.storage.v2.Bucket.Encryption value) { + if (encryptionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + encryption_ = value; + } else { + encryptionBuilder_.setMessage(value); + } + bitField0_ |= 0x00200000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Encryption config for a bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryption(com.google.storage.v2.Bucket.Encryption.Builder builderForValue) { + if (encryptionBuilder_ == null) { + encryption_ = builderForValue.build(); + } else { + encryptionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00200000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Encryption config for a bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeEncryption(com.google.storage.v2.Bucket.Encryption value) { + if (encryptionBuilder_ == null) { + if (((bitField0_ & 0x00200000) != 0) + && encryption_ != null + && encryption_ != com.google.storage.v2.Bucket.Encryption.getDefaultInstance()) { + getEncryptionBuilder().mergeFrom(value); + } else { + encryption_ = value; + } + } else { + encryptionBuilder_.mergeFrom(value); + } + if (encryption_ != null) { + bitField0_ |= 0x00200000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Encryption config for a bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearEncryption() { + bitField0_ = (bitField0_ & ~0x00200000); + encryption_ = null; + if (encryptionBuilder_ != null) { + encryptionBuilder_.dispose(); + encryptionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Encryption config for a bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Encryption.Builder getEncryptionBuilder() { + bitField0_ |= 0x00200000; + onChanged(); + return internalGetEncryptionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Encryption config for a bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.EncryptionOrBuilder getEncryptionOrBuilder() { + if (encryptionBuilder_ != null) { + return encryptionBuilder_.getMessageOrBuilder(); + } else { + return encryption_ == null + ? com.google.storage.v2.Bucket.Encryption.getDefaultInstance() + : encryption_; + } + } + + /** + * + * + *
+     * Optional. Encryption config for a bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Encryption, + com.google.storage.v2.Bucket.Encryption.Builder, + com.google.storage.v2.Bucket.EncryptionOrBuilder> + internalGetEncryptionFieldBuilder() { + if (encryptionBuilder_ == null) { + encryptionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Encryption, + com.google.storage.v2.Bucket.Encryption.Builder, + com.google.storage.v2.Bucket.EncryptionOrBuilder>( + getEncryption(), getParentForChildren(), isClean()); + encryption_ = null; + } + return encryptionBuilder_; + } + + private com.google.storage.v2.Bucket.Billing billing_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Billing, + com.google.storage.v2.Bucket.Billing.Builder, + com.google.storage.v2.Bucket.BillingOrBuilder> + billingBuilder_; + + /** + * + * + *
+     * Optional. The bucket's billing configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the billing field is set. + */ + public boolean hasBilling() { + return ((bitField0_ & 0x00400000) != 0); + } + + /** + * + * + *
+     * Optional. The bucket's billing configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The billing. + */ + public com.google.storage.v2.Bucket.Billing getBilling() { + if (billingBuilder_ == null) { + return billing_ == null + ? com.google.storage.v2.Bucket.Billing.getDefaultInstance() + : billing_; + } else { + return billingBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The bucket's billing configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setBilling(com.google.storage.v2.Bucket.Billing value) { + if (billingBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + billing_ = value; + } else { + billingBuilder_.setMessage(value); + } + bitField0_ |= 0x00400000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's billing configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setBilling(com.google.storage.v2.Bucket.Billing.Builder builderForValue) { + if (billingBuilder_ == null) { + billing_ = builderForValue.build(); + } else { + billingBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00400000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's billing configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeBilling(com.google.storage.v2.Bucket.Billing value) { + if (billingBuilder_ == null) { + if (((bitField0_ & 0x00400000) != 0) + && billing_ != null + && billing_ != com.google.storage.v2.Bucket.Billing.getDefaultInstance()) { + getBillingBuilder().mergeFrom(value); + } else { + billing_ = value; + } + } else { + billingBuilder_.mergeFrom(value); + } + if (billing_ != null) { + bitField0_ |= 0x00400000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's billing configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearBilling() { + bitField0_ = (bitField0_ & ~0x00400000); + billing_ = null; + if (billingBuilder_ != null) { + billingBuilder_.dispose(); + billingBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's billing configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Billing.Builder getBillingBuilder() { + bitField0_ |= 0x00400000; + onChanged(); + return internalGetBillingFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The bucket's billing configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.BillingOrBuilder getBillingOrBuilder() { + if (billingBuilder_ != null) { + return billingBuilder_.getMessageOrBuilder(); + } else { + return billing_ == null + ? com.google.storage.v2.Bucket.Billing.getDefaultInstance() + : billing_; + } + } + + /** + * + * + *
+     * Optional. The bucket's billing configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Billing, + com.google.storage.v2.Bucket.Billing.Builder, + com.google.storage.v2.Bucket.BillingOrBuilder> + internalGetBillingFieldBuilder() { + if (billingBuilder_ == null) { + billingBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Billing, + com.google.storage.v2.Bucket.Billing.Builder, + com.google.storage.v2.Bucket.BillingOrBuilder>( + getBilling(), getParentForChildren(), isClean()); + billing_ = null; + } + return billingBuilder_; + } + + private com.google.storage.v2.Bucket.RetentionPolicy retentionPolicy_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.RetentionPolicy, + com.google.storage.v2.Bucket.RetentionPolicy.Builder, + com.google.storage.v2.Bucket.RetentionPolicyOrBuilder> + retentionPolicyBuilder_; + + /** + * + * + *
+     * Optional. The bucket's retention policy. The retention policy enforces a
+     * minimum retention time for all objects contained in the bucket, based on
+     * their creation time. Any attempt to overwrite or delete objects younger
+     * than the retention period results in a `PERMISSION_DENIED` error.  An
+     * unlocked retention policy can be modified or removed from the bucket via a
+     * storage.buckets.update operation. A locked retention policy cannot be
+     * removed or shortened in duration for the lifetime of the bucket.
+     * Attempting to remove or decrease period of a locked retention policy
+     * results in a `PERMISSION_DENIED` error.
+     * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionPolicy field is set. + */ + public boolean hasRetentionPolicy() { + return ((bitField0_ & 0x00800000) != 0); + } + + /** + * + * + *
+     * Optional. The bucket's retention policy. The retention policy enforces a
+     * minimum retention time for all objects contained in the bucket, based on
+     * their creation time. Any attempt to overwrite or delete objects younger
+     * than the retention period results in a `PERMISSION_DENIED` error.  An
+     * unlocked retention policy can be modified or removed from the bucket via a
+     * storage.buckets.update operation. A locked retention policy cannot be
+     * removed or shortened in duration for the lifetime of the bucket.
+     * Attempting to remove or decrease period of a locked retention policy
+     * results in a `PERMISSION_DENIED` error.
+     * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionPolicy. + */ + public com.google.storage.v2.Bucket.RetentionPolicy getRetentionPolicy() { + if (retentionPolicyBuilder_ == null) { + return retentionPolicy_ == null + ? com.google.storage.v2.Bucket.RetentionPolicy.getDefaultInstance() + : retentionPolicy_; + } else { + return retentionPolicyBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The bucket's retention policy. The retention policy enforces a
+     * minimum retention time for all objects contained in the bucket, based on
+     * their creation time. Any attempt to overwrite or delete objects younger
+     * than the retention period results in a `PERMISSION_DENIED` error.  An
+     * unlocked retention policy can be modified or removed from the bucket via a
+     * storage.buckets.update operation. A locked retention policy cannot be
+     * removed or shortened in duration for the lifetime of the bucket.
+     * Attempting to remove or decrease period of a locked retention policy
+     * results in a `PERMISSION_DENIED` error.
+     * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetentionPolicy(com.google.storage.v2.Bucket.RetentionPolicy value) { + if (retentionPolicyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + retentionPolicy_ = value; + } else { + retentionPolicyBuilder_.setMessage(value); + } + bitField0_ |= 0x00800000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's retention policy. The retention policy enforces a
+     * minimum retention time for all objects contained in the bucket, based on
+     * their creation time. Any attempt to overwrite or delete objects younger
+     * than the retention period results in a `PERMISSION_DENIED` error.  An
+     * unlocked retention policy can be modified or removed from the bucket via a
+     * storage.buckets.update operation. A locked retention policy cannot be
+     * removed or shortened in duration for the lifetime of the bucket.
+     * Attempting to remove or decrease period of a locked retention policy
+     * results in a `PERMISSION_DENIED` error.
+     * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetentionPolicy( + com.google.storage.v2.Bucket.RetentionPolicy.Builder builderForValue) { + if (retentionPolicyBuilder_ == null) { + retentionPolicy_ = builderForValue.build(); + } else { + retentionPolicyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00800000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's retention policy. The retention policy enforces a
+     * minimum retention time for all objects contained in the bucket, based on
+     * their creation time. Any attempt to overwrite or delete objects younger
+     * than the retention period results in a `PERMISSION_DENIED` error.  An
+     * unlocked retention policy can be modified or removed from the bucket via a
+     * storage.buckets.update operation. A locked retention policy cannot be
+     * removed or shortened in duration for the lifetime of the bucket.
+     * Attempting to remove or decrease period of a locked retention policy
+     * results in a `PERMISSION_DENIED` error.
+     * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeRetentionPolicy(com.google.storage.v2.Bucket.RetentionPolicy value) { + if (retentionPolicyBuilder_ == null) { + if (((bitField0_ & 0x00800000) != 0) + && retentionPolicy_ != null + && retentionPolicy_ + != com.google.storage.v2.Bucket.RetentionPolicy.getDefaultInstance()) { + getRetentionPolicyBuilder().mergeFrom(value); + } else { + retentionPolicy_ = value; + } + } else { + retentionPolicyBuilder_.mergeFrom(value); + } + if (retentionPolicy_ != null) { + bitField0_ |= 0x00800000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's retention policy. The retention policy enforces a
+     * minimum retention time for all objects contained in the bucket, based on
+     * their creation time. Any attempt to overwrite or delete objects younger
+     * than the retention period results in a `PERMISSION_DENIED` error.  An
+     * unlocked retention policy can be modified or removed from the bucket via a
+     * storage.buckets.update operation. A locked retention policy cannot be
+     * removed or shortened in duration for the lifetime of the bucket.
+     * Attempting to remove or decrease period of a locked retention policy
+     * results in a `PERMISSION_DENIED` error.
+     * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRetentionPolicy() { + bitField0_ = (bitField0_ & ~0x00800000); + retentionPolicy_ = null; + if (retentionPolicyBuilder_ != null) { + retentionPolicyBuilder_.dispose(); + retentionPolicyBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's retention policy. The retention policy enforces a
+     * minimum retention time for all objects contained in the bucket, based on
+     * their creation time. Any attempt to overwrite or delete objects younger
+     * than the retention period results in a `PERMISSION_DENIED` error.  An
+     * unlocked retention policy can be modified or removed from the bucket via a
+     * storage.buckets.update operation. A locked retention policy cannot be
+     * removed or shortened in duration for the lifetime of the bucket.
+     * Attempting to remove or decrease period of a locked retention policy
+     * results in a `PERMISSION_DENIED` error.
+     * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.RetentionPolicy.Builder getRetentionPolicyBuilder() { + bitField0_ |= 0x00800000; + onChanged(); + return internalGetRetentionPolicyFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The bucket's retention policy. The retention policy enforces a
+     * minimum retention time for all objects contained in the bucket, based on
+     * their creation time. Any attempt to overwrite or delete objects younger
+     * than the retention period results in a `PERMISSION_DENIED` error.  An
+     * unlocked retention policy can be modified or removed from the bucket via a
+     * storage.buckets.update operation. A locked retention policy cannot be
+     * removed or shortened in duration for the lifetime of the bucket.
+     * Attempting to remove or decrease period of a locked retention policy
+     * results in a `PERMISSION_DENIED` error.
+     * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.RetentionPolicyOrBuilder getRetentionPolicyOrBuilder() { + if (retentionPolicyBuilder_ != null) { + return retentionPolicyBuilder_.getMessageOrBuilder(); + } else { + return retentionPolicy_ == null + ? com.google.storage.v2.Bucket.RetentionPolicy.getDefaultInstance() + : retentionPolicy_; + } + } + + /** + * + * + *
+     * Optional. The bucket's retention policy. The retention policy enforces a
+     * minimum retention time for all objects contained in the bucket, based on
+     * their creation time. Any attempt to overwrite or delete objects younger
+     * than the retention period results in a `PERMISSION_DENIED` error.  An
+     * unlocked retention policy can be modified or removed from the bucket via a
+     * storage.buckets.update operation. A locked retention policy cannot be
+     * removed or shortened in duration for the lifetime of the bucket.
+     * Attempting to remove or decrease period of a locked retention policy
+     * results in a `PERMISSION_DENIED` error.
+     * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.RetentionPolicy, + com.google.storage.v2.Bucket.RetentionPolicy.Builder, + com.google.storage.v2.Bucket.RetentionPolicyOrBuilder> + internalGetRetentionPolicyFieldBuilder() { + if (retentionPolicyBuilder_ == null) { + retentionPolicyBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.RetentionPolicy, + com.google.storage.v2.Bucket.RetentionPolicy.Builder, + com.google.storage.v2.Bucket.RetentionPolicyOrBuilder>( + getRetentionPolicy(), getParentForChildren(), isClean()); + retentionPolicy_ = null; + } + return retentionPolicyBuilder_; + } + + private com.google.storage.v2.Bucket.IamConfig iamConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.IamConfig, + com.google.storage.v2.Bucket.IamConfig.Builder, + com.google.storage.v2.Bucket.IamConfigOrBuilder> + iamConfigBuilder_; + + /** + * + * + *
+     * Optional. The bucket's IAM configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the iamConfig field is set. + */ + public boolean hasIamConfig() { + return ((bitField0_ & 0x01000000) != 0); + } + + /** + * + * + *
+     * Optional. The bucket's IAM configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The iamConfig. + */ + public com.google.storage.v2.Bucket.IamConfig getIamConfig() { + if (iamConfigBuilder_ == null) { + return iamConfig_ == null + ? com.google.storage.v2.Bucket.IamConfig.getDefaultInstance() + : iamConfig_; + } else { + return iamConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The bucket's IAM configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setIamConfig(com.google.storage.v2.Bucket.IamConfig value) { + if (iamConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + iamConfig_ = value; + } else { + iamConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x01000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's IAM configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setIamConfig(com.google.storage.v2.Bucket.IamConfig.Builder builderForValue) { + if (iamConfigBuilder_ == null) { + iamConfig_ = builderForValue.build(); + } else { + iamConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x01000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's IAM configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeIamConfig(com.google.storage.v2.Bucket.IamConfig value) { + if (iamConfigBuilder_ == null) { + if (((bitField0_ & 0x01000000) != 0) + && iamConfig_ != null + && iamConfig_ != com.google.storage.v2.Bucket.IamConfig.getDefaultInstance()) { + getIamConfigBuilder().mergeFrom(value); + } else { + iamConfig_ = value; + } + } else { + iamConfigBuilder_.mergeFrom(value); + } + if (iamConfig_ != null) { + bitField0_ |= 0x01000000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's IAM configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearIamConfig() { + bitField0_ = (bitField0_ & ~0x01000000); + iamConfig_ = null; + if (iamConfigBuilder_ != null) { + iamConfigBuilder_.dispose(); + iamConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's IAM configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.IamConfig.Builder getIamConfigBuilder() { + bitField0_ |= 0x01000000; + onChanged(); + return internalGetIamConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The bucket's IAM configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.IamConfigOrBuilder getIamConfigOrBuilder() { + if (iamConfigBuilder_ != null) { + return iamConfigBuilder_.getMessageOrBuilder(); + } else { + return iamConfig_ == null + ? com.google.storage.v2.Bucket.IamConfig.getDefaultInstance() + : iamConfig_; + } + } + + /** + * + * + *
+     * Optional. The bucket's IAM configuration.
+     * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.IamConfig, + com.google.storage.v2.Bucket.IamConfig.Builder, + com.google.storage.v2.Bucket.IamConfigOrBuilder> + internalGetIamConfigFieldBuilder() { + if (iamConfigBuilder_ == null) { + iamConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.IamConfig, + com.google.storage.v2.Bucket.IamConfig.Builder, + com.google.storage.v2.Bucket.IamConfigOrBuilder>( + getIamConfig(), getParentForChildren(), isClean()); + iamConfig_ = null; + } + return iamConfigBuilder_; + } + + private boolean satisfiesPzs_; + + /** + * + * + *
+     * Optional. Reserved for future use.
+     * 
+ * + * bool satisfies_pzs = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The satisfiesPzs. + */ + @java.lang.Override + public boolean getSatisfiesPzs() { + return satisfiesPzs_; + } + + /** + * + * + *
+     * Optional. Reserved for future use.
+     * 
+ * + * bool satisfies_pzs = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The satisfiesPzs to set. + * @return This builder for chaining. + */ + public Builder setSatisfiesPzs(boolean value) { + + satisfiesPzs_ = value; + bitField0_ |= 0x02000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Reserved for future use.
+     * 
+ * + * bool satisfies_pzs = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearSatisfiesPzs() { + bitField0_ = (bitField0_ & ~0x02000000); + satisfiesPzs_ = false; + onChanged(); + return this; + } + + private com.google.storage.v2.Bucket.CustomPlacementConfig customPlacementConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.CustomPlacementConfig, + com.google.storage.v2.Bucket.CustomPlacementConfig.Builder, + com.google.storage.v2.Bucket.CustomPlacementConfigOrBuilder> + customPlacementConfigBuilder_; + + /** + * + * + *
+     * Optional. Configuration that, if present, specifies the data placement for
+     * a [configurable
+     * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+     * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customPlacementConfig field is set. + */ + public boolean hasCustomPlacementConfig() { + return ((bitField0_ & 0x04000000) != 0); + } + + /** + * + * + *
+     * Optional. Configuration that, if present, specifies the data placement for
+     * a [configurable
+     * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+     * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customPlacementConfig. + */ + public com.google.storage.v2.Bucket.CustomPlacementConfig getCustomPlacementConfig() { + if (customPlacementConfigBuilder_ == null) { + return customPlacementConfig_ == null + ? com.google.storage.v2.Bucket.CustomPlacementConfig.getDefaultInstance() + : customPlacementConfig_; + } else { + return customPlacementConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Configuration that, if present, specifies the data placement for
+     * a [configurable
+     * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+     * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCustomPlacementConfig( + com.google.storage.v2.Bucket.CustomPlacementConfig value) { + if (customPlacementConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + customPlacementConfig_ = value; + } else { + customPlacementConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x04000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Configuration that, if present, specifies the data placement for
+     * a [configurable
+     * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+     * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCustomPlacementConfig( + com.google.storage.v2.Bucket.CustomPlacementConfig.Builder builderForValue) { + if (customPlacementConfigBuilder_ == null) { + customPlacementConfig_ = builderForValue.build(); + } else { + customPlacementConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x04000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Configuration that, if present, specifies the data placement for
+     * a [configurable
+     * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+     * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCustomPlacementConfig( + com.google.storage.v2.Bucket.CustomPlacementConfig value) { + if (customPlacementConfigBuilder_ == null) { + if (((bitField0_ & 0x04000000) != 0) + && customPlacementConfig_ != null + && customPlacementConfig_ + != com.google.storage.v2.Bucket.CustomPlacementConfig.getDefaultInstance()) { + getCustomPlacementConfigBuilder().mergeFrom(value); + } else { + customPlacementConfig_ = value; + } + } else { + customPlacementConfigBuilder_.mergeFrom(value); + } + if (customPlacementConfig_ != null) { + bitField0_ |= 0x04000000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Configuration that, if present, specifies the data placement for
+     * a [configurable
+     * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+     * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCustomPlacementConfig() { + bitField0_ = (bitField0_ & ~0x04000000); + customPlacementConfig_ = null; + if (customPlacementConfigBuilder_ != null) { + customPlacementConfigBuilder_.dispose(); + customPlacementConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Configuration that, if present, specifies the data placement for
+     * a [configurable
+     * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+     * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.CustomPlacementConfig.Builder + getCustomPlacementConfigBuilder() { + bitField0_ |= 0x04000000; + onChanged(); + return internalGetCustomPlacementConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Configuration that, if present, specifies the data placement for
+     * a [configurable
+     * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+     * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.CustomPlacementConfigOrBuilder + getCustomPlacementConfigOrBuilder() { + if (customPlacementConfigBuilder_ != null) { + return customPlacementConfigBuilder_.getMessageOrBuilder(); + } else { + return customPlacementConfig_ == null + ? com.google.storage.v2.Bucket.CustomPlacementConfig.getDefaultInstance() + : customPlacementConfig_; + } + } + + /** + * + * + *
+     * Optional. Configuration that, if present, specifies the data placement for
+     * a [configurable
+     * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+     * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.CustomPlacementConfig, + com.google.storage.v2.Bucket.CustomPlacementConfig.Builder, + com.google.storage.v2.Bucket.CustomPlacementConfigOrBuilder> + internalGetCustomPlacementConfigFieldBuilder() { + if (customPlacementConfigBuilder_ == null) { + customPlacementConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.CustomPlacementConfig, + com.google.storage.v2.Bucket.CustomPlacementConfig.Builder, + com.google.storage.v2.Bucket.CustomPlacementConfigOrBuilder>( + getCustomPlacementConfig(), getParentForChildren(), isClean()); + customPlacementConfig_ = null; + } + return customPlacementConfigBuilder_; + } + + private com.google.storage.v2.Bucket.Autoclass autoclass_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Autoclass, + com.google.storage.v2.Bucket.Autoclass.Builder, + com.google.storage.v2.Bucket.AutoclassOrBuilder> + autoclassBuilder_; + + /** + * + * + *
+     * Optional. The bucket's Autoclass configuration. If there is no
+     * configuration, the Autoclass feature is disabled and has no effect on the
+     * bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoclass field is set. + */ + public boolean hasAutoclass() { + return ((bitField0_ & 0x08000000) != 0); + } + + /** + * + * + *
+     * Optional. The bucket's Autoclass configuration. If there is no
+     * configuration, the Autoclass feature is disabled and has no effect on the
+     * bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoclass. + */ + public com.google.storage.v2.Bucket.Autoclass getAutoclass() { + if (autoclassBuilder_ == null) { + return autoclass_ == null + ? com.google.storage.v2.Bucket.Autoclass.getDefaultInstance() + : autoclass_; + } else { + return autoclassBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The bucket's Autoclass configuration. If there is no
+     * configuration, the Autoclass feature is disabled and has no effect on the
+     * bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoclass(com.google.storage.v2.Bucket.Autoclass value) { + if (autoclassBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + autoclass_ = value; + } else { + autoclassBuilder_.setMessage(value); + } + bitField0_ |= 0x08000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's Autoclass configuration. If there is no
+     * configuration, the Autoclass feature is disabled and has no effect on the
+     * bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoclass(com.google.storage.v2.Bucket.Autoclass.Builder builderForValue) { + if (autoclassBuilder_ == null) { + autoclass_ = builderForValue.build(); + } else { + autoclassBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x08000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's Autoclass configuration. If there is no
+     * configuration, the Autoclass feature is disabled and has no effect on the
+     * bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeAutoclass(com.google.storage.v2.Bucket.Autoclass value) { + if (autoclassBuilder_ == null) { + if (((bitField0_ & 0x08000000) != 0) + && autoclass_ != null + && autoclass_ != com.google.storage.v2.Bucket.Autoclass.getDefaultInstance()) { + getAutoclassBuilder().mergeFrom(value); + } else { + autoclass_ = value; + } + } else { + autoclassBuilder_.mergeFrom(value); + } + if (autoclass_ != null) { + bitField0_ |= 0x08000000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's Autoclass configuration. If there is no
+     * configuration, the Autoclass feature is disabled and has no effect on the
+     * bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearAutoclass() { + bitField0_ = (bitField0_ & ~0x08000000); + autoclass_ = null; + if (autoclassBuilder_ != null) { + autoclassBuilder_.dispose(); + autoclassBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's Autoclass configuration. If there is no
+     * configuration, the Autoclass feature is disabled and has no effect on the
+     * bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.Autoclass.Builder getAutoclassBuilder() { + bitField0_ |= 0x08000000; + onChanged(); + return internalGetAutoclassFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The bucket's Autoclass configuration. If there is no
+     * configuration, the Autoclass feature is disabled and has no effect on the
+     * bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.AutoclassOrBuilder getAutoclassOrBuilder() { + if (autoclassBuilder_ != null) { + return autoclassBuilder_.getMessageOrBuilder(); + } else { + return autoclass_ == null + ? com.google.storage.v2.Bucket.Autoclass.getDefaultInstance() + : autoclass_; + } + } + + /** + * + * + *
+     * Optional. The bucket's Autoclass configuration. If there is no
+     * configuration, the Autoclass feature is disabled and has no effect on the
+     * bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Autoclass, + com.google.storage.v2.Bucket.Autoclass.Builder, + com.google.storage.v2.Bucket.AutoclassOrBuilder> + internalGetAutoclassFieldBuilder() { + if (autoclassBuilder_ == null) { + autoclassBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.Autoclass, + com.google.storage.v2.Bucket.Autoclass.Builder, + com.google.storage.v2.Bucket.AutoclassOrBuilder>( + getAutoclass(), getParentForChildren(), isClean()); + autoclass_ = null; + } + return autoclassBuilder_; + } + + private com.google.storage.v2.Bucket.HierarchicalNamespace hierarchicalNamespace_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.HierarchicalNamespace, + com.google.storage.v2.Bucket.HierarchicalNamespace.Builder, + com.google.storage.v2.Bucket.HierarchicalNamespaceOrBuilder> + hierarchicalNamespaceBuilder_; + + /** + * + * + *
+     * Optional. The bucket's hierarchical namespace configuration. If there is no
+     * configuration, the hierarchical namespace feature is disabled and has
+     * no effect on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the hierarchicalNamespace field is set. + */ + public boolean hasHierarchicalNamespace() { + return ((bitField0_ & 0x10000000) != 0); + } + + /** + * + * + *
+     * Optional. The bucket's hierarchical namespace configuration. If there is no
+     * configuration, the hierarchical namespace feature is disabled and has
+     * no effect on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The hierarchicalNamespace. + */ + public com.google.storage.v2.Bucket.HierarchicalNamespace getHierarchicalNamespace() { + if (hierarchicalNamespaceBuilder_ == null) { + return hierarchicalNamespace_ == null + ? com.google.storage.v2.Bucket.HierarchicalNamespace.getDefaultInstance() + : hierarchicalNamespace_; + } else { + return hierarchicalNamespaceBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The bucket's hierarchical namespace configuration. If there is no
+     * configuration, the hierarchical namespace feature is disabled and has
+     * no effect on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setHierarchicalNamespace( + com.google.storage.v2.Bucket.HierarchicalNamespace value) { + if (hierarchicalNamespaceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hierarchicalNamespace_ = value; + } else { + hierarchicalNamespaceBuilder_.setMessage(value); + } + bitField0_ |= 0x10000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's hierarchical namespace configuration. If there is no
+     * configuration, the hierarchical namespace feature is disabled and has
+     * no effect on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setHierarchicalNamespace( + com.google.storage.v2.Bucket.HierarchicalNamespace.Builder builderForValue) { + if (hierarchicalNamespaceBuilder_ == null) { + hierarchicalNamespace_ = builderForValue.build(); + } else { + hierarchicalNamespaceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x10000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's hierarchical namespace configuration. If there is no
+     * configuration, the hierarchical namespace feature is disabled and has
+     * no effect on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeHierarchicalNamespace( + com.google.storage.v2.Bucket.HierarchicalNamespace value) { + if (hierarchicalNamespaceBuilder_ == null) { + if (((bitField0_ & 0x10000000) != 0) + && hierarchicalNamespace_ != null + && hierarchicalNamespace_ + != com.google.storage.v2.Bucket.HierarchicalNamespace.getDefaultInstance()) { + getHierarchicalNamespaceBuilder().mergeFrom(value); + } else { + hierarchicalNamespace_ = value; + } + } else { + hierarchicalNamespaceBuilder_.mergeFrom(value); + } + if (hierarchicalNamespace_ != null) { + bitField0_ |= 0x10000000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's hierarchical namespace configuration. If there is no
+     * configuration, the hierarchical namespace feature is disabled and has
+     * no effect on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearHierarchicalNamespace() { + bitField0_ = (bitField0_ & ~0x10000000); + hierarchicalNamespace_ = null; + if (hierarchicalNamespaceBuilder_ != null) { + hierarchicalNamespaceBuilder_.dispose(); + hierarchicalNamespaceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's hierarchical namespace configuration. If there is no
+     * configuration, the hierarchical namespace feature is disabled and has
+     * no effect on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.HierarchicalNamespace.Builder + getHierarchicalNamespaceBuilder() { + bitField0_ |= 0x10000000; + onChanged(); + return internalGetHierarchicalNamespaceFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The bucket's hierarchical namespace configuration. If there is no
+     * configuration, the hierarchical namespace feature is disabled and has
+     * no effect on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.HierarchicalNamespaceOrBuilder + getHierarchicalNamespaceOrBuilder() { + if (hierarchicalNamespaceBuilder_ != null) { + return hierarchicalNamespaceBuilder_.getMessageOrBuilder(); + } else { + return hierarchicalNamespace_ == null + ? com.google.storage.v2.Bucket.HierarchicalNamespace.getDefaultInstance() + : hierarchicalNamespace_; + } + } + + /** + * + * + *
+     * Optional. The bucket's hierarchical namespace configuration. If there is no
+     * configuration, the hierarchical namespace feature is disabled and has
+     * no effect on the bucket.
+     * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.HierarchicalNamespace, + com.google.storage.v2.Bucket.HierarchicalNamespace.Builder, + com.google.storage.v2.Bucket.HierarchicalNamespaceOrBuilder> + internalGetHierarchicalNamespaceFieldBuilder() { + if (hierarchicalNamespaceBuilder_ == null) { + hierarchicalNamespaceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.HierarchicalNamespace, + com.google.storage.v2.Bucket.HierarchicalNamespace.Builder, + com.google.storage.v2.Bucket.HierarchicalNamespaceOrBuilder>( + getHierarchicalNamespace(), getParentForChildren(), isClean()); + hierarchicalNamespace_ = null; + } + return hierarchicalNamespaceBuilder_; + } + + private com.google.storage.v2.Bucket.SoftDeletePolicy softDeletePolicy_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.SoftDeletePolicy, + com.google.storage.v2.Bucket.SoftDeletePolicy.Builder, + com.google.storage.v2.Bucket.SoftDeletePolicyOrBuilder> + softDeletePolicyBuilder_; + + /** + * + * + *
+     * Optional. The bucket's soft delete policy. The soft delete policy prevents
+     * soft-deleted objects from being permanently deleted.
+     * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the softDeletePolicy field is set. + */ + public boolean hasSoftDeletePolicy() { + return ((bitField0_ & 0x20000000) != 0); + } + + /** + * + * + *
+     * Optional. The bucket's soft delete policy. The soft delete policy prevents
+     * soft-deleted objects from being permanently deleted.
+     * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The softDeletePolicy. + */ + public com.google.storage.v2.Bucket.SoftDeletePolicy getSoftDeletePolicy() { + if (softDeletePolicyBuilder_ == null) { + return softDeletePolicy_ == null + ? com.google.storage.v2.Bucket.SoftDeletePolicy.getDefaultInstance() + : softDeletePolicy_; + } else { + return softDeletePolicyBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The bucket's soft delete policy. The soft delete policy prevents
+     * soft-deleted objects from being permanently deleted.
+     * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSoftDeletePolicy(com.google.storage.v2.Bucket.SoftDeletePolicy value) { + if (softDeletePolicyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + softDeletePolicy_ = value; + } else { + softDeletePolicyBuilder_.setMessage(value); + } + bitField0_ |= 0x20000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's soft delete policy. The soft delete policy prevents
+     * soft-deleted objects from being permanently deleted.
+     * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSoftDeletePolicy( + com.google.storage.v2.Bucket.SoftDeletePolicy.Builder builderForValue) { + if (softDeletePolicyBuilder_ == null) { + softDeletePolicy_ = builderForValue.build(); + } else { + softDeletePolicyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x20000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's soft delete policy. The soft delete policy prevents
+     * soft-deleted objects from being permanently deleted.
+     * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeSoftDeletePolicy(com.google.storage.v2.Bucket.SoftDeletePolicy value) { + if (softDeletePolicyBuilder_ == null) { + if (((bitField0_ & 0x20000000) != 0) + && softDeletePolicy_ != null + && softDeletePolicy_ + != com.google.storage.v2.Bucket.SoftDeletePolicy.getDefaultInstance()) { + getSoftDeletePolicyBuilder().mergeFrom(value); + } else { + softDeletePolicy_ = value; + } + } else { + softDeletePolicyBuilder_.mergeFrom(value); + } + if (softDeletePolicy_ != null) { + bitField0_ |= 0x20000000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's soft delete policy. The soft delete policy prevents
+     * soft-deleted objects from being permanently deleted.
+     * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearSoftDeletePolicy() { + bitField0_ = (bitField0_ & ~0x20000000); + softDeletePolicy_ = null; + if (softDeletePolicyBuilder_ != null) { + softDeletePolicyBuilder_.dispose(); + softDeletePolicyBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's soft delete policy. The soft delete policy prevents
+     * soft-deleted objects from being permanently deleted.
+     * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.SoftDeletePolicy.Builder getSoftDeletePolicyBuilder() { + bitField0_ |= 0x20000000; + onChanged(); + return internalGetSoftDeletePolicyFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The bucket's soft delete policy. The soft delete policy prevents
+     * soft-deleted objects from being permanently deleted.
+     * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.SoftDeletePolicyOrBuilder getSoftDeletePolicyOrBuilder() { + if (softDeletePolicyBuilder_ != null) { + return softDeletePolicyBuilder_.getMessageOrBuilder(); + } else { + return softDeletePolicy_ == null + ? com.google.storage.v2.Bucket.SoftDeletePolicy.getDefaultInstance() + : softDeletePolicy_; + } + } + + /** + * + * + *
+     * Optional. The bucket's soft delete policy. The soft delete policy prevents
+     * soft-deleted objects from being permanently deleted.
+     * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.SoftDeletePolicy, + com.google.storage.v2.Bucket.SoftDeletePolicy.Builder, + com.google.storage.v2.Bucket.SoftDeletePolicyOrBuilder> + internalGetSoftDeletePolicyFieldBuilder() { + if (softDeletePolicyBuilder_ == null) { + softDeletePolicyBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.SoftDeletePolicy, + com.google.storage.v2.Bucket.SoftDeletePolicy.Builder, + com.google.storage.v2.Bucket.SoftDeletePolicyOrBuilder>( + getSoftDeletePolicy(), getParentForChildren(), isClean()); + softDeletePolicy_ = null; + } + return softDeletePolicyBuilder_; + } + + private com.google.storage.v2.Bucket.ObjectRetention objectRetention_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.ObjectRetention, + com.google.storage.v2.Bucket.ObjectRetention.Builder, + com.google.storage.v2.Bucket.ObjectRetentionOrBuilder> + objectRetentionBuilder_; + + /** + * + * + *
+     * Optional. The bucket's object retention configuration. Must be enabled
+     * before objects in the bucket might have retention configured.
+     * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectRetention field is set. + */ + public boolean hasObjectRetention() { + return ((bitField0_ & 0x40000000) != 0); + } + + /** + * + * + *
+     * Optional. The bucket's object retention configuration. Must be enabled
+     * before objects in the bucket might have retention configured.
+     * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectRetention. + */ + public com.google.storage.v2.Bucket.ObjectRetention getObjectRetention() { + if (objectRetentionBuilder_ == null) { + return objectRetention_ == null + ? com.google.storage.v2.Bucket.ObjectRetention.getDefaultInstance() + : objectRetention_; + } else { + return objectRetentionBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The bucket's object retention configuration. Must be enabled
+     * before objects in the bucket might have retention configured.
+     * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectRetention(com.google.storage.v2.Bucket.ObjectRetention value) { + if (objectRetentionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + objectRetention_ = value; + } else { + objectRetentionBuilder_.setMessage(value); + } + bitField0_ |= 0x40000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's object retention configuration. Must be enabled
+     * before objects in the bucket might have retention configured.
+     * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectRetention( + com.google.storage.v2.Bucket.ObjectRetention.Builder builderForValue) { + if (objectRetentionBuilder_ == null) { + objectRetention_ = builderForValue.build(); + } else { + objectRetentionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x40000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's object retention configuration. Must be enabled
+     * before objects in the bucket might have retention configured.
+     * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeObjectRetention(com.google.storage.v2.Bucket.ObjectRetention value) { + if (objectRetentionBuilder_ == null) { + if (((bitField0_ & 0x40000000) != 0) + && objectRetention_ != null + && objectRetention_ + != com.google.storage.v2.Bucket.ObjectRetention.getDefaultInstance()) { + getObjectRetentionBuilder().mergeFrom(value); + } else { + objectRetention_ = value; + } + } else { + objectRetentionBuilder_.mergeFrom(value); + } + if (objectRetention_ != null) { + bitField0_ |= 0x40000000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's object retention configuration. Must be enabled
+     * before objects in the bucket might have retention configured.
+     * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearObjectRetention() { + bitField0_ = (bitField0_ & ~0x40000000); + objectRetention_ = null; + if (objectRetentionBuilder_ != null) { + objectRetentionBuilder_.dispose(); + objectRetentionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's object retention configuration. Must be enabled
+     * before objects in the bucket might have retention configured.
+     * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.ObjectRetention.Builder getObjectRetentionBuilder() { + bitField0_ |= 0x40000000; + onChanged(); + return internalGetObjectRetentionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The bucket's object retention configuration. Must be enabled
+     * before objects in the bucket might have retention configured.
+     * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.ObjectRetentionOrBuilder getObjectRetentionOrBuilder() { + if (objectRetentionBuilder_ != null) { + return objectRetentionBuilder_.getMessageOrBuilder(); + } else { + return objectRetention_ == null + ? com.google.storage.v2.Bucket.ObjectRetention.getDefaultInstance() + : objectRetention_; + } + } + + /** + * + * + *
+     * Optional. The bucket's object retention configuration. Must be enabled
+     * before objects in the bucket might have retention configured.
+     * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.ObjectRetention, + com.google.storage.v2.Bucket.ObjectRetention.Builder, + com.google.storage.v2.Bucket.ObjectRetentionOrBuilder> + internalGetObjectRetentionFieldBuilder() { + if (objectRetentionBuilder_ == null) { + objectRetentionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.ObjectRetention, + com.google.storage.v2.Bucket.ObjectRetention.Builder, + com.google.storage.v2.Bucket.ObjectRetentionOrBuilder>( + getObjectRetention(), getParentForChildren(), isClean()); + objectRetention_ = null; + } + return objectRetentionBuilder_; + } + + private com.google.storage.v2.Bucket.IpFilter ipFilter_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.IpFilter, + com.google.storage.v2.Bucket.IpFilter.Builder, + com.google.storage.v2.Bucket.IpFilterOrBuilder> + ipFilterBuilder_; + + /** + * + * + *
+     * Optional. The bucket's IP filter configuration.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ipFilter field is set. + */ + public boolean hasIpFilter() { + return ((bitField0_ & 0x80000000) != 0); + } + + /** + * + * + *
+     * Optional. The bucket's IP filter configuration.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ipFilter. + */ + public com.google.storage.v2.Bucket.IpFilter getIpFilter() { + if (ipFilterBuilder_ == null) { + return ipFilter_ == null + ? com.google.storage.v2.Bucket.IpFilter.getDefaultInstance() + : ipFilter_; + } else { + return ipFilterBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The bucket's IP filter configuration.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setIpFilter(com.google.storage.v2.Bucket.IpFilter value) { + if (ipFilterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ipFilter_ = value; + } else { + ipFilterBuilder_.setMessage(value); + } + bitField0_ |= 0x80000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's IP filter configuration.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setIpFilter(com.google.storage.v2.Bucket.IpFilter.Builder builderForValue) { + if (ipFilterBuilder_ == null) { + ipFilter_ = builderForValue.build(); + } else { + ipFilterBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x80000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's IP filter configuration.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeIpFilter(com.google.storage.v2.Bucket.IpFilter value) { + if (ipFilterBuilder_ == null) { + if (((bitField0_ & 0x80000000) != 0) + && ipFilter_ != null + && ipFilter_ != com.google.storage.v2.Bucket.IpFilter.getDefaultInstance()) { + getIpFilterBuilder().mergeFrom(value); + } else { + ipFilter_ = value; + } + } else { + ipFilterBuilder_.mergeFrom(value); + } + if (ipFilter_ != null) { + bitField0_ |= 0x80000000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The bucket's IP filter configuration.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearIpFilter() { + bitField0_ = (bitField0_ & ~0x80000000); + ipFilter_ = null; + if (ipFilterBuilder_ != null) { + ipFilterBuilder_.dispose(); + ipFilterBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The bucket's IP filter configuration.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.IpFilter.Builder getIpFilterBuilder() { + bitField0_ |= 0x80000000; + onChanged(); + return internalGetIpFilterFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The bucket's IP filter configuration.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Bucket.IpFilterOrBuilder getIpFilterOrBuilder() { + if (ipFilterBuilder_ != null) { + return ipFilterBuilder_.getMessageOrBuilder(); + } else { + return ipFilter_ == null + ? com.google.storage.v2.Bucket.IpFilter.getDefaultInstance() + : ipFilter_; + } + } + + /** + * + * + *
+     * Optional. The bucket's IP filter configuration.
+     * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.IpFilter, + com.google.storage.v2.Bucket.IpFilter.Builder, + com.google.storage.v2.Bucket.IpFilterOrBuilder> + internalGetIpFilterFieldBuilder() { + if (ipFilterBuilder_ == null) { + ipFilterBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket.IpFilter, + com.google.storage.v2.Bucket.IpFilter.Builder, + com.google.storage.v2.Bucket.IpFilterOrBuilder>( + getIpFilter(), getParentForChildren(), isClean()); + ipFilter_ = null; + } + return ipFilterBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Bucket) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Bucket) + private static final com.google.storage.v2.Bucket DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Bucket(); + } + + public static com.google.storage.v2.Bucket getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Bucket parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + 
public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Bucket getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BucketAccessControl.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BucketAccessControl.java new file mode 100644 index 000000000000..20ba8322342f --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BucketAccessControl.java @@ -0,0 +1,2382 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * An access-control entry.
+ * 
+ * + * Protobuf type {@code google.storage.v2.BucketAccessControl} + */ +@com.google.protobuf.Generated +public final class BucketAccessControl extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.BucketAccessControl) + BucketAccessControlOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BucketAccessControl"); + } + + // Use BucketAccessControl.newBuilder() to construct. + private BucketAccessControl(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BucketAccessControl() { + role_ = ""; + id_ = ""; + entity_ = ""; + entityAlt_ = ""; + entityId_ = ""; + etag_ = ""; + email_ = ""; + domain_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BucketAccessControl_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BucketAccessControl_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BucketAccessControl.class, + com.google.storage.v2.BucketAccessControl.Builder.class); + } + + private int bitField0_; + public static final int ROLE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object role_ = ""; + + /** + * + * + *
+   * Optional. The access permission for the entity.
+   * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The role. + */ + @java.lang.Override + public java.lang.String getRole() { + java.lang.Object ref = role_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + role_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The access permission for the entity.
+   * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for role. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRoleBytes() { + java.lang.Object ref = role_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + role_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object id_ = ""; + + /** + * + * + *
+   * Optional. The ID of the access-control entry.
+   * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The id. + */ + @java.lang.Override + public java.lang.String getId() { + java.lang.Object ref = id_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + id_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The ID of the access-control entry.
+   * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for id. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIdBytes() { + java.lang.Object ref = id_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + id_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENTITY_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object entity_ = ""; + + /** + * + * + *
+   * Optional. The entity holding the permission, in one of the following forms:
+   * * `user-{userid}`
+   * * `user-{email}`
+   * * `group-{groupid}`
+   * * `group-{email}`
+   * * `domain-{domain}`
+   * * `project-{team}-{projectnumber}`
+   * * `project-{team}-{projectid}`
+   * * `allUsers`
+   * * `allAuthenticatedUsers`
+   * Examples:
+   * * The user `liz@example.com` would be `user-liz@example.com`.
+   * * The group `example@googlegroups.com` would be
+   * `group-example@googlegroups.com`
+   * * All members of the Google Apps for Business domain `example.com` would be
+   * `domain-example.com`
+   * For project entities, `project-{team}-{projectnumber}` format is
+   * returned on response.
+   * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entity. + */ + @java.lang.Override + public java.lang.String getEntity() { + java.lang.Object ref = entity_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entity_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The entity holding the permission, in one of the following forms:
+   * * `user-{userid}`
+   * * `user-{email}`
+   * * `group-{groupid}`
+   * * `group-{email}`
+   * * `domain-{domain}`
+   * * `project-{team}-{projectnumber}`
+   * * `project-{team}-{projectid}`
+   * * `allUsers`
+   * * `allAuthenticatedUsers`
+   * Examples:
+   * * The user `liz@example.com` would be `user-liz@example.com`.
+   * * The group `example@googlegroups.com` would be
+   * `group-example@googlegroups.com`
+   * * All members of the Google Apps for Business domain `example.com` would be
+   * `domain-example.com`
+   * For project entities, `project-{team}-{projectnumber}` format is
+   * returned on response.
+   * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entity. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEntityBytes() { + java.lang.Object ref = entity_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entity_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENTITY_ALT_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private volatile java.lang.Object entityAlt_ = ""; + + /** + * + * + *
+   * Output only. The alternative entity format, if exists. For project
+   * entities, `project-{team}-{projectid}` format is returned in the response.
+   * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The entityAlt. + */ + @java.lang.Override + public java.lang.String getEntityAlt() { + java.lang.Object ref = entityAlt_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entityAlt_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The alternative entity format, if exists. For project
+   * entities, `project-{team}-{projectid}` format is returned in the response.
+   * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for entityAlt. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEntityAltBytes() { + java.lang.Object ref = entityAlt_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entityAlt_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENTITY_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object entityId_ = ""; + + /** + * + * + *
+   * Optional. The ID for the entity, if any.
+   * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entityId. + */ + @java.lang.Override + public java.lang.String getEntityId() { + java.lang.Object ref = entityId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entityId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The ID for the entity, if any.
+   * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entityId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEntityIdBytes() { + java.lang.Object ref = entityId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entityId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ETAG_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private volatile java.lang.Object etag_ = ""; + + /** + * + * + *
+   * Optional. The `etag` of the `BucketAccessControl`.
+   * If included in the metadata of an update or delete request message, the
+   * operation operation is only performed if the etag matches that of the
+   * bucket's `BucketAccessControl`.
+   * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The etag. + */ + @java.lang.Override + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The `etag` of the `BucketAccessControl`.
+   * If included in the metadata of an update or delete request message, the
+   * operation operation is only performed if the etag matches that of the
+   * bucket's `BucketAccessControl`.
+   * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for etag. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EMAIL_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object email_ = ""; + + /** + * + * + *
+   * Optional. The email address associated with the entity, if any.
+   * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The email. + */ + @java.lang.Override + public java.lang.String getEmail() { + java.lang.Object ref = email_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + email_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The email address associated with the entity, if any.
+   * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for email. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEmailBytes() { + java.lang.Object ref = email_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + email_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DOMAIN_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object domain_ = ""; + + /** + * + * + *
+   * Optional. The domain associated with the entity, if any.
+   * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The domain. + */ + @java.lang.Override + public java.lang.String getDomain() { + java.lang.Object ref = domain_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + domain_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The domain associated with the entity, if any.
+   * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for domain. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDomainBytes() { + java.lang.Object ref = domain_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + domain_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_TEAM_FIELD_NUMBER = 7; + private com.google.storage.v2.ProjectTeam projectTeam_; + + /** + * + * + *
+   * Optional. The project team associated with the entity, if any.
+   * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the projectTeam field is set. + */ + @java.lang.Override + public boolean hasProjectTeam() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. The project team associated with the entity, if any.
+   * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The projectTeam. + */ + @java.lang.Override + public com.google.storage.v2.ProjectTeam getProjectTeam() { + return projectTeam_ == null + ? com.google.storage.v2.ProjectTeam.getDefaultInstance() + : projectTeam_; + } + + /** + * + * + *
+   * Optional. The project team associated with the entity, if any.
+   * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ProjectTeamOrBuilder getProjectTeamOrBuilder() { + return projectTeam_ == null + ? com.google.storage.v2.ProjectTeam.getDefaultInstance() + : projectTeam_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(role_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, role_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(id_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, id_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entity_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, entity_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entityId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, entityId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(email_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, email_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(domain_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, domain_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(7, getProjectTeam()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 8, etag_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entityAlt_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 9, entityAlt_); + } + 
getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(role_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, role_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(id_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, id_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entity_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, entity_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entityId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, entityId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(email_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, email_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(domain_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, domain_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getProjectTeam()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(8, etag_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entityAlt_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(9, entityAlt_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.BucketAccessControl)) { + return super.equals(obj); + } + com.google.storage.v2.BucketAccessControl other = + (com.google.storage.v2.BucketAccessControl) obj; + + if (!getRole().equals(other.getRole())) return false; + if 
(!getId().equals(other.getId())) return false; + if (!getEntity().equals(other.getEntity())) return false; + if (!getEntityAlt().equals(other.getEntityAlt())) return false; + if (!getEntityId().equals(other.getEntityId())) return false; + if (!getEtag().equals(other.getEtag())) return false; + if (!getEmail().equals(other.getEmail())) return false; + if (!getDomain().equals(other.getDomain())) return false; + if (hasProjectTeam() != other.hasProjectTeam()) return false; + if (hasProjectTeam()) { + if (!getProjectTeam().equals(other.getProjectTeam())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ROLE_FIELD_NUMBER; + hash = (53 * hash) + getRole().hashCode(); + hash = (37 * hash) + ID_FIELD_NUMBER; + hash = (53 * hash) + getId().hashCode(); + hash = (37 * hash) + ENTITY_FIELD_NUMBER; + hash = (53 * hash) + getEntity().hashCode(); + hash = (37 * hash) + ENTITY_ALT_FIELD_NUMBER; + hash = (53 * hash) + getEntityAlt().hashCode(); + hash = (37 * hash) + ENTITY_ID_FIELD_NUMBER; + hash = (53 * hash) + getEntityId().hashCode(); + hash = (37 * hash) + ETAG_FIELD_NUMBER; + hash = (53 * hash) + getEtag().hashCode(); + hash = (37 * hash) + EMAIL_FIELD_NUMBER; + hash = (53 * hash) + getEmail().hashCode(); + hash = (37 * hash) + DOMAIN_FIELD_NUMBER; + hash = (53 * hash) + getDomain().hashCode(); + if (hasProjectTeam()) { + hash = (37 * hash) + PROJECT_TEAM_FIELD_NUMBER; + hash = (53 * hash) + getProjectTeam().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.BucketAccessControl parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + 
+ public static com.google.storage.v2.BucketAccessControl parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BucketAccessControl parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BucketAccessControl parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BucketAccessControl parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.BucketAccessControl parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.BucketAccessControl parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BucketAccessControl parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BucketAccessControl parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static 
com.google.storage.v2.BucketAccessControl parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.BucketAccessControl parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.BucketAccessControl parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.BucketAccessControl prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * An access-control entry.
+   * 
+ * + * Protobuf type {@code google.storage.v2.BucketAccessControl} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.BucketAccessControl) + com.google.storage.v2.BucketAccessControlOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BucketAccessControl_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BucketAccessControl_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.BucketAccessControl.class, + com.google.storage.v2.BucketAccessControl.Builder.class); + } + + // Construct using com.google.storage.v2.BucketAccessControl.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetProjectTeamFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + role_ = ""; + id_ = ""; + entity_ = ""; + entityAlt_ = ""; + entityId_ = ""; + etag_ = ""; + email_ = ""; + domain_ = ""; + projectTeam_ = null; + if (projectTeamBuilder_ != null) { + projectTeamBuilder_.dispose(); + projectTeamBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_BucketAccessControl_descriptor; + } + + @java.lang.Override + public 
com.google.storage.v2.BucketAccessControl getDefaultInstanceForType() { + return com.google.storage.v2.BucketAccessControl.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.BucketAccessControl build() { + com.google.storage.v2.BucketAccessControl result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.BucketAccessControl buildPartial() { + com.google.storage.v2.BucketAccessControl result = + new com.google.storage.v2.BucketAccessControl(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.BucketAccessControl result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.role_ = role_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.id_ = id_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.entity_ = entity_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.entityAlt_ = entityAlt_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.entityId_ = entityId_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.etag_ = etag_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.email_ = email_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.domain_ = domain_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000100) != 0)) { + result.projectTeam_ = + projectTeamBuilder_ == null ? 
projectTeam_ : projectTeamBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.BucketAccessControl) { + return mergeFrom((com.google.storage.v2.BucketAccessControl) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.BucketAccessControl other) { + if (other == com.google.storage.v2.BucketAccessControl.getDefaultInstance()) return this; + if (!other.getRole().isEmpty()) { + role_ = other.role_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getId().isEmpty()) { + id_ = other.id_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getEntity().isEmpty()) { + entity_ = other.entity_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getEntityAlt().isEmpty()) { + entityAlt_ = other.entityAlt_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (!other.getEntityId().isEmpty()) { + entityId_ = other.entityId_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (!other.getEtag().isEmpty()) { + etag_ = other.etag_; + bitField0_ |= 0x00000020; + onChanged(); + } + if (!other.getEmail().isEmpty()) { + email_ = other.email_; + bitField0_ |= 0x00000040; + onChanged(); + } + if (!other.getDomain().isEmpty()) { + domain_ = other.domain_; + bitField0_ |= 0x00000080; + onChanged(); + } + if (other.hasProjectTeam()) { + mergeProjectTeam(other.getProjectTeam()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { 
+ boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + role_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + id_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + entity_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + entityId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 34 + case 42: + { + email_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000040; + break; + } // case 42 + case 50: + { + domain_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000080; + break; + } // case 50 + case 58: + { + input.readMessage( + internalGetProjectTeamFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000100; + break; + } // case 58 + case 66: + { + etag_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 66 + case 74: + { + entityAlt_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 74 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object role_ = ""; + + /** + * + * + *
+     * Optional. The access permission for the entity.
+     * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The role. + */ + public java.lang.String getRole() { + java.lang.Object ref = role_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + role_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The access permission for the entity.
+     * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for role. + */ + public com.google.protobuf.ByteString getRoleBytes() { + java.lang.Object ref = role_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + role_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The access permission for the entity.
+     * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The role to set. + * @return This builder for chaining. + */ + public Builder setRole(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + role_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The access permission for the entity.
+     * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearRole() { + role_ = getDefaultInstance().getRole(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The access permission for the entity.
+     * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for role to set. + * @return This builder for chaining. + */ + public Builder setRoleBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + role_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object id_ = ""; + + /** + * + * + *
+     * Optional. The ID of the access-control entry.
+     * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The id. + */ + public java.lang.String getId() { + java.lang.Object ref = id_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + id_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The ID of the access-control entry.
+     * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for id. + */ + public com.google.protobuf.ByteString getIdBytes() { + java.lang.Object ref = id_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + id_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The ID of the access-control entry.
+     * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The id to set. + * @return This builder for chaining. + */ + public Builder setId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + id_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID of the access-control entry.
+     * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearId() { + id_ = getDefaultInstance().getId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID of the access-control entry.
+     * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for id to set. + * @return This builder for chaining. + */ + public Builder setIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + id_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object entity_ = ""; + + /** + * + * + *
+     * Optional. The entity holding the permission, in one of the following forms:
+     * * `user-{userid}`
+     * * `user-{email}`
+     * * `group-{groupid}`
+     * * `group-{email}`
+     * * `domain-{domain}`
+     * * `project-{team}-{projectnumber}`
+     * * `project-{team}-{projectid}`
+     * * `allUsers`
+     * * `allAuthenticatedUsers`
+     * Examples:
+     * * The user `liz@example.com` would be `user-liz@example.com`.
+     * * The group `example@googlegroups.com` would be
+     * `group-example@googlegroups.com`
+     * * All members of the Google Apps for Business domain `example.com` would be
+     * `domain-example.com`
+     * For project entities, `project-{team}-{projectnumber}` format is
+     * returned on response.
+     * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entity. + */ + public java.lang.String getEntity() { + java.lang.Object ref = entity_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entity_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The entity holding the permission, in one of the following forms:
+     * * `user-{userid}`
+     * * `user-{email}`
+     * * `group-{groupid}`
+     * * `group-{email}`
+     * * `domain-{domain}`
+     * * `project-{team}-{projectnumber}`
+     * * `project-{team}-{projectid}`
+     * * `allUsers`
+     * * `allAuthenticatedUsers`
+     * Examples:
+     * * The user `liz@example.com` would be `user-liz@example.com`.
+     * * The group `example@googlegroups.com` would be
+     * `group-example@googlegroups.com`
+     * * All members of the Google Apps for Business domain `example.com` would be
+     * `domain-example.com`
+     * For project entities, `project-{team}-{projectnumber}` format is
+     * returned on response.
+     * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entity. + */ + public com.google.protobuf.ByteString getEntityBytes() { + java.lang.Object ref = entity_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entity_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The entity holding the permission, in one of the following forms:
+     * * `user-{userid}`
+     * * `user-{email}`
+     * * `group-{groupid}`
+     * * `group-{email}`
+     * * `domain-{domain}`
+     * * `project-{team}-{projectnumber}`
+     * * `project-{team}-{projectid}`
+     * * `allUsers`
+     * * `allAuthenticatedUsers`
+     * Examples:
+     * * The user `liz@example.com` would be `user-liz@example.com`.
+     * * The group `example@googlegroups.com` would be
+     * `group-example@googlegroups.com`
+     * * All members of the Google Apps for Business domain `example.com` would be
+     * `domain-example.com`
+     * For project entities, `project-{team}-{projectnumber}` format is
+     * returned on response.
+     * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The entity to set. + * @return This builder for chaining. + */ + public Builder setEntity(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + entity_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The entity holding the permission, in one of the following forms:
+     * * `user-{userid}`
+     * * `user-{email}`
+     * * `group-{groupid}`
+     * * `group-{email}`
+     * * `domain-{domain}`
+     * * `project-{team}-{projectnumber}`
+     * * `project-{team}-{projectid}`
+     * * `allUsers`
+     * * `allAuthenticatedUsers`
+     * Examples:
+     * * The user `liz@example.com` would be `user-liz@example.com`.
+     * * The group `example@googlegroups.com` would be
+     * `group-example@googlegroups.com`
+     * * All members of the Google Apps for Business domain `example.com` would be
+     * `domain-example.com`
+     * For project entities, `project-{team}-{projectnumber}` format is
+     * returned on response.
+     * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEntity() { + entity_ = getDefaultInstance().getEntity(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The entity holding the permission, in one of the following forms:
+     * * `user-{userid}`
+     * * `user-{email}`
+     * * `group-{groupid}`
+     * * `group-{email}`
+     * * `domain-{domain}`
+     * * `project-{team}-{projectnumber}`
+     * * `project-{team}-{projectid}`
+     * * `allUsers`
+     * * `allAuthenticatedUsers`
+     * Examples:
+     * * The user `liz@example.com` would be `user-liz@example.com`.
+     * * The group `example@googlegroups.com` would be
+     * `group-example@googlegroups.com`
+     * * All members of the Google Apps for Business domain `example.com` would be
+     * `domain-example.com`
+     * For project entities, `project-{team}-{projectnumber}` format is
+     * returned on response.
+     * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for entity to set. + * @return This builder for chaining. + */ + public Builder setEntityBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + entity_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object entityAlt_ = ""; + + /** + * + * + *
+     * Output only. The alternative entity format, if it exists. For project
+     * entities, `project-{team}-{projectid}` format is returned in the response.
+     * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The entityAlt. + */ + public java.lang.String getEntityAlt() { + java.lang.Object ref = entityAlt_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entityAlt_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. The alternative entity format, if it exists. For project
+     * entities, `project-{team}-{projectid}` format is returned in the response.
+     * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for entityAlt. + */ + public com.google.protobuf.ByteString getEntityAltBytes() { + java.lang.Object ref = entityAlt_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entityAlt_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. The alternative entity format, if it exists. For project
+     * entities, `project-{team}-{projectid}` format is returned in the response.
+     * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The entityAlt to set. + * @return This builder for chaining. + */ + public Builder setEntityAlt(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + entityAlt_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The alternative entity format, if it exists. For project
+     * entities, `project-{team}-{projectid}` format is returned in the response.
+     * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearEntityAlt() { + entityAlt_ = getDefaultInstance().getEntityAlt(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The alternative entity format, if it exists. For project
+     * entities, `project-{team}-{projectid}` format is returned in the response.
+     * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for entityAlt to set. + * @return This builder for chaining. + */ + public Builder setEntityAltBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + entityAlt_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private java.lang.Object entityId_ = ""; + + /** + * + * + *
+     * Optional. The ID for the entity, if any.
+     * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entityId. + */ + public java.lang.String getEntityId() { + java.lang.Object ref = entityId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entityId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The ID for the entity, if any.
+     * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entityId. + */ + public com.google.protobuf.ByteString getEntityIdBytes() { + java.lang.Object ref = entityId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entityId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The ID for the entity, if any.
+     * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The entityId to set. + * @return This builder for chaining. + */ + public Builder setEntityId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + entityId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID for the entity, if any.
+     * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEntityId() { + entityId_ = getDefaultInstance().getEntityId(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID for the entity, if any.
+     * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for entityId to set. + * @return This builder for chaining. + */ + public Builder setEntityIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + entityId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private java.lang.Object etag_ = ""; + + /** + * + * + *
+     * Optional. The `etag` of the `BucketAccessControl`.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the
+     * bucket's `BucketAccessControl`.
+     * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The etag. + */ + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The `etag` of the `BucketAccessControl`.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the
+     * bucket's `BucketAccessControl`.
+     * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for etag. + */ + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The `etag` of the `BucketAccessControl`.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the
+     * bucket's `BucketAccessControl`.
+     * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The etag to set. + * @return This builder for chaining. + */ + public Builder setEtag(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + etag_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The `etag` of the `BucketAccessControl`.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the
+     * bucket's `BucketAccessControl`.
+     * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEtag() { + etag_ = getDefaultInstance().getEtag(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The `etag` of the `BucketAccessControl`.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the
+     * bucket's `BucketAccessControl`.
+     * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for etag to set. + * @return This builder for chaining. + */ + public Builder setEtagBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + etag_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private java.lang.Object email_ = ""; + + /** + * + * + *
+     * Optional. The email address associated with the entity, if any.
+     * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The email. + */ + public java.lang.String getEmail() { + java.lang.Object ref = email_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + email_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The email address associated with the entity, if any.
+     * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for email. + */ + public com.google.protobuf.ByteString getEmailBytes() { + java.lang.Object ref = email_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + email_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The email address associated with the entity, if any.
+     * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The email to set. + * @return This builder for chaining. + */ + public Builder setEmail(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + email_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The email address associated with the entity, if any.
+     * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEmail() { + email_ = getDefaultInstance().getEmail(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The email address associated with the entity, if any.
+     * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for email to set. + * @return This builder for chaining. + */ + public Builder setEmailBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + email_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + private java.lang.Object domain_ = ""; + + /** + * + * + *
+     * Optional. The domain associated with the entity, if any.
+     * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The domain. + */ + public java.lang.String getDomain() { + java.lang.Object ref = domain_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + domain_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The domain associated with the entity, if any.
+     * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for domain. + */ + public com.google.protobuf.ByteString getDomainBytes() { + java.lang.Object ref = domain_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + domain_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The domain associated with the entity, if any.
+     * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The domain to set. + * @return This builder for chaining. + */ + public Builder setDomain(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + domain_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The domain associated with the entity, if any.
+     * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDomain() { + domain_ = getDefaultInstance().getDomain(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The domain associated with the entity, if any.
+     * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for domain to set. + * @return This builder for chaining. + */ + public Builder setDomainBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + domain_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + private com.google.storage.v2.ProjectTeam projectTeam_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ProjectTeam, + com.google.storage.v2.ProjectTeam.Builder, + com.google.storage.v2.ProjectTeamOrBuilder> + projectTeamBuilder_; + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the projectTeam field is set. + */ + public boolean hasProjectTeam() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The projectTeam. + */ + public com.google.storage.v2.ProjectTeam getProjectTeam() { + if (projectTeamBuilder_ == null) { + return projectTeam_ == null + ? com.google.storage.v2.ProjectTeam.getDefaultInstance() + : projectTeam_; + } else { + return projectTeamBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setProjectTeam(com.google.storage.v2.ProjectTeam value) { + if (projectTeamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + projectTeam_ = value; + } else { + projectTeamBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setProjectTeam(com.google.storage.v2.ProjectTeam.Builder builderForValue) { + if (projectTeamBuilder_ == null) { + projectTeam_ = builderForValue.build(); + } else { + projectTeamBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeProjectTeam(com.google.storage.v2.ProjectTeam value) { + if (projectTeamBuilder_ == null) { + if (((bitField0_ & 0x00000100) != 0) + && projectTeam_ != null + && projectTeam_ != com.google.storage.v2.ProjectTeam.getDefaultInstance()) { + getProjectTeamBuilder().mergeFrom(value); + } else { + projectTeam_ = value; + } + } else { + projectTeamBuilder_.mergeFrom(value); + } + if (projectTeam_ != null) { + bitField0_ |= 0x00000100; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearProjectTeam() { + bitField0_ = (bitField0_ & ~0x00000100); + projectTeam_ = null; + if (projectTeamBuilder_ != null) { + projectTeamBuilder_.dispose(); + projectTeamBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ProjectTeam.Builder getProjectTeamBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return internalGetProjectTeamFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ProjectTeamOrBuilder getProjectTeamOrBuilder() { + if (projectTeamBuilder_ != null) { + return projectTeamBuilder_.getMessageOrBuilder(); + } else { + return projectTeam_ == null + ? com.google.storage.v2.ProjectTeam.getDefaultInstance() + : projectTeam_; + } + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ProjectTeam, + com.google.storage.v2.ProjectTeam.Builder, + com.google.storage.v2.ProjectTeamOrBuilder> + internalGetProjectTeamFieldBuilder() { + if (projectTeamBuilder_ == null) { + projectTeamBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ProjectTeam, + com.google.storage.v2.ProjectTeam.Builder, + com.google.storage.v2.ProjectTeamOrBuilder>( + getProjectTeam(), getParentForChildren(), isClean()); + projectTeam_ = null; + } + return projectTeamBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.BucketAccessControl) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.BucketAccessControl) + private static final com.google.storage.v2.BucketAccessControl DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.BucketAccessControl(); + } + + public static com.google.storage.v2.BucketAccessControl getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BucketAccessControl parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.BucketAccessControl getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BucketAccessControlOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BucketAccessControlOrBuilder.java new file mode 100644 index 000000000000..e2f87d949d6a --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BucketAccessControlOrBuilder.java @@ -0,0 +1,321 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface BucketAccessControlOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.BucketAccessControl) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. The access permission for the entity.
+   * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The role. + */ + java.lang.String getRole(); + + /** + * + * + *
+   * Optional. The access permission for the entity.
+   * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for role. + */ + com.google.protobuf.ByteString getRoleBytes(); + + /** + * + * + *
+   * Optional. The ID of the access-control entry.
+   * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The id. + */ + java.lang.String getId(); + + /** + * + * + *
+   * Optional. The ID of the access-control entry.
+   * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for id. + */ + com.google.protobuf.ByteString getIdBytes(); + + /** + * + * + *
+   * Optional. The entity holding the permission, in one of the following forms:
+   * * `user-{userid}`
+   * * `user-{email}`
+   * * `group-{groupid}`
+   * * `group-{email}`
+   * * `domain-{domain}`
+   * * `project-{team}-{projectnumber}`
+   * * `project-{team}-{projectid}`
+   * * `allUsers`
+   * * `allAuthenticatedUsers`
+   * Examples:
+   * * The user `liz@example.com` would be `user-liz@example.com`.
+   * * The group `example@googlegroups.com` would be
+   * `group-example@googlegroups.com`
+   * * All members of the Google Apps for Business domain `example.com` would be
+   * `domain-example.com`
+   * For project entities, `project-{team}-{projectnumber}` format is
+   * returned on response.
+   * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entity. + */ + java.lang.String getEntity(); + + /** + * + * + *
+   * Optional. The entity holding the permission, in one of the following forms:
+   * * `user-{userid}`
+   * * `user-{email}`
+   * * `group-{groupid}`
+   * * `group-{email}`
+   * * `domain-{domain}`
+   * * `project-{team}-{projectnumber}`
+   * * `project-{team}-{projectid}`
+   * * `allUsers`
+   * * `allAuthenticatedUsers`
+   * Examples:
+   * * The user `liz@example.com` would be `user-liz@example.com`.
+   * * The group `example@googlegroups.com` would be
+   * `group-example@googlegroups.com`
+   * * All members of the Google Apps for Business domain `example.com` would be
+   * `domain-example.com`
+   * For project entities, `project-{team}-{projectnumber}` format is
+   * returned on response.
+   * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entity. + */ + com.google.protobuf.ByteString getEntityBytes(); + + /** + * + * + *
+   * Output only. The alternative entity format, if exists. For project
+   * entities, `project-{team}-{projectid}` format is returned in the response.
+   * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The entityAlt. + */ + java.lang.String getEntityAlt(); + + /** + * + * + *
+   * Output only. The alternative entity format, if exists. For project
+   * entities, `project-{team}-{projectid}` format is returned in the response.
+   * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for entityAlt. + */ + com.google.protobuf.ByteString getEntityAltBytes(); + + /** + * + * + *
+   * Optional. The ID for the entity, if any.
+   * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entityId. + */ + java.lang.String getEntityId(); + + /** + * + * + *
+   * Optional. The ID for the entity, if any.
+   * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entityId. + */ + com.google.protobuf.ByteString getEntityIdBytes(); + + /** + * + * + *
+   * Optional. The `etag` of the `BucketAccessControl`.
+   * If included in the metadata of an update or delete request message, the
+   * operation operation is only performed if the etag matches that of the
+   * bucket's `BucketAccessControl`.
+   * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The etag. + */ + java.lang.String getEtag(); + + /** + * + * + *
+   * Optional. The `etag` of the `BucketAccessControl`.
+   * If included in the metadata of an update or delete request message, the
+   * operation operation is only performed if the etag matches that of the
+   * bucket's `BucketAccessControl`.
+   * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for etag. + */ + com.google.protobuf.ByteString getEtagBytes(); + + /** + * + * + *
+   * Optional. The email address associated with the entity, if any.
+   * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The email. + */ + java.lang.String getEmail(); + + /** + * + * + *
+   * Optional. The email address associated with the entity, if any.
+   * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for email. + */ + com.google.protobuf.ByteString getEmailBytes(); + + /** + * + * + *
+   * Optional. The domain associated with the entity, if any.
+   * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The domain. + */ + java.lang.String getDomain(); + + /** + * + * + *
+   * Optional. The domain associated with the entity, if any.
+   * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for domain. + */ + com.google.protobuf.ByteString getDomainBytes(); + + /** + * + * + *
+   * Optional. The project team associated with the entity, if any.
+   * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the projectTeam field is set. + */ + boolean hasProjectTeam(); + + /** + * + * + *
+   * Optional. The project team associated with the entity, if any.
+   * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The projectTeam. + */ + com.google.storage.v2.ProjectTeam getProjectTeam(); + + /** + * + * + *
+   * Optional. The project team associated with the entity, if any.
+   * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ProjectTeamOrBuilder getProjectTeamOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BucketName.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BucketName.java new file mode 100644 index 000000000000..67619b665fdb --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BucketName.java @@ -0,0 +1,191 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.v2; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class BucketName implements ResourceName { + private static final PathTemplate PROJECT_BUCKET = + PathTemplate.createWithoutUrlEncoding("projects/{project}/buckets/{bucket}"); + private volatile Map fieldValuesMap; + private final String project; + private final String bucket; + + @Deprecated + protected BucketName() { + project = null; + bucket = null; + } + + private BucketName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + bucket = Preconditions.checkNotNull(builder.getBucket()); + } + + public String getProject() { + return project; + } + + public String getBucket() { + return bucket; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static BucketName of(String project, String bucket) { + return newBuilder().setProject(project).setBucket(bucket).build(); + } + + public static String format(String project, String bucket) { + return newBuilder().setProject(project).setBucket(bucket).build().toString(); + } + + public static BucketName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_BUCKET.validatedMatch( + formattedString, "BucketName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("bucket")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (BucketName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_BUCKET.matches(formattedString); + } + 
+ @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (bucket != null) { + fieldMapBuilder.put("bucket", bucket); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_BUCKET.instantiate("project", project, "bucket", bucket); + } + + @Override + public boolean equals(java.lang.Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + BucketName that = ((BucketName) o); + return Objects.equals(this.project, that.project) && Objects.equals(this.bucket, that.bucket); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(bucket); + return h; + } + + /** Builder for projects/{project}/buckets/{bucket}. 
*/ + public static class Builder { + private String project; + private String bucket; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getBucket() { + return bucket; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setBucket(String bucket) { + this.bucket = bucket; + return this; + } + + private Builder(BucketName bucketName) { + this.project = bucketName.project; + this.bucket = bucketName.bucket; + } + + public BucketName build() { + return new BucketName(this); + } + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BucketOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BucketOrBuilder.java new file mode 100644 index 000000000000..8031dfb75a66 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/BucketOrBuilder.java @@ -0,0 +1,1399 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface BucketOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Bucket) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Identifier. The name of the bucket.
+   * Format: `projects/{project}/buckets/{bucket}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Identifier. The name of the bucket.
+   * Format: `projects/{project}/buckets/{bucket}`
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Output only. The user-chosen part of the bucket name. The `{bucket}`
+   * portion of the `name` field. For globally unique buckets, this is equal to
+   * the `bucket name` of other Cloud Storage APIs. Example: `pub`.
+   * 
+ * + * string bucket_id = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bucketId. + */ + java.lang.String getBucketId(); + + /** + * + * + *
+   * Output only. The user-chosen part of the bucket name. The `{bucket}`
+   * portion of the `name` field. For globally unique buckets, this is equal to
+   * the `bucket name` of other Cloud Storage APIs. Example: `pub`.
+   * 
+ * + * string bucket_id = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for bucketId. + */ + com.google.protobuf.ByteString getBucketIdBytes(); + + /** + * + * + *
+   * The etag of the bucket.
+   * If included in the metadata of an `UpdateBucketRequest`, the operation is
+   * only performed if the `etag` matches that of the bucket.
+   * 
+ * + * string etag = 29; + * + * @return The etag. + */ + java.lang.String getEtag(); + + /** + * + * + *
+   * The etag of the bucket.
+   * If included in the metadata of an `UpdateBucketRequest`, the operation is
+   * only performed if the `etag` matches that of the bucket.
+   * 
+ * + * string etag = 29; + * + * @return The bytes for etag. + */ + com.google.protobuf.ByteString getEtagBytes(); + + /** + * + * + *
+   * Immutable. The project which owns this bucket, in the format of
+   * `projects/{projectIdentifier}`.
+   * `{projectIdentifier}` can be the project ID or project number.
+   * Output values are always in the project number format.
+   * 
+ * + * + * string project = 3 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The project. + */ + java.lang.String getProject(); + + /** + * + * + *
+   * Immutable. The project which owns this bucket, in the format of
+   * `projects/{projectIdentifier}`.
+   * `{projectIdentifier}` can be the project ID or project number.
+   * Output values are always in the project number format.
+   * 
+ * + * + * string project = 3 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for project. + */ + com.google.protobuf.ByteString getProjectBytes(); + + /** + * + * + *
+   * Output only. The metadata generation of this bucket.
+   * 
+ * + * int64 metageneration = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The metageneration. + */ + long getMetageneration(); + + /** + * + * + *
+   * Immutable. The location of the bucket. Object data for objects in the
+   * bucket resides in physical storage within this region.  Defaults to `US`.
+   * Attempting to update this field after the bucket is created results in an
+   * error.
+   * 
+ * + * string location = 5 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The location. + */ + java.lang.String getLocation(); + + /** + * + * + *
+   * Immutable. The location of the bucket. Object data for objects in the
+   * bucket resides in physical storage within this region.  Defaults to `US`.
+   * Attempting to update this field after the bucket is created results in an
+   * error.
+   * 
+ * + * string location = 5 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The bytes for location. + */ + com.google.protobuf.ByteString getLocationBytes(); + + /** + * + * + *
+   * Output only. The location type of the bucket (region, dual-region,
+   * multi-region, etc).
+   * 
+ * + * string location_type = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The locationType. + */ + java.lang.String getLocationType(); + + /** + * + * + *
+   * Output only. The location type of the bucket (region, dual-region,
+   * multi-region, etc).
+   * 
+ * + * string location_type = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for locationType. + */ + com.google.protobuf.ByteString getLocationTypeBytes(); + + /** + * + * + *
+   * Optional. The bucket's default storage class, used whenever no storageClass
+   * is specified for a newly-created object. This defines how objects in the
+   * bucket are stored and determines the SLA and the cost of storage.
+   * If this value is not specified when the bucket is created, it defaults
+   * to `STANDARD`. For more information, see [Storage
+   * classes](https://developers.google.com/storage/docs/storage-classes).
+   * 
+ * + * string storage_class = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The storageClass. + */ + java.lang.String getStorageClass(); + + /** + * + * + *
+   * Optional. The bucket's default storage class, used whenever no storageClass
+   * is specified for a newly-created object. This defines how objects in the
+   * bucket are stored and determines the SLA and the cost of storage.
+   * If this value is not specified when the bucket is created, it defaults
+   * to `STANDARD`. For more information, see [Storage
+   * classes](https://developers.google.com/storage/docs/storage-classes).
+   * 
+ * + * string storage_class = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for storageClass. + */ + com.google.protobuf.ByteString getStorageClassBytes(); + + /** + * + * + *
+   * Optional. The recovery point objective for cross-region replication of the
+   * bucket. Applicable only for dual- and multi-region buckets. `DEFAULT` uses
+   * default replication. `ASYNC_TURBO` enables turbo replication, valid for
+   * dual-region buckets only. If rpo is not specified when the bucket is
+   * created, it defaults to `DEFAULT`. For more information, see [Turbo
+   * replication](https://cloud.google.com/storage/docs/availability-durability#turbo-replication).
+   * 
+ * + * string rpo = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The rpo. + */ + java.lang.String getRpo(); + + /** + * + * + *
+   * Optional. The recovery point objective for cross-region replication of the
+   * bucket. Applicable only for dual- and multi-region buckets. `DEFAULT` uses
+   * default replication. `ASYNC_TURBO` enables turbo replication, valid for
+   * dual-region buckets only. If rpo is not specified when the bucket is
+   * created, it defaults to `DEFAULT`. For more information, see [Turbo
+   * replication](https://cloud.google.com/storage/docs/availability-durability#turbo-replication).
+   * 
+ * + * string rpo = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for rpo. + */ + com.google.protobuf.ByteString getRpoBytes(); + + /** + * + * + *
+   * Optional. Access controls on the bucket.
+   * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+   * requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getAclList(); + + /** + * + * + *
+   * Optional. Access controls on the bucket.
+   * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+   * requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.BucketAccessControl getAcl(int index); + + /** + * + * + *
+   * Optional. Access controls on the bucket.
+   * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+   * requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getAclCount(); + + /** + * + * + *
+   * Optional. Access controls on the bucket.
+   * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+   * requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getAclOrBuilderList(); + + /** + * + * + *
+   * Optional. Access controls on the bucket.
+   * If `iam_config.uniform_bucket_level_access` is enabled on this bucket,
+   * requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.BucketAccessControl acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.BucketAccessControlOrBuilder getAclOrBuilder(int index); + + /** + * + * + *
+   * Optional. Default access controls to apply to new objects when no ACL is
+   * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getDefaultObjectAclList(); + + /** + * + * + *
+   * Optional. Default access controls to apply to new objects when no ACL is
+   * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ObjectAccessControl getDefaultObjectAcl(int index); + + /** + * + * + *
+   * Optional. Default access controls to apply to new objects when no ACL is
+   * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getDefaultObjectAclCount(); + + /** + * + * + *
+   * Optional. Default access controls to apply to new objects when no ACL is
+   * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getDefaultObjectAclOrBuilderList(); + + /** + * + * + *
+   * Optional. Default access controls to apply to new objects when no ACL is
+   * provided. If `iam_config.uniform_bucket_level_access` is enabled on this
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ObjectAccessControlOrBuilder getDefaultObjectAclOrBuilder(int index); + + /** + * + * + *
+   * Optional. The bucket's lifecycle configuration. See [Lifecycle
+   * Management](https://developers.google.com/storage/docs/lifecycle) for more
+   * information.
+   * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the lifecycle field is set. + */ + boolean hasLifecycle(); + + /** + * + * + *
+   * Optional. The bucket's lifecycle configuration. See [Lifecycle
+   * Management](https://developers.google.com/storage/docs/lifecycle) for more
+   * information.
+   * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lifecycle. + */ + com.google.storage.v2.Bucket.Lifecycle getLifecycle(); + + /** + * + * + *
+   * Optional. The bucket's lifecycle configuration. See [Lifecycle
+   * Management](https://developers.google.com/storage/docs/lifecycle) for more
+   * information.
+   * 
+ * + * + * .google.storage.v2.Bucket.Lifecycle lifecycle = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.LifecycleOrBuilder getLifecycleOrBuilder(); + + /** + * + * + *
+   * Output only. The creation time of the bucket.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the bucket.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the bucket.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+   * configuration.
+   * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getCorsList(); + + /** + * + * + *
+   * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+   * configuration.
+   * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.Cors getCors(int index); + + /** + * + * + *
+   * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+   * configuration.
+   * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getCorsCount(); + + /** + * + * + *
+   * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+   * configuration.
+   * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getCorsOrBuilderList(); + + /** + * + * + *
+   * Optional. The bucket's [CORS](https://www.w3.org/TR/cors/)
+   * configuration.
+   * 
+ * + * + * repeated .google.storage.v2.Bucket.Cors cors = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.CorsOrBuilder getCorsOrBuilder(int index); + + /** + * + * + *
+   * Output only. The modification time of the bucket.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + boolean hasUpdateTime(); + + /** + * + * + *
+   * Output only. The modification time of the bucket.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + com.google.protobuf.Timestamp getUpdateTime(); + + /** + * + * + *
+   * Output only. The modification time of the bucket.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); + + /** + * + * + *
+   * Optional. The default value for event-based hold on newly created objects
+   * in this bucket.  Event-based hold is a way to retain objects indefinitely
+   * until an event occurs, signified by the hold's release. After being
+   * released, such objects are subject to bucket-level retention (if any).  One
+   * sample use case of this flag is for banks to hold loan documents for at
+   * least 3 years after loan is paid in full. Here, bucket-level retention is 3
+   * years and the event is loan being paid in full. In this example, these
+   * objects are held intact for any number of years until the event has
+   * occurred (event-based hold on the object is released) and then 3 more years
+   * after that. That means retention duration of the objects begins from the
+   * moment event-based hold transitioned from true to false.  Objects under
+   * event-based hold cannot be deleted, overwritten or archived until the hold
+   * is removed.
+   * 
+ * + * bool default_event_based_hold = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The defaultEventBasedHold. + */ + boolean getDefaultEventBasedHold(); + + /** + * + * + *
+   * Optional. User-provided labels, in key/value pairs.
+   * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + int getLabelsCount(); + + /** + * + * + *
+   * Optional. User-provided labels, in key/value pairs.
+   * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + boolean containsLabels(java.lang.String key); + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Deprecated + java.util.Map getLabels(); + + /** + * + * + *
+   * Optional. User-provided labels, in key/value pairs.
+   * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + java.util.Map getLabelsMap(); + + /** + * + * + *
+   * Optional. User-provided labels, in key/value pairs.
+   * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + /* nullable */ + java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
+   * Optional. User-provided labels, in key/value pairs.
+   * 
+ * + * map<string, string> labels = 15 [(.google.api.field_behavior) = OPTIONAL]; + */ + java.lang.String getLabelsOrThrow(java.lang.String key); + + /** + * + * + *
+   * Optional. The bucket's website config, controlling how the service behaves
+   * when accessing bucket contents as a web site. See the [Static website
+   * examples](https://cloud.google.com/storage/docs/static-website) for more
+   * information.
+   * 
+ * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the website field is set. + */ + boolean hasWebsite(); + + /** + * + * + *
+   * Optional. The bucket's website config, controlling how the service behaves
+   * when accessing bucket contents as a web site. See the [Static website
+   * examples](https://cloud.google.com/storage/docs/static-website) for more
+   * information.
+   * 
+ * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The website. + */ + com.google.storage.v2.Bucket.Website getWebsite(); + + /** + * + * + *
+   * Optional. The bucket's website config, controlling how the service behaves
+   * when accessing bucket contents as a web site. See the [Static website
+   * examples](https://cloud.google.com/storage/docs/static-website) for more
+   * information.
+   * 
+ * + * .google.storage.v2.Bucket.Website website = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.WebsiteOrBuilder getWebsiteOrBuilder(); + + /** + * + * + *
+   * Optional. The bucket's versioning configuration.
+   * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the versioning field is set. + */ + boolean hasVersioning(); + + /** + * + * + *
+   * Optional. The bucket's versioning configuration.
+   * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The versioning. + */ + com.google.storage.v2.Bucket.Versioning getVersioning(); + + /** + * + * + *
+   * Optional. The bucket's versioning configuration.
+   * 
+ * + * + * .google.storage.v2.Bucket.Versioning versioning = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.VersioningOrBuilder getVersioningOrBuilder(); + + /** + * + * + *
+   * Optional. The bucket's logging config, which defines the destination bucket
+   * and name prefix (if any) for the current bucket's logs.
+   * 
+ * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the logging field is set. + */ + boolean hasLogging(); + + /** + * + * + *
+   * Optional. The bucket's logging config, which defines the destination bucket
+   * and name prefix (if any) for the current bucket's logs.
+   * 
+ * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The logging. + */ + com.google.storage.v2.Bucket.Logging getLogging(); + + /** + * + * + *
+   * Optional. The bucket's logging config, which defines the destination bucket
+   * and name prefix (if any) for the current bucket's logs.
+   * 
+ * + * .google.storage.v2.Bucket.Logging logging = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.LoggingOrBuilder getLoggingOrBuilder(); + + /** + * + * + *
+   * Output only. The owner of the bucket. This is always the project team's
+   * owner group.
+   * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return Whether the owner field is set. + */ + boolean hasOwner(); + + /** + * + * + *
+   * Output only. The owner of the bucket. This is always the project team's
+   * owner group.
+   * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The owner. + */ + com.google.storage.v2.Owner getOwner(); + + /** + * + * + *
+   * Output only. The owner of the bucket. This is always the project team's
+   * owner group.
+   * 
+ * + * .google.storage.v2.Owner owner = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + com.google.storage.v2.OwnerOrBuilder getOwnerOrBuilder(); + + /** + * + * + *
+   * Optional. Encryption config for a bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryption field is set. + */ + boolean hasEncryption(); + + /** + * + * + *
+   * Optional. Encryption config for a bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryption. + */ + com.google.storage.v2.Bucket.Encryption getEncryption(); + + /** + * + * + *
+   * Optional. Encryption config for a bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.Encryption encryption = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.EncryptionOrBuilder getEncryptionOrBuilder(); + + /** + * + * + *
+   * Optional. The bucket's billing configuration.
+   * 
+ * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the billing field is set. + */ + boolean hasBilling(); + + /** + * + * + *
+   * Optional. The bucket's billing configuration.
+   * 
+ * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The billing. + */ + com.google.storage.v2.Bucket.Billing getBilling(); + + /** + * + * + *
+   * Optional. The bucket's billing configuration.
+   * 
+ * + * .google.storage.v2.Bucket.Billing billing = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.BillingOrBuilder getBillingOrBuilder(); + + /** + * + * + *
+   * Optional. The bucket's retention policy. The retention policy enforces a
+   * minimum retention time for all objects contained in the bucket, based on
+   * their creation time. Any attempt to overwrite or delete objects younger
+   * than the retention period results in a `PERMISSION_DENIED` error.  An
+   * unlocked retention policy can be modified or removed from the bucket via a
+   * storage.buckets.update operation. A locked retention policy cannot be
+   * removed or shortened in duration for the lifetime of the bucket.
+   * Attempting to remove or decrease period of a locked retention policy
+   * results in a `PERMISSION_DENIED` error.
+   * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionPolicy field is set. + */ + boolean hasRetentionPolicy(); + + /** + * + * + *
+   * Optional. The bucket's retention policy. The retention policy enforces a
+   * minimum retention time for all objects contained in the bucket, based on
+   * their creation time. Any attempt to overwrite or delete objects younger
+   * than the retention period results in a `PERMISSION_DENIED` error.  An
+   * unlocked retention policy can be modified or removed from the bucket via a
+   * storage.buckets.update operation. A locked retention policy cannot be
+   * removed or shortened in duration for the lifetime of the bucket.
+   * Attempting to remove or decrease period of a locked retention policy
+   * results in a `PERMISSION_DENIED` error.
+   * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionPolicy. + */ + com.google.storage.v2.Bucket.RetentionPolicy getRetentionPolicy(); + + /** + * + * + *
+   * Optional. The bucket's retention policy. The retention policy enforces a
+   * minimum retention time for all objects contained in the bucket, based on
+   * their creation time. Any attempt to overwrite or delete objects younger
+   * than the retention period results in a `PERMISSION_DENIED` error.  An
+   * unlocked retention policy can be modified or removed from the bucket via a
+   * storage.buckets.update operation. A locked retention policy cannot be
+   * removed or shortened in duration for the lifetime of the bucket.
+   * Attempting to remove or decrease period of a locked retention policy
+   * results in a `PERMISSION_DENIED` error.
+   * 
+ * + * + * .google.storage.v2.Bucket.RetentionPolicy retention_policy = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.RetentionPolicyOrBuilder getRetentionPolicyOrBuilder(); + + /** + * + * + *
+   * Optional. The bucket's IAM configuration.
+   * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the iamConfig field is set. + */ + boolean hasIamConfig(); + + /** + * + * + *
+   * Optional. The bucket's IAM configuration.
+   * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The iamConfig. + */ + com.google.storage.v2.Bucket.IamConfig getIamConfig(); + + /** + * + * + *
+   * Optional. The bucket's IAM configuration.
+   * 
+ * + * + * .google.storage.v2.Bucket.IamConfig iam_config = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.IamConfigOrBuilder getIamConfigOrBuilder(); + + /** + * + * + *
+   * Optional. Reserved for future use.
+   * 
+ * + * bool satisfies_pzs = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The satisfiesPzs. + */ + boolean getSatisfiesPzs(); + + /** + * + * + *
+   * Optional. Configuration that, if present, specifies the data placement for
+   * a [configurable
+   * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+   * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customPlacementConfig field is set. + */ + boolean hasCustomPlacementConfig(); + + /** + * + * + *
+   * Optional. Configuration that, if present, specifies the data placement for
+   * a [configurable
+   * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+   * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customPlacementConfig. + */ + com.google.storage.v2.Bucket.CustomPlacementConfig getCustomPlacementConfig(); + + /** + * + * + *
+   * Optional. Configuration that, if present, specifies the data placement for
+   * a [configurable
+   * dual-region](https://cloud.google.com/storage/docs/locations#location-dr).
+   * 
+ * + * + * .google.storage.v2.Bucket.CustomPlacementConfig custom_placement_config = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.CustomPlacementConfigOrBuilder getCustomPlacementConfigOrBuilder(); + + /** + * + * + *
+   * Optional. The bucket's Autoclass configuration. If there is no
+   * configuration, the Autoclass feature is disabled and has no effect on the
+   * bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoclass field is set. + */ + boolean hasAutoclass(); + + /** + * + * + *
+   * Optional. The bucket's Autoclass configuration. If there is no
+   * configuration, the Autoclass feature is disabled and has no effect on the
+   * bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoclass. + */ + com.google.storage.v2.Bucket.Autoclass getAutoclass(); + + /** + * + * + *
+   * Optional. The bucket's Autoclass configuration. If there is no
+   * configuration, the Autoclass feature is disabled and has no effect on the
+   * bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.Autoclass autoclass = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.AutoclassOrBuilder getAutoclassOrBuilder(); + + /** + * + * + *
+   * Optional. The bucket's hierarchical namespace configuration. If there is no
+   * configuration, the hierarchical namespace feature is disabled and has
+   * no effect on the bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the hierarchicalNamespace field is set. + */ + boolean hasHierarchicalNamespace(); + + /** + * + * + *
+   * Optional. The bucket's hierarchical namespace configuration. If there is no
+   * configuration, the hierarchical namespace feature is disabled and has
+   * no effect on the bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The hierarchicalNamespace. + */ + com.google.storage.v2.Bucket.HierarchicalNamespace getHierarchicalNamespace(); + + /** + * + * + *
+   * Optional. The bucket's hierarchical namespace configuration. If there is no
+   * configuration, the hierarchical namespace feature is disabled and has
+   * no effect on the bucket.
+   * 
+ * + * + * .google.storage.v2.Bucket.HierarchicalNamespace hierarchical_namespace = 32 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.HierarchicalNamespaceOrBuilder getHierarchicalNamespaceOrBuilder(); + + /** + * + * + *
+   * Optional. The bucket's soft delete policy. The soft delete policy prevents
+   * soft-deleted objects from being permanently deleted.
+   * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the softDeletePolicy field is set. + */ + boolean hasSoftDeletePolicy(); + + /** + * + * + *
+   * Optional. The bucket's soft delete policy. The soft delete policy prevents
+   * soft-deleted objects from being permanently deleted.
+   * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The softDeletePolicy. + */ + com.google.storage.v2.Bucket.SoftDeletePolicy getSoftDeletePolicy(); + + /** + * + * + *
+   * Optional. The bucket's soft delete policy. The soft delete policy prevents
+   * soft-deleted objects from being permanently deleted.
+   * 
+ * + * + * .google.storage.v2.Bucket.SoftDeletePolicy soft_delete_policy = 31 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.SoftDeletePolicyOrBuilder getSoftDeletePolicyOrBuilder(); + + /** + * + * + *
+   * Optional. The bucket's object retention configuration. Must be enabled
+   * before objects in the bucket might have retention configured.
+   * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectRetention field is set. + */ + boolean hasObjectRetention(); + + /** + * + * + *
+   * Optional. The bucket's object retention configuration. Must be enabled
+   * before objects in the bucket might have retention configured.
+   * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectRetention. + */ + com.google.storage.v2.Bucket.ObjectRetention getObjectRetention(); + + /** + * + * + *
+   * Optional. The bucket's object retention configuration. Must be enabled
+   * before objects in the bucket might have retention configured.
+   * 
+ * + * + * .google.storage.v2.Bucket.ObjectRetention object_retention = 33 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.ObjectRetentionOrBuilder getObjectRetentionOrBuilder(); + + /** + * + * + *
+   * Optional. The bucket's IP filter configuration.
+   * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ipFilter field is set. + */ + boolean hasIpFilter(); + + /** + * + * + *
+   * Optional. The bucket's IP filter configuration.
+   * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ipFilter. + */ + com.google.storage.v2.Bucket.IpFilter getIpFilter(); + + /** + * + * + *
+   * Optional. The bucket's IP filter configuration.
+   * 
+ * + * + * optional .google.storage.v2.Bucket.IpFilter ip_filter = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Bucket.IpFilterOrBuilder getIpFilterOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CancelResumableWriteRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CancelResumableWriteRequest.java new file mode 100644 index 000000000000..ac91c580b1b0 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CancelResumableWriteRequest.java @@ -0,0 +1,605 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for
+ * [CancelResumableWrite][google.storage.v2.Storage.CancelResumableWrite].
+ * 
+ * + * Protobuf type {@code google.storage.v2.CancelResumableWriteRequest} + */ +@com.google.protobuf.Generated +public final class CancelResumableWriteRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.CancelResumableWriteRequest) + CancelResumableWriteRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CancelResumableWriteRequest"); + } + + // Use CancelResumableWriteRequest.newBuilder() to construct. + private CancelResumableWriteRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CancelResumableWriteRequest() { + uploadId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CancelResumableWriteRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CancelResumableWriteRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.CancelResumableWriteRequest.class, + com.google.storage.v2.CancelResumableWriteRequest.Builder.class); + } + + public static final int UPLOAD_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object uploadId_ = ""; + + /** + * + * + *
+   * Required. The upload_id of the resumable upload to cancel. This should be
+   * copied from the `upload_id` field of `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The uploadId. + */ + @java.lang.Override + public java.lang.String getUploadId() { + java.lang.Object ref = uploadId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + uploadId_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The upload_id of the resumable upload to cancel. This should be
+   * copied from the `upload_id` field of `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for uploadId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getUploadIdBytes() { + java.lang.Object ref = uploadId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + uploadId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(uploadId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, uploadId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(uploadId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, uploadId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.CancelResumableWriteRequest)) { + return super.equals(obj); + } + com.google.storage.v2.CancelResumableWriteRequest other = + (com.google.storage.v2.CancelResumableWriteRequest) obj; + + if (!getUploadId().equals(other.getUploadId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int 
hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + UPLOAD_ID_FIELD_NUMBER; + hash = (53 * hash) + getUploadId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.CancelResumableWriteRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CancelResumableWriteRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.CancelResumableWriteRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CancelResumableWriteRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.CancelResumableWriteRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CancelResumableWriteRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.CancelResumableWriteRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CancelResumableWriteRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.CancelResumableWriteRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CancelResumableWriteRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.CancelResumableWriteRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CancelResumableWriteRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.CancelResumableWriteRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for
+   * [CancelResumableWrite][google.storage.v2.Storage.CancelResumableWrite].
+   * 
+ * + * Protobuf type {@code google.storage.v2.CancelResumableWriteRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.CancelResumableWriteRequest) + com.google.storage.v2.CancelResumableWriteRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CancelResumableWriteRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CancelResumableWriteRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.CancelResumableWriteRequest.class, + com.google.storage.v2.CancelResumableWriteRequest.Builder.class); + } + + // Construct using com.google.storage.v2.CancelResumableWriteRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + uploadId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CancelResumableWriteRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.CancelResumableWriteRequest getDefaultInstanceForType() { + return com.google.storage.v2.CancelResumableWriteRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.CancelResumableWriteRequest build() { + com.google.storage.v2.CancelResumableWriteRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return 
result; + } + + @java.lang.Override + public com.google.storage.v2.CancelResumableWriteRequest buildPartial() { + com.google.storage.v2.CancelResumableWriteRequest result = + new com.google.storage.v2.CancelResumableWriteRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.CancelResumableWriteRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.uploadId_ = uploadId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.CancelResumableWriteRequest) { + return mergeFrom((com.google.storage.v2.CancelResumableWriteRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.CancelResumableWriteRequest other) { + if (other == com.google.storage.v2.CancelResumableWriteRequest.getDefaultInstance()) + return this; + if (!other.getUploadId().isEmpty()) { + uploadId_ = other.uploadId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + uploadId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch 
(tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object uploadId_ = ""; + + /** + * + * + *
+     * Required. The upload_id of the resumable upload to cancel. This should be
+     * copied from the `upload_id` field of `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The uploadId. + */ + public java.lang.String getUploadId() { + java.lang.Object ref = uploadId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + uploadId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The upload_id of the resumable upload to cancel. This should be
+     * copied from the `upload_id` field of `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for uploadId. + */ + public com.google.protobuf.ByteString getUploadIdBytes() { + java.lang.Object ref = uploadId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + uploadId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The upload_id of the resumable upload to cancel. This should be
+     * copied from the `upload_id` field of `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The uploadId to set. + * @return This builder for chaining. + */ + public Builder setUploadId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + uploadId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The upload_id of the resumable upload to cancel. This should be
+     * copied from the `upload_id` field of `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearUploadId() { + uploadId_ = getDefaultInstance().getUploadId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The upload_id of the resumable upload to cancel. This should be
+     * copied from the `upload_id` field of `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for uploadId to set. + * @return This builder for chaining. + */ + public Builder setUploadIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + uploadId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.CancelResumableWriteRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.CancelResumableWriteRequest) + private static final com.google.storage.v2.CancelResumableWriteRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.CancelResumableWriteRequest(); + } + + public static com.google.storage.v2.CancelResumableWriteRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CancelResumableWriteRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + 
return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.CancelResumableWriteRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CancelResumableWriteRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CancelResumableWriteRequestOrBuilder.java new file mode 100644 index 000000000000..3f5db0fd813e --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CancelResumableWriteRequestOrBuilder.java @@ -0,0 +1,56 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface CancelResumableWriteRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.CancelResumableWriteRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The upload_id of the resumable upload to cancel. This should be
+   * copied from the `upload_id` field of `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The uploadId. + */ + java.lang.String getUploadId(); + + /** + * + * + *
+   * Required. The upload_id of the resumable upload to cancel. This should be
+   * copied from the `upload_id` field of `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for uploadId. + */ + com.google.protobuf.ByteString getUploadIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CancelResumableWriteResponse.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CancelResumableWriteResponse.java new file mode 100644 index 000000000000..f914f8e5863b --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CancelResumableWriteResponse.java @@ -0,0 +1,398 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Empty response message for canceling an in-progress resumable upload, is
+ * extended as needed.
+ * 
+ * + * Protobuf type {@code google.storage.v2.CancelResumableWriteResponse} + */ +@com.google.protobuf.Generated +public final class CancelResumableWriteResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.CancelResumableWriteResponse) + CancelResumableWriteResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CancelResumableWriteResponse"); + } + + // Use CancelResumableWriteResponse.newBuilder() to construct. + private CancelResumableWriteResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CancelResumableWriteResponse() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CancelResumableWriteResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CancelResumableWriteResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.CancelResumableWriteResponse.class, + com.google.storage.v2.CancelResumableWriteResponse.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + 
@java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.CancelResumableWriteResponse)) { + return super.equals(obj); + } + com.google.storage.v2.CancelResumableWriteResponse other = + (com.google.storage.v2.CancelResumableWriteResponse) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.CancelResumableWriteResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CancelResumableWriteResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.CancelResumableWriteResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CancelResumableWriteResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.storage.v2.CancelResumableWriteResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CancelResumableWriteResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.CancelResumableWriteResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CancelResumableWriteResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.CancelResumableWriteResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CancelResumableWriteResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.CancelResumableWriteResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CancelResumableWriteResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.CancelResumableWriteResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Empty response message for canceling an in-progress resumable upload, is
+   * extended as needed.
+   * 
+ * + * Protobuf type {@code google.storage.v2.CancelResumableWriteResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.CancelResumableWriteResponse) + com.google.storage.v2.CancelResumableWriteResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CancelResumableWriteResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CancelResumableWriteResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.CancelResumableWriteResponse.class, + com.google.storage.v2.CancelResumableWriteResponse.Builder.class); + } + + // Construct using com.google.storage.v2.CancelResumableWriteResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CancelResumableWriteResponse_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.CancelResumableWriteResponse getDefaultInstanceForType() { + return com.google.storage.v2.CancelResumableWriteResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.CancelResumableWriteResponse build() { + com.google.storage.v2.CancelResumableWriteResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + 
@java.lang.Override + public com.google.storage.v2.CancelResumableWriteResponse buildPartial() { + com.google.storage.v2.CancelResumableWriteResponse result = + new com.google.storage.v2.CancelResumableWriteResponse(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.CancelResumableWriteResponse) { + return mergeFrom((com.google.storage.v2.CancelResumableWriteResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.CancelResumableWriteResponse other) { + if (other == com.google.storage.v2.CancelResumableWriteResponse.getDefaultInstance()) + return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.CancelResumableWriteResponse) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.CancelResumableWriteResponse) + private static final com.google.storage.v2.CancelResumableWriteResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = 
new com.google.storage.v2.CancelResumableWriteResponse(); + } + + public static com.google.storage.v2.CancelResumableWriteResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CancelResumableWriteResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.CancelResumableWriteResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CancelResumableWriteResponseOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CancelResumableWriteResponseOrBuilder.java new file mode 100644 index 000000000000..bc59110ea214 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CancelResumableWriteResponseOrBuilder.java @@ -0,0 +1,27 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface CancelResumableWriteResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.CancelResumableWriteResponse) + com.google.protobuf.MessageOrBuilder {} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ChecksummedData.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ChecksummedData.java new file mode 100644 index 000000000000..5444cfa65d03 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ChecksummedData.java @@ -0,0 +1,644 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Message used to convey content being read or written, along with an optional
+ * checksum.
+ * 
+ * + * Protobuf type {@code google.storage.v2.ChecksummedData} + */ +@com.google.protobuf.Generated +public final class ChecksummedData extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ChecksummedData) + ChecksummedDataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ChecksummedData"); + } + + // Use ChecksummedData.newBuilder() to construct. + private ChecksummedData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ChecksummedData() { + content_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ChecksummedData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ChecksummedData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ChecksummedData.class, + com.google.storage.v2.ChecksummedData.Builder.class); + } + + private int bitField0_; + public static final int CONTENT_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString content_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * Optional. The data.
+   * 
+ * + * bytes content = 1 [ctype = CORD, (.google.api.field_behavior) = OPTIONAL]; + * + * @return The content. + */ + @java.lang.Override + public com.google.protobuf.ByteString getContent() { + return content_; + } + + public static final int CRC32C_FIELD_NUMBER = 2; + private int crc32C_ = 0; + + /** + * + * + *
+   * If set, the CRC32C digest of the content field.
+   * 
+ * + * optional fixed32 crc32c = 2; + * + * @return Whether the crc32c field is set. + */ + @java.lang.Override + public boolean hasCrc32C() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * If set, the CRC32C digest of the content field.
+   * 
+ * + * optional fixed32 crc32c = 2; + * + * @return The crc32c. + */ + @java.lang.Override + public int getCrc32C() { + return crc32C_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!content_.isEmpty()) { + output.writeBytes(1, content_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeFixed32(2, crc32C_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!content_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, content_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeFixed32Size(2, crc32C_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ChecksummedData)) { + return super.equals(obj); + } + com.google.storage.v2.ChecksummedData other = (com.google.storage.v2.ChecksummedData) obj; + + if (!getContent().equals(other.getContent())) return false; + if (hasCrc32C() != other.hasCrc32C()) return false; + if (hasCrc32C()) { + if (getCrc32C() != other.getCrc32C()) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + 
CONTENT_FIELD_NUMBER; + hash = (53 * hash) + getContent().hashCode(); + if (hasCrc32C()) { + hash = (37 * hash) + CRC32C_FIELD_NUMBER; + hash = (53 * hash) + getCrc32C(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ChecksummedData parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ChecksummedData parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ChecksummedData parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ChecksummedData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ChecksummedData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ChecksummedData parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ChecksummedData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ChecksummedData parseFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ChecksummedData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ChecksummedData parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ChecksummedData parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ChecksummedData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ChecksummedData prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Message used to convey content being read or written, along with an optional
+   * checksum.
+   * 
+ * + * Protobuf type {@code google.storage.v2.ChecksummedData} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ChecksummedData) + com.google.storage.v2.ChecksummedDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ChecksummedData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ChecksummedData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ChecksummedData.class, + com.google.storage.v2.ChecksummedData.Builder.class); + } + + // Construct using com.google.storage.v2.ChecksummedData.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + content_ = com.google.protobuf.ByteString.EMPTY; + crc32C_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ChecksummedData_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ChecksummedData getDefaultInstanceForType() { + return com.google.storage.v2.ChecksummedData.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ChecksummedData build() { + com.google.storage.v2.ChecksummedData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ChecksummedData buildPartial() { + 
com.google.storage.v2.ChecksummedData result = + new com.google.storage.v2.ChecksummedData(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ChecksummedData result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.content_ = content_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.crc32C_ = crc32C_; + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ChecksummedData) { + return mergeFrom((com.google.storage.v2.ChecksummedData) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ChecksummedData other) { + if (other == com.google.storage.v2.ChecksummedData.getDefaultInstance()) return this; + if (!other.getContent().isEmpty()) { + setContent(other.getContent()); + } + if (other.hasCrc32C()) { + setCrc32C(other.getCrc32C()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + content_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 21: + { + crc32C_ = input.readFixed32(); + bitField0_ |= 0x00000002; + break; + } // case 21 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { 
+ done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString content_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * Optional. The data.
+     * 
+ * + * bytes content = 1 [ctype = CORD, (.google.api.field_behavior) = OPTIONAL]; + * + * @return The content. + */ + @java.lang.Override + public com.google.protobuf.ByteString getContent() { + return content_; + } + + /** + * + * + *
+     * Optional. The data.
+     * 
+ * + * bytes content = 1 [ctype = CORD, (.google.api.field_behavior) = OPTIONAL]; + * + * @param value The content to set. + * @return This builder for chaining. + */ + public Builder setContent(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + content_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The data.
+     * 
+ * + * bytes content = 1 [ctype = CORD, (.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearContent() { + bitField0_ = (bitField0_ & ~0x00000001); + content_ = getDefaultInstance().getContent(); + onChanged(); + return this; + } + + private int crc32C_; + + /** + * + * + *
+     * If set, the CRC32C digest of the content field.
+     * 
+ * + * optional fixed32 crc32c = 2; + * + * @return Whether the crc32c field is set. + */ + @java.lang.Override + public boolean hasCrc32C() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * If set, the CRC32C digest of the content field.
+     * 
+ * + * optional fixed32 crc32c = 2; + * + * @return The crc32c. + */ + @java.lang.Override + public int getCrc32C() { + return crc32C_; + } + + /** + * + * + *
+     * If set, the CRC32C digest of the content field.
+     * 
+ * + * optional fixed32 crc32c = 2; + * + * @param value The crc32c to set. + * @return This builder for chaining. + */ + public Builder setCrc32C(int value) { + + crc32C_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * If set, the CRC32C digest of the content field.
+     * 
+ * + * optional fixed32 crc32c = 2; + * + * @return This builder for chaining. + */ + public Builder clearCrc32C() { + bitField0_ = (bitField0_ & ~0x00000002); + crc32C_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ChecksummedData) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ChecksummedData) + private static final com.google.storage.v2.ChecksummedData DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ChecksummedData(); + } + + public static com.google.storage.v2.ChecksummedData getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ChecksummedData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ChecksummedData getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ChecksummedDataOrBuilder.java 
b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ChecksummedDataOrBuilder.java new file mode 100644 index 000000000000..5e58ba7acf66 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ChecksummedDataOrBuilder.java @@ -0,0 +1,67 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ChecksummedDataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ChecksummedData) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. The data.
+   * 
+ * + * bytes content = 1 [ctype = CORD, (.google.api.field_behavior) = OPTIONAL]; + * + * @return The content. + */ + com.google.protobuf.ByteString getContent(); + + /** + * + * + *
+   * If set, the CRC32C digest of the content field.
+   * 
+ * + * optional fixed32 crc32c = 2; + * + * @return Whether the crc32c field is set. + */ + boolean hasCrc32C(); + + /** + * + * + *
+   * If set, the CRC32C digest of the content field.
+   * 
+ * + * optional fixed32 crc32c = 2; + * + * @return The crc32c. + */ + int getCrc32C(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CommonObjectRequestParams.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CommonObjectRequestParams.java new file mode 100644 index 000000000000..914d90f0b7a4 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CommonObjectRequestParams.java @@ -0,0 +1,814 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Parameters that can be passed to any object request.
+ * 
+ * + * Protobuf type {@code google.storage.v2.CommonObjectRequestParams} + */ +@com.google.protobuf.Generated +public final class CommonObjectRequestParams extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.CommonObjectRequestParams) + CommonObjectRequestParamsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CommonObjectRequestParams"); + } + + // Use CommonObjectRequestParams.newBuilder() to construct. + private CommonObjectRequestParams(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CommonObjectRequestParams() { + encryptionAlgorithm_ = ""; + encryptionKeyBytes_ = com.google.protobuf.ByteString.EMPTY; + encryptionKeySha256Bytes_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CommonObjectRequestParams_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CommonObjectRequestParams_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.CommonObjectRequestParams.class, + com.google.storage.v2.CommonObjectRequestParams.Builder.class); + } + + public static final int ENCRYPTION_ALGORITHM_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object encryptionAlgorithm_ = ""; + + /** + * + * + *
+   * Optional. Encryption algorithm used with the Customer-Supplied Encryption
+   * Keys feature.
+   * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The encryptionAlgorithm. + */ + @java.lang.Override + public java.lang.String getEncryptionAlgorithm() { + java.lang.Object ref = encryptionAlgorithm_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + encryptionAlgorithm_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Encryption algorithm used with the Customer-Supplied Encryption
+   * Keys feature.
+   * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for encryptionAlgorithm. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEncryptionAlgorithmBytes() { + java.lang.Object ref = encryptionAlgorithm_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + encryptionAlgorithm_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENCRYPTION_KEY_BYTES_FIELD_NUMBER = 4; + private com.google.protobuf.ByteString encryptionKeyBytes_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * Optional. Encryption key used with the Customer-Supplied Encryption Keys
+   * feature. In raw bytes format (not base64-encoded).
+   * 
+ * + * bytes encryption_key_bytes = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The encryptionKeyBytes. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEncryptionKeyBytes() { + return encryptionKeyBytes_; + } + + public static final int ENCRYPTION_KEY_SHA256_BYTES_FIELD_NUMBER = 5; + private com.google.protobuf.ByteString encryptionKeySha256Bytes_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * Optional. SHA256 hash of encryption key used with the Customer-supplied
+   * encryption keys feature.
+   * 
+ * + * bytes encryption_key_sha256_bytes = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The encryptionKeySha256Bytes. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEncryptionKeySha256Bytes() { + return encryptionKeySha256Bytes_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(encryptionAlgorithm_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, encryptionAlgorithm_); + } + if (!encryptionKeyBytes_.isEmpty()) { + output.writeBytes(4, encryptionKeyBytes_); + } + if (!encryptionKeySha256Bytes_.isEmpty()) { + output.writeBytes(5, encryptionKeySha256Bytes_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(encryptionAlgorithm_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, encryptionAlgorithm_); + } + if (!encryptionKeyBytes_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(4, encryptionKeyBytes_); + } + if (!encryptionKeySha256Bytes_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(5, encryptionKeySha256Bytes_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.CommonObjectRequestParams)) { + return super.equals(obj); + } + 
com.google.storage.v2.CommonObjectRequestParams other = + (com.google.storage.v2.CommonObjectRequestParams) obj; + + if (!getEncryptionAlgorithm().equals(other.getEncryptionAlgorithm())) return false; + if (!getEncryptionKeyBytes().equals(other.getEncryptionKeyBytes())) return false; + if (!getEncryptionKeySha256Bytes().equals(other.getEncryptionKeySha256Bytes())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENCRYPTION_ALGORITHM_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionAlgorithm().hashCode(); + hash = (37 * hash) + ENCRYPTION_KEY_BYTES_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionKeyBytes().hashCode(); + hash = (37 * hash) + ENCRYPTION_KEY_SHA256_BYTES_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionKeySha256Bytes().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.CommonObjectRequestParams parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CommonObjectRequestParams parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.CommonObjectRequestParams parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CommonObjectRequestParams parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.CommonObjectRequestParams parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CommonObjectRequestParams parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.CommonObjectRequestParams parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CommonObjectRequestParams parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.CommonObjectRequestParams parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CommonObjectRequestParams parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.CommonObjectRequestParams parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CommonObjectRequestParams parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.CommonObjectRequestParams prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Parameters that can be passed to any object request.
+   * 
+ * + * Protobuf type {@code google.storage.v2.CommonObjectRequestParams} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.CommonObjectRequestParams) + com.google.storage.v2.CommonObjectRequestParamsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CommonObjectRequestParams_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CommonObjectRequestParams_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.CommonObjectRequestParams.class, + com.google.storage.v2.CommonObjectRequestParams.Builder.class); + } + + // Construct using com.google.storage.v2.CommonObjectRequestParams.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + encryptionAlgorithm_ = ""; + encryptionKeyBytes_ = com.google.protobuf.ByteString.EMPTY; + encryptionKeySha256Bytes_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CommonObjectRequestParams_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams getDefaultInstanceForType() { + return com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams build() { + com.google.storage.v2.CommonObjectRequestParams result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams buildPartial() { + com.google.storage.v2.CommonObjectRequestParams result = + new com.google.storage.v2.CommonObjectRequestParams(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.CommonObjectRequestParams result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.encryptionAlgorithm_ = encryptionAlgorithm_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.encryptionKeyBytes_ = encryptionKeyBytes_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.encryptionKeySha256Bytes_ = encryptionKeySha256Bytes_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.CommonObjectRequestParams) { + return mergeFrom((com.google.storage.v2.CommonObjectRequestParams) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.CommonObjectRequestParams other) { + if (other == com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance()) + return this; + if (!other.getEncryptionAlgorithm().isEmpty()) { + encryptionAlgorithm_ = other.encryptionAlgorithm_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getEncryptionKeyBytes().isEmpty()) { + setEncryptionKeyBytes(other.getEncryptionKeyBytes()); + } + if (!other.getEncryptionKeySha256Bytes().isEmpty()) { + setEncryptionKeySha256Bytes(other.getEncryptionKeySha256Bytes()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + encryptionAlgorithm_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 34: + { + encryptionKeyBytes_ = input.readBytes(); + bitField0_ |= 0x00000002; + break; + } // case 34 + case 42: + { + encryptionKeySha256Bytes_ = input.readBytes(); + bitField0_ |= 0x00000004; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object encryptionAlgorithm_ = ""; + + /** + * + * + *
+     * Optional. Encryption algorithm used with the Customer-Supplied Encryption
+     * Keys feature.
+     * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The encryptionAlgorithm. + */ + public java.lang.String getEncryptionAlgorithm() { + java.lang.Object ref = encryptionAlgorithm_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + encryptionAlgorithm_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Encryption algorithm used with the Customer-Supplied Encryption
+     * Keys feature.
+     * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for encryptionAlgorithm. + */ + public com.google.protobuf.ByteString getEncryptionAlgorithmBytes() { + java.lang.Object ref = encryptionAlgorithm_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + encryptionAlgorithm_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Encryption algorithm used with the Customer-Supplied Encryption
+     * Keys feature.
+     * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The encryptionAlgorithm to set. + * @return This builder for chaining. + */ + public Builder setEncryptionAlgorithm(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + encryptionAlgorithm_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Encryption algorithm used with the Customer-Supplied Encryption
+     * Keys feature.
+     * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEncryptionAlgorithm() { + encryptionAlgorithm_ = getDefaultInstance().getEncryptionAlgorithm(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Encryption algorithm used with the Customer-Supplied Encryption
+     * Keys feature.
+     * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for encryptionAlgorithm to set. + * @return This builder for chaining. + */ + public Builder setEncryptionAlgorithmBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + encryptionAlgorithm_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString encryptionKeyBytes_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * Optional. Encryption key used with the Customer-Supplied Encryption Keys
+     * feature. In raw bytes format (not base64-encoded).
+     * 
+ * + * bytes encryption_key_bytes = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The encryptionKeyBytes. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEncryptionKeyBytes() { + return encryptionKeyBytes_; + } + + /** + * + * + *
+     * Optional. Encryption key used with the Customer-Supplied Encryption Keys
+     * feature. In raw bytes format (not base64-encoded).
+     * 
+ * + * bytes encryption_key_bytes = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The encryptionKeyBytes to set. + * @return This builder for chaining. + */ + public Builder setEncryptionKeyBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + encryptionKeyBytes_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Encryption key used with the Customer-Supplied Encryption Keys
+     * feature. In raw bytes format (not base64-encoded).
+     * 
+ * + * bytes encryption_key_bytes = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEncryptionKeyBytes() { + bitField0_ = (bitField0_ & ~0x00000002); + encryptionKeyBytes_ = getDefaultInstance().getEncryptionKeyBytes(); + onChanged(); + return this; + } + + private com.google.protobuf.ByteString encryptionKeySha256Bytes_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * Optional. SHA256 hash of encryption key used with the Customer-supplied
+     * encryption keys feature.
+     * 
+ * + * bytes encryption_key_sha256_bytes = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The encryptionKeySha256Bytes. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEncryptionKeySha256Bytes() { + return encryptionKeySha256Bytes_; + } + + /** + * + * + *
+     * Optional. SHA256 hash of encryption key used with the Customer-supplied
+     * encryption keys feature.
+     * 
+ * + * bytes encryption_key_sha256_bytes = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The encryptionKeySha256Bytes to set. + * @return This builder for chaining. + */ + public Builder setEncryptionKeySha256Bytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + encryptionKeySha256Bytes_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. SHA256 hash of encryption key used with the Customer-supplied
+     * encryption keys feature.
+     * 
+ * + * bytes encryption_key_sha256_bytes = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEncryptionKeySha256Bytes() { + bitField0_ = (bitField0_ & ~0x00000004); + encryptionKeySha256Bytes_ = getDefaultInstance().getEncryptionKeySha256Bytes(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.CommonObjectRequestParams) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.CommonObjectRequestParams) + private static final com.google.storage.v2.CommonObjectRequestParams DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.CommonObjectRequestParams(); + } + + public static com.google.storage.v2.CommonObjectRequestParams getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CommonObjectRequestParams parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.storage.v2.CommonObjectRequestParams getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CommonObjectRequestParamsOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CommonObjectRequestParamsOrBuilder.java new file mode 100644 index 000000000000..bf8b2d6668df --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CommonObjectRequestParamsOrBuilder.java @@ -0,0 +1,84 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface CommonObjectRequestParamsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.CommonObjectRequestParams) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. Encryption algorithm used with the Customer-Supplied Encryption
+   * Keys feature.
+   * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The encryptionAlgorithm. + */ + java.lang.String getEncryptionAlgorithm(); + + /** + * + * + *
+   * Optional. Encryption algorithm used with the Customer-Supplied Encryption
+   * Keys feature.
+   * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for encryptionAlgorithm. + */ + com.google.protobuf.ByteString getEncryptionAlgorithmBytes(); + + /** + * + * + *
+   * Optional. Encryption key used with the Customer-Supplied Encryption Keys
+   * feature. In raw bytes format (not base64-encoded).
+   * 
+ * + * bytes encryption_key_bytes = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The encryptionKeyBytes. + */ + com.google.protobuf.ByteString getEncryptionKeyBytes(); + + /** + * + * + *
+   * Optional. SHA256 hash of encryption key used with the Customer-supplied
+   * encryption keys feature.
+   * 
+ * + * bytes encryption_key_sha256_bytes = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The encryptionKeySha256Bytes. + */ + com.google.protobuf.ByteString getEncryptionKeySha256Bytes(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ComposeObjectRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ComposeObjectRequest.java new file mode 100644 index 000000000000..e7c97137aeeb --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ComposeObjectRequest.java @@ -0,0 +1,4527 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for [ComposeObject][google.storage.v2.Storage.ComposeObject].
+ * 
+ * + * Protobuf type {@code google.storage.v2.ComposeObjectRequest} + */ +@com.google.protobuf.Generated +public final class ComposeObjectRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ComposeObjectRequest) + ComposeObjectRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ComposeObjectRequest"); + } + + // Use ComposeObjectRequest.newBuilder() to construct. + private ComposeObjectRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ComposeObjectRequest() { + sourceObjects_ = java.util.Collections.emptyList(); + destinationPredefinedAcl_ = ""; + kmsKey_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ComposeObjectRequest.class, + com.google.storage.v2.ComposeObjectRequest.Builder.class); + } + + public interface SourceObjectOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ComposeObjectRequest.SourceObject) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Required. The source object's name. All source objects must reside in the
+     * same bucket.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+     * Required. The source object's name. All source objects must reside in the
+     * same bucket.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+     * Optional. The generation of this object to use as the source.
+     * 
+ * + * int64 generation = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + long getGeneration(); + + /** + * + * + *
+     * Optional. Conditions that must be met for this operation to execute.
+     * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectPreconditions field is set. + */ + boolean hasObjectPreconditions(); + + /** + * + * + *
+     * Optional. Conditions that must be met for this operation to execute.
+     * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectPreconditions. + */ + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + getObjectPreconditions(); + + /** + * + * + *
+     * Optional. Conditions that must be met for this operation to execute.
+     * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditionsOrBuilder + getObjectPreconditionsOrBuilder(); + } + + /** + * + * + *
+   * Description of a source object for a composition request.
+   * 
+ * + * Protobuf type {@code google.storage.v2.ComposeObjectRequest.SourceObject} + */ + public static final class SourceObject extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ComposeObjectRequest.SourceObject) + SourceObjectOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SourceObject"); + } + + // Use SourceObject.newBuilder() to construct. + private SourceObject(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SourceObject() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ComposeObjectRequest.SourceObject.class, + com.google.storage.v2.ComposeObjectRequest.SourceObject.Builder.class); + } + + public interface ObjectPreconditionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * Only perform the composition if the generation of the source object
+       * that would be used matches this value.  If this value and a generation
+       * are both specified, they must be the same value or the call fails.
+       * 
+ * + * optional int64 if_generation_match = 1; + * + * @return Whether the ifGenerationMatch field is set. + */ + boolean hasIfGenerationMatch(); + + /** + * + * + *
+       * Only perform the composition if the generation of the source object
+       * that would be used matches this value.  If this value and a generation
+       * are both specified, they must be the same value or the call fails.
+       * 
+ * + * optional int64 if_generation_match = 1; + * + * @return The ifGenerationMatch. + */ + long getIfGenerationMatch(); + } + + /** + * + * + *
+     * Preconditions for a source object of a composition request.
+     * 
+ * + * Protobuf type {@code google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions} + */ + public static final class ObjectPreconditions extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions) + ObjectPreconditionsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ObjectPreconditions"); + } + + // Use ObjectPreconditions.newBuilder() to construct. + private ObjectPreconditions(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ObjectPreconditions() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_ObjectPreconditions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_ObjectPreconditions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions.class, + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions.Builder + .class); + } + + private int bitField0_; + public static final int IF_GENERATION_MATCH_FIELD_NUMBER = 1; + private long ifGenerationMatch_ = 0L; + + /** + * + * + *
+       * Only perform the composition if the generation of the source object
+       * that would be used matches this value.  If this value and a generation
+       * are both specified, they must be the same value or the call fails.
+       * 
+ * + * optional int64 if_generation_match = 1; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Only perform the composition if the generation of the source object
+       * that would be used matches this value.  If this value and a generation
+       * are both specified, they must be the same value or the call fails.
+       * 
+ * + * optional int64 if_generation_match = 1; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(1, ifGenerationMatch_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, ifGenerationMatch_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions)) { + return super.equals(obj); + } + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions other = + (com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions) obj; + + if (hasIfGenerationMatch() != other.hasIfGenerationMatch()) return false; + if (hasIfGenerationMatch()) { + if (getIfGenerationMatch() != other.getIfGenerationMatch()) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if 
(hasIfGenerationMatch()) { + hash = (37 * hash) + IF_GENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationMatch()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + 
parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+       * Preconditions for a source object of a composition request.
+       * 
+ * + * Protobuf type {@code + * google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions) + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_ObjectPreconditions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_ObjectPreconditions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions.class, + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + .Builder.class); + } + + // Construct using + // com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + ifGenerationMatch_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_ObjectPreconditions_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + getDefaultInstanceForType() { + return 
com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions build() { + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + buildPartial() { + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions result = + new com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.ifGenerationMatch_ = ifGenerationMatch_; + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions) { + return mergeFrom( + (com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions other) { + if (other + == com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + .getDefaultInstance()) return this; + if (other.hasIfGenerationMatch()) { + setIfGenerationMatch(other.getIfGenerationMatch()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + 
@java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + ifGenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long ifGenerationMatch_; + + /** + * + * + *
+         * Only perform the composition if the generation of the source object
+         * that would be used matches this value.  If this value and a generation
+         * are both specified, they must be the same value or the call fails.
+         * 
+ * + * optional int64 if_generation_match = 1; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+         * Only perform the composition if the generation of the source object
+         * that would be used matches this value.  If this value and a generation
+         * are both specified, they must be the same value or the call fails.
+         * 
+ * + * optional int64 if_generation_match = 1; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + /** + * + * + *
+         * Only perform the composition if the generation of the source object
+         * that would be used matches this value.  If this value and a generation
+         * are both specified, they must be the same value or the call fails.
+         * 
+ * + * optional int64 if_generation_match = 1; + * + * @param value The ifGenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationMatch(long value) { + + ifGenerationMatch_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+         * Only perform the composition if the generation of the source object
+         * that would be used matches this value.  If this value and a generation
+         * are both specified, they must be the same value or the call fails.
+         * 
+ * + * optional int64 if_generation_match = 1; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000001); + ifGenerationMatch_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions) + private static final com.google.storage.v2.ComposeObjectRequest.SourceObject + .ObjectPreconditions + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions(); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ObjectPreconditions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + 
public com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The source object's name. All source objects must reside in the
+     * same bucket.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+     * Required. The source object's name. All source objects must reside in the
+     * same bucket.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int GENERATION_FIELD_NUMBER = 2; + private long generation_ = 0L; + + /** + * + * + *
+     * Optional. The generation of this object to use as the source.
+     * 
+ * + * int64 generation = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + public static final int OBJECT_PRECONDITIONS_FIELD_NUMBER = 3; + private com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + objectPreconditions_; + + /** + * + * + *
+     * Optional. Conditions that must be met for this operation to execute.
+     * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectPreconditions field is set. + */ + @java.lang.Override + public boolean hasObjectPreconditions() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Optional. Conditions that must be met for this operation to execute.
+     * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectPreconditions. + */ + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + getObjectPreconditions() { + return objectPreconditions_ == null + ? com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + .getDefaultInstance() + : objectPreconditions_; + } + + /** + * + * + *
+     * Optional. Conditions that must be met for this operation to execute.
+     * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditionsOrBuilder + getObjectPreconditionsOrBuilder() { + return objectPreconditions_ == null + ? com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + .getDefaultInstance() + : objectPreconditions_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (generation_ != 0L) { + output.writeInt64(2, generation_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getObjectPreconditions()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (generation_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, generation_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(3, getObjectPreconditions()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if 
(!(obj instanceof com.google.storage.v2.ComposeObjectRequest.SourceObject)) { + return super.equals(obj); + } + com.google.storage.v2.ComposeObjectRequest.SourceObject other = + (com.google.storage.v2.ComposeObjectRequest.SourceObject) obj; + + if (!getName().equals(other.getName())) return false; + if (getGeneration() != other.getGeneration()) return false; + if (hasObjectPreconditions() != other.hasObjectPreconditions()) return false; + if (hasObjectPreconditions()) { + if (!getObjectPreconditions().equals(other.getObjectPreconditions())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + GENERATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getGeneration()); + if (hasObjectPreconditions()) { + hash = (37 * hash) + OBJECT_PRECONDITIONS_FIELD_NUMBER; + hash = (53 * hash) + getObjectPreconditions().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.storage.v2.ComposeObjectRequest.SourceObject parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.storage.v2.ComposeObjectRequest.SourceObject prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Description of a source object for a composition request.
+     * 
+ * + * Protobuf type {@code google.storage.v2.ComposeObjectRequest.SourceObject} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ComposeObjectRequest.SourceObject) + com.google.storage.v2.ComposeObjectRequest.SourceObjectOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ComposeObjectRequest.SourceObject.class, + com.google.storage.v2.ComposeObjectRequest.SourceObject.Builder.class); + } + + // Construct using com.google.storage.v2.ComposeObjectRequest.SourceObject.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetObjectPreconditionsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + generation_ = 0L; + objectPreconditions_ = null; + if (objectPreconditionsBuilder_ != null) { + objectPreconditionsBuilder_.dispose(); + objectPreconditionsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + 
.internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest.SourceObject getDefaultInstanceForType() { + return com.google.storage.v2.ComposeObjectRequest.SourceObject.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest.SourceObject build() { + com.google.storage.v2.ComposeObjectRequest.SourceObject result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest.SourceObject buildPartial() { + com.google.storage.v2.ComposeObjectRequest.SourceObject result = + new com.google.storage.v2.ComposeObjectRequest.SourceObject(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ComposeObjectRequest.SourceObject result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.generation_ = generation_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.objectPreconditions_ = + objectPreconditionsBuilder_ == null + ? 
objectPreconditions_ + : objectPreconditionsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ComposeObjectRequest.SourceObject) { + return mergeFrom((com.google.storage.v2.ComposeObjectRequest.SourceObject) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ComposeObjectRequest.SourceObject other) { + if (other == com.google.storage.v2.ComposeObjectRequest.SourceObject.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getGeneration() != 0L) { + setGeneration(other.getGeneration()); + } + if (other.hasObjectPreconditions()) { + mergeObjectPreconditions(other.getObjectPreconditions()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + generation_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + input.readMessage( + internalGetObjectPreconditionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; 
// was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+       * Required. The source object's name. All source objects must reside in the
+       * same bucket.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Required. The source object's name. All source objects must reside in the
+       * same bucket.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Required. The source object's name. All source objects must reside in the
+       * same bucket.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. The source object's name. All source objects must reside in the
+       * same bucket.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. The source object's name. All source objects must reside in the
+       * same bucket.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long generation_; + + /** + * + * + *
+       * Optional. The generation of this object to use as the source.
+       * 
+ * + * int64 generation = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + /** + * + * + *
+       * Optional. The generation of this object to use as the source.
+       * 
+ * + * int64 generation = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The generation to set. + * @return This builder for chaining. + */ + public Builder setGeneration(long value) { + + generation_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The generation of this object to use as the source.
+       * 
+ * + * int64 generation = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearGeneration() { + bitField0_ = (bitField0_ & ~0x00000002); + generation_ = 0L; + onChanged(); + return this; + } + + private com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + objectPreconditions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions, + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions.Builder, + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditionsOrBuilder> + objectPreconditionsBuilder_; + + /** + * + * + *
+       * Optional. Conditions that must be met for this operation to execute.
+       * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectPreconditions field is set. + */ + public boolean hasObjectPreconditions() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+       * Optional. Conditions that must be met for this operation to execute.
+       * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectPreconditions. + */ + public com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + getObjectPreconditions() { + if (objectPreconditionsBuilder_ == null) { + return objectPreconditions_ == null + ? com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + .getDefaultInstance() + : objectPreconditions_; + } else { + return objectPreconditionsBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Optional. Conditions that must be met for this operation to execute.
+       * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectPreconditions( + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions value) { + if (objectPreconditionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + objectPreconditions_ = value; + } else { + objectPreconditionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Conditions that must be met for this operation to execute.
+       * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectPreconditions( + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions.Builder + builderForValue) { + if (objectPreconditionsBuilder_ == null) { + objectPreconditions_ = builderForValue.build(); + } else { + objectPreconditionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Conditions that must be met for this operation to execute.
+       * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeObjectPreconditions( + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions value) { + if (objectPreconditionsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && objectPreconditions_ != null + && objectPreconditions_ + != com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + .getDefaultInstance()) { + getObjectPreconditionsBuilder().mergeFrom(value); + } else { + objectPreconditions_ = value; + } + } else { + objectPreconditionsBuilder_.mergeFrom(value); + } + if (objectPreconditions_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Optional. Conditions that must be met for this operation to execute.
+       * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearObjectPreconditions() { + bitField0_ = (bitField0_ & ~0x00000004); + objectPreconditions_ = null; + if (objectPreconditionsBuilder_ != null) { + objectPreconditionsBuilder_.dispose(); + objectPreconditionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Conditions that must be met for this operation to execute.
+       * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions.Builder + getObjectPreconditionsBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetObjectPreconditionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Optional. Conditions that must be met for this operation to execute.
+       * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditionsOrBuilder + getObjectPreconditionsOrBuilder() { + if (objectPreconditionsBuilder_ != null) { + return objectPreconditionsBuilder_.getMessageOrBuilder(); + } else { + return objectPreconditions_ == null + ? com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + .getDefaultInstance() + : objectPreconditions_; + } + } + + /** + * + * + *
+       * Optional. Conditions that must be met for this operation to execute.
+       * 
+ * + * + * .google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions object_preconditions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions, + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions.Builder, + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditionsOrBuilder> + internalGetObjectPreconditionsFieldBuilder() { + if (objectPreconditionsBuilder_ == null) { + objectPreconditionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions, + com.google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + .Builder, + com.google.storage.v2.ComposeObjectRequest.SourceObject + .ObjectPreconditionsOrBuilder>( + getObjectPreconditions(), getParentForChildren(), isClean()); + objectPreconditions_ = null; + } + return objectPreconditionsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ComposeObjectRequest.SourceObject) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ComposeObjectRequest.SourceObject) + private static final com.google.storage.v2.ComposeObjectRequest.SourceObject DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ComposeObjectRequest.SourceObject(); + } + + public static com.google.storage.v2.ComposeObjectRequest.SourceObject getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SourceObject parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } 
catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest.SourceObject getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int DESTINATION_FIELD_NUMBER = 1; + private com.google.storage.v2.Object destination_; + + /** + * + * + *
+   * Required. Properties of the resulting object.
+   * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the destination field is set. + */ + @java.lang.Override + public boolean hasDestination() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. Properties of the resulting object.
+   * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The destination. + */ + @java.lang.Override + public com.google.storage.v2.Object getDestination() { + return destination_ == null ? com.google.storage.v2.Object.getDefaultInstance() : destination_; + } + + /** + * + * + *
+   * Required. Properties of the resulting object.
+   * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getDestinationOrBuilder() { + return destination_ == null ? com.google.storage.v2.Object.getDefaultInstance() : destination_; + } + + public static final int SOURCE_OBJECTS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List sourceObjects_; + + /** + * + * + *
+   * Optional. The list of source objects that is concatenated into a single
+   * object.
+   * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getSourceObjectsList() { + return sourceObjects_; + } + + /** + * + * + *
+   * Optional. The list of source objects that is concatenated into a single
+   * object.
+   * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getSourceObjectsOrBuilderList() { + return sourceObjects_; + } + + /** + * + * + *
+   * Optional. The list of source objects that is concatenated into a single
+   * object.
+   * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getSourceObjectsCount() { + return sourceObjects_.size(); + } + + /** + * + * + *
+   * Optional. The list of source objects that is concatenated into a single
+   * object.
+   * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest.SourceObject getSourceObjects(int index) { + return sourceObjects_.get(index); + } + + /** + * + * + *
+   * Optional. The list of source objects that is concatenated into a single
+   * object.
+   * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest.SourceObjectOrBuilder getSourceObjectsOrBuilder( + int index) { + return sourceObjects_.get(index); + } + + public static final int DESTINATION_PREDEFINED_ACL_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private volatile java.lang.Object destinationPredefinedAcl_ = ""; + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to the destination
+   * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string destination_predefined_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The destinationPredefinedAcl. + */ + @java.lang.Override + public java.lang.String getDestinationPredefinedAcl() { + java.lang.Object ref = destinationPredefinedAcl_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationPredefinedAcl_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to the destination
+   * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string destination_predefined_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for destinationPredefinedAcl. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDestinationPredefinedAclBytes() { + java.lang.Object ref = destinationPredefinedAcl_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationPredefinedAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_GENERATION_MATCH_FIELD_NUMBER = 4; + private long ifGenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 5; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int KMS_KEY_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object kmsKey_ = ""; + + /** + * + * + *
+   * Optional. Resource name of the Cloud KMS key, of the form
+   * `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`,
+   * that is used to encrypt the object. Overrides the object
+   * metadata's `kms_key_name` value, if any.
+   * 
+ * + * + * string kms_key = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKey. + */ + @java.lang.Override + public java.lang.String getKmsKey() { + java.lang.Object ref = kmsKey_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKey_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Resource name of the Cloud KMS key, of the form
+   * `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`,
+   * that is used to encrypt the object. Overrides the object
+   * metadata's `kms_key_name` value, if any.
+   * 
+ * + * + * string kms_key = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKey. + */ + @java.lang.Override + public com.google.protobuf.ByteString getKmsKeyBytes() { + java.lang.Object ref = kmsKey_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKey_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER = 7; + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + @java.lang.Override + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + public static final int OBJECT_CHECKSUMS_FIELD_NUMBER = 10; + private com.google.storage.v2.ObjectChecksums objectChecksums_; + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is validated against
+   * the combined checksums of the component objects.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + @java.lang.Override + public boolean hasObjectChecksums() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is validated against
+   * the combined checksums of the component objects.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksums getObjectChecksums() { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is validated against
+   * the combined checksums of the component objects.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder() { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + + public static final int DELETE_SOURCE_OBJECTS_FIELD_NUMBER = 11; + private boolean deleteSourceObjects_ = false; + + /** + * + * + *
+   * Whether the source objects should be deleted in the compose request.
+   * 
+ * + * optional bool delete_source_objects = 11; + * + * @return Whether the deleteSourceObjects field is set. + */ + @java.lang.Override + public boolean hasDeleteSourceObjects() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+   * Whether the source objects should be deleted in the compose request.
+   * 
+ * + * optional bool delete_source_objects = 11; + * + * @return The deleteSourceObjects. + */ + @java.lang.Override + public boolean getDeleteSourceObjects() { + return deleteSourceObjects_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getDestination()); + } + for (int i = 0; i < sourceObjects_.size(); i++) { + output.writeMessage(2, sourceObjects_.get(i)); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(4, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt64(5, ifMetagenerationMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKey_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, kmsKey_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(7, getCommonObjectRequestParams()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationPredefinedAcl_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 9, destinationPredefinedAcl_); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeMessage(10, getObjectChecksums()); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeBool(11, deleteSourceObjects_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getDestination()); + } + for (int i = 0; i < sourceObjects_.size(); i++) { + size += 
com.google.protobuf.CodedOutputStream.computeMessageSize(2, sourceObjects_.get(i)); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(5, ifMetagenerationMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKey_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, kmsKey_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 7, getCommonObjectRequestParams()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationPredefinedAcl_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(9, destinationPredefinedAcl_); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(10, getObjectChecksums()); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(11, deleteSourceObjects_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ComposeObjectRequest)) { + return super.equals(obj); + } + com.google.storage.v2.ComposeObjectRequest other = + (com.google.storage.v2.ComposeObjectRequest) obj; + + if (hasDestination() != other.hasDestination()) return false; + if (hasDestination()) { + if (!getDestination().equals(other.getDestination())) return false; + } + if (!getSourceObjectsList().equals(other.getSourceObjectsList())) return false; + if (!getDestinationPredefinedAcl().equals(other.getDestinationPredefinedAcl())) return false; + if (hasIfGenerationMatch() != other.hasIfGenerationMatch()) return false; + if (hasIfGenerationMatch()) 
{ + if (getIfGenerationMatch() != other.getIfGenerationMatch()) return false; + } + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (!getKmsKey().equals(other.getKmsKey())) return false; + if (hasCommonObjectRequestParams() != other.hasCommonObjectRequestParams()) return false; + if (hasCommonObjectRequestParams()) { + if (!getCommonObjectRequestParams().equals(other.getCommonObjectRequestParams())) + return false; + } + if (hasObjectChecksums() != other.hasObjectChecksums()) return false; + if (hasObjectChecksums()) { + if (!getObjectChecksums().equals(other.getObjectChecksums())) return false; + } + if (hasDeleteSourceObjects() != other.hasDeleteSourceObjects()) return false; + if (hasDeleteSourceObjects()) { + if (getDeleteSourceObjects() != other.getDeleteSourceObjects()) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasDestination()) { + hash = (37 * hash) + DESTINATION_FIELD_NUMBER; + hash = (53 * hash) + getDestination().hashCode(); + } + if (getSourceObjectsCount() > 0) { + hash = (37 * hash) + SOURCE_OBJECTS_FIELD_NUMBER; + hash = (53 * hash) + getSourceObjectsList().hashCode(); + } + hash = (37 * hash) + DESTINATION_PREDEFINED_ACL_FIELD_NUMBER; + hash = (53 * hash) + getDestinationPredefinedAcl().hashCode(); + if (hasIfGenerationMatch()) { + hash = (37 * hash) + IF_GENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationMatch()); + } + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + 
com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + hash = (37 * hash) + KMS_KEY_FIELD_NUMBER; + hash = (53 * hash) + getKmsKey().hashCode(); + if (hasCommonObjectRequestParams()) { + hash = (37 * hash) + COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getCommonObjectRequestParams().hashCode(); + } + if (hasObjectChecksums()) { + hash = (37 * hash) + OBJECT_CHECKSUMS_FIELD_NUMBER; + hash = (53 * hash) + getObjectChecksums().hashCode(); + } + if (hasDeleteSourceObjects()) { + hash = (37 * hash) + DELETE_SOURCE_OBJECTS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getDeleteSourceObjects()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ComposeObjectRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ComposeObjectRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ComposeObjectRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ComposeObjectRequest 
parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ComposeObjectRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ComposeObjectRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ComposeObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ComposeObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return 
DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ComposeObjectRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for [ComposeObject][google.storage.v2.Storage.ComposeObject].
+   * 
+ * + * Protobuf type {@code google.storage.v2.ComposeObjectRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ComposeObjectRequest) + com.google.storage.v2.ComposeObjectRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ComposeObjectRequest.class, + com.google.storage.v2.ComposeObjectRequest.Builder.class); + } + + // Construct using com.google.storage.v2.ComposeObjectRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetDestinationFieldBuilder(); + internalGetSourceObjectsFieldBuilder(); + internalGetCommonObjectRequestParamsFieldBuilder(); + internalGetObjectChecksumsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + destination_ = null; + if (destinationBuilder_ != null) { + destinationBuilder_.dispose(); + destinationBuilder_ = null; + } + if (sourceObjectsBuilder_ == null) { + sourceObjects_ = java.util.Collections.emptyList(); + } else { + sourceObjects_ = null; + sourceObjectsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + destinationPredefinedAcl_ = ""; + ifGenerationMatch_ = 0L; + 
ifMetagenerationMatch_ = 0L; + kmsKey_ = ""; + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + objectChecksums_ = null; + if (objectChecksumsBuilder_ != null) { + objectChecksumsBuilder_.dispose(); + objectChecksumsBuilder_ = null; + } + deleteSourceObjects_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ComposeObjectRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest getDefaultInstanceForType() { + return com.google.storage.v2.ComposeObjectRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest build() { + com.google.storage.v2.ComposeObjectRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest buildPartial() { + com.google.storage.v2.ComposeObjectRequest result = + new com.google.storage.v2.ComposeObjectRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.storage.v2.ComposeObjectRequest result) { + if (sourceObjectsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + sourceObjects_ = java.util.Collections.unmodifiableList(sourceObjects_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.sourceObjects_ = sourceObjects_; + } else { + result.sourceObjects_ = sourceObjectsBuilder_.build(); + } + } + + private void buildPartial0(com.google.storage.v2.ComposeObjectRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if 
(((from_bitField0_ & 0x00000001) != 0)) { + result.destination_ = + destinationBuilder_ == null ? destination_ : destinationBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.destinationPredefinedAcl_ = destinationPredefinedAcl_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.ifGenerationMatch_ = ifGenerationMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.kmsKey_ = kmsKey_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.commonObjectRequestParams_ = + commonObjectRequestParamsBuilder_ == null + ? commonObjectRequestParams_ + : commonObjectRequestParamsBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.objectChecksums_ = + objectChecksumsBuilder_ == null ? objectChecksums_ : objectChecksumsBuilder_.build(); + to_bitField0_ |= 0x00000010; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.deleteSourceObjects_ = deleteSourceObjects_; + to_bitField0_ |= 0x00000020; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ComposeObjectRequest) { + return mergeFrom((com.google.storage.v2.ComposeObjectRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ComposeObjectRequest other) { + if (other == com.google.storage.v2.ComposeObjectRequest.getDefaultInstance()) return this; + if (other.hasDestination()) { + mergeDestination(other.getDestination()); + } + if (sourceObjectsBuilder_ == null) { + if (!other.sourceObjects_.isEmpty()) { + if (sourceObjects_.isEmpty()) { + sourceObjects_ = other.sourceObjects_; + bitField0_ = (bitField0_ & 
~0x00000002); + } else { + ensureSourceObjectsIsMutable(); + sourceObjects_.addAll(other.sourceObjects_); + } + onChanged(); + } + } else { + if (!other.sourceObjects_.isEmpty()) { + if (sourceObjectsBuilder_.isEmpty()) { + sourceObjectsBuilder_.dispose(); + sourceObjectsBuilder_ = null; + sourceObjects_ = other.sourceObjects_; + bitField0_ = (bitField0_ & ~0x00000002); + sourceObjectsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetSourceObjectsFieldBuilder() + : null; + } else { + sourceObjectsBuilder_.addAllMessages(other.sourceObjects_); + } + } + } + if (!other.getDestinationPredefinedAcl().isEmpty()) { + destinationPredefinedAcl_ = other.destinationPredefinedAcl_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasIfGenerationMatch()) { + setIfGenerationMatch(other.getIfGenerationMatch()); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (!other.getKmsKey().isEmpty()) { + kmsKey_ = other.kmsKey_; + bitField0_ |= 0x00000020; + onChanged(); + } + if (other.hasCommonObjectRequestParams()) { + mergeCommonObjectRequestParams(other.getCommonObjectRequestParams()); + } + if (other.hasObjectChecksums()) { + mergeObjectChecksums(other.getObjectChecksums()); + } + if (other.hasDeleteSourceObjects()) { + setDeleteSourceObjects(other.getDeleteSourceObjects()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + 
input.readMessage( + internalGetDestinationFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.storage.v2.ComposeObjectRequest.SourceObject m = + input.readMessage( + com.google.storage.v2.ComposeObjectRequest.SourceObject.parser(), + extensionRegistry); + if (sourceObjectsBuilder_ == null) { + ensureSourceObjectsIsMutable(); + sourceObjects_.add(m); + } else { + sourceObjectsBuilder_.addMessage(m); + } + break; + } // case 18 + case 32: + { + ifGenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 40: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 50: + { + kmsKey_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 58: + { + input.readMessage( + internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 58 + case 74: + { + destinationPredefinedAcl_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 74 + case 82: + { + input.readMessage( + internalGetObjectChecksumsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000080; + break; + } // case 82 + case 88: + { + deleteSourceObjects_ = input.readBool(); + bitField0_ |= 0x00000100; + break; + } // case 88 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.v2.Object destination_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + 
com.google.storage.v2.ObjectOrBuilder> + destinationBuilder_; + + /** + * + * + *
+     * Required. Properties of the resulting object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the destination field is set. + */ + public boolean hasDestination() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. Properties of the resulting object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The destination. + */ + public com.google.storage.v2.Object getDestination() { + if (destinationBuilder_ == null) { + return destination_ == null + ? com.google.storage.v2.Object.getDefaultInstance() + : destination_; + } else { + return destinationBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. Properties of the resulting object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setDestination(com.google.storage.v2.Object value) { + if (destinationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + destination_ = value; + } else { + destinationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Properties of the resulting object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setDestination(com.google.storage.v2.Object.Builder builderForValue) { + if (destinationBuilder_ == null) { + destination_ = builderForValue.build(); + } else { + destinationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Properties of the resulting object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeDestination(com.google.storage.v2.Object value) { + if (destinationBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && destination_ != null + && destination_ != com.google.storage.v2.Object.getDefaultInstance()) { + getDestinationBuilder().mergeFrom(value); + } else { + destination_ = value; + } + } else { + destinationBuilder_.mergeFrom(value); + } + if (destination_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. Properties of the resulting object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearDestination() { + bitField0_ = (bitField0_ & ~0x00000001); + destination_ = null; + if (destinationBuilder_ != null) { + destinationBuilder_.dispose(); + destinationBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Properties of the resulting object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.v2.Object.Builder getDestinationBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetDestinationFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. Properties of the resulting object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.v2.ObjectOrBuilder getDestinationOrBuilder() { + if (destinationBuilder_ != null) { + return destinationBuilder_.getMessageOrBuilder(); + } else { + return destination_ == null + ? com.google.storage.v2.Object.getDefaultInstance() + : destination_; + } + } + + /** + * + * + *
+     * Required. Properties of the resulting object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + internalGetDestinationFieldBuilder() { + if (destinationBuilder_ == null) { + destinationBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder>( + getDestination(), getParentForChildren(), isClean()); + destination_ = null; + } + return destinationBuilder_; + } + + private java.util.List sourceObjects_ = + java.util.Collections.emptyList(); + + private void ensureSourceObjectsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + sourceObjects_ = + new java.util.ArrayList( + sourceObjects_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ComposeObjectRequest.SourceObject, + com.google.storage.v2.ComposeObjectRequest.SourceObject.Builder, + com.google.storage.v2.ComposeObjectRequest.SourceObjectOrBuilder> + sourceObjectsBuilder_; + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getSourceObjectsList() { + if (sourceObjectsBuilder_ == null) { + return java.util.Collections.unmodifiableList(sourceObjects_); + } else { + return sourceObjectsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getSourceObjectsCount() { + if (sourceObjectsBuilder_ == null) { + return sourceObjects_.size(); + } else { + return sourceObjectsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ComposeObjectRequest.SourceObject getSourceObjects(int index) { + if (sourceObjectsBuilder_ == null) { + return sourceObjects_.get(index); + } else { + return sourceObjectsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSourceObjects( + int index, com.google.storage.v2.ComposeObjectRequest.SourceObject value) { + if (sourceObjectsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSourceObjectsIsMutable(); + sourceObjects_.set(index, value); + onChanged(); + } else { + sourceObjectsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSourceObjects( + int index, + com.google.storage.v2.ComposeObjectRequest.SourceObject.Builder builderForValue) { + if (sourceObjectsBuilder_ == null) { + ensureSourceObjectsIsMutable(); + sourceObjects_.set(index, builderForValue.build()); + onChanged(); + } else { + sourceObjectsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addSourceObjects(com.google.storage.v2.ComposeObjectRequest.SourceObject value) { + if (sourceObjectsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSourceObjectsIsMutable(); + sourceObjects_.add(value); + onChanged(); + } else { + sourceObjectsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addSourceObjects( + int index, com.google.storage.v2.ComposeObjectRequest.SourceObject value) { + if (sourceObjectsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSourceObjectsIsMutable(); + sourceObjects_.add(index, value); + onChanged(); + } else { + sourceObjectsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addSourceObjects( + com.google.storage.v2.ComposeObjectRequest.SourceObject.Builder builderForValue) { + if (sourceObjectsBuilder_ == null) { + ensureSourceObjectsIsMutable(); + sourceObjects_.add(builderForValue.build()); + onChanged(); + } else { + sourceObjectsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addSourceObjects( + int index, + com.google.storage.v2.ComposeObjectRequest.SourceObject.Builder builderForValue) { + if (sourceObjectsBuilder_ == null) { + ensureSourceObjectsIsMutable(); + sourceObjects_.add(index, builderForValue.build()); + onChanged(); + } else { + sourceObjectsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllSourceObjects( + java.lang.Iterable + values) { + if (sourceObjectsBuilder_ == null) { + ensureSourceObjectsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, sourceObjects_); + onChanged(); + } else { + sourceObjectsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearSourceObjects() { + if (sourceObjectsBuilder_ == null) { + sourceObjects_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + sourceObjectsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeSourceObjects(int index) { + if (sourceObjectsBuilder_ == null) { + ensureSourceObjectsIsMutable(); + sourceObjects_.remove(index); + onChanged(); + } else { + sourceObjectsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ComposeObjectRequest.SourceObject.Builder getSourceObjectsBuilder( + int index) { + return internalGetSourceObjectsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ComposeObjectRequest.SourceObjectOrBuilder + getSourceObjectsOrBuilder(int index) { + if (sourceObjectsBuilder_ == null) { + return sourceObjects_.get(index); + } else { + return sourceObjectsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + ? extends com.google.storage.v2.ComposeObjectRequest.SourceObjectOrBuilder> + getSourceObjectsOrBuilderList() { + if (sourceObjectsBuilder_ != null) { + return sourceObjectsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(sourceObjects_); + } + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ComposeObjectRequest.SourceObject.Builder + addSourceObjectsBuilder() { + return internalGetSourceObjectsFieldBuilder() + .addBuilder(com.google.storage.v2.ComposeObjectRequest.SourceObject.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ComposeObjectRequest.SourceObject.Builder addSourceObjectsBuilder( + int index) { + return internalGetSourceObjectsFieldBuilder() + .addBuilder( + index, com.google.storage.v2.ComposeObjectRequest.SourceObject.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. The list of source objects that is concatenated into a single
+     * object.
+     * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getSourceObjectsBuilderList() { + return internalGetSourceObjectsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ComposeObjectRequest.SourceObject, + com.google.storage.v2.ComposeObjectRequest.SourceObject.Builder, + com.google.storage.v2.ComposeObjectRequest.SourceObjectOrBuilder> + internalGetSourceObjectsFieldBuilder() { + if (sourceObjectsBuilder_ == null) { + sourceObjectsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ComposeObjectRequest.SourceObject, + com.google.storage.v2.ComposeObjectRequest.SourceObject.Builder, + com.google.storage.v2.ComposeObjectRequest.SourceObjectOrBuilder>( + sourceObjects_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + sourceObjects_ = null; + } + return sourceObjectsBuilder_; + } + + private java.lang.Object destinationPredefinedAcl_ = ""; + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to the destination
+     * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string destination_predefined_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The destinationPredefinedAcl. + */ + public java.lang.String getDestinationPredefinedAcl() { + java.lang.Object ref = destinationPredefinedAcl_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationPredefinedAcl_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to the destination
+     * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string destination_predefined_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for destinationPredefinedAcl. + */ + public com.google.protobuf.ByteString getDestinationPredefinedAclBytes() { + java.lang.Object ref = destinationPredefinedAcl_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationPredefinedAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to the destination
+     * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string destination_predefined_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The destinationPredefinedAcl to set. + * @return This builder for chaining. + */ + public Builder setDestinationPredefinedAcl(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + destinationPredefinedAcl_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to the destination
+     * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string destination_predefined_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDestinationPredefinedAcl() { + destinationPredefinedAcl_ = getDefaultInstance().getDestinationPredefinedAcl(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to the destination
+     * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string destination_predefined_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for destinationPredefinedAcl to set. + * @return This builder for chaining. + */ + public Builder setDestinationPredefinedAclBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + destinationPredefinedAcl_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private long ifGenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 4; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 4; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 4; + * + * @param value The ifGenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationMatch(long value) { + + ifGenerationMatch_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 4; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000008); + ifGenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000010); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object kmsKey_ = ""; + + /** + * + * + *
+     * Optional. Resource name of the Cloud KMS key, of the form
+     * `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`,
+     * that is used to encrypt the object. Overrides the object
+     * metadata's `kms_key_name` value, if any.
+     * 
+ * + * + * string kms_key = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKey. + */ + public java.lang.String getKmsKey() { + java.lang.Object ref = kmsKey_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKey_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Resource name of the Cloud KMS key, of the form
+     * `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`,
+     * that is used to encrypt the object. Overrides the object
+     * metadata's `kms_key_name` value, if any.
+     * 
+ * + * + * string kms_key = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKey. + */ + public com.google.protobuf.ByteString getKmsKeyBytes() { + java.lang.Object ref = kmsKey_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKey_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Resource name of the Cloud KMS key, of the form
+     * `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`,
+     * that is used to encrypt the object. Overrides the object
+     * metadata's `kms_key_name` value, if any.
+     * 
+ * + * + * string kms_key = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The kmsKey to set. + * @return This builder for chaining. + */ + public Builder setKmsKey(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + kmsKey_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Resource name of the Cloud KMS key, of the form
+     * `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`,
+     * that is used to encrypt the object. Overrides the object
+     * metadata's `kms_key_name` value, if any.
+     * 
+ * + * + * string kms_key = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearKmsKey() { + kmsKey_ = getDefaultInstance().getKmsKey(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Resource name of the Cloud KMS key, of the form
+     * `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`,
+     * that is used to encrypt the object. Overrides the object
+     * metadata's `kms_key_name` value, if any.
+     * 
+ * + * + * string kms_key = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for kmsKey to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + kmsKey_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + commonObjectRequestParamsBuilder_; + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + if (commonObjectRequestParamsBuilder_ == null) { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } else { + return commonObjectRequestParamsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonObjectRequestParams_ = value; + } else { + commonObjectRequestParamsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams.Builder builderForValue) { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParams_ = builderForValue.build(); + } else { + commonObjectRequestParamsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && commonObjectRequestParams_ != null + && commonObjectRequestParams_ + != com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance()) { + getCommonObjectRequestParamsBuilder().mergeFrom(value); + } else { + commonObjectRequestParams_ = value; + } + } else { + commonObjectRequestParamsBuilder_.mergeFrom(value); + } + if (commonObjectRequestParams_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCommonObjectRequestParams() { + bitField0_ = (bitField0_ & ~0x00000040); + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParams.Builder + getCommonObjectRequestParamsBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + if (commonObjectRequestParamsBuilder_ != null) { + return commonObjectRequestParamsBuilder_.getMessageOrBuilder(); + } else { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + internalGetCommonObjectRequestParamsFieldBuilder() { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParamsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder>( + getCommonObjectRequestParams(), getParentForChildren(), isClean()); + commonObjectRequestParams_ = null; + } + return commonObjectRequestParamsBuilder_; + } + + private com.google.storage.v2.ObjectChecksums objectChecksums_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + objectChecksumsBuilder_; + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is validated against
+     * the combined checksums of the component objects.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + public boolean hasObjectChecksums() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is validated against
+     * the combined checksums of the component objects.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + public com.google.storage.v2.ObjectChecksums getObjectChecksums() { + if (objectChecksumsBuilder_ == null) { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } else { + return objectChecksumsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is validated against
+     * the combined checksums of the component objects.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectChecksums(com.google.storage.v2.ObjectChecksums value) { + if (objectChecksumsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + objectChecksums_ = value; + } else { + objectChecksumsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is validated against
+     * the combined checksums of the component objects.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectChecksums( + com.google.storage.v2.ObjectChecksums.Builder builderForValue) { + if (objectChecksumsBuilder_ == null) { + objectChecksums_ = builderForValue.build(); + } else { + objectChecksumsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is validated against
+     * the combined checksums of the component objects.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeObjectChecksums(com.google.storage.v2.ObjectChecksums value) { + if (objectChecksumsBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0) + && objectChecksums_ != null + && objectChecksums_ != com.google.storage.v2.ObjectChecksums.getDefaultInstance()) { + getObjectChecksumsBuilder().mergeFrom(value); + } else { + objectChecksums_ = value; + } + } else { + objectChecksumsBuilder_.mergeFrom(value); + } + if (objectChecksums_ != null) { + bitField0_ |= 0x00000080; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is validated against
+     * the combined checksums of the component objects.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearObjectChecksums() { + bitField0_ = (bitField0_ & ~0x00000080); + objectChecksums_ = null; + if (objectChecksumsBuilder_ != null) { + objectChecksumsBuilder_.dispose(); + objectChecksumsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is validated against
+     * the combined checksums of the component objects.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectChecksums.Builder getObjectChecksumsBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return internalGetObjectChecksumsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is validated against
+     * the combined checksums of the component objects.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder() { + if (objectChecksumsBuilder_ != null) { + return objectChecksumsBuilder_.getMessageOrBuilder(); + } else { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is validated against
+     * the combined checksums of the component objects.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + internalGetObjectChecksumsFieldBuilder() { + if (objectChecksumsBuilder_ == null) { + objectChecksumsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder>( + getObjectChecksums(), getParentForChildren(), isClean()); + objectChecksums_ = null; + } + return objectChecksumsBuilder_; + } + + private boolean deleteSourceObjects_; + + /** + * + * + *
+     * Whether the source objects should be deleted in the compose request.
+     * 
+ * + * optional bool delete_source_objects = 11; + * + * @return Whether the deleteSourceObjects field is set. + */ + @java.lang.Override + public boolean hasDeleteSourceObjects() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
+     * Whether the source objects should be deleted in the compose request.
+     * 
+ * + * optional bool delete_source_objects = 11; + * + * @return The deleteSourceObjects. + */ + @java.lang.Override + public boolean getDeleteSourceObjects() { + return deleteSourceObjects_; + } + + /** + * + * + *
+     * Whether the source objects should be deleted in the compose request.
+     * 
+ * + * optional bool delete_source_objects = 11; + * + * @param value The deleteSourceObjects to set. + * @return This builder for chaining. + */ + public Builder setDeleteSourceObjects(boolean value) { + + deleteSourceObjects_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Whether the source objects should be deleted in the compose request.
+     * 
+ * + * optional bool delete_source_objects = 11; + * + * @return This builder for chaining. + */ + public Builder clearDeleteSourceObjects() { + bitField0_ = (bitField0_ & ~0x00000100); + deleteSourceObjects_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ComposeObjectRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ComposeObjectRequest) + private static final com.google.storage.v2.ComposeObjectRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ComposeObjectRequest(); + } + + public static com.google.storage.v2.ComposeObjectRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ComposeObjectRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ComposeObjectRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ComposeObjectRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ComposeObjectRequestOrBuilder.java new file mode 100644 index 000000000000..c6df27b12e13 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ComposeObjectRequestOrBuilder.java @@ -0,0 +1,382 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ComposeObjectRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ComposeObjectRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Properties of the resulting object.
+   * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the destination field is set. + */ + boolean hasDestination(); + + /** + * + * + *
+   * Required. Properties of the resulting object.
+   * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The destination. + */ + com.google.storage.v2.Object getDestination(); + + /** + * + * + *
+   * Required. Properties of the resulting object.
+   * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.storage.v2.ObjectOrBuilder getDestinationOrBuilder(); + + /** + * + * + *
+   * Optional. The list of source objects that is concatenated into a single
+   * object.
+   * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getSourceObjectsList(); + + /** + * + * + *
+   * Optional. The list of source objects that is concatenated into a single
+   * object.
+   * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ComposeObjectRequest.SourceObject getSourceObjects(int index); + + /** + * + * + *
+   * Optional. The list of source objects that is concatenated into a single
+   * object.
+   * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getSourceObjectsCount(); + + /** + * + * + *
+   * Optional. The list of source objects that is concatenated into a single
+   * object.
+   * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getSourceObjectsOrBuilderList(); + + /** + * + * + *
+   * Optional. The list of source objects that is concatenated into a single
+   * object.
+   * 
+ * + * + * repeated .google.storage.v2.ComposeObjectRequest.SourceObject source_objects = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ComposeObjectRequest.SourceObjectOrBuilder getSourceObjectsOrBuilder( + int index); + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to the destination
+   * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string destination_predefined_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The destinationPredefinedAcl. + */ + java.lang.String getDestinationPredefinedAcl(); + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to the destination
+   * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string destination_predefined_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for destinationPredefinedAcl. + */ + com.google.protobuf.ByteString getDestinationPredefinedAclBytes(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return Whether the ifGenerationMatch field is set. + */ + boolean hasIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return The ifGenerationMatch. + */ + long getIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Optional. Resource name of the Cloud KMS key, of the form
+   * `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`,
+   * that is used to encrypt the object. Overrides the object
+   * metadata's `kms_key_name` value, if any.
+   * 
+ * + * + * string kms_key = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKey. + */ + java.lang.String getKmsKey(); + + /** + * + * + *
+   * Optional. Resource name of the Cloud KMS key, of the form
+   * `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`,
+   * that is used to encrypt the object. Overrides the object
+   * metadata's `kms_key_name` value, if any.
+   * 
+ * + * + * string kms_key = 6 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKey. + */ + com.google.protobuf.ByteString getKmsKeyBytes(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + boolean hasCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.CommonObjectRequestParamsOrBuilder getCommonObjectRequestParamsOrBuilder(); + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is validated against
+   * the combined checksums of the component objects.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + boolean hasObjectChecksums(); + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is validated against
+   * the combined checksums of the component objects.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + com.google.storage.v2.ObjectChecksums getObjectChecksums(); + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is validated against
+   * the combined checksums of the component objects.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder(); + + /** + * + * + *
+   * Whether the source objects should be deleted in the compose request.
+   * 
+ * + * optional bool delete_source_objects = 11; + * + * @return Whether the deleteSourceObjects field is set. + */ + boolean hasDeleteSourceObjects(); + + /** + * + * + *
+   * Whether the source objects should be deleted in the compose request.
+   * 
+ * + * optional bool delete_source_objects = 11; + * + * @return The deleteSourceObjects. + */ + boolean getDeleteSourceObjects(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ContentRange.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ContentRange.java new file mode 100644 index 000000000000..17c5bdb9b31d --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ContentRange.java @@ -0,0 +1,693 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Specifies a requested range of bytes to download.
+ * 
+ * + * Protobuf type {@code google.storage.v2.ContentRange} + */ +@com.google.protobuf.Generated +public final class ContentRange extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ContentRange) + ContentRangeOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ContentRange"); + } + + // Use ContentRange.newBuilder() to construct. + private ContentRange(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ContentRange() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ContentRange_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ContentRange_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ContentRange.class, + com.google.storage.v2.ContentRange.Builder.class); + } + + public static final int START_FIELD_NUMBER = 1; + private long start_ = 0L; + + /** + * + * + *
+   * The starting offset of the object data. This value is inclusive.
+   * 
+ * + * int64 start = 1; + * + * @return The start. + */ + @java.lang.Override + public long getStart() { + return start_; + } + + public static final int END_FIELD_NUMBER = 2; + private long end_ = 0L; + + /** + * + * + *
+   * The ending offset of the object data. This value is exclusive.
+   * 
+ * + * int64 end = 2; + * + * @return The end. + */ + @java.lang.Override + public long getEnd() { + return end_; + } + + public static final int COMPLETE_LENGTH_FIELD_NUMBER = 3; + private long completeLength_ = 0L; + + /** + * + * + *
+   * The complete length of the object data.
+   * 
+ * + * int64 complete_length = 3; + * + * @return The completeLength. + */ + @java.lang.Override + public long getCompleteLength() { + return completeLength_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (start_ != 0L) { + output.writeInt64(1, start_); + } + if (end_ != 0L) { + output.writeInt64(2, end_); + } + if (completeLength_ != 0L) { + output.writeInt64(3, completeLength_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (start_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, start_); + } + if (end_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, end_); + } + if (completeLength_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, completeLength_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ContentRange)) { + return super.equals(obj); + } + com.google.storage.v2.ContentRange other = (com.google.storage.v2.ContentRange) obj; + + if (getStart() != other.getStart()) return false; + if (getEnd() != other.getEnd()) return false; + if (getCompleteLength() != other.getCompleteLength()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return 
memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + START_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getStart()); + hash = (37 * hash) + END_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getEnd()); + hash = (37 * hash) + COMPLETE_LENGTH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getCompleteLength()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ContentRange parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ContentRange parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ContentRange parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ContentRange parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ContentRange parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ContentRange parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ContentRange 
parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ContentRange parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ContentRange parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ContentRange parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ContentRange parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ContentRange parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ContentRange prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Specifies a requested range of bytes to download.
+   * 
+ * + * Protobuf type {@code google.storage.v2.ContentRange} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ContentRange) + com.google.storage.v2.ContentRangeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ContentRange_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ContentRange_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ContentRange.class, + com.google.storage.v2.ContentRange.Builder.class); + } + + // Construct using com.google.storage.v2.ContentRange.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + start_ = 0L; + end_ = 0L; + completeLength_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ContentRange_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ContentRange getDefaultInstanceForType() { + return com.google.storage.v2.ContentRange.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ContentRange build() { + com.google.storage.v2.ContentRange result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ContentRange buildPartial() { + com.google.storage.v2.ContentRange result = new 
com.google.storage.v2.ContentRange(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ContentRange result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.start_ = start_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.end_ = end_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.completeLength_ = completeLength_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ContentRange) { + return mergeFrom((com.google.storage.v2.ContentRange) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ContentRange other) { + if (other == com.google.storage.v2.ContentRange.getDefaultInstance()) return this; + if (other.getStart() != 0L) { + setStart(other.getStart()); + } + if (other.getEnd() != 0L) { + setEnd(other.getEnd()); + } + if (other.getCompleteLength() != 0L) { + setCompleteLength(other.getCompleteLength()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + start_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: + { + end_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + completeLength_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; 
+ } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long start_; + + /** + * + * + *
+     * The starting offset of the object data. This value is inclusive.
+     * 
+ * + * int64 start = 1; + * + * @return The start. + */ + @java.lang.Override + public long getStart() { + return start_; + } + + /** + * + * + *
+     * The starting offset of the object data. This value is inclusive.
+     * 
+ * + * int64 start = 1; + * + * @param value The start to set. + * @return This builder for chaining. + */ + public Builder setStart(long value) { + + start_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The starting offset of the object data. This value is inclusive.
+     * 
+ * + * int64 start = 1; + * + * @return This builder for chaining. + */ + public Builder clearStart() { + bitField0_ = (bitField0_ & ~0x00000001); + start_ = 0L; + onChanged(); + return this; + } + + private long end_; + + /** + * + * + *
+     * The ending offset of the object data. This value is exclusive.
+     * 
+ * + * int64 end = 2; + * + * @return The end. + */ + @java.lang.Override + public long getEnd() { + return end_; + } + + /** + * + * + *
+     * The ending offset of the object data. This value is exclusive.
+     * 
+ * + * int64 end = 2; + * + * @param value The end to set. + * @return This builder for chaining. + */ + public Builder setEnd(long value) { + + end_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The ending offset of the object data. This value is exclusive.
+     * 
+ * + * int64 end = 2; + * + * @return This builder for chaining. + */ + public Builder clearEnd() { + bitField0_ = (bitField0_ & ~0x00000002); + end_ = 0L; + onChanged(); + return this; + } + + private long completeLength_; + + /** + * + * + *
+     * The complete length of the object data.
+     * 
+ * + * int64 complete_length = 3; + * + * @return The completeLength. + */ + @java.lang.Override + public long getCompleteLength() { + return completeLength_; + } + + /** + * + * + *
+     * The complete length of the object data.
+     * 
+ * + * int64 complete_length = 3; + * + * @param value The completeLength to set. + * @return This builder for chaining. + */ + public Builder setCompleteLength(long value) { + + completeLength_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * The complete length of the object data.
+     * 
+ * + * int64 complete_length = 3; + * + * @return This builder for chaining. + */ + public Builder clearCompleteLength() { + bitField0_ = (bitField0_ & ~0x00000004); + completeLength_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ContentRange) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ContentRange) + private static final com.google.storage.v2.ContentRange DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ContentRange(); + } + + public static com.google.storage.v2.ContentRange getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ContentRange parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ContentRange getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ContentRangeOrBuilder.java 
b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ContentRangeOrBuilder.java new file mode 100644 index 000000000000..006f8d605d74 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ContentRangeOrBuilder.java @@ -0,0 +1,67 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ContentRangeOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ContentRange) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The starting offset of the object data. This value is inclusive.
+   * 
+ * + * int64 start = 1; + * + * @return The start. + */ + long getStart(); + + /** + * + * + *
+   * The ending offset of the object data. This value is exclusive.
+   * 
+ * + * int64 end = 2; + * + * @return The end. + */ + long getEnd(); + + /** + * + * + *
+   * The complete length of the object data.
+   * 
+ * + * int64 complete_length = 3; + * + * @return The completeLength. + */ + long getCompleteLength(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CreateBucketRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CreateBucketRequest.java new file mode 100644 index 000000000000..690c0e6576dd --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CreateBucketRequest.java @@ -0,0 +1,1693 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for [CreateBucket][google.storage.v2.Storage.CreateBucket].
+ * 
+ * + * Protobuf type {@code google.storage.v2.CreateBucketRequest} + */ +@com.google.protobuf.Generated +public final class CreateBucketRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.CreateBucketRequest) + CreateBucketRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateBucketRequest"); + } + + // Use CreateBucketRequest.newBuilder() to construct. + private CreateBucketRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateBucketRequest() { + parent_ = ""; + bucketId_ = ""; + predefinedAcl_ = ""; + predefinedDefaultObjectAcl_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CreateBucketRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CreateBucketRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.CreateBucketRequest.class, + com.google.storage.v2.CreateBucketRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. The project to which this bucket belongs. This field must either
+   * be empty or `projects/_`. The project ID that owns this bucket should be
+   * specified in the `bucket.project` field.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The project to which this bucket belongs. This field must either
+   * be empty or `projects/_`. The project ID that owns this bucket should be
+   * specified in the `bucket.project` field.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BUCKET_FIELD_NUMBER = 2; + private com.google.storage.v2.Bucket bucket_; + + /** + * + * + *
+   * Optional. Properties of the new bucket being inserted.
+   * The name of the bucket is specified in the `bucket_id` field. Populating
+   * `bucket.name` field results in an error.
+   * The project of the bucket must be specified in the `bucket.project` field.
+   * This field must be in `projects/{projectIdentifier}` format,
+   * {projectIdentifier} can be the project ID or project number. The `parent`
+   * field must be either empty or `projects/_`.
+   * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the bucket field is set. + */ + @java.lang.Override + public boolean hasBucket() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. Properties of the new bucket being inserted.
+   * The name of the bucket is specified in the `bucket_id` field. Populating
+   * `bucket.name` field results in an error.
+   * The project of the bucket must be specified in the `bucket.project` field.
+   * This field must be in `projects/{projectIdentifier}` format,
+   * {projectIdentifier} can be the project ID or project number. The `parent`
+   * field must be either empty or `projects/_`.
+   * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bucket. + */ + @java.lang.Override + public com.google.storage.v2.Bucket getBucket() { + return bucket_ == null ? com.google.storage.v2.Bucket.getDefaultInstance() : bucket_; + } + + /** + * + * + *
+   * Optional. Properties of the new bucket being inserted.
+   * The name of the bucket is specified in the `bucket_id` field. Populating
+   * `bucket.name` field results in an error.
+   * The project of the bucket must be specified in the `bucket.project` field.
+   * This field must be in `projects/{projectIdentifier}` format,
+   * {projectIdentifier} can be the project ID or project number. The `parent`
+   * field must be either empty or `projects/_`.
+   * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public com.google.storage.v2.BucketOrBuilder getBucketOrBuilder() { + return bucket_ == null ? com.google.storage.v2.Bucket.getDefaultInstance() : bucket_; + } + + public static final int BUCKET_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object bucketId_ = ""; + + /** + * + * + *
+   * Required. The ID to use for this bucket, which becomes the final component
+   * of the bucket's resource name. For example, the value `foo` might result in
+   * a bucket with the name `projects/123456/buckets/foo`.
+   * 
+ * + * string bucket_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bucketId. + */ + @java.lang.Override + public java.lang.String getBucketId() { + java.lang.Object ref = bucketId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucketId_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The ID to use for this bucket, which becomes the final component
+   * of the bucket's resource name. For example, the value `foo` might result in
+   * a bucket with the name `projects/123456/buckets/foo`.
+   * 
+ * + * string bucket_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for bucketId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBucketIdBytes() { + java.lang.Object ref = bucketId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucketId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PREDEFINED_ACL_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object predefinedAcl_ = ""; + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this bucket.
+   * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+   * `publicRead`, or `publicReadWrite`.
+   * 
+ * + * string predefined_acl = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The predefinedAcl. + */ + @java.lang.Override + public java.lang.String getPredefinedAcl() { + java.lang.Object ref = predefinedAcl_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + predefinedAcl_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this bucket.
+   * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+   * `publicRead`, or `publicReadWrite`.
+   * 
+ * + * string predefined_acl = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for predefinedAcl. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPredefinedAclBytes() { + java.lang.Object ref = predefinedAcl_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + predefinedAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PREDEFINED_DEFAULT_OBJECT_ACL_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private volatile java.lang.Object predefinedDefaultObjectAcl_ = ""; + + /** + * + * + *
+   * Optional. Apply a predefined set of default object access controls to this
+   * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string predefined_default_object_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The predefinedDefaultObjectAcl. + */ + @java.lang.Override + public java.lang.String getPredefinedDefaultObjectAcl() { + java.lang.Object ref = predefinedDefaultObjectAcl_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + predefinedDefaultObjectAcl_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Apply a predefined set of default object access controls to this
+   * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string predefined_default_object_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The bytes for predefinedDefaultObjectAcl. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPredefinedDefaultObjectAclBytes() { + java.lang.Object ref = predefinedDefaultObjectAcl_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + predefinedDefaultObjectAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENABLE_OBJECT_RETENTION_FIELD_NUMBER = 9; + private boolean enableObjectRetention_ = false; + + /** + * + * + *
+   * Optional. If true, enable object retention on the bucket.
+   * 
+ * + * bool enable_object_retention = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enableObjectRetention. + */ + @java.lang.Override + public boolean getEnableObjectRetention() { + return enableObjectRetention_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getBucket()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucketId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, bucketId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(predefinedAcl_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, predefinedAcl_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(predefinedDefaultObjectAcl_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 7, predefinedDefaultObjectAcl_); + } + if (enableObjectRetention_ != false) { + output.writeBool(9, enableObjectRetention_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getBucket()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucketId_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(3, bucketId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(predefinedAcl_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, predefinedAcl_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(predefinedDefaultObjectAcl_)) { + size += + com.google.protobuf.GeneratedMessage.computeStringSize(7, predefinedDefaultObjectAcl_); + } + if (enableObjectRetention_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(9, enableObjectRetention_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.CreateBucketRequest)) { + return super.equals(obj); + } + com.google.storage.v2.CreateBucketRequest other = + (com.google.storage.v2.CreateBucketRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasBucket() != other.hasBucket()) return false; + if (hasBucket()) { + if (!getBucket().equals(other.getBucket())) return false; + } + if (!getBucketId().equals(other.getBucketId())) return false; + if (!getPredefinedAcl().equals(other.getPredefinedAcl())) return false; + if (!getPredefinedDefaultObjectAcl().equals(other.getPredefinedDefaultObjectAcl())) + return false; + if (getEnableObjectRetention() != other.getEnableObjectRetention()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasBucket()) { + hash = (37 * hash) + BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getBucket().hashCode(); + } + hash = 
(37 * hash) + BUCKET_ID_FIELD_NUMBER; + hash = (53 * hash) + getBucketId().hashCode(); + hash = (37 * hash) + PREDEFINED_ACL_FIELD_NUMBER; + hash = (53 * hash) + getPredefinedAcl().hashCode(); + hash = (37 * hash) + PREDEFINED_DEFAULT_OBJECT_ACL_FIELD_NUMBER; + hash = (53 * hash) + getPredefinedDefaultObjectAcl().hashCode(); + hash = (37 * hash) + ENABLE_OBJECT_RETENTION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableObjectRetention()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.CreateBucketRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CreateBucketRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.CreateBucketRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CreateBucketRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.CreateBucketRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CreateBucketRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + 
} + + public static com.google.storage.v2.CreateBucketRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CreateBucketRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.CreateBucketRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CreateBucketRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.CreateBucketRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CreateBucketRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.CreateBucketRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder 
toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for [CreateBucket][google.storage.v2.Storage.CreateBucket].
+   * 
+ * + * Protobuf type {@code google.storage.v2.CreateBucketRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.CreateBucketRequest) + com.google.storage.v2.CreateBucketRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CreateBucketRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CreateBucketRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.CreateBucketRequest.class, + com.google.storage.v2.CreateBucketRequest.Builder.class); + } + + // Construct using com.google.storage.v2.CreateBucketRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetBucketFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + bucket_ = null; + if (bucketBuilder_ != null) { + bucketBuilder_.dispose(); + bucketBuilder_ = null; + } + bucketId_ = ""; + predefinedAcl_ = ""; + predefinedDefaultObjectAcl_ = ""; + enableObjectRetention_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CreateBucketRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.CreateBucketRequest 
getDefaultInstanceForType() { + return com.google.storage.v2.CreateBucketRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.CreateBucketRequest build() { + com.google.storage.v2.CreateBucketRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.CreateBucketRequest buildPartial() { + com.google.storage.v2.CreateBucketRequest result = + new com.google.storage.v2.CreateBucketRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.CreateBucketRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.bucket_ = bucketBuilder_ == null ? bucket_ : bucketBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.bucketId_ = bucketId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.predefinedAcl_ = predefinedAcl_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.predefinedDefaultObjectAcl_ = predefinedDefaultObjectAcl_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.enableObjectRetention_ = enableObjectRetention_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.CreateBucketRequest) { + return mergeFrom((com.google.storage.v2.CreateBucketRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.CreateBucketRequest other) { + if (other == com.google.storage.v2.CreateBucketRequest.getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = 
other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasBucket()) { + mergeBucket(other.getBucket()); + } + if (!other.getBucketId().isEmpty()) { + bucketId_ = other.bucketId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getPredefinedAcl().isEmpty()) { + predefinedAcl_ = other.predefinedAcl_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (!other.getPredefinedDefaultObjectAcl().isEmpty()) { + predefinedDefaultObjectAcl_ = other.predefinedDefaultObjectAcl_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (other.getEnableObjectRetention() != false) { + setEnableObjectRetention(other.getEnableObjectRetention()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(internalGetBucketFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + bucketId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 50: + { + predefinedAcl_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 50 + case 58: + { + predefinedDefaultObjectAcl_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 58 + case 72: + { + enableObjectRetention_ = input.readBool(); + bitField0_ |= 0x00000020; + break; + } // case 72 + default: + { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. The project to which this bucket belongs. This field must either
+     * be empty or `projects/_`. The project ID that owns this bucket should be
+     * specified in the `bucket.project` field.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The project to which this bucket belongs. This field must either
+     * be empty or `projects/_`. The project ID that owns this bucket should be
+     * specified in the `bucket.project` field.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The project to which this bucket belongs. This field must either
+     * be empty or `projects/_`. The project ID that owns this bucket should be
+     * specified in the `bucket.project` field.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The project to which this bucket belongs. This field must either
+     * be empty or `projects/_`. The project ID that owns this bucket should be
+     * specified in the `bucket.project` field.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The project to which this bucket belongs. This field must either
+     * be empty or `projects/_`. The project ID that owns this bucket should be
+     * specified in the `bucket.project` field.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.storage.v2.Bucket bucket_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket, + com.google.storage.v2.Bucket.Builder, + com.google.storage.v2.BucketOrBuilder> + bucketBuilder_; + + /** + * + * + *
+     * Optional. Properties of the new bucket being inserted.
+     * The name of the bucket is specified in the `bucket_id` field. Populating
+     * `bucket.name` field results in an error.
+     * The project of the bucket must be specified in the `bucket.project` field.
+     * This field must be in `projects/{projectIdentifier}` format,
+     * {projectIdentifier} can be the project ID or project number. The `parent`
+     * field must be either empty or `projects/_`.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the bucket field is set. + */ + public boolean hasBucket() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Optional. Properties of the new bucket being inserted.
+     * The name of the bucket is specified in the `bucket_id` field. Populating
+     * `bucket.name` field results in an error.
+     * The project of the bucket must be specified in the `bucket.project` field.
+     * This field must be in `projects/{projectIdentifier}` format,
+     * {projectIdentifier} can be the project ID or project number. The `parent`
+     * field must be either empty or `projects/_`.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bucket. + */ + public com.google.storage.v2.Bucket getBucket() { + if (bucketBuilder_ == null) { + return bucket_ == null ? com.google.storage.v2.Bucket.getDefaultInstance() : bucket_; + } else { + return bucketBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Properties of the new bucket being inserted.
+     * The name of the bucket is specified in the `bucket_id` field. Populating
+     * `bucket.name` field results in an error.
+     * The project of the bucket must be specified in the `bucket.project` field.
+     * This field must be in `projects/{projectIdentifier}` format,
+     * {projectIdentifier} can be the project ID or project number. The `parent`
+     * field must be either empty or `projects/_`.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder setBucket(com.google.storage.v2.Bucket value) { + if (bucketBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + bucket_ = value; + } else { + bucketBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Properties of the new bucket being inserted.
+     * The name of the bucket is specified in the `bucket_id` field. Populating
+     * `bucket.name` field results in an error.
+     * The project of the bucket must be specified in the `bucket.project` field.
+     * This field must be in `projects/{projectIdentifier}` format,
+     * {projectIdentifier} can be the project ID or project number. The `parent`
+     * field must be either empty or `projects/_`.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder setBucket(com.google.storage.v2.Bucket.Builder builderForValue) { + if (bucketBuilder_ == null) { + bucket_ = builderForValue.build(); + } else { + bucketBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Properties of the new bucket being inserted.
+     * The name of the bucket is specified in the `bucket_id` field. Populating
+     * `bucket.name` field results in an error.
+     * The project of the bucket must be specified in the `bucket.project` field.
+     * This field must be in `projects/{projectIdentifier}` format,
+     * {projectIdentifier} can be the project ID or project number. The `parent`
+     * field must be either empty or `projects/_`.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder mergeBucket(com.google.storage.v2.Bucket value) { + if (bucketBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && bucket_ != null + && bucket_ != com.google.storage.v2.Bucket.getDefaultInstance()) { + getBucketBuilder().mergeFrom(value); + } else { + bucket_ = value; + } + } else { + bucketBuilder_.mergeFrom(value); + } + if (bucket_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Properties of the new bucket being inserted.
+     * The name of the bucket is specified in the `bucket_id` field. Populating
+     * `bucket.name` field results in an error.
+     * The project of the bucket must be specified in the `bucket.project` field.
+     * This field must be in `projects/{projectIdentifier}` format,
+     * {projectIdentifier} can be the project ID or project number. The `parent`
+     * field must be either empty or `projects/_`.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder clearBucket() { + bitField0_ = (bitField0_ & ~0x00000002); + bucket_ = null; + if (bucketBuilder_ != null) { + bucketBuilder_.dispose(); + bucketBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Properties of the new bucket being inserted.
+     * The name of the bucket is specified in the `bucket_id` field. Populating
+     * `bucket.name` field results in an error.
+     * The project of the bucket must be specified in the `bucket.project` field.
+     * This field must be in `projects/{projectIdentifier}` format,
+     * {projectIdentifier} can be the project ID or project number. The `parent`
+     * field must be either empty or `projects/_`.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + public com.google.storage.v2.Bucket.Builder getBucketBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetBucketFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Properties of the new bucket being inserted.
+     * The name of the bucket is specified in the `bucket_id` field. Populating
+     * `bucket.name` field results in an error.
+     * The project of the bucket must be specified in the `bucket.project` field.
+     * This field must be in `projects/{projectIdentifier}` format,
+     * {projectIdentifier} can be the project ID or project number. The `parent`
+     * field must be either empty or `projects/_`.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + public com.google.storage.v2.BucketOrBuilder getBucketOrBuilder() { + if (bucketBuilder_ != null) { + return bucketBuilder_.getMessageOrBuilder(); + } else { + return bucket_ == null ? com.google.storage.v2.Bucket.getDefaultInstance() : bucket_; + } + } + + /** + * + * + *
+     * Optional. Properties of the new bucket being inserted.
+     * The name of the bucket is specified in the `bucket_id` field. Populating
+     * `bucket.name` field results in an error.
+     * The project of the bucket must be specified in the `bucket.project` field.
+     * This field must be in `projects/{projectIdentifier}` format,
+     * {projectIdentifier} can be the project ID or project number. The `parent`
+     * field must be either empty or `projects/_`.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket, + com.google.storage.v2.Bucket.Builder, + com.google.storage.v2.BucketOrBuilder> + internalGetBucketFieldBuilder() { + if (bucketBuilder_ == null) { + bucketBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket, + com.google.storage.v2.Bucket.Builder, + com.google.storage.v2.BucketOrBuilder>( + getBucket(), getParentForChildren(), isClean()); + bucket_ = null; + } + return bucketBuilder_; + } + + private java.lang.Object bucketId_ = ""; + + /** + * + * + *
+     * Required. The ID to use for this bucket, which becomes the final component
+     * of the bucket's resource name. For example, the value `foo` might result in
+     * a bucket with the name `projects/123456/buckets/foo`.
+     * 
+ * + * string bucket_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bucketId. + */ + public java.lang.String getBucketId() { + java.lang.Object ref = bucketId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucketId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The ID to use for this bucket, which becomes the final component
+     * of the bucket's resource name. For example, the value `foo` might result in
+     * a bucket with the name `projects/123456/buckets/foo`.
+     * 
+ * + * string bucket_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for bucketId. + */ + public com.google.protobuf.ByteString getBucketIdBytes() { + java.lang.Object ref = bucketId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucketId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The ID to use for this bucket, which becomes the final component
+     * of the bucket's resource name. For example, the value `foo` might result in
+     * a bucket with the name `projects/123456/buckets/foo`.
+     * 
+ * + * string bucket_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bucketId to set. + * @return This builder for chaining. + */ + public Builder setBucketId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bucketId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The ID to use for this bucket, which becomes the final component
+     * of the bucket's resource name. For example, the value `foo` might result in
+     * a bucket with the name `projects/123456/buckets/foo`.
+     * 
+ * + * string bucket_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearBucketId() { + bucketId_ = getDefaultInstance().getBucketId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The ID to use for this bucket, which becomes the final component
+     * of the bucket's resource name. For example, the value `foo` might result in
+     * a bucket with the name `projects/123456/buckets/foo`.
+     * 
+ * + * string bucket_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for bucketId to set. + * @return This builder for chaining. + */ + public Builder setBucketIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + bucketId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object predefinedAcl_ = ""; + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this bucket.
+     * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+     * `publicRead`, or `publicReadWrite`.
+     * 
+ * + * string predefined_acl = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The predefinedAcl. + */ + public java.lang.String getPredefinedAcl() { + java.lang.Object ref = predefinedAcl_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + predefinedAcl_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this bucket.
+     * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+     * `publicRead`, or `publicReadWrite`.
+     * 
+ * + * string predefined_acl = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for predefinedAcl. + */ + public com.google.protobuf.ByteString getPredefinedAclBytes() { + java.lang.Object ref = predefinedAcl_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + predefinedAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this bucket.
+     * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+     * `publicRead`, or `publicReadWrite`.
+     * 
+ * + * string predefined_acl = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The predefinedAcl to set. + * @return This builder for chaining. + */ + public Builder setPredefinedAcl(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + predefinedAcl_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this bucket.
+     * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+     * `publicRead`, or `publicReadWrite`.
+     * 
+ * + * string predefined_acl = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPredefinedAcl() { + predefinedAcl_ = getDefaultInstance().getPredefinedAcl(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this bucket.
+     * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+     * `publicRead`, or `publicReadWrite`.
+     * 
+ * + * string predefined_acl = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for predefinedAcl to set. + * @return This builder for chaining. + */ + public Builder setPredefinedAclBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + predefinedAcl_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private java.lang.Object predefinedDefaultObjectAcl_ = ""; + + /** + * + * + *
+     * Optional. Apply a predefined set of default object access controls to this
+     * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_default_object_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The predefinedDefaultObjectAcl. + */ + public java.lang.String getPredefinedDefaultObjectAcl() { + java.lang.Object ref = predefinedDefaultObjectAcl_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + predefinedDefaultObjectAcl_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of default object access controls to this
+     * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_default_object_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The bytes for predefinedDefaultObjectAcl. + */ + public com.google.protobuf.ByteString getPredefinedDefaultObjectAclBytes() { + java.lang.Object ref = predefinedDefaultObjectAcl_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + predefinedDefaultObjectAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of default object access controls to this
+     * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_default_object_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The predefinedDefaultObjectAcl to set. + * @return This builder for chaining. + */ + public Builder setPredefinedDefaultObjectAcl(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + predefinedDefaultObjectAcl_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of default object access controls to this
+     * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_default_object_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearPredefinedDefaultObjectAcl() { + predefinedDefaultObjectAcl_ = getDefaultInstance().getPredefinedDefaultObjectAcl(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of default object access controls to this
+     * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_default_object_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The bytes for predefinedDefaultObjectAcl to set. + * @return This builder for chaining. + */ + public Builder setPredefinedDefaultObjectAclBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + predefinedDefaultObjectAcl_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private boolean enableObjectRetention_; + + /** + * + * + *
+     * Optional. If true, enable object retention on the bucket.
+     * 
+ * + * bool enable_object_retention = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enableObjectRetention. + */ + @java.lang.Override + public boolean getEnableObjectRetention() { + return enableObjectRetention_; + } + + /** + * + * + *
+     * Optional. If true, enable object retention on the bucket.
+     * 
+ * + * bool enable_object_retention = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The enableObjectRetention to set. + * @return This builder for chaining. + */ + public Builder setEnableObjectRetention(boolean value) { + + enableObjectRetention_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If true, enable object retention on the bucket.
+     * 
+ * + * bool enable_object_retention = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEnableObjectRetention() { + bitField0_ = (bitField0_ & ~0x00000020); + enableObjectRetention_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.CreateBucketRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.CreateBucketRequest) + private static final com.google.storage.v2.CreateBucketRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.CreateBucketRequest(); + } + + public static com.google.storage.v2.CreateBucketRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateBucketRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.CreateBucketRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CreateBucketRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CreateBucketRequestOrBuilder.java new file mode 100644 index 000000000000..558055f54d32 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CreateBucketRequestOrBuilder.java @@ -0,0 +1,222 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface CreateBucketRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.CreateBucketRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The project to which this bucket belongs. This field must either
+   * be empty or `projects/_`. The project ID that owns this bucket should be
+   * specified in the `bucket.project` field.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. The project to which this bucket belongs. This field must either
+   * be empty or `projects/_`. The project ID that owns this bucket should be
+   * specified in the `bucket.project` field.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. Properties of the new bucket being inserted.
+   * The name of the bucket is specified in the `bucket_id` field. Populating
+   * `bucket.name` field results in an error.
+   * The project of the bucket must be specified in the `bucket.project` field.
+   * This field must be in `projects/{projectIdentifier}` format,
+   * {projectIdentifier} can be the project ID or project number. The `parent`
+   * field must be either empty or `projects/_`.
+   * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the bucket field is set. + */ + boolean hasBucket(); + + /** + * + * + *
+   * Optional. Properties of the new bucket being inserted.
+   * The name of the bucket is specified in the `bucket_id` field. Populating
+   * `bucket.name` field results in an error.
+   * The project of the bucket must be specified in the `bucket.project` field.
+   * This field must be in `projects/{projectIdentifier}` format,
+   * {projectIdentifier} can be the project ID or project number. The `parent`
+   * field must be either empty or `projects/_`.
+   * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bucket. + */ + com.google.storage.v2.Bucket getBucket(); + + /** + * + * + *
+   * Optional. Properties of the new bucket being inserted.
+   * The name of the bucket is specified in the `bucket_id` field. Populating
+   * `bucket.name` field results in an error.
+   * The project of the bucket must be specified in the `bucket.project` field.
+   * This field must be in `projects/{projectIdentifier}` format,
+   * {projectIdentifier} can be the project ID or project number. The `parent`
+   * field must be either empty or `projects/_`.
+   * 
+ * + * .google.storage.v2.Bucket bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + com.google.storage.v2.BucketOrBuilder getBucketOrBuilder(); + + /** + * + * + *
+   * Required. The ID to use for this bucket, which becomes the final component
+   * of the bucket's resource name. For example, the value `foo` might result in
+   * a bucket with the name `projects/123456/buckets/foo`.
+   * 
+ * + * string bucket_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bucketId. + */ + java.lang.String getBucketId(); + + /** + * + * + *
+   * Required. The ID to use for this bucket, which becomes the final component
+   * of the bucket's resource name. For example, the value `foo` might result in
+   * a bucket with the name `projects/123456/buckets/foo`.
+   * 
+ * + * string bucket_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for bucketId. + */ + com.google.protobuf.ByteString getBucketIdBytes(); + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this bucket.
+   * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+   * `publicRead`, or `publicReadWrite`.
+   * 
+ * + * string predefined_acl = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The predefinedAcl. + */ + java.lang.String getPredefinedAcl(); + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this bucket.
+   * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+   * `publicRead`, or `publicReadWrite`.
+   * 
+ * + * string predefined_acl = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for predefinedAcl. + */ + com.google.protobuf.ByteString getPredefinedAclBytes(); + + /** + * + * + *
+   * Optional. Apply a predefined set of default object access controls to this
+   * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string predefined_default_object_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The predefinedDefaultObjectAcl. + */ + java.lang.String getPredefinedDefaultObjectAcl(); + + /** + * + * + *
+   * Optional. Apply a predefined set of default object access controls to this
+   * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string predefined_default_object_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The bytes for predefinedDefaultObjectAcl. + */ + com.google.protobuf.ByteString getPredefinedDefaultObjectAclBytes(); + + /** + * + * + *
+   * Optional. If true, enable object retention on the bucket.
+   * 
+ * + * bool enable_object_retention = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enableObjectRetention. + */ + boolean getEnableObjectRetention(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CryptoKeyName.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CryptoKeyName.java new file mode 100644 index 000000000000..f10da50fb9e8 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CryptoKeyName.java @@ -0,0 +1,261 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.v2; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class CryptoKeyName implements ResourceName { + private static final PathTemplate PROJECT_LOCATION_KEY_RING_CRYPTO_KEY = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}"); + private volatile Map fieldValuesMap; + private final String project; + private final String location; + private final String keyRing; + private final String cryptoKey; + + @Deprecated + protected CryptoKeyName() { + project = null; + location = null; + keyRing = null; + cryptoKey = null; + } + + private CryptoKeyName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + location = Preconditions.checkNotNull(builder.getLocation()); + keyRing = Preconditions.checkNotNull(builder.getKeyRing()); + cryptoKey = Preconditions.checkNotNull(builder.getCryptoKey()); + } + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getKeyRing() { + return keyRing; + } + + public String getCryptoKey() { + return cryptoKey; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static CryptoKeyName of( + String project, String location, String keyRing, String cryptoKey) { + return newBuilder() + .setProject(project) + .setLocation(location) + .setKeyRing(keyRing) + .setCryptoKey(cryptoKey) + .build(); + } + + public static String format(String project, String location, String keyRing, String cryptoKey) { + return newBuilder() + .setProject(project) + .setLocation(location) + .setKeyRing(keyRing) + .setCryptoKey(cryptoKey) + .build() + .toString(); + } + + public static CryptoKeyName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_LOCATION_KEY_RING_CRYPTO_KEY.validatedMatch( + formattedString, "CryptoKeyName.parse: formattedString 
not in valid format"); + return of( + matchMap.get("project"), + matchMap.get("location"), + matchMap.get("key_ring"), + matchMap.get("crypto_key")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (CryptoKeyName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_LOCATION_KEY_RING_CRYPTO_KEY.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (location != null) { + fieldMapBuilder.put("location", location); + } + if (keyRing != null) { + fieldMapBuilder.put("key_ring", keyRing); + } + if (cryptoKey != null) { + fieldMapBuilder.put("crypto_key", cryptoKey); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_LOCATION_KEY_RING_CRYPTO_KEY.instantiate( + "project", project, "location", location, "key_ring", keyRing, "crypto_key", cryptoKey); + } + + @Override + public boolean equals(java.lang.Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + CryptoKeyName that = ((CryptoKeyName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.location, that.location) + && Objects.equals(this.keyRing, that.keyRing) + && 
Objects.equals(this.cryptoKey, that.cryptoKey); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(location); + h *= 1000003; + h ^= Objects.hashCode(keyRing); + h *= 1000003; + h ^= Objects.hashCode(cryptoKey); + return h; + } + + /** + * Builder for + * projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}. + */ + public static class Builder { + private String project; + private String location; + private String keyRing; + private String cryptoKey; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getKeyRing() { + return keyRing; + } + + public String getCryptoKey() { + return cryptoKey; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setLocation(String location) { + this.location = location; + return this; + } + + public Builder setKeyRing(String keyRing) { + this.keyRing = keyRing; + return this; + } + + public Builder setCryptoKey(String cryptoKey) { + this.cryptoKey = cryptoKey; + return this; + } + + private Builder(CryptoKeyName cryptoKeyName) { + this.project = cryptoKeyName.project; + this.location = cryptoKeyName.location; + this.keyRing = cryptoKeyName.keyRing; + this.cryptoKey = cryptoKeyName.cryptoKey; + } + + public CryptoKeyName build() { + return new CryptoKeyName(this); + } + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CustomerEncryption.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CustomerEncryption.java new file mode 100644 index 000000000000..f058a4803997 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CustomerEncryption.java @@ -0,0 +1,700 @@ +/* + * Copyright 2026 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Describes the customer-supplied encryption key mechanism used to store an
+ * object's data at rest.
+ * 
+ * + * Protobuf type {@code google.storage.v2.CustomerEncryption} + */ +@com.google.protobuf.Generated +public final class CustomerEncryption extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.CustomerEncryption) + CustomerEncryptionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CustomerEncryption"); + } + + // Use CustomerEncryption.newBuilder() to construct. + private CustomerEncryption(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CustomerEncryption() { + encryptionAlgorithm_ = ""; + keySha256Bytes_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CustomerEncryption_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CustomerEncryption_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.CustomerEncryption.class, + com.google.storage.v2.CustomerEncryption.Builder.class); + } + + public static final int ENCRYPTION_ALGORITHM_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object encryptionAlgorithm_ = ""; + + /** + * + * + *
+   * Optional. The encryption algorithm.
+   * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The encryptionAlgorithm. + */ + @java.lang.Override + public java.lang.String getEncryptionAlgorithm() { + java.lang.Object ref = encryptionAlgorithm_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + encryptionAlgorithm_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The encryption algorithm.
+   * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for encryptionAlgorithm. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEncryptionAlgorithmBytes() { + java.lang.Object ref = encryptionAlgorithm_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + encryptionAlgorithm_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int KEY_SHA256_BYTES_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString keySha256Bytes_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * Optional. SHA256 hash value of the encryption key.
+   * In raw bytes format (not base64-encoded).
+   * 
+ * + * bytes key_sha256_bytes = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The keySha256Bytes. + */ + @java.lang.Override + public com.google.protobuf.ByteString getKeySha256Bytes() { + return keySha256Bytes_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(encryptionAlgorithm_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, encryptionAlgorithm_); + } + if (!keySha256Bytes_.isEmpty()) { + output.writeBytes(3, keySha256Bytes_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(encryptionAlgorithm_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, encryptionAlgorithm_); + } + if (!keySha256Bytes_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(3, keySha256Bytes_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.CustomerEncryption)) { + return super.equals(obj); + } + com.google.storage.v2.CustomerEncryption other = (com.google.storage.v2.CustomerEncryption) obj; + + if (!getEncryptionAlgorithm().equals(other.getEncryptionAlgorithm())) return false; + if (!getKeySha256Bytes().equals(other.getKeySha256Bytes())) return false; + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENCRYPTION_ALGORITHM_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionAlgorithm().hashCode(); + hash = (37 * hash) + KEY_SHA256_BYTES_FIELD_NUMBER; + hash = (53 * hash) + getKeySha256Bytes().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.CustomerEncryption parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CustomerEncryption parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.CustomerEncryption parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CustomerEncryption parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.CustomerEncryption parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.CustomerEncryption parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.storage.v2.CustomerEncryption parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CustomerEncryption parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.CustomerEncryption parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CustomerEncryption parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.CustomerEncryption parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.CustomerEncryption parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.CustomerEncryption prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + 
public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Describes the customer-supplied encryption key mechanism used to store an
+   * object's data at rest.
+   * 
+ * + * Protobuf type {@code google.storage.v2.CustomerEncryption} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.CustomerEncryption) + com.google.storage.v2.CustomerEncryptionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CustomerEncryption_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CustomerEncryption_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.CustomerEncryption.class, + com.google.storage.v2.CustomerEncryption.Builder.class); + } + + // Construct using com.google.storage.v2.CustomerEncryption.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + encryptionAlgorithm_ = ""; + keySha256Bytes_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_CustomerEncryption_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.CustomerEncryption getDefaultInstanceForType() { + return com.google.storage.v2.CustomerEncryption.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.CustomerEncryption build() { + com.google.storage.v2.CustomerEncryption result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public 
com.google.storage.v2.CustomerEncryption buildPartial() { + com.google.storage.v2.CustomerEncryption result = + new com.google.storage.v2.CustomerEncryption(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.CustomerEncryption result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.encryptionAlgorithm_ = encryptionAlgorithm_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.keySha256Bytes_ = keySha256Bytes_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.CustomerEncryption) { + return mergeFrom((com.google.storage.v2.CustomerEncryption) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.CustomerEncryption other) { + if (other == com.google.storage.v2.CustomerEncryption.getDefaultInstance()) return this; + if (!other.getEncryptionAlgorithm().isEmpty()) { + encryptionAlgorithm_ = other.encryptionAlgorithm_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getKeySha256Bytes().isEmpty()) { + setKeySha256Bytes(other.getKeySha256Bytes()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + encryptionAlgorithm_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 26: + { + 
keySha256Bytes_ = input.readBytes(); + bitField0_ |= 0x00000002; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object encryptionAlgorithm_ = ""; + + /** + * + * + *
+     * Optional. The encryption algorithm.
+     * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The encryptionAlgorithm. + */ + public java.lang.String getEncryptionAlgorithm() { + java.lang.Object ref = encryptionAlgorithm_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + encryptionAlgorithm_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The encryption algorithm.
+     * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for encryptionAlgorithm. + */ + public com.google.protobuf.ByteString getEncryptionAlgorithmBytes() { + java.lang.Object ref = encryptionAlgorithm_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + encryptionAlgorithm_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The encryption algorithm.
+     * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The encryptionAlgorithm to set. + * @return This builder for chaining. + */ + public Builder setEncryptionAlgorithm(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + encryptionAlgorithm_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The encryption algorithm.
+     * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEncryptionAlgorithm() { + encryptionAlgorithm_ = getDefaultInstance().getEncryptionAlgorithm(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The encryption algorithm.
+     * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for encryptionAlgorithm to set. + * @return This builder for chaining. + */ + public Builder setEncryptionAlgorithmBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + encryptionAlgorithm_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString keySha256Bytes_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * Optional. SHA256 hash value of the encryption key.
+     * In raw bytes format (not base64-encoded).
+     * 
+ * + * bytes key_sha256_bytes = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The keySha256Bytes. + */ + @java.lang.Override + public com.google.protobuf.ByteString getKeySha256Bytes() { + return keySha256Bytes_; + } + + /** + * + * + *
+     * Optional. SHA256 hash value of the encryption key.
+     * In raw bytes format (not base64-encoded).
+     * 
+ * + * bytes key_sha256_bytes = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The keySha256Bytes to set. + * @return This builder for chaining. + */ + public Builder setKeySha256Bytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + keySha256Bytes_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. SHA256 hash value of the encryption key.
+     * In raw bytes format (not base64-encoded).
+     * 
+ * + * bytes key_sha256_bytes = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearKeySha256Bytes() { + bitField0_ = (bitField0_ & ~0x00000002); + keySha256Bytes_ = getDefaultInstance().getKeySha256Bytes(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.CustomerEncryption) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.CustomerEncryption) + private static final com.google.storage.v2.CustomerEncryption DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.CustomerEncryption(); + } + + public static com.google.storage.v2.CustomerEncryption getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CustomerEncryption parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.CustomerEncryption getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CustomerEncryptionOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CustomerEncryptionOrBuilder.java new file mode 100644 index 000000000000..8296fdb9bafc --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/CustomerEncryptionOrBuilder.java @@ -0,0 +1,68 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface CustomerEncryptionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.CustomerEncryption) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. The encryption algorithm.
+   * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The encryptionAlgorithm. + */ + java.lang.String getEncryptionAlgorithm(); + + /** + * + * + *
+   * Optional. The encryption algorithm.
+   * 
+ * + * string encryption_algorithm = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for encryptionAlgorithm. + */ + com.google.protobuf.ByteString getEncryptionAlgorithmBytes(); + + /** + * + * + *
+   * Optional. SHA256 hash value of the encryption key.
+   * In raw bytes format (not base64-encoded).
+   * 
+ * + * bytes key_sha256_bytes = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The keySha256Bytes. + */ + com.google.protobuf.ByteString getKeySha256Bytes(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/DeleteBucketRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/DeleteBucketRequest.java new file mode 100644 index 000000000000..d93decbc6181 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/DeleteBucketRequest.java @@ -0,0 +1,888 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for [DeleteBucket][google.storage.v2.Storage.DeleteBucket].
+ * 
+ * + * Protobuf type {@code google.storage.v2.DeleteBucketRequest} + */ +@com.google.protobuf.Generated +public final class DeleteBucketRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.DeleteBucketRequest) + DeleteBucketRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteBucketRequest"); + } + + // Use DeleteBucketRequest.newBuilder() to construct. + private DeleteBucketRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteBucketRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_DeleteBucketRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_DeleteBucketRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.DeleteBucketRequest.class, + com.google.storage.v2.DeleteBucketRequest.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of a bucket to delete.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of a bucket to delete.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 2; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * If set, only deletes the bucket if its metageneration matches this value.
+   * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * If set, only deletes the bucket if its metageneration matches this value.
+   * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 3; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * If set, only deletes the bucket if its metageneration does not match this
+   * value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * If set, only deletes the bucket if its metageneration does not match this
+   * value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(2, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(3, ifMetagenerationNotMatch_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, ifMetagenerationNotMatch_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.DeleteBucketRequest)) { + return super.equals(obj); + } + com.google.storage.v2.DeleteBucketRequest other = + (com.google.storage.v2.DeleteBucketRequest) obj; + + if 
(!getName().equals(other.getName())) return false; + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.DeleteBucketRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.DeleteBucketRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.DeleteBucketRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.storage.v2.DeleteBucketRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.DeleteBucketRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.DeleteBucketRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.DeleteBucketRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.DeleteBucketRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.DeleteBucketRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.DeleteBucketRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.DeleteBucketRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.DeleteBucketRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.DeleteBucketRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for [DeleteBucket][google.storage.v2.Storage.DeleteBucket].
+   * 
+ * + * Protobuf type {@code google.storage.v2.DeleteBucketRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.DeleteBucketRequest) + com.google.storage.v2.DeleteBucketRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_DeleteBucketRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_DeleteBucketRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.DeleteBucketRequest.class, + com.google.storage.v2.DeleteBucketRequest.Builder.class); + } + + // Construct using com.google.storage.v2.DeleteBucketRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_DeleteBucketRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.DeleteBucketRequest getDefaultInstanceForType() { + return com.google.storage.v2.DeleteBucketRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.DeleteBucketRequest build() { + com.google.storage.v2.DeleteBucketRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public 
com.google.storage.v2.DeleteBucketRequest buildPartial() { + com.google.storage.v2.DeleteBucketRequest result = + new com.google.storage.v2.DeleteBucketRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.DeleteBucketRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.DeleteBucketRequest) { + return mergeFrom((com.google.storage.v2.DeleteBucketRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.DeleteBucketRequest other) { + if (other == com.google.storage.v2.DeleteBucketRequest.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw 
new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of a bucket to delete.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of a bucket to delete.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of a bucket to delete.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of a bucket to delete.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of a bucket to delete.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * If set, only deletes the bucket if its metageneration matches this value.
+     * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * If set, only deletes the bucket if its metageneration matches this value.
+     * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * If set, only deletes the bucket if its metageneration matches this value.
+     * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * If set, only deletes the bucket if its metageneration matches this value.
+     * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000002); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * If set, only deletes the bucket if its metageneration does not match this
+     * value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * If set, only deletes the bucket if its metageneration does not match this
+     * value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * If set, only deletes the bucket if its metageneration does not match this
+     * value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * If set, only deletes the bucket if its metageneration does not match this
+     * value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000004); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.DeleteBucketRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.DeleteBucketRequest) + private static final com.google.storage.v2.DeleteBucketRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.DeleteBucketRequest(); + } + + public static com.google.storage.v2.DeleteBucketRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteBucketRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.DeleteBucketRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/DeleteBucketRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/DeleteBucketRequestOrBuilder.java new file mode 100644 index 000000000000..8167fc296093 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/DeleteBucketRequestOrBuilder.java @@ -0,0 +1,112 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface DeleteBucketRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.DeleteBucketRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of a bucket to delete.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of a bucket to delete.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * If set, only deletes the bucket if its metageneration matches this value.
+   * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * If set, only deletes the bucket if its metageneration matches this value.
+   * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * If set, only deletes the bucket if its metageneration does not match this
+   * value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * If set, only deletes the bucket if its metageneration does not match this
+   * value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/DeleteObjectRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/DeleteObjectRequest.java new file mode 100644 index 000000000000..e919608ae803 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/DeleteObjectRequest.java @@ -0,0 +1,1837 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for deleting an object.
+ * 
+ * + * Protobuf type {@code google.storage.v2.DeleteObjectRequest} + */ +@com.google.protobuf.Generated +public final class DeleteObjectRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.DeleteObjectRequest) + DeleteObjectRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteObjectRequest"); + } + + // Use DeleteObjectRequest.newBuilder() to construct. + private DeleteObjectRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteObjectRequest() { + bucket_ = ""; + object_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_DeleteObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_DeleteObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.DeleteObjectRequest.class, + com.google.storage.v2.DeleteObjectRequest.Builder.class); + } + + private int bitField0_; + public static final int BUCKET_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object bucket_ = ""; + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + @java.lang.Override + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OBJECT_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object object_ = ""; + + /** + * + * + *
+   * Required. The name of the finalized object to delete.
+   * Note: If you want to delete an unfinalized resumable upload please use
+   * `CancelResumableWrite`.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + @java.lang.Override + public java.lang.String getObject() { + java.lang.Object ref = object_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + object_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the finalized object to delete.
+   * Note: If you want to delete an unfinalized resumable upload please use
+   * `CancelResumableWrite`.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + @java.lang.Override + public com.google.protobuf.ByteString getObjectBytes() { + java.lang.Object ref = object_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + object_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int GENERATION_FIELD_NUMBER = 4; + private long generation_ = 0L; + + /** + * + * + *
+   * Optional. If present, permanently deletes a specific revision of this
+   * object (as opposed to the latest version, the default).
+   * 
+ * + * int64 generation = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + public static final int IF_GENERATION_MATCH_FIELD_NUMBER = 5; + private long ifGenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 5; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 5; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + public static final int IF_GENERATION_NOT_MATCH_FIELD_NUMBER = 6; + private long ifGenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 6; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 6; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 7; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 7; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 7; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 8; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 8; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 8; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER = 10; + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + @java.lang.Override + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, bucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(object_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, object_); + } + if (generation_ != 0L) { + output.writeInt64(4, generation_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(5, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(6, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt64(7, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeInt64(8, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeMessage(10, getCommonObjectRequestParams()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, bucket_); + } + 
if (!com.google.protobuf.GeneratedMessage.isStringEmpty(object_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, object_); + } + if (generation_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, generation_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(5, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(6, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(7, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(8, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 10, getCommonObjectRequestParams()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.DeleteObjectRequest)) { + return super.equals(obj); + } + com.google.storage.v2.DeleteObjectRequest other = + (com.google.storage.v2.DeleteObjectRequest) obj; + + if (!getBucket().equals(other.getBucket())) return false; + if (!getObject().equals(other.getObject())) return false; + if (getGeneration() != other.getGeneration()) return false; + if (hasIfGenerationMatch() != other.hasIfGenerationMatch()) return false; + if (hasIfGenerationMatch()) { + if (getIfGenerationMatch() != other.getIfGenerationMatch()) return false; + } + if (hasIfGenerationNotMatch() != other.hasIfGenerationNotMatch()) return false; + if (hasIfGenerationNotMatch()) { + if (getIfGenerationNotMatch() != other.getIfGenerationNotMatch()) return false; + } + if (hasIfMetagenerationMatch() != 
other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (hasCommonObjectRequestParams() != other.hasCommonObjectRequestParams()) return false; + if (hasCommonObjectRequestParams()) { + if (!getCommonObjectRequestParams().equals(other.getCommonObjectRequestParams())) + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getBucket().hashCode(); + hash = (37 * hash) + OBJECT_FIELD_NUMBER; + hash = (53 * hash) + getObject().hashCode(); + hash = (37 * hash) + GENERATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getGeneration()); + if (hasIfGenerationMatch()) { + hash = (37 * hash) + IF_GENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationMatch()); + } + if (hasIfGenerationNotMatch()) { + hash = (37 * hash) + IF_GENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationNotMatch()); + } + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + if 
(hasCommonObjectRequestParams()) { + hash = (37 * hash) + COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getCommonObjectRequestParams().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.DeleteObjectRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.DeleteObjectRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.DeleteObjectRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.DeleteObjectRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.DeleteObjectRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.DeleteObjectRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.DeleteObjectRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.DeleteObjectRequest parseFrom( + java.io.InputStream 
input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.DeleteObjectRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.DeleteObjectRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.DeleteObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.DeleteObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.DeleteObjectRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for deleting an object.
+   * 
+ * + * Protobuf type {@code google.storage.v2.DeleteObjectRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.DeleteObjectRequest) + com.google.storage.v2.DeleteObjectRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_DeleteObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_DeleteObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.DeleteObjectRequest.class, + com.google.storage.v2.DeleteObjectRequest.Builder.class); + } + + // Construct using com.google.storage.v2.DeleteObjectRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCommonObjectRequestParamsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + bucket_ = ""; + object_ = ""; + generation_ = 0L; + ifGenerationMatch_ = 0L; + ifGenerationNotMatch_ = 0L; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + 
.internal_static_google_storage_v2_DeleteObjectRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.DeleteObjectRequest getDefaultInstanceForType() { + return com.google.storage.v2.DeleteObjectRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.DeleteObjectRequest build() { + com.google.storage.v2.DeleteObjectRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.DeleteObjectRequest buildPartial() { + com.google.storage.v2.DeleteObjectRequest result = + new com.google.storage.v2.DeleteObjectRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.DeleteObjectRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.bucket_ = bucket_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.object_ = object_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.generation_ = generation_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.ifGenerationMatch_ = ifGenerationMatch_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.ifGenerationNotMatch_ = ifGenerationNotMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.commonObjectRequestParams_ = + commonObjectRequestParamsBuilder_ == null + ? 
commonObjectRequestParams_ + : commonObjectRequestParamsBuilder_.build(); + to_bitField0_ |= 0x00000010; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.DeleteObjectRequest) { + return mergeFrom((com.google.storage.v2.DeleteObjectRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.DeleteObjectRequest other) { + if (other == com.google.storage.v2.DeleteObjectRequest.getDefaultInstance()) return this; + if (!other.getBucket().isEmpty()) { + bucket_ = other.bucket_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getObject().isEmpty()) { + object_ = other.object_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getGeneration() != 0L) { + setGeneration(other.getGeneration()); + } + if (other.hasIfGenerationMatch()) { + setIfGenerationMatch(other.getIfGenerationMatch()); + } + if (other.hasIfGenerationNotMatch()) { + setIfGenerationNotMatch(other.getIfGenerationNotMatch()); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (other.hasCommonObjectRequestParams()) { + mergeCommonObjectRequestParams(other.getCommonObjectRequestParams()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch 
(tag) { + case 0: + done = true; + break; + case 10: + { + bucket_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + object_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 32: + { + generation_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 32 + case 40: + { + ifGenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 40 + case 48: + { + ifGenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000010; + break; + } // case 48 + case 56: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000020; + break; + } // case 56 + case 64: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000040; + break; + } // case 64 + case 82: + { + input.readMessage( + internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000080; + break; + } // case 82 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object bucket_ = ""; + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bucket to set. + * @return This builder for chaining. + */ + public Builder setBucket(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearBucket() { + bucket_ = getDefaultInstance().getBucket(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for bucket to set. + * @return This builder for chaining. + */ + public Builder setBucketBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object object_ = ""; + + /** + * + * + *
+     * Required. The name of the finalized object to delete.
+     * Note: If you want to delete an unfinalized resumable upload please use
+     * `CancelResumableWrite`.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + public java.lang.String getObject() { + java.lang.Object ref = object_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + object_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the finalized object to delete.
+     * Note: If you want to delete an unfinalized resumable upload please use
+     * `CancelResumableWrite`.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + public com.google.protobuf.ByteString getObjectBytes() { + java.lang.Object ref = object_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + object_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the finalized object to delete.
+     * Note: If you want to delete an unfinalized resumable upload please use
+     * `CancelResumableWrite`.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The object to set. + * @return This builder for chaining. + */ + public Builder setObject(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + object_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the finalized object to delete.
+     * Note: If you want to delete an unfinalized resumable upload please use
+     * `CancelResumableWrite`.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearObject() { + object_ = getDefaultInstance().getObject(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the finalized object to delete.
+     * Note: If you want to delete an unfinalized resumable upload please use
+     * `CancelResumableWrite`.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for object to set. + * @return This builder for chaining. + */ + public Builder setObjectBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + object_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private long generation_; + + /** + * + * + *
+     * Optional. If present, permanently deletes a specific revision of this
+     * object (as opposed to the latest version, the default).
+     * 
+ * + * int64 generation = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + /** + * + * + *
+     * Optional. If present, permanently deletes a specific revision of this
+     * object (as opposed to the latest version, the default).
+     * 
+ * + * int64 generation = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The generation to set. + * @return This builder for chaining. + */ + public Builder setGeneration(long value) { + + generation_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If present, permanently deletes a specific revision of this
+     * object (as opposed to the latest version, the default).
+     * 
+ * + * int64 generation = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearGeneration() { + bitField0_ = (bitField0_ & ~0x00000004); + generation_ = 0L; + onChanged(); + return this; + } + + private long ifGenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 5; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 5; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 5; + * + * @param value The ifGenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationMatch(long value) { + + ifGenerationMatch_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 5; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000008); + ifGenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifGenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 6; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 6; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 6; + * + * @param value The ifGenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationNotMatch(long value) { + + ifGenerationNotMatch_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 6; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000010); + ifGenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 7; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 7; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 7; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 7; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000020); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 8; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 8; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 8; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 8; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000040); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + commonObjectRequestParamsBuilder_; + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + if (commonObjectRequestParamsBuilder_ == null) { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } else { + return commonObjectRequestParamsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonObjectRequestParams_ = value; + } else { + commonObjectRequestParamsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams.Builder builderForValue) { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParams_ = builderForValue.build(); + } else { + commonObjectRequestParamsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0) + && commonObjectRequestParams_ != null + && commonObjectRequestParams_ + != com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance()) { + getCommonObjectRequestParamsBuilder().mergeFrom(value); + } else { + commonObjectRequestParams_ = value; + } + } else { + commonObjectRequestParamsBuilder_.mergeFrom(value); + } + if (commonObjectRequestParams_ != null) { + bitField0_ |= 0x00000080; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCommonObjectRequestParams() { + bitField0_ = (bitField0_ & ~0x00000080); + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParams.Builder + getCommonObjectRequestParamsBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + if (commonObjectRequestParamsBuilder_ != null) { + return commonObjectRequestParamsBuilder_.getMessageOrBuilder(); + } else { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + internalGetCommonObjectRequestParamsFieldBuilder() { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParamsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder>( + getCommonObjectRequestParams(), getParentForChildren(), isClean()); + commonObjectRequestParams_ = null; + } + return commonObjectRequestParamsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.DeleteObjectRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.DeleteObjectRequest) + private static final com.google.storage.v2.DeleteObjectRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.DeleteObjectRequest(); + } + + public static com.google.storage.v2.DeleteObjectRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteObjectRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.DeleteObjectRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/DeleteObjectRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/DeleteObjectRequestOrBuilder.java new file mode 100644 index 000000000000..3b97e2108265 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/DeleteObjectRequestOrBuilder.java @@ -0,0 +1,266 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface DeleteObjectRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.DeleteObjectRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + java.lang.String getBucket(); + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + com.google.protobuf.ByteString getBucketBytes(); + + /** + * + * + *
+   * Required. The name of the finalized object to delete.
+   * Note: If you want to delete an unfinalized resumable upload please use
+   * `CancelResumableWrite`.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + java.lang.String getObject(); + + /** + * + * + *
+   * Required. The name of the finalized object to delete.
+   * Note: If you want to delete an unfinalized resumable upload please use
+   * `CancelResumableWrite`.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + com.google.protobuf.ByteString getObjectBytes(); + + /** + * + * + *
+   * Optional. If present, permanently deletes a specific revision of this
+   * object (as opposed to the latest version, the default).
+   * 
+ * + * int64 generation = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + long getGeneration(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 5; + * + * @return Whether the ifGenerationMatch field is set. + */ + boolean hasIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 5; + * + * @return The ifGenerationMatch. + */ + long getIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 6; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + boolean hasIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 6; + * + * @return The ifGenerationNotMatch. + */ + long getIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 7; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 7; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 8; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 8; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + boolean hasCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.CommonObjectRequestParamsOrBuilder getCommonObjectRequestParamsOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/GetBucketRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/GetBucketRequest.java new file mode 100644 index 000000000000..d419a9bf55c8 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/GetBucketRequest.java @@ -0,0 +1,1198 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for [GetBucket][google.storage.v2.Storage.GetBucket].
+ * 
+ * + * Protobuf type {@code google.storage.v2.GetBucketRequest} + */ +@com.google.protobuf.Generated +public final class GetBucketRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.GetBucketRequest) + GetBucketRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetBucketRequest"); + } + + // Use GetBucketRequest.newBuilder() to construct. + private GetBucketRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetBucketRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_GetBucketRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_GetBucketRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.GetBucketRequest.class, + com.google.storage.v2.GetBucketRequest.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of a bucket.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of a bucket.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 2; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * If set, only gets the bucket metadata if its metageneration matches this
+   * value.
+   * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * If set, only gets the bucket metadata if its metageneration matches this
+   * value.
+   * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 3; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * If set, and if the bucket's current metageneration matches the specified
+   * value, the request returns an error.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * If set, and if the bucket's current metageneration matches the specified
+   * value, the request returns an error.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int READ_MASK_FIELD_NUMBER = 5; + private com.google.protobuf.FieldMask readMask_; + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * A `*` field might be used to indicate all fields.
+   * If no mask is specified, it defaults to all fields.
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + * + * @return Whether the readMask field is set. + */ + @java.lang.Override + public boolean hasReadMask() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * A `*` field might be used to indicate all fields.
+   * If no mask is specified, it defaults to all fields.
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + * + * @return The readMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getReadMask() { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * A `*` field might be used to indicate all fields.
+   * If no mask is specified, it defaults to all fields.
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder() { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(2, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(3, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(5, getReadMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getReadMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == 
this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.GetBucketRequest)) { + return super.equals(obj); + } + com.google.storage.v2.GetBucketRequest other = (com.google.storage.v2.GetBucketRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (hasReadMask() != other.hasReadMask()) return false; + if (hasReadMask()) { + if (!getReadMask().equals(other.getReadMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + if (hasReadMask()) { + hash = (37 * hash) + READ_MASK_FIELD_NUMBER; + hash = (53 * hash) + getReadMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.GetBucketRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.storage.v2.GetBucketRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.GetBucketRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.GetBucketRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.GetBucketRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.GetBucketRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.GetBucketRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.GetBucketRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.GetBucketRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static 
com.google.storage.v2.GetBucketRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.GetBucketRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.GetBucketRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.GetBucketRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for [GetBucket][google.storage.v2.Storage.GetBucket].
+   * 
+ * + * Protobuf type {@code google.storage.v2.GetBucketRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.GetBucketRequest) + com.google.storage.v2.GetBucketRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_GetBucketRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_GetBucketRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.GetBucketRequest.class, + com.google.storage.v2.GetBucketRequest.Builder.class); + } + + // Construct using com.google.storage.v2.GetBucketRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetReadMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + readMask_ = null; + if (readMaskBuilder_ != null) { + readMaskBuilder_.dispose(); + readMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_GetBucketRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.GetBucketRequest getDefaultInstanceForType() { + return 
com.google.storage.v2.GetBucketRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.GetBucketRequest build() { + com.google.storage.v2.GetBucketRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.GetBucketRequest buildPartial() { + com.google.storage.v2.GetBucketRequest result = + new com.google.storage.v2.GetBucketRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.GetBucketRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.readMask_ = readMaskBuilder_ == null ? 
readMask_ : readMaskBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.GetBucketRequest) { + return mergeFrom((com.google.storage.v2.GetBucketRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.GetBucketRequest other) { + if (other == com.google.storage.v2.GetBucketRequest.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (other.hasReadMask()) { + mergeReadMask(other.getReadMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 42: + { + input.readMessage( + internalGetReadMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ 
|= 0x00000008; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of a bucket.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of a bucket.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of a bucket.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of a bucket.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of a bucket.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * If set, only gets the bucket metadata if its metageneration matches this
+     * value.
+     * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * If set, only gets the bucket metadata if its metageneration matches this
+     * value.
+     * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * If set, only gets the bucket metadata if its metageneration matches this
+     * value.
+     * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * If set, only gets the bucket metadata if its metageneration matches this
+     * value.
+     * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000002); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * If set, and if the bucket's current metageneration matches the specified
+     * value, the request returns an error.
+     * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * If set, and if the bucket's current metageneration matches the specified
+     * value, the request returns an error.
+     * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * If set, and if the bucket's current metageneration matches the specified
+     * value, the request returns an error.
+     * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * If set, and if the bucket's current metageneration matches the specified
+     * value, the request returns an error.
+     * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000004); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private com.google.protobuf.FieldMask readMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + readMaskBuilder_; + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * A `*` field might be used to indicate all fields.
+     * If no mask is specified, it defaults to all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + * + * @return Whether the readMask field is set. + */ + public boolean hasReadMask() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * A `*` field might be used to indicate all fields.
+     * If no mask is specified, it defaults to all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + * + * @return The readMask. + */ + public com.google.protobuf.FieldMask getReadMask() { + if (readMaskBuilder_ == null) { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } else { + return readMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * A `*` field might be used to indicate all fields.
+     * If no mask is specified, it defaults to all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + public Builder setReadMask(com.google.protobuf.FieldMask value) { + if (readMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readMask_ = value; + } else { + readMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * A `*` field might be used to indicate all fields.
+     * If no mask is specified, it defaults to all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + public Builder setReadMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (readMaskBuilder_ == null) { + readMask_ = builderForValue.build(); + } else { + readMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * A `*` field might be used to indicate all fields.
+     * If no mask is specified, it defaults to all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + public Builder mergeReadMask(com.google.protobuf.FieldMask value) { + if (readMaskBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && readMask_ != null + && readMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getReadMaskBuilder().mergeFrom(value); + } else { + readMask_ = value; + } + } else { + readMaskBuilder_.mergeFrom(value); + } + if (readMask_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * A `*` field might be used to indicate all fields.
+     * If no mask is specified, it defaults to all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + public Builder clearReadMask() { + bitField0_ = (bitField0_ & ~0x00000008); + readMask_ = null; + if (readMaskBuilder_ != null) { + readMaskBuilder_.dispose(); + readMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * A `*` field might be used to indicate all fields.
+     * If no mask is specified, it defaults to all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + public com.google.protobuf.FieldMask.Builder getReadMaskBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetReadMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * A `*` field might be used to indicate all fields.
+     * If no mask is specified, it defaults to all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + public com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder() { + if (readMaskBuilder_ != null) { + return readMaskBuilder_.getMessageOrBuilder(); + } else { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * A `*` field might be used to indicate all fields.
+     * If no mask is specified, it defaults to all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetReadMaskFieldBuilder() { + if (readMaskBuilder_ == null) { + readMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getReadMask(), getParentForChildren(), isClean()); + readMask_ = null; + } + return readMaskBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.GetBucketRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.GetBucketRequest) + private static final com.google.storage.v2.GetBucketRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.GetBucketRequest(); + } + + public static com.google.storage.v2.GetBucketRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetBucketRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { 
+ return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.GetBucketRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/GetBucketRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/GetBucketRequestOrBuilder.java new file mode 100644 index 000000000000..c719f0050dd9 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/GetBucketRequestOrBuilder.java @@ -0,0 +1,157 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface GetBucketRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.GetBucketRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of a bucket.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of a bucket.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * If set, only gets the bucket metadata if its metageneration matches this
+   * value.
+   * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * If set, only gets the bucket metadata if its metageneration matches this
+   * value.
+   * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * If set, and if the bucket's current metageneration matches the specified
+   * value, the request returns an error.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * If set, and if the bucket's current metageneration matches the specified
+   * value, the request returns an error.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * A `*` field might be used to indicate all fields.
+   * If no mask is specified, it defaults to all fields.
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + * + * @return Whether the readMask field is set. + */ + boolean hasReadMask(); + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * A `*` field might be used to indicate all fields.
+   * If no mask is specified, it defaults to all fields.
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + * + * @return The readMask. + */ + com.google.protobuf.FieldMask getReadMask(); + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * A `*` field might be used to indicate all fields.
+   * If no mask is specified, it defaults to all fields.
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/GetObjectRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/GetObjectRequest.java new file mode 100644 index 000000000000..32957e21a70c --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/GetObjectRequest.java @@ -0,0 +1,2483 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for [GetObject][google.storage.v2.Storage.GetObject].
+ * 
+ * + * Protobuf type {@code google.storage.v2.GetObjectRequest} + */ +@com.google.protobuf.Generated +public final class GetObjectRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.GetObjectRequest) + GetObjectRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetObjectRequest"); + } + + // Use GetObjectRequest.newBuilder() to construct. + private GetObjectRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetObjectRequest() { + bucket_ = ""; + object_ = ""; + restoreToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_GetObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_GetObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.GetObjectRequest.class, + com.google.storage.v2.GetObjectRequest.Builder.class); + } + + private int bitField0_; + public static final int BUCKET_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object bucket_ = ""; + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + @java.lang.Override + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OBJECT_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object object_ = ""; + + /** + * + * + *
+   * Required. Name of the object.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + @java.lang.Override + public java.lang.String getObject() { + java.lang.Object ref = object_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + object_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the object.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + @java.lang.Override + public com.google.protobuf.ByteString getObjectBytes() { + java.lang.Object ref = object_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + object_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int GENERATION_FIELD_NUMBER = 3; + private long generation_ = 0L; + + /** + * + * + *
+   * Optional. If present, selects a specific revision of this object (as
+   * opposed to the latest version, the default).
+   * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + public static final int SOFT_DELETED_FIELD_NUMBER = 11; + private boolean softDeleted_ = false; + + /** + * + * + *
+   * If true, return the soft-deleted version of this object.
+   * 
+ * + * optional bool soft_deleted = 11; + * + * @return Whether the softDeleted field is set. + */ + @java.lang.Override + public boolean hasSoftDeleted() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * If true, return the soft-deleted version of this object.
+   * 
+ * + * optional bool soft_deleted = 11; + * + * @return The softDeleted. + */ + @java.lang.Override + public boolean getSoftDeleted() { + return softDeleted_; + } + + public static final int IF_GENERATION_MATCH_FIELD_NUMBER = 4; + private long ifGenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + public static final int IF_GENERATION_NOT_MATCH_FIELD_NUMBER = 5; + private long ifGenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 6; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 7; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER = 8; + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + @java.lang.Override + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + public static final int READ_MASK_FIELD_NUMBER = 10; + private com.google.protobuf.FieldMask readMask_; + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * If no mask is specified, it defaults to all fields except `metadata.
+   * acl` and `metadata.owner`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + * + * @return Whether the readMask field is set. + */ + @java.lang.Override + public boolean hasReadMask() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * If no mask is specified, it defaults to all fields except `metadata.
+   * acl` and `metadata.owner`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + * + * @return The readMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getReadMask() { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * If no mask is specified, it defaults to all fields except `metadata.
+   * acl` and `metadata.owner`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder() { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + + public static final int RESTORE_TOKEN_FIELD_NUMBER = 12; + + @SuppressWarnings("serial") + private volatile java.lang.Object restoreToken_ = ""; + + /** + * + * + *
+   * Optional. Restore token used to differentiate soft-deleted objects with the
+   * same name and generation. Only applicable for hierarchical namespace
+   * buckets and if `soft_deleted` is set to `true`. This parameter is optional,
+   * and is only required in the rare case when there are multiple soft-deleted
+   * objects with the same `name` and `generation`.
+   * 
+ * + * string restore_token = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The restoreToken. + */ + @java.lang.Override + public java.lang.String getRestoreToken() { + java.lang.Object ref = restoreToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + restoreToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Restore token used to differentiate soft-deleted objects with the
+   * same name and generation. Only applicable for hierarchical namespace
+   * buckets and if `soft_deleted` is set to `true`. This parameter is optional,
+   * and is only required in the rare case when there are multiple soft-deleted
+   * objects with the same `name` and `generation`.
+   * 
+ * + * string restore_token = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for restoreToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRestoreTokenBytes() { + java.lang.Object ref = restoreToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + restoreToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, bucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(object_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, object_); + } + if (generation_ != 0L) { + output.writeInt64(3, generation_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(4, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt64(5, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeInt64(6, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeInt64(7, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeMessage(8, getCommonObjectRequestParams()); + } + if (((bitField0_ & 0x00000040) != 0)) { + output.writeMessage(10, getReadMask()); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeBool(11, softDeleted_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(restoreToken_)) { + 
com.google.protobuf.GeneratedMessage.writeString(output, 12, restoreToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, bucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(object_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, object_); + } + if (generation_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, generation_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(5, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(6, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(7, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 8, getCommonObjectRequestParams()); + } + if (((bitField0_ & 0x00000040) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(10, getReadMask()); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(11, softDeleted_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(restoreToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(12, restoreToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + 
return true; + } + if (!(obj instanceof com.google.storage.v2.GetObjectRequest)) { + return super.equals(obj); + } + com.google.storage.v2.GetObjectRequest other = (com.google.storage.v2.GetObjectRequest) obj; + + if (!getBucket().equals(other.getBucket())) return false; + if (!getObject().equals(other.getObject())) return false; + if (getGeneration() != other.getGeneration()) return false; + if (hasSoftDeleted() != other.hasSoftDeleted()) return false; + if (hasSoftDeleted()) { + if (getSoftDeleted() != other.getSoftDeleted()) return false; + } + if (hasIfGenerationMatch() != other.hasIfGenerationMatch()) return false; + if (hasIfGenerationMatch()) { + if (getIfGenerationMatch() != other.getIfGenerationMatch()) return false; + } + if (hasIfGenerationNotMatch() != other.hasIfGenerationNotMatch()) return false; + if (hasIfGenerationNotMatch()) { + if (getIfGenerationNotMatch() != other.getIfGenerationNotMatch()) return false; + } + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (hasCommonObjectRequestParams() != other.hasCommonObjectRequestParams()) return false; + if (hasCommonObjectRequestParams()) { + if (!getCommonObjectRequestParams().equals(other.getCommonObjectRequestParams())) + return false; + } + if (hasReadMask() != other.hasReadMask()) return false; + if (hasReadMask()) { + if (!getReadMask().equals(other.getReadMask())) return false; + } + if (!getRestoreToken().equals(other.getRestoreToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 
0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getBucket().hashCode(); + hash = (37 * hash) + OBJECT_FIELD_NUMBER; + hash = (53 * hash) + getObject().hashCode(); + hash = (37 * hash) + GENERATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getGeneration()); + if (hasSoftDeleted()) { + hash = (37 * hash) + SOFT_DELETED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSoftDeleted()); + } + if (hasIfGenerationMatch()) { + hash = (37 * hash) + IF_GENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationMatch()); + } + if (hasIfGenerationNotMatch()) { + hash = (37 * hash) + IF_GENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationNotMatch()); + } + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + if (hasCommonObjectRequestParams()) { + hash = (37 * hash) + COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getCommonObjectRequestParams().hashCode(); + } + if (hasReadMask()) { + hash = (37 * hash) + READ_MASK_FIELD_NUMBER; + hash = (53 * hash) + getReadMask().hashCode(); + } + hash = (37 * hash) + RESTORE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getRestoreToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.GetObjectRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.GetObjectRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.GetObjectRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.GetObjectRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.GetObjectRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.GetObjectRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.GetObjectRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.GetObjectRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.GetObjectRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public 
static com.google.storage.v2.GetObjectRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.GetObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.GetObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.GetObjectRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for [GetObject][google.storage.v2.Storage.GetObject].
+   * 
+ * + * Protobuf type {@code google.storage.v2.GetObjectRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.GetObjectRequest) + com.google.storage.v2.GetObjectRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_GetObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_GetObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.GetObjectRequest.class, + com.google.storage.v2.GetObjectRequest.Builder.class); + } + + // Construct using com.google.storage.v2.GetObjectRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCommonObjectRequestParamsFieldBuilder(); + internalGetReadMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + bucket_ = ""; + object_ = ""; + generation_ = 0L; + softDeleted_ = false; + ifGenerationMatch_ = 0L; + ifGenerationNotMatch_ = 0L; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + readMask_ = null; + if (readMaskBuilder_ != null) { + readMaskBuilder_.dispose(); + readMaskBuilder_ = null; + } + restoreToken_ = ""; + 
return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_GetObjectRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.GetObjectRequest getDefaultInstanceForType() { + return com.google.storage.v2.GetObjectRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.GetObjectRequest build() { + com.google.storage.v2.GetObjectRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.GetObjectRequest buildPartial() { + com.google.storage.v2.GetObjectRequest result = + new com.google.storage.v2.GetObjectRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.GetObjectRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.bucket_ = bucket_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.object_ = object_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.generation_ = generation_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.softDeleted_ = softDeleted_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.ifGenerationMatch_ = ifGenerationMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.ifGenerationNotMatch_ = ifGenerationNotMatch_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 
0x00000010; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.commonObjectRequestParams_ = + commonObjectRequestParamsBuilder_ == null + ? commonObjectRequestParams_ + : commonObjectRequestParamsBuilder_.build(); + to_bitField0_ |= 0x00000020; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.readMask_ = readMaskBuilder_ == null ? readMask_ : readMaskBuilder_.build(); + to_bitField0_ |= 0x00000040; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.restoreToken_ = restoreToken_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.GetObjectRequest) { + return mergeFrom((com.google.storage.v2.GetObjectRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.GetObjectRequest other) { + if (other == com.google.storage.v2.GetObjectRequest.getDefaultInstance()) return this; + if (!other.getBucket().isEmpty()) { + bucket_ = other.bucket_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getObject().isEmpty()) { + object_ = other.object_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getGeneration() != 0L) { + setGeneration(other.getGeneration()); + } + if (other.hasSoftDeleted()) { + setSoftDeleted(other.getSoftDeleted()); + } + if (other.hasIfGenerationMatch()) { + setIfGenerationMatch(other.getIfGenerationMatch()); + } + if (other.hasIfGenerationNotMatch()) { + setIfGenerationNotMatch(other.getIfGenerationNotMatch()); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (other.hasCommonObjectRequestParams()) { + mergeCommonObjectRequestParams(other.getCommonObjectRequestParams()); + } + if (other.hasReadMask()) { + 
mergeReadMask(other.getReadMask()); + } + if (!other.getRestoreToken().isEmpty()) { + restoreToken_ = other.restoreToken_; + bitField0_ |= 0x00000400; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + bucket_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + object_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + generation_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: + { + ifGenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000010; + break; + } // case 32 + case 40: + { + ifGenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000020; + break; + } // case 40 + case 48: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000040; + break; + } // case 48 + case 56: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000080; + break; + } // case 56 + case 66: + { + input.readMessage( + internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000100; + break; + } // case 66 + case 82: + { + input.readMessage( + internalGetReadMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000200; + break; + } // case 82 + case 88: + { + softDeleted_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 88 + case 98: + { + restoreToken_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000400; + break; + } // case 98 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object bucket_ = ""; + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bucket to set. + * @return This builder for chaining. + */ + public Builder setBucket(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearBucket() { + bucket_ = getDefaultInstance().getBucket(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for bucket to set. + * @return This builder for chaining. + */ + public Builder setBucketBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object object_ = ""; + + /** + * + * + *
+     * Required. Name of the object.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + public java.lang.String getObject() { + java.lang.Object ref = object_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + object_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the object.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + public com.google.protobuf.ByteString getObjectBytes() { + java.lang.Object ref = object_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + object_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the object.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The object to set. + * @return This builder for chaining. + */ + public Builder setObject(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + object_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the object.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearObject() { + object_ = getDefaultInstance().getObject(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the object.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for object to set. + * @return This builder for chaining. + */ + public Builder setObjectBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + object_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private long generation_; + + /** + * + * + *
+     * Optional. If present, selects a specific revision of this object (as
+     * opposed to the latest version, the default).
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + /** + * + * + *
+     * Optional. If present, selects a specific revision of this object (as
+     * opposed to the latest version, the default).
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The generation to set. + * @return This builder for chaining. + */ + public Builder setGeneration(long value) { + + generation_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If present, selects a specific revision of this object (as
+     * opposed to the latest version, the default).
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearGeneration() { + bitField0_ = (bitField0_ & ~0x00000004); + generation_ = 0L; + onChanged(); + return this; + } + + private boolean softDeleted_; + + /** + * + * + *
+     * If true, return the soft-deleted version of this object.
+     * 
+ * + * optional bool soft_deleted = 11; + * + * @return Whether the softDeleted field is set. + */ + @java.lang.Override + public boolean hasSoftDeleted() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * If true, return the soft-deleted version of this object.
+     * 
+ * + * optional bool soft_deleted = 11; + * + * @return The softDeleted. + */ + @java.lang.Override + public boolean getSoftDeleted() { + return softDeleted_; + } + + /** + * + * + *
+     * If true, return the soft-deleted version of this object.
+     * 
+ * + * optional bool soft_deleted = 11; + * + * @param value The softDeleted to set. + * @return This builder for chaining. + */ + public Builder setSoftDeleted(boolean value) { + + softDeleted_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * If true, return the soft-deleted version of this object.
+     * 
+ * + * optional bool soft_deleted = 11; + * + * @return This builder for chaining. + */ + public Builder clearSoftDeleted() { + bitField0_ = (bitField0_ & ~0x00000008); + softDeleted_ = false; + onChanged(); + return this; + } + + private long ifGenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 4; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 4; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 4; + * + * @param value The ifGenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationMatch(long value) { + + ifGenerationMatch_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 4; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000010); + ifGenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifGenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @param value The ifGenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationNotMatch(long value) { + + ifGenerationNotMatch_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000020); + ifGenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000040); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000080); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + commonObjectRequestParamsBuilder_; + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + if (commonObjectRequestParamsBuilder_ == null) { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } else { + return commonObjectRequestParamsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonObjectRequestParams_ = value; + } else { + commonObjectRequestParamsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams.Builder builderForValue) { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParams_ = builderForValue.build(); + } else { + commonObjectRequestParamsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (((bitField0_ & 0x00000100) != 0) + && commonObjectRequestParams_ != null + && commonObjectRequestParams_ + != com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance()) { + getCommonObjectRequestParamsBuilder().mergeFrom(value); + } else { + commonObjectRequestParams_ = value; + } + } else { + commonObjectRequestParamsBuilder_.mergeFrom(value); + } + if (commonObjectRequestParams_ != null) { + bitField0_ |= 0x00000100; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCommonObjectRequestParams() { + bitField0_ = (bitField0_ & ~0x00000100); + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParams.Builder + getCommonObjectRequestParamsBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + if (commonObjectRequestParamsBuilder_ != null) { + return commonObjectRequestParamsBuilder_.getMessageOrBuilder(); + } else { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + internalGetCommonObjectRequestParamsFieldBuilder() { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParamsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder>( + getCommonObjectRequestParams(), getParentForChildren(), isClean()); + commonObjectRequestParams_ = null; + } + return commonObjectRequestParamsBuilder_; + } + + private com.google.protobuf.FieldMask readMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + readMaskBuilder_; + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * acl` and `metadata.owner`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + * + * @return Whether the readMask field is set. + */ + public boolean hasReadMask() { + return ((bitField0_ & 0x00000200) != 0); + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * acl` and `metadata.owner`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + * + * @return The readMask. + */ + public com.google.protobuf.FieldMask getReadMask() { + if (readMaskBuilder_ == null) { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } else { + return readMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * acl` and `metadata.owner`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + */ + public Builder setReadMask(com.google.protobuf.FieldMask value) { + if (readMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readMask_ = value; + } else { + readMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * acl` and `metadata.owner`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + */ + public Builder setReadMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (readMaskBuilder_ == null) { + readMask_ = builderForValue.build(); + } else { + readMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * acl` and `metadata.owner`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + */ + public Builder mergeReadMask(com.google.protobuf.FieldMask value) { + if (readMaskBuilder_ == null) { + if (((bitField0_ & 0x00000200) != 0) + && readMask_ != null + && readMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getReadMaskBuilder().mergeFrom(value); + } else { + readMask_ = value; + } + } else { + readMaskBuilder_.mergeFrom(value); + } + if (readMask_ != null) { + bitField0_ |= 0x00000200; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * acl` and `metadata.owner`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + */ + public Builder clearReadMask() { + bitField0_ = (bitField0_ & ~0x00000200); + readMask_ = null; + if (readMaskBuilder_ != null) { + readMaskBuilder_.dispose(); + readMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * acl` and `metadata.owner`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + */ + public com.google.protobuf.FieldMask.Builder getReadMaskBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return internalGetReadMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * acl` and `metadata.owner`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + */ + public com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder() { + if (readMaskBuilder_ != null) { + return readMaskBuilder_.getMessageOrBuilder(); + } else { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * acl` and `metadata.owner`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetReadMaskFieldBuilder() { + if (readMaskBuilder_ == null) { + readMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getReadMask(), getParentForChildren(), isClean()); + readMask_ = null; + } + return readMaskBuilder_; + } + + private java.lang.Object restoreToken_ = ""; + + /** + * + * + *
+     * Optional. Restore token used to differentiate soft-deleted objects with the
+     * same name and generation. Only applicable for hierarchical namespace
+     * buckets and if `soft_deleted` is set to `true`. This parameter is optional,
+     * and is only required in the rare case when there are multiple soft-deleted
+     * objects with the same `name` and `generation`.
+     * 
+ * + * string restore_token = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The restoreToken. + */ + public java.lang.String getRestoreToken() { + java.lang.Object ref = restoreToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + restoreToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Restore token used to differentiate soft-deleted objects with the
+     * same name and generation. Only applicable for hierarchical namespace
+     * buckets and if `soft_deleted` is set to `true`. This parameter is optional,
+     * and is only required in the rare case when there are multiple soft-deleted
+     * objects with the same `name` and `generation`.
+     * 
+ * + * string restore_token = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for restoreToken. + */ + public com.google.protobuf.ByteString getRestoreTokenBytes() { + java.lang.Object ref = restoreToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + restoreToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Restore token used to differentiate soft-deleted objects with the
+     * same name and generation. Only applicable for hierarchical namespace
+     * buckets and if `soft_deleted` is set to `true`. This parameter is optional,
+     * and is only required in the rare case when there are multiple soft-deleted
+     * objects with the same `name` and `generation`.
+     * 
+ * + * string restore_token = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The restoreToken to set. + * @return This builder for chaining. + */ + public Builder setRestoreToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + restoreToken_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Restore token used to differentiate soft-deleted objects with the
+     * same name and generation. Only applicable for hierarchical namespace
+     * buckets and if `soft_deleted` is set to `true`. This parameter is optional,
+     * and is only required in the rare case when there are multiple soft-deleted
+     * objects with the same `name` and `generation`.
+     * 
+ * + * string restore_token = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearRestoreToken() { + restoreToken_ = getDefaultInstance().getRestoreToken(); + bitField0_ = (bitField0_ & ~0x00000400); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Restore token used to differentiate soft-deleted objects with the
+     * same name and generation. Only applicable for hierarchical namespace
+     * buckets and if `soft_deleted` is set to `true`. This parameter is optional,
+     * and is only required in the rare case when there are multiple soft-deleted
+     * objects with the same `name` and `generation`.
+     * 
+ * + * string restore_token = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for restoreToken to set. + * @return This builder for chaining. + */ + public Builder setRestoreTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + restoreToken_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.GetObjectRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.GetObjectRequest) + private static final com.google.storage.v2.GetObjectRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.GetObjectRequest(); + } + + public static com.google.storage.v2.GetObjectRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetObjectRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.storage.v2.GetObjectRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/GetObjectRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/GetObjectRequestOrBuilder.java new file mode 100644 index 000000000000..91140466b317 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/GetObjectRequestOrBuilder.java @@ -0,0 +1,368 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface GetObjectRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.GetObjectRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + java.lang.String getBucket(); + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + com.google.protobuf.ByteString getBucketBytes(); + + /** + * + * + *
+   * Required. Name of the object.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + java.lang.String getObject(); + + /** + * + * + *
+   * Required. Name of the object.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + com.google.protobuf.ByteString getObjectBytes(); + + /** + * + * + *
+   * Optional. If present, selects a specific revision of this object (as
+   * opposed to the latest version, the default).
+   * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + long getGeneration(); + + /** + * + * + *
+   * If true, return the soft-deleted version of this object.
+   * 
+ * + * optional bool soft_deleted = 11; + * + * @return Whether the softDeleted field is set. + */ + boolean hasSoftDeleted(); + + /** + * + * + *
+   * If true, return the soft-deleted version of this object.
+   * 
+ * + * optional bool soft_deleted = 11; + * + * @return The softDeleted. + */ + boolean getSoftDeleted(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return Whether the ifGenerationMatch field is set. + */ + boolean hasIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return The ifGenerationMatch. + */ + long getIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + boolean hasIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return The ifGenerationNotMatch. + */ + long getIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + boolean hasCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.CommonObjectRequestParamsOrBuilder getCommonObjectRequestParamsOrBuilder(); + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * If no mask is specified, it defaults to all fields except `metadata.
+   * acl` and `metadata.owner`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + * + * @return Whether the readMask field is set. + */ + boolean hasReadMask(); + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * If no mask is specified, it defaults to all fields except `metadata.
+   * acl` and `metadata.owner`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + * + * @return The readMask. + */ + com.google.protobuf.FieldMask getReadMask(); + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * If no mask is specified, it defaults to all fields except `metadata.
+   * acl` and `metadata.owner`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 10; + */ + com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder(); + + /** + * + * + *
+   * Optional. Restore token used to differentiate soft-deleted objects with the
+   * same name and generation. Only applicable for hierarchical namespace
+   * buckets and if `soft_deleted` is set to `true`. This parameter is optional,
+   * and is only required in the rare case when there are multiple soft-deleted
+   * objects with the same `name` and `generation`.
+   * 
+ * + * string restore_token = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The restoreToken. + */ + java.lang.String getRestoreToken(); + + /** + * + * + *
+   * Optional. Restore token used to differentiate soft-deleted objects with the
+   * same name and generation. Only applicable for hierarchical namespace
+   * buckets and if `soft_deleted` is set to `true`. This parameter is optional,
+   * and is only required in the rare case when there are multiple soft-deleted
+   * objects with the same `name` and `generation`.
+   * 
+ * + * string restore_token = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for restoreToken. + */ + com.google.protobuf.ByteString getRestoreTokenBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListBucketsRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListBucketsRequest.java new file mode 100644 index 000000000000..9654f1b7e9a5 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListBucketsRequest.java @@ -0,0 +1,1523 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for [ListBuckets][google.storage.v2.Storage.ListBuckets].
+ * 
+ * + * Protobuf type {@code google.storage.v2.ListBucketsRequest} + */ +@com.google.protobuf.Generated +public final class ListBucketsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ListBucketsRequest) + ListBucketsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListBucketsRequest"); + } + + // Use ListBucketsRequest.newBuilder() to construct. + private ListBucketsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListBucketsRequest() { + parent_ = ""; + pageToken_ = ""; + prefix_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListBucketsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListBucketsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ListBucketsRequest.class, + com.google.storage.v2.ListBucketsRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. The project whose buckets we are listing.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The project whose buckets we are listing.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
+   * Optional. Maximum number of buckets to return in a single response. The
+   * service uses this parameter or `1,000` items, whichever is smaller. If
+   * `acl` is present in the `read_mask`, the service uses this parameter of
+   * `200` items, whichever is smaller.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PREFIX_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object prefix_ = ""; + + /** + * + * + *
+   * Optional. Filter results to buckets whose names begin with this prefix.
+   * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The prefix. + */ + @java.lang.Override + public java.lang.String getPrefix() { + java.lang.Object ref = prefix_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + prefix_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Filter results to buckets whose names begin with this prefix.
+   * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for prefix. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPrefixBytes() { + java.lang.Object ref = prefix_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + prefix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int READ_MASK_FIELD_NUMBER = 5; + private com.google.protobuf.FieldMask readMask_; + + /** + * + * + *
+   * Mask specifying which fields to read from each result.
+   * If no mask is specified, it defaults to all fields except `items.
+   * owner`, `items.acl`, and `items.default_object_acl`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + * + * @return Whether the readMask field is set. + */ + @java.lang.Override + public boolean hasReadMask() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Mask specifying which fields to read from each result.
+   * If no mask is specified, it defaults to all fields except `items.
+   * owner`, `items.acl`, and `items.default_object_acl`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + * + * @return The readMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getReadMask() { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + + /** + * + * + *
+   * Mask specifying which fields to read from each result.
+   * If no mask is specified, it defaults to all fields except `items.
+   * owner`, `items.acl`, and `items.default_object_acl`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder() { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + + public static final int RETURN_PARTIAL_SUCCESS_FIELD_NUMBER = 9; + private boolean returnPartialSuccess_ = false; + + /** + * + * + *
+   * Optional. Allows listing of buckets, even if there are buckets that are
+   * unreachable.
+   * 
+ * + * bool return_partial_success = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The returnPartialSuccess. + */ + @java.lang.Override + public boolean getReturnPartialSuccess() { + return returnPartialSuccess_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(prefix_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, prefix_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(5, getReadMask()); + } + if (returnPartialSuccess_ != false) { + output.writeBool(9, returnPartialSuccess_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(prefix_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(4, prefix_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getReadMask()); + } + if (returnPartialSuccess_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(9, returnPartialSuccess_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ListBucketsRequest)) { + return super.equals(obj); + } + com.google.storage.v2.ListBucketsRequest other = (com.google.storage.v2.ListBucketsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getPrefix().equals(other.getPrefix())) return false; + if (hasReadMask() != other.hasReadMask()) return false; + if (hasReadMask()) { + if (!getReadMask().equals(other.getReadMask())) return false; + } + if (getReturnPartialSuccess() != other.getReturnPartialSuccess()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (37 * hash) + PREFIX_FIELD_NUMBER; + hash = (53 * hash) + getPrefix().hashCode(); + if (hasReadMask()) { + hash = (37 * hash) + READ_MASK_FIELD_NUMBER; + hash = (53 * hash) + getReadMask().hashCode(); + } + hash = (37 
* hash) + RETURN_PARTIAL_SUCCESS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getReturnPartialSuccess()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ListBucketsRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ListBucketsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ListBucketsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ListBucketsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ListBucketsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ListBucketsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ListBucketsRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ListBucketsRequest parseFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ListBucketsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ListBucketsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ListBucketsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ListBucketsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ListBucketsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for [ListBuckets][google.storage.v2.Storage.ListBuckets].
+   * 
+ * + * Protobuf type {@code google.storage.v2.ListBucketsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ListBucketsRequest) + com.google.storage.v2.ListBucketsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListBucketsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListBucketsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ListBucketsRequest.class, + com.google.storage.v2.ListBucketsRequest.Builder.class); + } + + // Construct using com.google.storage.v2.ListBucketsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetReadMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + prefix_ = ""; + readMask_ = null; + if (readMaskBuilder_ != null) { + readMaskBuilder_.dispose(); + readMaskBuilder_ = null; + } + returnPartialSuccess_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListBucketsRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ListBucketsRequest getDefaultInstanceForType() { + 
return com.google.storage.v2.ListBucketsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ListBucketsRequest build() { + com.google.storage.v2.ListBucketsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ListBucketsRequest buildPartial() { + com.google.storage.v2.ListBucketsRequest result = + new com.google.storage.v2.ListBucketsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ListBucketsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.prefix_ = prefix_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.readMask_ = readMaskBuilder_ == null ? 
readMask_ : readMaskBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.returnPartialSuccess_ = returnPartialSuccess_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ListBucketsRequest) { + return mergeFrom((com.google.storage.v2.ListBucketsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ListBucketsRequest other) { + if (other == com.google.storage.v2.ListBucketsRequest.getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getPrefix().isEmpty()) { + prefix_ = other.prefix_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (other.hasReadMask()) { + mergeReadMask(other.getReadMask()); + } + if (other.getReturnPartialSuccess() != false) { + setReturnPartialSuccess(other.getReturnPartialSuccess()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = 
input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + prefix_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetReadMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 72: + { + returnPartialSuccess_ = input.readBool(); + bitField0_ |= 0x00000020; + break; + } // case 72 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. The project whose buckets we are listing.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The project whose buckets we are listing.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The project whose buckets we are listing.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The project whose buckets we are listing.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The project whose buckets we are listing.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
+     * Optional. Maximum number of buckets to return in a single response. The
+     * service uses this parameter or `1,000` items, whichever is smaller. If
+     * `acl` is present in the `read_mask`, the service uses this parameter of
+     * `200` items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
+     * Optional. Maximum number of buckets to return in a single response. The
+     * service uses this parameter or `1,000` items, whichever is smaller. If
+     * `acl` is present in the `read_mask`, the service uses this parameter of
+     * `200` items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Maximum number of buckets to return in a single response. The
+     * service uses this parameter or `1,000` items, whichever is smaller. If
+     * `acl` is present in the `read_mask`, the service uses this parameter of
+     * `200` items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object prefix_ = ""; + + /** + * + * + *
+     * Optional. Filter results to buckets whose names begin with this prefix.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The prefix. + */ + public java.lang.String getPrefix() { + java.lang.Object ref = prefix_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + prefix_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to buckets whose names begin with this prefix.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for prefix. + */ + public com.google.protobuf.ByteString getPrefixBytes() { + java.lang.Object ref = prefix_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + prefix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to buckets whose names begin with this prefix.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The prefix to set. + * @return This builder for chaining. + */ + public Builder setPrefix(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + prefix_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to buckets whose names begin with this prefix.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPrefix() { + prefix_ = getDefaultInstance().getPrefix(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to buckets whose names begin with this prefix.
+     * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for prefix to set. + * @return This builder for chaining. + */ + public Builder setPrefixBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + prefix_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private com.google.protobuf.FieldMask readMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + readMaskBuilder_; + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, it defaults to all fields except `items.
+     * owner`, `items.acl`, and `items.default_object_acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + * + * @return Whether the readMask field is set. + */ + public boolean hasReadMask() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, it defaults to all fields except `items.
+     * owner`, `items.acl`, and `items.default_object_acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + * + * @return The readMask. + */ + public com.google.protobuf.FieldMask getReadMask() { + if (readMaskBuilder_ == null) { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } else { + return readMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, it defaults to all fields except `items.
+     * owner`, `items.acl`, and `items.default_object_acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + public Builder setReadMask(com.google.protobuf.FieldMask value) { + if (readMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readMask_ = value; + } else { + readMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, it defaults to all fields except `items.
+     * owner`, `items.acl`, and `items.default_object_acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + public Builder setReadMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (readMaskBuilder_ == null) { + readMask_ = builderForValue.build(); + } else { + readMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, it defaults to all fields except `items.
+     * owner`, `items.acl`, and `items.default_object_acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + public Builder mergeReadMask(com.google.protobuf.FieldMask value) { + if (readMaskBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && readMask_ != null + && readMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getReadMaskBuilder().mergeFrom(value); + } else { + readMask_ = value; + } + } else { + readMaskBuilder_.mergeFrom(value); + } + if (readMask_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, it defaults to all fields except `items.
+     * owner`, `items.acl`, and `items.default_object_acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + public Builder clearReadMask() { + bitField0_ = (bitField0_ & ~0x00000010); + readMask_ = null; + if (readMaskBuilder_ != null) { + readMaskBuilder_.dispose(); + readMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, it defaults to all fields except `items.
+     * owner`, `items.acl`, and `items.default_object_acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + public com.google.protobuf.FieldMask.Builder getReadMaskBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetReadMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, it defaults to all fields except `items.
+     * owner`, `items.acl`, and `items.default_object_acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + public com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder() { + if (readMaskBuilder_ != null) { + return readMaskBuilder_.getMessageOrBuilder(); + } else { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, it defaults to all fields except `items.
+     * owner`, `items.acl`, and `items.default_object_acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetReadMaskFieldBuilder() { + if (readMaskBuilder_ == null) { + readMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getReadMask(), getParentForChildren(), isClean()); + readMask_ = null; + } + return readMaskBuilder_; + } + + private boolean returnPartialSuccess_; + + /** + * + * + *
+     * Optional. Allows listing of buckets, even if there are buckets that are
+     * unreachable.
+     * 
+ * + * bool return_partial_success = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The returnPartialSuccess. + */ + @java.lang.Override + public boolean getReturnPartialSuccess() { + return returnPartialSuccess_; + } + + /** + * + * + *
+     * Optional. Allows listing of buckets, even if there are buckets that are
+     * unreachable.
+     * 
+ * + * bool return_partial_success = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The returnPartialSuccess to set. + * @return This builder for chaining. + */ + public Builder setReturnPartialSuccess(boolean value) { + + returnPartialSuccess_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Allows listing of buckets, even if there are buckets that are
+     * unreachable.
+     * 
+ * + * bool return_partial_success = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearReturnPartialSuccess() { + bitField0_ = (bitField0_ & ~0x00000020); + returnPartialSuccess_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ListBucketsRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ListBucketsRequest) + private static final com.google.storage.v2.ListBucketsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ListBucketsRequest(); + } + + public static com.google.storage.v2.ListBucketsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListBucketsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ListBucketsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListBucketsRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListBucketsRequestOrBuilder.java new file mode 100644 index 000000000000..68af45a440c8 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListBucketsRequestOrBuilder.java @@ -0,0 +1,188 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ListBucketsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ListBucketsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The project whose buckets we are listing.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. The project whose buckets we are listing.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. Maximum number of buckets to return in a single response. The
+   * service uses this parameter or `1,000` items, whichever is smaller. If
+   * `acl` is present in the `read_mask`, the service uses this parameter of
+   * `200` items, whichever is smaller.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); + + /** + * + * + *
+   * Optional. Filter results to buckets whose names begin with this prefix.
+   * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The prefix. + */ + java.lang.String getPrefix(); + + /** + * + * + *
+   * Optional. Filter results to buckets whose names begin with this prefix.
+   * 
+ * + * string prefix = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for prefix. + */ + com.google.protobuf.ByteString getPrefixBytes(); + + /** + * + * + *
+   * Mask specifying which fields to read from each result.
+   * If no mask is specified, it defaults to all fields except
+   * `items.owner`, `items.acl`, and `items.default_object_acl`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + * + * @return Whether the readMask field is set. + */ + boolean hasReadMask(); + + /** + * + * + *
+   * Mask specifying which fields to read from each result.
+   * If no mask is specified, it defaults to all fields except
+   * `items.owner`, `items.acl`, and `items.default_object_acl`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + * + * @return The readMask. + */ + com.google.protobuf.FieldMask getReadMask(); + + /** + * + * + *
+   * Mask specifying which fields to read from each result.
+   * If no mask is specified, it defaults to all fields except
+   * `items.owner`, `items.acl`, and `items.default_object_acl`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 5; + */ + com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder(); + + /** + * + * + *
+   * Optional. Allows listing of buckets, even if there are buckets that are
+   * unreachable.
+   * 
+ * + * bool return_partial_success = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The returnPartialSuccess. + */ + boolean getReturnPartialSuccess(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListBucketsResponse.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListBucketsResponse.java new file mode 100644 index 000000000000..d8bca71dc321 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListBucketsResponse.java @@ -0,0 +1,1518 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Response message for [ListBuckets][google.storage.v2.Storage.ListBuckets].
+ * 
+ * + * Protobuf type {@code google.storage.v2.ListBucketsResponse} + */ +@com.google.protobuf.Generated +public final class ListBucketsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ListBucketsResponse) + ListBucketsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListBucketsResponse"); + } + + // Use ListBucketsResponse.newBuilder() to construct. + private ListBucketsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListBucketsResponse() { + buckets_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListBucketsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListBucketsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ListBucketsResponse.class, + com.google.storage.v2.ListBucketsResponse.Builder.class); + } + + public static final int BUCKETS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List buckets_; + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + @java.lang.Override + public java.util.List getBucketsList() { + return buckets_; + } + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + @java.lang.Override + public java.util.List getBucketsOrBuilderList() { + return buckets_; + } + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + @java.lang.Override + public int getBucketsCount() { + return buckets_.size(); + } + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + @java.lang.Override + public com.google.storage.v2.Bucket getBuckets(int index) { + return buckets_.get(index); + } + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + @java.lang.Override + public com.google.storage.v2.BucketOrBuilder getBucketsOrBuilder(int index) { + return buckets_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int UNREACHABLE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList unreachable_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+   * Unreachable resources.
+   * This field can only be present if the caller specified
+   * return_partial_success to be true in the request to receive indications
+   * of temporarily missing resources.
+   * unreachable might be:
+   * unreachable = [
+   * "projects/_/buckets/bucket1",
+   * "projects/_/buckets/bucket2",
+   * "projects/_/buckets/bucket3",
+   * ]
+   * 
+ * + * repeated string unreachable = 3; + * + * @return A list containing the unreachable. + */ + public com.google.protobuf.ProtocolStringList getUnreachableList() { + return unreachable_; + } + + /** + * + * + *
+   * Unreachable resources.
+   * This field can only be present if the caller specified
+   * return_partial_success to be true in the request to receive indications
+   * of temporarily missing resources.
+   * unreachable might be:
+   * unreachable = [
+   * "projects/_/buckets/bucket1",
+   * "projects/_/buckets/bucket2",
+   * "projects/_/buckets/bucket3",
+   * ]
+   * 
+ * + * repeated string unreachable = 3; + * + * @return The count of unreachable. + */ + public int getUnreachableCount() { + return unreachable_.size(); + } + + /** + * + * + *
+   * Unreachable resources.
+   * This field can only be present if the caller specified
+   * return_partial_success to be true in the request to receive indications
+   * of temporarily missing resources.
+   * unreachable might be:
+   * unreachable = [
+   * "projects/_/buckets/bucket1",
+   * "projects/_/buckets/bucket2",
+   * "projects/_/buckets/bucket3",
+   * ]
+   * 
+ * + * repeated string unreachable = 3; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + public java.lang.String getUnreachable(int index) { + return unreachable_.get(index); + } + + /** + * + * + *
+   * Unreachable resources.
+   * This field can only be present if the caller specified
+   * return_partial_success to be true in the request to receive indications
+   * of temporarily missing resources.
+   * unreachable might be:
+   * unreachable = [
+   * "projects/_/buckets/bucket1",
+   * "projects/_/buckets/bucket2",
+   * "projects/_/buckets/bucket3",
+   * ]
+   * 
+ * + * repeated string unreachable = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + public com.google.protobuf.ByteString getUnreachableBytes(int index) { + return unreachable_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < buckets_.size(); i++) { + output.writeMessage(1, buckets_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + for (int i = 0; i < unreachable_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, unreachable_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < buckets_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, buckets_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + { + int dataSize = 0; + for (int i = 0; i < unreachable_.size(); i++) { + dataSize += computeStringSizeNoTag(unreachable_.getRaw(i)); + } + size += dataSize; + size += 1 * getUnreachableList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.storage.v2.ListBucketsResponse)) { + return super.equals(obj); + } + com.google.storage.v2.ListBucketsResponse other = + (com.google.storage.v2.ListBucketsResponse) obj; + + if (!getBucketsList().equals(other.getBucketsList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnreachableList().equals(other.getUnreachableList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getBucketsCount() > 0) { + hash = (37 * hash) + BUCKETS_FIELD_NUMBER; + hash = (53 * hash) + getBucketsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + if (getUnreachableCount() > 0) { + hash = (37 * hash) + UNREACHABLE_FIELD_NUMBER; + hash = (53 * hash) + getUnreachableList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ListBucketsResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ListBucketsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ListBucketsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ListBucketsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ListBucketsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ListBucketsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ListBucketsResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ListBucketsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ListBucketsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ListBucketsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ListBucketsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ListBucketsResponse parseFrom( + com.google.protobuf.CodedInputStream 
input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ListBucketsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for [ListBuckets][google.storage.v2.Storage.ListBuckets].
+   * 
+ * + * Protobuf type {@code google.storage.v2.ListBucketsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ListBucketsResponse) + com.google.storage.v2.ListBucketsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListBucketsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListBucketsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ListBucketsResponse.class, + com.google.storage.v2.ListBucketsResponse.Builder.class); + } + + // Construct using com.google.storage.v2.ListBucketsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (bucketsBuilder_ == null) { + buckets_ = java.util.Collections.emptyList(); + } else { + buckets_ = null; + bucketsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListBucketsResponse_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ListBucketsResponse getDefaultInstanceForType() { + return com.google.storage.v2.ListBucketsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ListBucketsResponse build() { + 
com.google.storage.v2.ListBucketsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ListBucketsResponse buildPartial() { + com.google.storage.v2.ListBucketsResponse result = + new com.google.storage.v2.ListBucketsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.storage.v2.ListBucketsResponse result) { + if (bucketsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + buckets_ = java.util.Collections.unmodifiableList(buckets_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.buckets_ = buckets_; + } else { + result.buckets_ = bucketsBuilder_.build(); + } + } + + private void buildPartial0(com.google.storage.v2.ListBucketsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + unreachable_.makeImmutable(); + result.unreachable_ = unreachable_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ListBucketsResponse) { + return mergeFrom((com.google.storage.v2.ListBucketsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ListBucketsResponse other) { + if (other == com.google.storage.v2.ListBucketsResponse.getDefaultInstance()) return this; + if (bucketsBuilder_ == null) { + if (!other.buckets_.isEmpty()) { + if (buckets_.isEmpty()) { + buckets_ = other.buckets_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureBucketsIsMutable(); + buckets_.addAll(other.buckets_); + } + onChanged(); + } + } else { + if (!other.buckets_.isEmpty()) { + if 
(bucketsBuilder_.isEmpty()) { + bucketsBuilder_.dispose(); + bucketsBuilder_ = null; + buckets_ = other.buckets_; + bitField0_ = (bitField0_ & ~0x00000001); + bucketsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetBucketsFieldBuilder() + : null; + } else { + bucketsBuilder_.addAllMessages(other.buckets_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.unreachable_.isEmpty()) { + if (unreachable_.isEmpty()) { + unreachable_ = other.unreachable_; + bitField0_ |= 0x00000004; + } else { + ensureUnreachableIsMutable(); + unreachable_.addAll(other.unreachable_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.storage.v2.Bucket m = + input.readMessage(com.google.storage.v2.Bucket.parser(), extensionRegistry); + if (bucketsBuilder_ == null) { + ensureBucketsIsMutable(); + buckets_.add(m); + } else { + bucketsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureUnreachableIsMutable(); + unreachable_.add(s); + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // 
default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List buckets_ = + java.util.Collections.emptyList(); + + private void ensureBucketsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + buckets_ = new java.util.ArrayList(buckets_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Bucket, + com.google.storage.v2.Bucket.Builder, + com.google.storage.v2.BucketOrBuilder> + bucketsBuilder_; + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public java.util.List getBucketsList() { + if (bucketsBuilder_ == null) { + return java.util.Collections.unmodifiableList(buckets_); + } else { + return bucketsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public int getBucketsCount() { + if (bucketsBuilder_ == null) { + return buckets_.size(); + } else { + return bucketsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public com.google.storage.v2.Bucket getBuckets(int index) { + if (bucketsBuilder_ == null) { + return buckets_.get(index); + } else { + return bucketsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public Builder setBuckets(int index, com.google.storage.v2.Bucket value) { + if (bucketsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketsIsMutable(); + buckets_.set(index, value); + onChanged(); + } else { + bucketsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public Builder setBuckets(int index, com.google.storage.v2.Bucket.Builder builderForValue) { + if (bucketsBuilder_ == null) { + ensureBucketsIsMutable(); + buckets_.set(index, builderForValue.build()); + onChanged(); + } else { + bucketsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public Builder addBuckets(com.google.storage.v2.Bucket value) { + if (bucketsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketsIsMutable(); + buckets_.add(value); + onChanged(); + } else { + bucketsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public Builder addBuckets(int index, com.google.storage.v2.Bucket value) { + if (bucketsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketsIsMutable(); + buckets_.add(index, value); + onChanged(); + } else { + bucketsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public Builder addBuckets(com.google.storage.v2.Bucket.Builder builderForValue) { + if (bucketsBuilder_ == null) { + ensureBucketsIsMutable(); + buckets_.add(builderForValue.build()); + onChanged(); + } else { + bucketsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public Builder addBuckets(int index, com.google.storage.v2.Bucket.Builder builderForValue) { + if (bucketsBuilder_ == null) { + ensureBucketsIsMutable(); + buckets_.add(index, builderForValue.build()); + onChanged(); + } else { + bucketsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public Builder addAllBuckets( + java.lang.Iterable values) { + if (bucketsBuilder_ == null) { + ensureBucketsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, buckets_); + onChanged(); + } else { + bucketsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public Builder clearBuckets() { + if (bucketsBuilder_ == null) { + buckets_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + bucketsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public Builder removeBuckets(int index) { + if (bucketsBuilder_ == null) { + ensureBucketsIsMutable(); + buckets_.remove(index); + onChanged(); + } else { + bucketsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public com.google.storage.v2.Bucket.Builder getBucketsBuilder(int index) { + return internalGetBucketsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public com.google.storage.v2.BucketOrBuilder getBucketsOrBuilder(int index) { + if (bucketsBuilder_ == null) { + return buckets_.get(index); + } else { + return bucketsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public java.util.List + getBucketsOrBuilderList() { + if (bucketsBuilder_ != null) { + return bucketsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(buckets_); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public com.google.storage.v2.Bucket.Builder addBucketsBuilder() { + return internalGetBucketsFieldBuilder() + .addBuilder(com.google.storage.v2.Bucket.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public com.google.storage.v2.Bucket.Builder addBucketsBuilder(int index) { + return internalGetBucketsFieldBuilder() + .addBuilder(index, com.google.storage.v2.Bucket.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + public java.util.List getBucketsBuilderList() { + return internalGetBucketsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Bucket, + com.google.storage.v2.Bucket.Builder, + com.google.storage.v2.BucketOrBuilder> + internalGetBucketsFieldBuilder() { + if (bucketsBuilder_ == null) { + bucketsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Bucket, + com.google.storage.v2.Bucket.Builder, + com.google.storage.v2.BucketOrBuilder>( + buckets_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + buckets_ = null; + } + return bucketsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList unreachable_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureUnreachableIsMutable() { + if (!unreachable_.isModifiable()) { + unreachable_ = new com.google.protobuf.LazyStringArrayList(unreachable_); + } + bitField0_ |= 0x00000004; + } + + /** + * + * + *
+     * Unreachable resources.
+     * This field can only be present if the caller specified
+     * return_partial_success to be true in the request to receive indications
+     * of temporarily missing resources.
+     * unreachable might be:
+     * unreachable = [
+     * "projects/_/buckets/bucket1",
+     * "projects/_/buckets/bucket2",
+     * "projects/_/buckets/bucket3",
+     * ]
+     * 
+ * + * repeated string unreachable = 3; + * + * @return A list containing the unreachable. + */ + public com.google.protobuf.ProtocolStringList getUnreachableList() { + unreachable_.makeImmutable(); + return unreachable_; + } + + /** + * + * + *
+     * Unreachable resources.
+     * This field can only be present if the caller specified
+     * return_partial_success to be true in the request to receive indications
+     * of temporarily missing resources.
+     * unreachable might be:
+     * unreachable = [
+     * "projects/_/buckets/bucket1",
+     * "projects/_/buckets/bucket2",
+     * "projects/_/buckets/bucket3",
+     * ]
+     * 
+ * + * repeated string unreachable = 3; + * + * @return The count of unreachable. + */ + public int getUnreachableCount() { + return unreachable_.size(); + } + + /** + * + * + *
+     * Unreachable resources.
+     * This field can only be present if the caller specified
+     * return_partial_success to be true in the request to receive indications
+     * of temporarily missing resources.
+     * unreachable might be:
+     * unreachable = [
+     * "projects/_/buckets/bucket1",
+     * "projects/_/buckets/bucket2",
+     * "projects/_/buckets/bucket3",
+     * ]
+     * 
+ * + * repeated string unreachable = 3; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + public java.lang.String getUnreachable(int index) { + return unreachable_.get(index); + } + + /** + * + * + *
+     * Unreachable resources.
+     * This field can only be present if the caller specified
+     * return_partial_success to be true in the request to receive indications
+     * of temporarily missing resources.
+     * unreachable might be:
+     * unreachable = [
+     * "projects/_/buckets/bucket1",
+     * "projects/_/buckets/bucket2",
+     * "projects/_/buckets/bucket3",
+     * ]
+     * 
+ * + * repeated string unreachable = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + public com.google.protobuf.ByteString getUnreachableBytes(int index) { + return unreachable_.getByteString(index); + } + + /** + * + * + *
+     * Unreachable resources.
+     * This field can only be present if the caller specified
+     * return_partial_success to be true in the request to receive indications
+     * of temporarily missing resources.
+     * unreachable might be:
+     * unreachable = [
+     * "projects/_/buckets/bucket1",
+     * "projects/_/buckets/bucket2",
+     * "projects/_/buckets/bucket3",
+     * ]
+     * 
+ * + * repeated string unreachable = 3; + * + * @param index The index to set the value at. + * @param value The unreachable to set. + * @return This builder for chaining. + */ + public Builder setUnreachable(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUnreachableIsMutable(); + unreachable_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Unreachable resources.
+     * This field can only be present if the caller specified
+     * return_partial_success to be true in the request to receive indications
+     * of temporarily missing resources.
+     * unreachable might be:
+     * unreachable = [
+     * "projects/_/buckets/bucket1",
+     * "projects/_/buckets/bucket2",
+     * "projects/_/buckets/bucket3",
+     * ]
+     * 
+ * + * repeated string unreachable = 3; + * + * @param value The unreachable to add. + * @return This builder for chaining. + */ + public Builder addUnreachable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUnreachableIsMutable(); + unreachable_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Unreachable resources.
+     * This field can only be present if the caller specified
+     * return_partial_success to be true in the request to receive indications
+     * of temporarily missing resources.
+     * unreachable might be:
+     * unreachable = [
+     * "projects/_/buckets/bucket1",
+     * "projects/_/buckets/bucket2",
+     * "projects/_/buckets/bucket3",
+     * ]
+     * 
+ * + * repeated string unreachable = 3; + * + * @param values The unreachable to add. + * @return This builder for chaining. + */ + public Builder addAllUnreachable(java.lang.Iterable values) { + ensureUnreachableIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, unreachable_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Unreachable resources.
+     * This field can only be present if the caller specified
+     * return_partial_success to be true in the request to receive indications
+     * of temporarily missing resources.
+     * unreachable might be:
+     * unreachable = [
+     * "projects/_/buckets/bucket1",
+     * "projects/_/buckets/bucket2",
+     * "projects/_/buckets/bucket3",
+     * ]
+     * 
+ * + * repeated string unreachable = 3; + * + * @return This builder for chaining. + */ + public Builder clearUnreachable() { + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + + /** + * + * + *
+     * Unreachable resources.
+     * This field can only be present if the caller specified
+     * return_partial_success to be true in the request to receive indications
+     * of temporarily missing resources.
+     * unreachable might be:
+     * unreachable = [
+     * "projects/_/buckets/bucket1",
+     * "projects/_/buckets/bucket2",
+     * "projects/_/buckets/bucket3",
+     * ]
+     * 
+ * + * repeated string unreachable = 3; + * + * @param value The bytes of the unreachable to add. + * @return This builder for chaining. + */ + public Builder addUnreachableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureUnreachableIsMutable(); + unreachable_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ListBucketsResponse) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ListBucketsResponse) + private static final com.google.storage.v2.ListBucketsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ListBucketsResponse(); + } + + public static com.google.storage.v2.ListBucketsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListBucketsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + 
@java.lang.Override + public com.google.storage.v2.ListBucketsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListBucketsResponseOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListBucketsResponseOrBuilder.java new file mode 100644 index 000000000000..0663caee04cf --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListBucketsResponseOrBuilder.java @@ -0,0 +1,201 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ListBucketsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ListBucketsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + java.util.List getBucketsList(); + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + com.google.storage.v2.Bucket getBuckets(int index); + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + int getBucketsCount(); + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + java.util.List getBucketsOrBuilderList(); + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Bucket buckets = 1; + */ + com.google.storage.v2.BucketOrBuilder getBucketsOrBuilder(int index); + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); + + /** + * + * + *
+   * Unreachable resources.
+   * This field can only be present if the caller specified
+   * return_partial_success to be true in the request to receive indications
+   * of temporarily missing resources.
+   * unreachable might be:
+   * unreachable = [
+   * "projects/_/buckets/bucket1",
+   * "projects/_/buckets/bucket2",
+   * "projects/_/buckets/bucket3",
+   * ]
+   * 
+ * + * repeated string unreachable = 3; + * + * @return A list containing the unreachable. + */ + java.util.List getUnreachableList(); + + /** + * + * + *
+   * Unreachable resources.
+   * This field can only be present if the caller specified
+   * return_partial_success to be true in the request to receive indications
+   * of temporarily missing resources.
+   * unreachable might be:
+   * unreachable = [
+   * "projects/_/buckets/bucket1",
+   * "projects/_/buckets/bucket2",
+   * "projects/_/buckets/bucket3",
+   * ]
+   * 
+ * + * repeated string unreachable = 3; + * + * @return The count of unreachable. + */ + int getUnreachableCount(); + + /** + * + * + *
+   * Unreachable resources.
+   * This field can only be present if the caller specified
+   * return_partial_success to be true in the request to receive indications
+   * of temporarily missing resources.
+   * unreachable might be:
+   * unreachable = [
+   * "projects/_/buckets/bucket1",
+   * "projects/_/buckets/bucket2",
+   * "projects/_/buckets/bucket3",
+   * ]
+   * 
+ * + * repeated string unreachable = 3; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + java.lang.String getUnreachable(int index); + + /** + * + * + *
+   * Unreachable resources.
+   * This field can only be present if the caller specified
+   * return_partial_success to be true in the request to receive indications
+   * of temporarily missing resources.
+   * unreachable might be:
+   * unreachable = [
+   * "projects/_/buckets/bucket1",
+   * "projects/_/buckets/bucket2",
+   * "projects/_/buckets/bucket3",
+   * ]
+   * 
+ * + * repeated string unreachable = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + com.google.protobuf.ByteString getUnreachableBytes(int index); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListObjectsRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListObjectsRequest.java new file mode 100644 index 000000000000..6af3d7298f65 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListObjectsRequest.java @@ -0,0 +1,2890 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for [ListObjects][google.storage.v2.Storage.ListObjects].
+ * 
+ * + * Protobuf type {@code google.storage.v2.ListObjectsRequest} + */ +@com.google.protobuf.Generated +public final class ListObjectsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ListObjectsRequest) + ListObjectsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListObjectsRequest"); + } + + // Use ListObjectsRequest.newBuilder() to construct. + private ListObjectsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListObjectsRequest() { + parent_ = ""; + pageToken_ = ""; + delimiter_ = ""; + prefix_ = ""; + lexicographicStart_ = ""; + lexicographicEnd_ = ""; + matchGlob_ = ""; + filter_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListObjectsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListObjectsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ListObjectsRequest.class, + com.google.storage.v2.ListObjectsRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Name of the bucket in which to look for objects.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the bucket in which to look for objects.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
+   * Optional. Maximum number of `items` plus `prefixes` to return
+   * in a single page of responses. As duplicate `prefixes` are
+   * omitted, fewer total results might be returned than requested. The service
+   * uses this parameter or 1,000 items, whichever is smaller.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DELIMITER_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object delimiter_ = ""; + + /** + * + * + *
+   * Optional. If set, returns results in a directory-like mode. `items`
+   * contains only objects whose names, aside from the `prefix`, do not contain
+   * `delimiter`. Objects whose names, aside from the `prefix`, contain
+   * `delimiter` has their name, truncated after the `delimiter`, returned in
+   * `prefixes`. Duplicate `prefixes` are omitted.
+   * 
+ * + * string delimiter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The delimiter. + */ + @java.lang.Override + public java.lang.String getDelimiter() { + java.lang.Object ref = delimiter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + delimiter_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. If set, returns results in a directory-like mode. `items`
+   * contains only objects whose names, aside from the `prefix`, do not contain
+   * `delimiter`. Objects whose names, aside from the `prefix`, contain
+   * `delimiter` has their name, truncated after the `delimiter`, returned in
+   * `prefixes`. Duplicate `prefixes` are omitted.
+   * 
+ * + * string delimiter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for delimiter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDelimiterBytes() { + java.lang.Object ref = delimiter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + delimiter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INCLUDE_TRAILING_DELIMITER_FIELD_NUMBER = 5; + private boolean includeTrailingDelimiter_ = false; + + /** + * + * + *
+   * Optional. If true, objects that end in exactly one instance of `delimiter`
+   * has their metadata included in `items` in addition to
+   * `prefixes`.
+   * 
+ * + * bool include_trailing_delimiter = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The includeTrailingDelimiter. + */ + @java.lang.Override + public boolean getIncludeTrailingDelimiter() { + return includeTrailingDelimiter_; + } + + public static final int PREFIX_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object prefix_ = ""; + + /** + * + * + *
+   * Optional. Filter results to objects whose names begin with this prefix.
+   * 
+ * + * string prefix = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The prefix. + */ + @java.lang.Override + public java.lang.String getPrefix() { + java.lang.Object ref = prefix_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + prefix_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Filter results to objects whose names begin with this prefix.
+   * 
+ * + * string prefix = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for prefix. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPrefixBytes() { + java.lang.Object ref = prefix_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + prefix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VERSIONS_FIELD_NUMBER = 7; + private boolean versions_ = false; + + /** + * + * + *
+   * Optional. If `true`, lists all versions of an object as distinct results.
+   * 
+ * + * bool versions = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The versions. + */ + @java.lang.Override + public boolean getVersions() { + return versions_; + } + + public static final int READ_MASK_FIELD_NUMBER = 8; + private com.google.protobuf.FieldMask readMask_; + + /** + * + * + *
+   * Mask specifying which fields to read from each result.
+   * If no mask is specified, defaults to all fields except `items.acl` and
+   * `items.owner`.
+   * `*` might be used to mean all fields.
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + * + * @return Whether the readMask field is set. + */ + @java.lang.Override + public boolean hasReadMask() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Mask specifying which fields to read from each result.
+   * If no mask is specified, defaults to all fields except `items.acl` and
+   * `items.owner`.
+   * `*` might be used to mean all fields.
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + * + * @return The readMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getReadMask() { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + + /** + * + * + *
+   * Mask specifying which fields to read from each result.
+   * If no mask is specified, defaults to all fields except `items.acl` and
+   * `items.owner`.
+   * `*` might be used to mean all fields.
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder() { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + + public static final int LEXICOGRAPHIC_START_FIELD_NUMBER = 10; + + @SuppressWarnings("serial") + private volatile java.lang.Object lexicographicStart_ = ""; + + /** + * + * + *
+   * Optional. Filter results to objects whose names are lexicographically equal
+   * to or after `lexicographic_start`. If `lexicographic_end` is also set, the
+   * objects listed have names between `lexicographic_start` (inclusive) and
+   * `lexicographic_end` (exclusive).
+   * 
+ * + * string lexicographic_start = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lexicographicStart. + */ + @java.lang.Override + public java.lang.String getLexicographicStart() { + java.lang.Object ref = lexicographicStart_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + lexicographicStart_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Filter results to objects whose names are lexicographically equal
+   * to or after `lexicographic_start`. If `lexicographic_end` is also set, the
+   * objects listed have names between `lexicographic_start` (inclusive) and
+   * `lexicographic_end` (exclusive).
+   * 
+ * + * string lexicographic_start = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for lexicographicStart. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLexicographicStartBytes() { + java.lang.Object ref = lexicographicStart_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + lexicographicStart_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LEXICOGRAPHIC_END_FIELD_NUMBER = 11; + + @SuppressWarnings("serial") + private volatile java.lang.Object lexicographicEnd_ = ""; + + /** + * + * + *
+   * Optional. Filter results to objects whose names are lexicographically
+   * before `lexicographic_end`. If `lexicographic_start` is also set, the
+   * objects listed have names between `lexicographic_start` (inclusive) and
+   * `lexicographic_end` (exclusive).
+   * 
+ * + * string lexicographic_end = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lexicographicEnd. + */ + @java.lang.Override + public java.lang.String getLexicographicEnd() { + java.lang.Object ref = lexicographicEnd_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + lexicographicEnd_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Filter results to objects whose names are lexicographically
+   * before `lexicographic_end`. If `lexicographic_start` is also set, the
+   * objects listed have names between `lexicographic_start` (inclusive) and
+   * `lexicographic_end` (exclusive).
+   * 
+ * + * string lexicographic_end = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for lexicographicEnd. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLexicographicEndBytes() { + java.lang.Object ref = lexicographicEnd_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + lexicographicEnd_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SOFT_DELETED_FIELD_NUMBER = 12; + private boolean softDeleted_ = false; + + /** + * + * + *
+   * Optional. If true, only list all soft-deleted versions of the object.
+   * Soft delete policy is required to set this option.
+   * 
+ * + * bool soft_deleted = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The softDeleted. + */ + @java.lang.Override + public boolean getSoftDeleted() { + return softDeleted_; + } + + public static final int INCLUDE_FOLDERS_AS_PREFIXES_FIELD_NUMBER = 13; + private boolean includeFoldersAsPrefixes_ = false; + + /** + * + * + *
+   * Optional. If true, includes folders and managed folders (besides objects)
+   * in the returned `prefixes`. Requires `delimiter` to be set to '/'.
+   * 
+ * + * bool include_folders_as_prefixes = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The includeFoldersAsPrefixes. + */ + @java.lang.Override + public boolean getIncludeFoldersAsPrefixes() { + return includeFoldersAsPrefixes_; + } + + public static final int MATCH_GLOB_FIELD_NUMBER = 14; + + @SuppressWarnings("serial") + private volatile java.lang.Object matchGlob_ = ""; + + /** + * + * + *
+   * Optional. Filter results to objects and prefixes that match this glob
+   * pattern. See [List objects using
+   * glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
+   * for the full syntax.
+   * 
+ * + * string match_glob = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The matchGlob. + */ + @java.lang.Override + public java.lang.String getMatchGlob() { + java.lang.Object ref = matchGlob_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + matchGlob_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Filter results to objects and prefixes that match this glob
+   * pattern. See [List objects using
+   * glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
+   * for the full syntax.
+   * 
+ * + * string match_glob = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for matchGlob. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMatchGlobBytes() { + java.lang.Object ref = matchGlob_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + matchGlob_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 15; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
+   * Optional. An expression used to filter the returned objects by the
+   * `context` field. For the full syntax, see [Filter objects by contexts
+   * syntax](https://cloud.google.com/storage/docs/listing-objects#filter-by-object-contexts-syntax).
+   * If a `delimiter` is set, the returned `prefixes` are exempt from this
+   * filter.
+   * 
+ * + * string filter = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. An expression used to filter the returned objects by the
+   * `context` field. For the full syntax, see [Filter objects by contexts
+   * syntax](https://cloud.google.com/storage/docs/listing-objects#filter-by-object-contexts-syntax).
+   * If a `delimiter` is set, the returned `prefixes` are exempt from this
+   * filter.
+   * 
+ * + * string filter = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(delimiter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, delimiter_); + } + if (includeTrailingDelimiter_ != false) { + output.writeBool(5, includeTrailingDelimiter_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(prefix_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, prefix_); + } + if (versions_ != false) { + output.writeBool(7, versions_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(8, getReadMask()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(lexicographicStart_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 10, lexicographicStart_); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(lexicographicEnd_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 11, lexicographicEnd_); + } + if (softDeleted_ != false) { + output.writeBool(12, softDeleted_); + } + if (includeFoldersAsPrefixes_ != false) { + output.writeBool(13, includeFoldersAsPrefixes_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(matchGlob_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 14, matchGlob_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 15, filter_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(delimiter_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, delimiter_); + } + if (includeTrailingDelimiter_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(5, includeTrailingDelimiter_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(prefix_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, prefix_); + } + if (versions_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(7, versions_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getReadMask()); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(lexicographicStart_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(10, lexicographicStart_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(lexicographicEnd_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(11, lexicographicEnd_); + } + if (softDeleted_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(12, softDeleted_); + } + if (includeFoldersAsPrefixes_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(13, includeFoldersAsPrefixes_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(matchGlob_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(14, matchGlob_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(15, filter_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ListObjectsRequest)) { + return super.equals(obj); + } + com.google.storage.v2.ListObjectsRequest other = (com.google.storage.v2.ListObjectsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getDelimiter().equals(other.getDelimiter())) return false; + if (getIncludeTrailingDelimiter() != other.getIncludeTrailingDelimiter()) return false; + if (!getPrefix().equals(other.getPrefix())) return false; + if (getVersions() != other.getVersions()) return false; + if (hasReadMask() != other.hasReadMask()) return false; + if (hasReadMask()) { + if (!getReadMask().equals(other.getReadMask())) return false; + } + if 
(!getLexicographicStart().equals(other.getLexicographicStart())) return false; + if (!getLexicographicEnd().equals(other.getLexicographicEnd())) return false; + if (getSoftDeleted() != other.getSoftDeleted()) return false; + if (getIncludeFoldersAsPrefixes() != other.getIncludeFoldersAsPrefixes()) return false; + if (!getMatchGlob().equals(other.getMatchGlob())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (37 * hash) + DELIMITER_FIELD_NUMBER; + hash = (53 * hash) + getDelimiter().hashCode(); + hash = (37 * hash) + INCLUDE_TRAILING_DELIMITER_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIncludeTrailingDelimiter()); + hash = (37 * hash) + PREFIX_FIELD_NUMBER; + hash = (53 * hash) + getPrefix().hashCode(); + hash = (37 * hash) + VERSIONS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getVersions()); + if (hasReadMask()) { + hash = (37 * hash) + READ_MASK_FIELD_NUMBER; + hash = (53 * hash) + getReadMask().hashCode(); + } + hash = (37 * hash) + LEXICOGRAPHIC_START_FIELD_NUMBER; + hash = (53 * hash) + getLexicographicStart().hashCode(); + hash = (37 * hash) + LEXICOGRAPHIC_END_FIELD_NUMBER; + hash = (53 * hash) + getLexicographicEnd().hashCode(); + hash = (37 * hash) + SOFT_DELETED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSoftDeleted()); + hash = (37 * hash) + 
INCLUDE_FOLDERS_AS_PREFIXES_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIncludeFoldersAsPrefixes()); + hash = (37 * hash) + MATCH_GLOB_FIELD_NUMBER; + hash = (53 * hash) + getMatchGlob().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ListObjectsRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ListObjectsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ListObjectsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ListObjectsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ListObjectsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ListObjectsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ListObjectsRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ListObjectsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ListObjectsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ListObjectsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ListObjectsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ListObjectsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ListObjectsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for [ListObjects][google.storage.v2.Storage.ListObjects].
+   * 
+ * + * Protobuf type {@code google.storage.v2.ListObjectsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ListObjectsRequest) + com.google.storage.v2.ListObjectsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListObjectsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListObjectsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ListObjectsRequest.class, + com.google.storage.v2.ListObjectsRequest.Builder.class); + } + + // Construct using com.google.storage.v2.ListObjectsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetReadMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + delimiter_ = ""; + includeTrailingDelimiter_ = false; + prefix_ = ""; + versions_ = false; + readMask_ = null; + if (readMaskBuilder_ != null) { + readMaskBuilder_.dispose(); + readMaskBuilder_ = null; + } + lexicographicStart_ = ""; + lexicographicEnd_ = ""; + softDeleted_ = false; + includeFoldersAsPrefixes_ = false; + matchGlob_ = ""; + filter_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListObjectsRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ListObjectsRequest getDefaultInstanceForType() { + return com.google.storage.v2.ListObjectsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ListObjectsRequest build() { + com.google.storage.v2.ListObjectsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ListObjectsRequest buildPartial() { + com.google.storage.v2.ListObjectsRequest result = + new com.google.storage.v2.ListObjectsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ListObjectsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.delimiter_ = delimiter_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.includeTrailingDelimiter_ = includeTrailingDelimiter_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.prefix_ = prefix_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.versions_ = versions_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000080) != 0)) { + result.readMask_ = readMaskBuilder_ == null ? 
readMask_ : readMaskBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.lexicographicStart_ = lexicographicStart_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.lexicographicEnd_ = lexicographicEnd_; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.softDeleted_ = softDeleted_; + } + if (((from_bitField0_ & 0x00000800) != 0)) { + result.includeFoldersAsPrefixes_ = includeFoldersAsPrefixes_; + } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.matchGlob_ = matchGlob_; + } + if (((from_bitField0_ & 0x00002000) != 0)) { + result.filter_ = filter_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ListObjectsRequest) { + return mergeFrom((com.google.storage.v2.ListObjectsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ListObjectsRequest other) { + if (other == com.google.storage.v2.ListObjectsRequest.getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getDelimiter().isEmpty()) { + delimiter_ = other.delimiter_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (other.getIncludeTrailingDelimiter() != false) { + setIncludeTrailingDelimiter(other.getIncludeTrailingDelimiter()); + } + if (!other.getPrefix().isEmpty()) { + prefix_ = other.prefix_; + bitField0_ |= 0x00000020; + onChanged(); + } + if (other.getVersions() != false) { + setVersions(other.getVersions()); + } + if (other.hasReadMask()) { + mergeReadMask(other.getReadMask()); + } + if 
(!other.getLexicographicStart().isEmpty()) { + lexicographicStart_ = other.lexicographicStart_; + bitField0_ |= 0x00000100; + onChanged(); + } + if (!other.getLexicographicEnd().isEmpty()) { + lexicographicEnd_ = other.lexicographicEnd_; + bitField0_ |= 0x00000200; + onChanged(); + } + if (other.getSoftDeleted() != false) { + setSoftDeleted(other.getSoftDeleted()); + } + if (other.getIncludeFoldersAsPrefixes() != false) { + setIncludeFoldersAsPrefixes(other.getIncludeFoldersAsPrefixes()); + } + if (!other.getMatchGlob().isEmpty()) { + matchGlob_ = other.matchGlob_; + bitField0_ |= 0x00001000; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00002000; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + delimiter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 40: + { + includeTrailingDelimiter_ = input.readBool(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 50: + { + prefix_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 56: + { + versions_ = 
input.readBool(); + bitField0_ |= 0x00000040; + break; + } // case 56 + case 66: + { + input.readMessage( + internalGetReadMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000080; + break; + } // case 66 + case 82: + { + lexicographicStart_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000100; + break; + } // case 82 + case 90: + { + lexicographicEnd_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000200; + break; + } // case 90 + case 96: + { + softDeleted_ = input.readBool(); + bitField0_ |= 0x00000400; + break; + } // case 96 + case 104: + { + includeFoldersAsPrefixes_ = input.readBool(); + bitField0_ |= 0x00000800; + break; + } // case 104 + case 114: + { + matchGlob_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00001000; + break; + } // case 114 + case 122: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00002000; + break; + } // case 122 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Name of the bucket in which to look for objects.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which to look for objects.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which to look for objects.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which to look for objects.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which to look for objects.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
+     * Optional. Maximum number of `items` plus `prefixes` to return
+     * in a single page of responses. As duplicate `prefixes` are
+     * omitted, fewer total results might be returned than requested. The service
+     * uses this parameter or 1,000 items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
+     * Optional. Maximum number of `items` plus `prefixes` to return
+     * in a single page of responses. As duplicate `prefixes` are
+     * omitted, fewer total results might be returned than requested. The service
+     * uses this parameter or 1,000 items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Maximum number of `items` plus `prefixes` to return
+     * in a single page of responses. As duplicate `prefixes` are
+     * omitted, fewer total results might be returned than requested. The service
+     * uses this parameter or 1,000 items, whichever is smaller.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A previously-returned page token representing part of the larger
+     * set of results to view.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object delimiter_ = ""; + + /** + * + * + *
+     * Optional. If set, returns results in a directory-like mode. `items`
+     * contains only objects whose names, aside from the `prefix`, do not contain
+     * `delimiter`. Objects whose names, aside from the `prefix`, contain
+     * `delimiter` have their names, truncated after the `delimiter`, returned in
+     * `prefixes`. Duplicate `prefixes` are omitted.
+     * 
+ * + * string delimiter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The delimiter. + */ + public java.lang.String getDelimiter() { + java.lang.Object ref = delimiter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + delimiter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. If set, returns results in a directory-like mode. `items`
+     * contains only objects whose names, aside from the `prefix`, do not contain
+     * `delimiter`. Objects whose names, aside from the `prefix`, contain
+     * `delimiter` have their names, truncated after the `delimiter`, returned in
+     * `prefixes`. Duplicate `prefixes` are omitted.
+     * 
+ * + * string delimiter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for delimiter. + */ + public com.google.protobuf.ByteString getDelimiterBytes() { + java.lang.Object ref = delimiter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + delimiter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. If set, returns results in a directory-like mode. `items`
+     * contains only objects whose names, aside from the `prefix`, do not contain
+     * `delimiter`. Objects whose names, aside from the `prefix`, contain
+     * `delimiter` have their names, truncated after the `delimiter`, returned in
+     * `prefixes`. Duplicate `prefixes` are omitted.
+     * 
+ * + * string delimiter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The delimiter to set. + * @return This builder for chaining. + */ + public Builder setDelimiter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + delimiter_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If set, returns results in a directory-like mode. `items`
+     * contains only objects whose names, aside from the `prefix`, do not contain
+     * `delimiter`. Objects whose names, aside from the `prefix`, contain
+     * `delimiter` have their names, truncated after the `delimiter`, returned in
+     * `prefixes`. Duplicate `prefixes` are omitted.
+     * 
+ * + * string delimiter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDelimiter() { + delimiter_ = getDefaultInstance().getDelimiter(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If set, returns results in a directory-like mode. `items`
+     * contains only objects whose names, aside from the `prefix`, do not contain
+     * `delimiter`. Objects whose names, aside from the `prefix`, contain
+     * `delimiter` have their names, truncated after the `delimiter`, returned in
+     * `prefixes`. Duplicate `prefixes` are omitted.
+     * 
+ * + * string delimiter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for delimiter to set. + * @return This builder for chaining. + */ + public Builder setDelimiterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + delimiter_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private boolean includeTrailingDelimiter_; + + /** + * + * + *
+     * Optional. If true, objects that end in exactly one instance of `delimiter`
+     * have their metadata included in `items` in addition to
+     * `prefixes`.
+     * 
+ * + * bool include_trailing_delimiter = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The includeTrailingDelimiter. + */ + @java.lang.Override + public boolean getIncludeTrailingDelimiter() { + return includeTrailingDelimiter_; + } + + /** + * + * + *
+     * Optional. If true, objects that end in exactly one instance of `delimiter`
+     * have their metadata included in `items` in addition to
+     * `prefixes`.
+     * 
+ * + * bool include_trailing_delimiter = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The includeTrailingDelimiter to set. + * @return This builder for chaining. + */ + public Builder setIncludeTrailingDelimiter(boolean value) { + + includeTrailingDelimiter_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If true, objects that end in exactly one instance of `delimiter`
+     * have their metadata included in `items` in addition to
+     * `prefixes`.
+     * 
+ * + * bool include_trailing_delimiter = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearIncludeTrailingDelimiter() { + bitField0_ = (bitField0_ & ~0x00000010); + includeTrailingDelimiter_ = false; + onChanged(); + return this; + } + + private java.lang.Object prefix_ = ""; + + /** + * + * + *
+     * Optional. Filter results to objects whose names begin with this prefix.
+     * 
+ * + * string prefix = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The prefix. + */ + public java.lang.String getPrefix() { + java.lang.Object ref = prefix_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + prefix_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to objects whose names begin with this prefix.
+     * 
+ * + * string prefix = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for prefix. + */ + public com.google.protobuf.ByteString getPrefixBytes() { + java.lang.Object ref = prefix_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + prefix_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to objects whose names begin with this prefix.
+     * 
+ * + * string prefix = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The prefix to set. + * @return This builder for chaining. + */ + public Builder setPrefix(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + prefix_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to objects whose names begin with this prefix.
+     * 
+ * + * string prefix = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPrefix() { + prefix_ = getDefaultInstance().getPrefix(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to objects whose names begin with this prefix.
+     * 
+ * + * string prefix = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for prefix to set. + * @return This builder for chaining. + */ + public Builder setPrefixBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + prefix_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private boolean versions_; + + /** + * + * + *
+     * Optional. If `true`, lists all versions of an object as distinct results.
+     * 
+ * + * bool versions = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The versions. + */ + @java.lang.Override + public boolean getVersions() { + return versions_; + } + + /** + * + * + *
+     * Optional. If `true`, lists all versions of an object as distinct results.
+     * 
+ * + * bool versions = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The versions to set. + * @return This builder for chaining. + */ + public Builder setVersions(boolean value) { + + versions_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If `true`, lists all versions of an object as distinct results.
+     * 
+ * + * bool versions = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearVersions() { + bitField0_ = (bitField0_ & ~0x00000040); + versions_ = false; + onChanged(); + return this; + } + + private com.google.protobuf.FieldMask readMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + readMaskBuilder_; + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, defaults to all fields except `items.acl` and
+     * `items.owner`.
+     * `*` might be used to mean all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + * + * @return Whether the readMask field is set. + */ + public boolean hasReadMask() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, defaults to all fields except `items.acl` and
+     * `items.owner`.
+     * `*` might be used to mean all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + * + * @return The readMask. + */ + public com.google.protobuf.FieldMask getReadMask() { + if (readMaskBuilder_ == null) { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } else { + return readMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, defaults to all fields except `items.acl` and
+     * `items.owner`.
+     * `*` might be used to mean all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + */ + public Builder setReadMask(com.google.protobuf.FieldMask value) { + if (readMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readMask_ = value; + } else { + readMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, defaults to all fields except `items.acl` and
+     * `items.owner`.
+     * `*` might be used to mean all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + */ + public Builder setReadMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (readMaskBuilder_ == null) { + readMask_ = builderForValue.build(); + } else { + readMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, defaults to all fields except `items.acl` and
+     * `items.owner`.
+     * `*` might be used to mean all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + */ + public Builder mergeReadMask(com.google.protobuf.FieldMask value) { + if (readMaskBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0) + && readMask_ != null + && readMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getReadMaskBuilder().mergeFrom(value); + } else { + readMask_ = value; + } + } else { + readMaskBuilder_.mergeFrom(value); + } + if (readMask_ != null) { + bitField0_ |= 0x00000080; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, defaults to all fields except `items.acl` and
+     * `items.owner`.
+     * `*` might be used to mean all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + */ + public Builder clearReadMask() { + bitField0_ = (bitField0_ & ~0x00000080); + readMask_ = null; + if (readMaskBuilder_ != null) { + readMaskBuilder_.dispose(); + readMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, defaults to all fields except `items.acl` and
+     * `items.owner`.
+     * `*` might be used to mean all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + */ + public com.google.protobuf.FieldMask.Builder getReadMaskBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return internalGetReadMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, defaults to all fields except `items.acl` and
+     * `items.owner`.
+     * `*` might be used to mean all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + */ + public com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder() { + if (readMaskBuilder_ != null) { + return readMaskBuilder_.getMessageOrBuilder(); + } else { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + } + + /** + * + * + *
+     * Mask specifying which fields to read from each result.
+     * If no mask is specified, defaults to all fields except `items.acl` and
+     * `items.owner`.
+     * `*` might be used to mean all fields.
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetReadMaskFieldBuilder() { + if (readMaskBuilder_ == null) { + readMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getReadMask(), getParentForChildren(), isClean()); + readMask_ = null; + } + return readMaskBuilder_; + } + + private java.lang.Object lexicographicStart_ = ""; + + /** + * + * + *
+     * Optional. Filter results to objects whose names are lexicographically equal
+     * to or after `lexicographic_start`. If `lexicographic_end` is also set, the
+     * objects listed have names between `lexicographic_start` (inclusive) and
+     * `lexicographic_end` (exclusive).
+     * 
+ * + * string lexicographic_start = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lexicographicStart. + */ + public java.lang.String getLexicographicStart() { + java.lang.Object ref = lexicographicStart_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + lexicographicStart_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to objects whose names are lexicographically equal
+     * to or after `lexicographic_start`. If `lexicographic_end` is also set, the
+     * objects listed have names between `lexicographic_start` (inclusive) and
+     * `lexicographic_end` (exclusive).
+     * 
+ * + * string lexicographic_start = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for lexicographicStart. + */ + public com.google.protobuf.ByteString getLexicographicStartBytes() { + java.lang.Object ref = lexicographicStart_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + lexicographicStart_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to objects whose names are lexicographically equal
+     * to or after `lexicographic_start`. If `lexicographic_end` is also set, the
+     * objects listed have names between `lexicographic_start` (inclusive) and
+     * `lexicographic_end` (exclusive).
+     * 
+ * + * string lexicographic_start = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The lexicographicStart to set. + * @return This builder for chaining. + */ + public Builder setLexicographicStart(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + lexicographicStart_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to objects whose names are lexicographically equal
+     * to or after `lexicographic_start`. If `lexicographic_end` is also set, the
+     * objects listed have names between `lexicographic_start` (inclusive) and
+     * `lexicographic_end` (exclusive).
+     * 
+ * + * string lexicographic_start = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLexicographicStart() { + lexicographicStart_ = getDefaultInstance().getLexicographicStart(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to objects whose names are lexicographically equal
+     * to or after `lexicographic_start`. If `lexicographic_end` is also set, the
+     * objects listed have names between `lexicographic_start` (inclusive) and
+     * `lexicographic_end` (exclusive).
+     * 
+ * + * string lexicographic_start = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for lexicographicStart to set. + * @return This builder for chaining. + */ + public Builder setLexicographicStartBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + lexicographicStart_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + private java.lang.Object lexicographicEnd_ = ""; + + /** + * + * + *
+     * Optional. Filter results to objects whose names are lexicographically
+     * before `lexicographic_end`. If `lexicographic_start` is also set, the
+     * objects listed have names between `lexicographic_start` (inclusive) and
+     * `lexicographic_end` (exclusive).
+     * 
+ * + * string lexicographic_end = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lexicographicEnd. + */ + public java.lang.String getLexicographicEnd() { + java.lang.Object ref = lexicographicEnd_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + lexicographicEnd_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to objects whose names are lexicographically
+     * before `lexicographic_end`. If `lexicographic_start` is also set, the
+     * objects listed have names between `lexicographic_start` (inclusive) and
+     * `lexicographic_end` (exclusive).
+     * 
+ * + * string lexicographic_end = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for lexicographicEnd. + */ + public com.google.protobuf.ByteString getLexicographicEndBytes() { + java.lang.Object ref = lexicographicEnd_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + lexicographicEnd_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to objects whose names are lexicographically
+     * before `lexicographic_end`. If `lexicographic_start` is also set, the
+     * objects listed have names between `lexicographic_start` (inclusive) and
+     * `lexicographic_end` (exclusive).
+     * 
+ * + * string lexicographic_end = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The lexicographicEnd to set. + * @return This builder for chaining. + */ + public Builder setLexicographicEnd(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + lexicographicEnd_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to objects whose names are lexicographically
+     * before `lexicographic_end`. If `lexicographic_start` is also set, the
+     * objects listed have names between `lexicographic_start` (inclusive) and
+     * `lexicographic_end` (exclusive).
+     * 
+ * + * string lexicographic_end = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLexicographicEnd() { + lexicographicEnd_ = getDefaultInstance().getLexicographicEnd(); + bitField0_ = (bitField0_ & ~0x00000200); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to objects whose names are lexicographically
+     * before `lexicographic_end`. If `lexicographic_start` is also set, the
+     * objects listed have names between `lexicographic_start` (inclusive) and
+     * `lexicographic_end` (exclusive).
+     * 
+ * + * string lexicographic_end = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for lexicographicEnd to set. + * @return This builder for chaining. + */ + public Builder setLexicographicEndBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + lexicographicEnd_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + private boolean softDeleted_; + + /** + * + * + *
+     * Optional. If true, only list all soft-deleted versions of the object.
+     * Soft delete policy is required to set this option.
+     * 
+ * + * bool soft_deleted = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The softDeleted. + */ + @java.lang.Override + public boolean getSoftDeleted() { + return softDeleted_; + } + + /** + * + * + *
+     * Optional. If true, only list all soft-deleted versions of the object.
+     * Soft delete policy is required to set this option.
+     * 
+ * + * bool soft_deleted = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The softDeleted to set. + * @return This builder for chaining. + */ + public Builder setSoftDeleted(boolean value) { + + softDeleted_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If true, only list all soft-deleted versions of the object.
+     * Soft delete policy is required to set this option.
+     * 
+ * + * bool soft_deleted = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearSoftDeleted() { + bitField0_ = (bitField0_ & ~0x00000400); + softDeleted_ = false; + onChanged(); + return this; + } + + private boolean includeFoldersAsPrefixes_; + + /** + * + * + *
+     * Optional. If true, includes folders and managed folders (besides objects)
+     * in the returned `prefixes`. Requires `delimiter` to be set to '/'.
+     * 
+ * + * bool include_folders_as_prefixes = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The includeFoldersAsPrefixes. + */ + @java.lang.Override + public boolean getIncludeFoldersAsPrefixes() { + return includeFoldersAsPrefixes_; + } + + /** + * + * + *
+     * Optional. If true, includes folders and managed folders (besides objects)
+     * in the returned `prefixes`. Requires `delimiter` to be set to '/'.
+     * 
+ * + * bool include_folders_as_prefixes = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The includeFoldersAsPrefixes to set. + * @return This builder for chaining. + */ + public Builder setIncludeFoldersAsPrefixes(boolean value) { + + includeFoldersAsPrefixes_ = value; + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If true, includes folders and managed folders (besides objects)
+     * in the returned `prefixes`. Requires `delimiter` to be set to '/'.
+     * 
+ * + * bool include_folders_as_prefixes = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearIncludeFoldersAsPrefixes() { + bitField0_ = (bitField0_ & ~0x00000800); + includeFoldersAsPrefixes_ = false; + onChanged(); + return this; + } + + private java.lang.Object matchGlob_ = ""; + + /** + * + * + *
+     * Optional. Filter results to objects and prefixes that match this glob
+     * pattern. See [List objects using
+     * glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
+     * for the full syntax.
+     * 
+ * + * string match_glob = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The matchGlob. + */ + public java.lang.String getMatchGlob() { + java.lang.Object ref = matchGlob_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + matchGlob_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to objects and prefixes that match this glob
+     * pattern. See [List objects using
+     * glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
+     * for the full syntax.
+     * 
+ * + * string match_glob = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for matchGlob. + */ + public com.google.protobuf.ByteString getMatchGlobBytes() { + java.lang.Object ref = matchGlob_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + matchGlob_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Filter results to objects and prefixes that match this glob
+     * pattern. See [List objects using
+     * glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
+     * for the full syntax.
+     * 
+ * + * string match_glob = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The matchGlob to set. + * @return This builder for chaining. + */ + public Builder setMatchGlob(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + matchGlob_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to objects and prefixes that match this glob
+     * pattern. See [List objects using
+     * glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
+     * for the full syntax.
+     * 
+ * + * string match_glob = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearMatchGlob() { + matchGlob_ = getDefaultInstance().getMatchGlob(); + bitField0_ = (bitField0_ & ~0x00001000); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Filter results to objects and prefixes that match this glob
+     * pattern. See [List objects using
+     * glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
+     * for the full syntax.
+     * 
+ * + * string match_glob = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for matchGlob to set. + * @return This builder for chaining. + */ + public Builder setMatchGlobBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + matchGlob_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
+     * Optional. An expression used to filter the returned objects by the
+     * `context` field. For the full syntax, see [Filter objects by contexts
+     * syntax](https://cloud.google.com/storage/docs/listing-objects#filter-by-object-contexts-syntax).
+     * If a `delimiter` is set, the returned `prefixes` are exempt from this
+     * filter.
+     * 
+ * + * string filter = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. An expression used to filter the returned objects by the
+     * `context` field. For the full syntax, see [Filter objects by contexts
+     * syntax](https://cloud.google.com/storage/docs/listing-objects#filter-by-object-contexts-syntax).
+     * If a `delimiter` is set, the returned `prefixes` are exempt from this
+     * filter.
+     * 
+ * + * string filter = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. An expression used to filter the returned objects by the
+     * `context` field. For the full syntax, see [Filter objects by contexts
+     * syntax](https://cloud.google.com/storage/docs/listing-objects#filter-by-object-contexts-syntax).
+     * If a `delimiter` is set, the returned `prefixes` are exempt from this
+     * filter.
+     * 
+ * + * string filter = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. An expression used to filter the returned objects by the
+     * `context` field. For the full syntax, see [Filter objects by contexts
+     * syntax](https://cloud.google.com/storage/docs/listing-objects#filter-by-object-contexts-syntax).
+     * If a `delimiter` is set, the returned `prefixes` are exempt from this
+     * filter.
+     * 
+ * + * string filter = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00002000); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. An expression used to filter the returned objects by the
+     * `context` field. For the full syntax, see [Filter objects by contexts
+     * syntax](https://cloud.google.com/storage/docs/listing-objects#filter-by-object-contexts-syntax).
+     * If a `delimiter` is set, the returned `prefixes` are exempt from this
+     * filter.
+     * 
+ * + * string filter = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ListObjectsRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ListObjectsRequest) + private static final com.google.storage.v2.ListObjectsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ListObjectsRequest(); + } + + public static com.google.storage.v2.ListObjectsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListObjectsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.storage.v2.ListObjectsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListObjectsRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListObjectsRequestOrBuilder.java new file mode 100644 index 000000000000..0bcd04620f1b --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListObjectsRequestOrBuilder.java @@ -0,0 +1,394 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ListObjectsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ListObjectsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the bucket in which to look for objects.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Name of the bucket in which to look for objects.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. Maximum number of `items` plus `prefixes` to return
+   * in a single page of responses. As duplicate `prefixes` are
+   * omitted, fewer total results might be returned than requested. The service
+   * uses this parameter or 1,000 items, whichever is smaller.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
+   * Optional. A previously-returned page token representing part of the larger
+   * set of results to view.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); + + /** + * + * + *
+   * Optional. If set, returns results in a directory-like mode. `items`
+   * contains only objects whose names, aside from the `prefix`, do not contain
+   * `delimiter`. Objects whose names, aside from the `prefix`, contain
+   * `delimiter` has their name, truncated after the `delimiter`, returned in
+   * `prefixes`. Duplicate `prefixes` are omitted.
+   * 
+ * + * string delimiter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The delimiter. + */ + java.lang.String getDelimiter(); + + /** + * + * + *
+   * Optional. If set, returns results in a directory-like mode. `items`
+   * contains only objects whose names, aside from the `prefix`, do not contain
+   * `delimiter`. Objects whose names, aside from the `prefix`, contain
+   * `delimiter` has their name, truncated after the `delimiter`, returned in
+   * `prefixes`. Duplicate `prefixes` are omitted.
+   * 
+ * + * string delimiter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for delimiter. + */ + com.google.protobuf.ByteString getDelimiterBytes(); + + /** + * + * + *
+   * Optional. If true, objects that end in exactly one instance of `delimiter`
+   * has their metadata included in `items` in addition to
+   * `prefixes`.
+   * 
+ * + * bool include_trailing_delimiter = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The includeTrailingDelimiter. + */ + boolean getIncludeTrailingDelimiter(); + + /** + * + * + *
+   * Optional. Filter results to objects whose names begin with this prefix.
+   * 
+ * + * string prefix = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The prefix. + */ + java.lang.String getPrefix(); + + /** + * + * + *
+   * Optional. Filter results to objects whose names begin with this prefix.
+   * 
+ * + * string prefix = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for prefix. + */ + com.google.protobuf.ByteString getPrefixBytes(); + + /** + * + * + *
+   * Optional. If `true`, lists all versions of an object as distinct results.
+   * 
+ * + * bool versions = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The versions. + */ + boolean getVersions(); + + /** + * + * + *
+   * Mask specifying which fields to read from each result.
+   * If no mask is specified, defaults to all fields except `items.acl` and
+   * `items.owner`.
+   * `*` might be used to mean all fields.
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + * + * @return Whether the readMask field is set. + */ + boolean hasReadMask(); + + /** + * + * + *
+   * Mask specifying which fields to read from each result.
+   * If no mask is specified, defaults to all fields except `items.acl` and
+   * `items.owner`.
+   * `*` might be used to mean all fields.
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + * + * @return The readMask. + */ + com.google.protobuf.FieldMask getReadMask(); + + /** + * + * + *
+   * Mask specifying which fields to read from each result.
+   * If no mask is specified, defaults to all fields except `items.acl` and
+   * `items.owner`.
+   * `*` might be used to mean all fields.
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 8; + */ + com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder(); + + /** + * + * + *
+   * Optional. Filter results to objects whose names are lexicographically equal
+   * to or after `lexicographic_start`. If `lexicographic_end` is also set, the
+   * objects listed have names between `lexicographic_start` (inclusive) and
+   * `lexicographic_end` (exclusive).
+   * 
+ * + * string lexicographic_start = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lexicographicStart. + */ + java.lang.String getLexicographicStart(); + + /** + * + * + *
+   * Optional. Filter results to objects whose names are lexicographically equal
+   * to or after `lexicographic_start`. If `lexicographic_end` is also set, the
+   * objects listed have names between `lexicographic_start` (inclusive) and
+   * `lexicographic_end` (exclusive).
+   * 
+ * + * string lexicographic_start = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for lexicographicStart. + */ + com.google.protobuf.ByteString getLexicographicStartBytes(); + + /** + * + * + *
+   * Optional. Filter results to objects whose names are lexicographically
+   * before `lexicographic_end`. If `lexicographic_start` is also set, the
+   * objects listed have names between `lexicographic_start` (inclusive) and
+   * `lexicographic_end` (exclusive).
+   * 
+ * + * string lexicographic_end = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lexicographicEnd. + */ + java.lang.String getLexicographicEnd(); + + /** + * + * + *
+   * Optional. Filter results to objects whose names are lexicographically
+   * before `lexicographic_end`. If `lexicographic_start` is also set, the
+   * objects listed have names between `lexicographic_start` (inclusive) and
+   * `lexicographic_end` (exclusive).
+   * 
+ * + * string lexicographic_end = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for lexicographicEnd. + */ + com.google.protobuf.ByteString getLexicographicEndBytes(); + + /** + * + * + *
+   * Optional. If true, only list all soft-deleted versions of the object.
+   * Soft delete policy is required to set this option.
+   * 
+ * + * bool soft_deleted = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The softDeleted. + */ + boolean getSoftDeleted(); + + /** + * + * + *
+   * Optional. If true, includes folders and managed folders (besides objects)
+   * in the returned `prefixes`. Requires `delimiter` to be set to '/'.
+   * 
+ * + * bool include_folders_as_prefixes = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The includeFoldersAsPrefixes. + */ + boolean getIncludeFoldersAsPrefixes(); + + /** + * + * + *
+   * Optional. Filter results to objects and prefixes that match this glob
+   * pattern. See [List objects using
+   * glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
+   * for the full syntax.
+   * 
+ * + * string match_glob = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The matchGlob. + */ + java.lang.String getMatchGlob(); + + /** + * + * + *
+   * Optional. Filter results to objects and prefixes that match this glob
+   * pattern. See [List objects using
+   * glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
+   * for the full syntax.
+   * 
+ * + * string match_glob = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for matchGlob. + */ + com.google.protobuf.ByteString getMatchGlobBytes(); + + /** + * + * + *
+   * Optional. An expression used to filter the returned objects by the
+   * `context` field. For the full syntax, see [Filter objects by contexts
+   * syntax](https://cloud.google.com/storage/docs/listing-objects#filter-by-object-contexts-syntax).
+   * If a `delimiter` is set, the returned `prefixes` are exempt from this
+   * filter.
+   * 
+ * + * string filter = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
+   * Optional. An expression used to filter the returned objects by the
+   * `context` field. For the full syntax, see [Filter objects by contexts
+   * syntax](https://cloud.google.com/storage/docs/listing-objects#filter-by-object-contexts-syntax).
+   * If a `delimiter` is set, the returned `prefixes` are exempt from this
+   * filter.
+   * 
+ * + * string filter = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListObjectsResponse.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListObjectsResponse.java new file mode 100644 index 000000000000..e8eba54128a2 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListObjectsResponse.java @@ -0,0 +1,1414 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * The result of a call to Objects.ListObjects
+ * 
+ * + * Protobuf type {@code google.storage.v2.ListObjectsResponse} + */ +@com.google.protobuf.Generated +public final class ListObjectsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ListObjectsResponse) + ListObjectsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListObjectsResponse"); + } + + // Use ListObjectsResponse.newBuilder() to construct. + private ListObjectsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListObjectsResponse() { + objects_ = java.util.Collections.emptyList(); + prefixes_ = com.google.protobuf.LazyStringArrayList.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListObjectsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListObjectsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ListObjectsResponse.class, + com.google.storage.v2.ListObjectsResponse.Builder.class); + } + + public static final int OBJECTS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List objects_; + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + @java.lang.Override + public java.util.List getObjectsList() { + return objects_; + } + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + @java.lang.Override + public java.util.List getObjectsOrBuilderList() { + return objects_; + } + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + @java.lang.Override + public int getObjectsCount() { + return objects_.size(); + } + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + @java.lang.Override + public com.google.storage.v2.Object getObjects(int index) { + return objects_.get(index); + } + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getObjectsOrBuilder(int index) { + return objects_.get(index); + } + + public static final int PREFIXES_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList prefixes_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+   * The list of prefixes of objects matching-but-not-listed up to and including
+   * the requested delimiter.
+   * 
+ * + * repeated string prefixes = 2; + * + * @return A list containing the prefixes. + */ + public com.google.protobuf.ProtocolStringList getPrefixesList() { + return prefixes_; + } + + /** + * + * + *
+   * The list of prefixes of objects matching-but-not-listed up to and including
+   * the requested delimiter.
+   * 
+ * + * repeated string prefixes = 2; + * + * @return The count of prefixes. + */ + public int getPrefixesCount() { + return prefixes_.size(); + } + + /** + * + * + *
+   * The list of prefixes of objects matching-but-not-listed up to and including
+   * the requested delimiter.
+   * 
+ * + * repeated string prefixes = 2; + * + * @param index The index of the element to return. + * @return The prefixes at the given index. + */ + public java.lang.String getPrefixes(int index) { + return prefixes_.get(index); + } + + /** + * + * + *
+   * The list of prefixes of objects matching-but-not-listed up to and including
+   * the requested delimiter.
+   * 
+ * + * repeated string prefixes = 2; + * + * @param index The index of the value to return. + * @return The bytes of the prefixes at the given index. + */ + public com.google.protobuf.ByteString getPrefixesBytes(int index) { + return prefixes_.getByteString(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 3; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 3; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < objects_.size(); i++) { + output.writeMessage(1, objects_.get(i)); + } + for (int i = 0; i < prefixes_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, prefixes_.getRaw(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < objects_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, objects_.get(i)); + } + { + int dataSize = 0; + for (int i = 0; i < prefixes_.size(); i++) { + dataSize += computeStringSizeNoTag(prefixes_.getRaw(i)); + } + size += dataSize; + size += 1 * getPrefixesList().size(); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = 
size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ListObjectsResponse)) { + return super.equals(obj); + } + com.google.storage.v2.ListObjectsResponse other = + (com.google.storage.v2.ListObjectsResponse) obj; + + if (!getObjectsList().equals(other.getObjectsList())) return false; + if (!getPrefixesList().equals(other.getPrefixesList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getObjectsCount() > 0) { + hash = (37 * hash) + OBJECTS_FIELD_NUMBER; + hash = (53 * hash) + getObjectsList().hashCode(); + } + if (getPrefixesCount() > 0) { + hash = (37 * hash) + PREFIXES_FIELD_NUMBER; + hash = (53 * hash) + getPrefixesList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ListObjectsResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ListObjectsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ListObjectsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + 
public static com.google.storage.v2.ListObjectsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ListObjectsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ListObjectsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ListObjectsResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ListObjectsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ListObjectsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ListObjectsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ListObjectsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ListObjectsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ListObjectsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * The result of a call to Objects.ListObjects
+   * 
+ * + * Protobuf type {@code google.storage.v2.ListObjectsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ListObjectsResponse) + com.google.storage.v2.ListObjectsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListObjectsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListObjectsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ListObjectsResponse.class, + com.google.storage.v2.ListObjectsResponse.Builder.class); + } + + // Construct using com.google.storage.v2.ListObjectsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (objectsBuilder_ == null) { + objects_ = java.util.Collections.emptyList(); + } else { + objects_ = null; + objectsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + prefixes_ = com.google.protobuf.LazyStringArrayList.emptyList(); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ListObjectsResponse_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ListObjectsResponse getDefaultInstanceForType() { + return com.google.storage.v2.ListObjectsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ListObjectsResponse build() { + 
com.google.storage.v2.ListObjectsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ListObjectsResponse buildPartial() { + com.google.storage.v2.ListObjectsResponse result = + new com.google.storage.v2.ListObjectsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.storage.v2.ListObjectsResponse result) { + if (objectsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + objects_ = java.util.Collections.unmodifiableList(objects_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.objects_ = objects_; + } else { + result.objects_ = objectsBuilder_.build(); + } + } + + private void buildPartial0(com.google.storage.v2.ListObjectsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + prefixes_.makeImmutable(); + result.prefixes_ = prefixes_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ListObjectsResponse) { + return mergeFrom((com.google.storage.v2.ListObjectsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ListObjectsResponse other) { + if (other == com.google.storage.v2.ListObjectsResponse.getDefaultInstance()) return this; + if (objectsBuilder_ == null) { + if (!other.objects_.isEmpty()) { + if (objects_.isEmpty()) { + objects_ = other.objects_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureObjectsIsMutable(); + objects_.addAll(other.objects_); + } + onChanged(); + } + } else { + if (!other.objects_.isEmpty()) { + if 
(objectsBuilder_.isEmpty()) { + objectsBuilder_.dispose(); + objectsBuilder_ = null; + objects_ = other.objects_; + bitField0_ = (bitField0_ & ~0x00000001); + objectsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetObjectsFieldBuilder() + : null; + } else { + objectsBuilder_.addAllMessages(other.objects_); + } + } + } + if (!other.prefixes_.isEmpty()) { + if (prefixes_.isEmpty()) { + prefixes_ = other.prefixes_; + bitField0_ |= 0x00000002; + } else { + ensurePrefixesIsMutable(); + prefixes_.addAll(other.prefixes_); + } + onChanged(); + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.storage.v2.Object m = + input.readMessage(com.google.storage.v2.Object.parser(), extensionRegistry); + if (objectsBuilder_ == null) { + ensureObjectsIsMutable(); + objects_.add(m); + } else { + objectsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + ensurePrefixesIsMutable(); + prefixes_.add(s); + break; + } // case 18 + case 26: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch 
(tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List objects_ = + java.util.Collections.emptyList(); + + private void ensureObjectsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + objects_ = new java.util.ArrayList(objects_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + objectsBuilder_; + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public java.util.List getObjectsList() { + if (objectsBuilder_ == null) { + return java.util.Collections.unmodifiableList(objects_); + } else { + return objectsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public int getObjectsCount() { + if (objectsBuilder_ == null) { + return objects_.size(); + } else { + return objectsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public com.google.storage.v2.Object getObjects(int index) { + if (objectsBuilder_ == null) { + return objects_.get(index); + } else { + return objectsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public Builder setObjects(int index, com.google.storage.v2.Object value) { + if (objectsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureObjectsIsMutable(); + objects_.set(index, value); + onChanged(); + } else { + objectsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public Builder setObjects(int index, com.google.storage.v2.Object.Builder builderForValue) { + if (objectsBuilder_ == null) { + ensureObjectsIsMutable(); + objects_.set(index, builderForValue.build()); + onChanged(); + } else { + objectsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public Builder addObjects(com.google.storage.v2.Object value) { + if (objectsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureObjectsIsMutable(); + objects_.add(value); + onChanged(); + } else { + objectsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public Builder addObjects(int index, com.google.storage.v2.Object value) { + if (objectsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureObjectsIsMutable(); + objects_.add(index, value); + onChanged(); + } else { + objectsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public Builder addObjects(com.google.storage.v2.Object.Builder builderForValue) { + if (objectsBuilder_ == null) { + ensureObjectsIsMutable(); + objects_.add(builderForValue.build()); + onChanged(); + } else { + objectsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public Builder addObjects(int index, com.google.storage.v2.Object.Builder builderForValue) { + if (objectsBuilder_ == null) { + ensureObjectsIsMutable(); + objects_.add(index, builderForValue.build()); + onChanged(); + } else { + objectsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public Builder addAllObjects( + java.lang.Iterable values) { + if (objectsBuilder_ == null) { + ensureObjectsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, objects_); + onChanged(); + } else { + objectsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public Builder clearObjects() { + if (objectsBuilder_ == null) { + objects_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + objectsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public Builder removeObjects(int index) { + if (objectsBuilder_ == null) { + ensureObjectsIsMutable(); + objects_.remove(index); + onChanged(); + } else { + objectsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public com.google.storage.v2.Object.Builder getObjectsBuilder(int index) { + return internalGetObjectsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public com.google.storage.v2.ObjectOrBuilder getObjectsOrBuilder(int index) { + if (objectsBuilder_ == null) { + return objects_.get(index); + } else { + return objectsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public java.util.List + getObjectsOrBuilderList() { + if (objectsBuilder_ != null) { + return objectsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(objects_); + } + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public com.google.storage.v2.Object.Builder addObjectsBuilder() { + return internalGetObjectsFieldBuilder() + .addBuilder(com.google.storage.v2.Object.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public com.google.storage.v2.Object.Builder addObjectsBuilder(int index) { + return internalGetObjectsFieldBuilder() + .addBuilder(index, com.google.storage.v2.Object.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of items.
+     * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + public java.util.List getObjectsBuilderList() { + return internalGetObjectsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + internalGetObjectsFieldBuilder() { + if (objectsBuilder_ == null) { + objectsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder>( + objects_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + objects_ = null; + } + return objectsBuilder_; + } + + private com.google.protobuf.LazyStringArrayList prefixes_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensurePrefixesIsMutable() { + if (!prefixes_.isModifiable()) { + prefixes_ = new com.google.protobuf.LazyStringArrayList(prefixes_); + } + bitField0_ |= 0x00000002; + } + + /** + * + * + *
+     * The list of prefixes of objects matching-but-not-listed up to and including
+     * the requested delimiter.
+     * 
+ * + * repeated string prefixes = 2; + * + * @return A list containing the prefixes. + */ + public com.google.protobuf.ProtocolStringList getPrefixesList() { + prefixes_.makeImmutable(); + return prefixes_; + } + + /** + * + * + *
+     * The list of prefixes of objects matching-but-not-listed up to and including
+     * the requested delimiter.
+     * 
+ * + * repeated string prefixes = 2; + * + * @return The count of prefixes. + */ + public int getPrefixesCount() { + return prefixes_.size(); + } + + /** + * + * + *
+     * The list of prefixes of objects matching-but-not-listed up to and including
+     * the requested delimiter.
+     * 
+ * + * repeated string prefixes = 2; + * + * @param index The index of the element to return. + * @return The prefixes at the given index. + */ + public java.lang.String getPrefixes(int index) { + return prefixes_.get(index); + } + + /** + * + * + *
+     * The list of prefixes of objects matching-but-not-listed up to and including
+     * the requested delimiter.
+     * 
+ * + * repeated string prefixes = 2; + * + * @param index The index of the value to return. + * @return The bytes of the prefixes at the given index. + */ + public com.google.protobuf.ByteString getPrefixesBytes(int index) { + return prefixes_.getByteString(index); + } + + /** + * + * + *
+     * The list of prefixes of objects matching-but-not-listed up to and including
+     * the requested delimiter.
+     * 
+ * + * repeated string prefixes = 2; + * + * @param index The index to set the value at. + * @param value The prefixes to set. + * @return This builder for chaining. + */ + public Builder setPrefixes(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePrefixesIsMutable(); + prefixes_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The list of prefixes of objects matching-but-not-listed up to and including
+     * the requested delimiter.
+     * 
+ * + * repeated string prefixes = 2; + * + * @param value The prefixes to add. + * @return This builder for chaining. + */ + public Builder addPrefixes(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePrefixesIsMutable(); + prefixes_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The list of prefixes of objects matching-but-not-listed up to and including
+     * the requested delimiter.
+     * 
+ * + * repeated string prefixes = 2; + * + * @param values The prefixes to add. + * @return This builder for chaining. + */ + public Builder addAllPrefixes(java.lang.Iterable values) { + ensurePrefixesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, prefixes_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The list of prefixes of objects matching-but-not-listed up to and including
+     * the requested delimiter.
+     * 
+ * + * repeated string prefixes = 2; + * + * @return This builder for chaining. + */ + public Builder clearPrefixes() { + prefixes_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + + /** + * + * + *
+     * The list of prefixes of objects matching-but-not-listed up to and including
+     * the requested delimiter.
+     * 
+ * + * repeated string prefixes = 2; + * + * @param value The bytes of the prefixes to add. + * @return This builder for chaining. + */ + public Builder addPrefixesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensurePrefixesIsMutable(); + prefixes_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 3; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 3; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 3; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 3; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * The continuation token, used to page through large result sets. Provide
+     * this value in a subsequent request to return the next page of results.
+     * 
+ * + * string next_page_token = 3; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ListObjectsResponse) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ListObjectsResponse) + private static final com.google.storage.v2.ListObjectsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ListObjectsResponse(); + } + + public static com.google.storage.v2.ListObjectsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListObjectsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.storage.v2.ListObjectsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListObjectsResponseOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListObjectsResponseOrBuilder.java new file mode 100644 index 000000000000..8025fe059b20 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ListObjectsResponseOrBuilder.java @@ -0,0 +1,169 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ListObjectsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ListObjectsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + java.util.List getObjectsList(); + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + com.google.storage.v2.Object getObjects(int index); + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + int getObjectsCount(); + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + java.util.List getObjectsOrBuilderList(); + + /** + * + * + *
+   * The list of items.
+   * 
+ * + * repeated .google.storage.v2.Object objects = 1; + */ + com.google.storage.v2.ObjectOrBuilder getObjectsOrBuilder(int index); + + /** + * + * + *
+   * The list of prefixes of objects matching-but-not-listed up to and including
+   * the requested delimiter.
+   * 
+ * + * repeated string prefixes = 2; + * + * @return A list containing the prefixes. + */ + java.util.List getPrefixesList(); + + /** + * + * + *
+   * The list of prefixes of objects matching-but-not-listed up to and including
+   * the requested delimiter.
+   * 
+ * + * repeated string prefixes = 2; + * + * @return The count of prefixes. + */ + int getPrefixesCount(); + + /** + * + * + *
+   * The list of prefixes of objects matching-but-not-listed up to and including
+   * the requested delimiter.
+   * 
+ * + * repeated string prefixes = 2; + * + * @param index The index of the element to return. + * @return The prefixes at the given index. + */ + java.lang.String getPrefixes(int index); + + /** + * + * + *
+   * The list of prefixes of objects matching-but-not-listed up to and including
+   * the requested delimiter.
+   * 
+ * + * repeated string prefixes = 2; + * + * @param index The index of the value to return. + * @return The bytes of the prefixes at the given index. + */ + com.google.protobuf.ByteString getPrefixesBytes(int index); + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 3; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
+   * The continuation token, used to page through large result sets. Provide
+   * this value in a subsequent request to return the next page of results.
+   * 
+ * + * string next_page_token = 3; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/LockBucketRetentionPolicyRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/LockBucketRetentionPolicyRequest.java new file mode 100644 index 000000000000..33a943dcf693 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/LockBucketRetentionPolicyRequest.java @@ -0,0 +1,715 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for
+ * [LockBucketRetentionPolicy][google.storage.v2.Storage.LockBucketRetentionPolicy].
+ * 
+ * + * Protobuf type {@code google.storage.v2.LockBucketRetentionPolicyRequest} + */ +@com.google.protobuf.Generated +public final class LockBucketRetentionPolicyRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.LockBucketRetentionPolicyRequest) + LockBucketRetentionPolicyRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "LockBucketRetentionPolicyRequest"); + } + + // Use LockBucketRetentionPolicyRequest.newBuilder() to construct. + private LockBucketRetentionPolicyRequest( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private LockBucketRetentionPolicyRequest() { + bucket_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_LockBucketRetentionPolicyRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_LockBucketRetentionPolicyRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.LockBucketRetentionPolicyRequest.class, + com.google.storage.v2.LockBucketRetentionPolicyRequest.Builder.class); + } + + public static final int BUCKET_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object bucket_ = ""; + + /** + * + * + *
+   * Required. Name of a bucket.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + @java.lang.Override + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of a bucket.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 2; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Required. Makes the operation conditional on whether bucket's current
+   * metageneration matches the given value. Must be positive.
+   * 
+ * + * int64 if_metageneration_match = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, bucket_); + } + if (ifMetagenerationMatch_ != 0L) { + output.writeInt64(2, ifMetagenerationMatch_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, bucket_); + } + if (ifMetagenerationMatch_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, ifMetagenerationMatch_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.LockBucketRetentionPolicyRequest)) { + return super.equals(obj); + } + com.google.storage.v2.LockBucketRetentionPolicyRequest other = + (com.google.storage.v2.LockBucketRetentionPolicyRequest) obj; + + if (!getBucket().equals(other.getBucket())) return false; + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) 
return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getBucket().hashCode(); + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.LockBucketRetentionPolicyRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.LockBucketRetentionPolicyRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.LockBucketRetentionPolicyRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.LockBucketRetentionPolicyRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.LockBucketRetentionPolicyRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.LockBucketRetentionPolicyRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.LockBucketRetentionPolicyRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.LockBucketRetentionPolicyRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.LockBucketRetentionPolicyRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.LockBucketRetentionPolicyRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.LockBucketRetentionPolicyRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.LockBucketRetentionPolicyRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.storage.v2.LockBucketRetentionPolicyRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for
+   * [LockBucketRetentionPolicy][google.storage.v2.Storage.LockBucketRetentionPolicy].
+   * 
+ * + * Protobuf type {@code google.storage.v2.LockBucketRetentionPolicyRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.LockBucketRetentionPolicyRequest) + com.google.storage.v2.LockBucketRetentionPolicyRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_LockBucketRetentionPolicyRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_LockBucketRetentionPolicyRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.LockBucketRetentionPolicyRequest.class, + com.google.storage.v2.LockBucketRetentionPolicyRequest.Builder.class); + } + + // Construct using com.google.storage.v2.LockBucketRetentionPolicyRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + bucket_ = ""; + ifMetagenerationMatch_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_LockBucketRetentionPolicyRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.LockBucketRetentionPolicyRequest getDefaultInstanceForType() { + return com.google.storage.v2.LockBucketRetentionPolicyRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.LockBucketRetentionPolicyRequest build() { + com.google.storage.v2.LockBucketRetentionPolicyRequest result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.LockBucketRetentionPolicyRequest buildPartial() { + com.google.storage.v2.LockBucketRetentionPolicyRequest result = + new com.google.storage.v2.LockBucketRetentionPolicyRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.LockBucketRetentionPolicyRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.bucket_ = bucket_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.LockBucketRetentionPolicyRequest) { + return mergeFrom((com.google.storage.v2.LockBucketRetentionPolicyRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.LockBucketRetentionPolicyRequest other) { + if (other == com.google.storage.v2.LockBucketRetentionPolicyRequest.getDefaultInstance()) + return this; + if (!other.getBucket().isEmpty()) { + bucket_ = other.bucket_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getIfMetagenerationMatch() != 0L) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int 
tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + bucket_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object bucket_ = ""; + + /** + * + * + *
+     * Required. Name of a bucket.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of a bucket.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of a bucket.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bucket to set. + * @return This builder for chaining. + */ + public Builder setBucket(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of a bucket.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearBucket() { + bucket_ = getDefaultInstance().getBucket(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of a bucket.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for bucket to set. + * @return This builder for chaining. + */ + public Builder setBucketBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Required. Makes the operation conditional on whether bucket's current
+     * metageneration matches the given value. Must be positive.
+     * 
+ * + * int64 if_metageneration_match = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Required. Makes the operation conditional on whether bucket's current
+     * metageneration matches the given value. Must be positive.
+     * 
+ * + * int64 if_metageneration_match = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Makes the operation conditional on whether bucket's current
+     * metageneration matches the given value. Must be positive.
+     * 
+ * + * int64 if_metageneration_match = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000002); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.LockBucketRetentionPolicyRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.LockBucketRetentionPolicyRequest) + private static final com.google.storage.v2.LockBucketRetentionPolicyRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.LockBucketRetentionPolicyRequest(); + } + + public static com.google.storage.v2.LockBucketRetentionPolicyRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public LockBucketRetentionPolicyRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.storage.v2.LockBucketRetentionPolicyRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/LockBucketRetentionPolicyRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/LockBucketRetentionPolicyRequestOrBuilder.java new file mode 100644 index 000000000000..f3a47120c5cd --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/LockBucketRetentionPolicyRequestOrBuilder.java @@ -0,0 +1,72 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface LockBucketRetentionPolicyRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.LockBucketRetentionPolicyRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of a bucket.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + java.lang.String getBucket(); + + /** + * + * + *
+   * Required. Name of a bucket.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + com.google.protobuf.ByteString getBucketBytes(); + + /** + * + * + *
+   * Required. Makes the operation conditional on whether bucket's current
+   * metageneration matches the given value. Must be positive.
+   * 
+ * + * int64 if_metageneration_match = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/MoveObjectRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/MoveObjectRequest.java new file mode 100644 index 000000000000..5b4217d49366 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/MoveObjectRequest.java @@ -0,0 +1,2342 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for [MoveObject][google.storage.v2.Storage.MoveObject].
+ * 
+ * + * Protobuf type {@code google.storage.v2.MoveObjectRequest} + */ +@com.google.protobuf.Generated +public final class MoveObjectRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.MoveObjectRequest) + MoveObjectRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "MoveObjectRequest"); + } + + // Use MoveObjectRequest.newBuilder() to construct. + private MoveObjectRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private MoveObjectRequest() { + bucket_ = ""; + sourceObject_ = ""; + destinationObject_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_MoveObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_MoveObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.MoveObjectRequest.class, + com.google.storage.v2.MoveObjectRequest.Builder.class); + } + + private int bitField0_; + public static final int BUCKET_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object bucket_ = ""; + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + @java.lang.Override + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SOURCE_OBJECT_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object sourceObject_ = ""; + + /** + * + * + *
+   * Required. Name of the source object.
+   * 
+ * + * string source_object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sourceObject. + */ + @java.lang.Override + public java.lang.String getSourceObject() { + java.lang.Object ref = sourceObject_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceObject_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the source object.
+   * 
+ * + * string source_object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sourceObject. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSourceObjectBytes() { + java.lang.Object ref = sourceObject_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceObject_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DESTINATION_OBJECT_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object destinationObject_ = ""; + + /** + * + * + *
+   * Required. Name of the destination object.
+   * 
+ * + * string destination_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The destinationObject. + */ + @java.lang.Override + public java.lang.String getDestinationObject() { + java.lang.Object ref = destinationObject_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationObject_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the destination object.
+   * 
+ * + * string destination_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for destinationObject. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDestinationObjectBytes() { + java.lang.Object ref = destinationObject_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationObject_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_SOURCE_GENERATION_MATCH_FIELD_NUMBER = 4; + private long ifSourceGenerationMatch_ = 0L; + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current generation matches the given value. `if_source_generation_match`
+   * and `if_source_generation_not_match` conditions are mutually exclusive:
+   * it's an error for both of them to be set in the request.
+   * 
+ * + * optional int64 if_source_generation_match = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifSourceGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceGenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current generation matches the given value. `if_source_generation_match`
+   * and `if_source_generation_not_match` conditions are mutually exclusive:
+   * it's an error for both of them to be set in the request.
+   * 
+ * + * optional int64 if_source_generation_match = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifSourceGenerationMatch. + */ + @java.lang.Override + public long getIfSourceGenerationMatch() { + return ifSourceGenerationMatch_; + } + + public static final int IF_SOURCE_GENERATION_NOT_MATCH_FIELD_NUMBER = 5; + private long ifSourceGenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current generation does not match the given value.
+   * `if_source_generation_match` and `if_source_generation_not_match`
+   * conditions are mutually exclusive: it's an error for both of them to be set
+   * in the request.
+   * 
+ * + * + * optional int64 if_source_generation_not_match = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifSourceGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceGenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current generation does not match the given value.
+   * `if_source_generation_match` and `if_source_generation_not_match`
+   * conditions are mutually exclusive: it's an error for both of them to be set
+   * in the request.
+   * 
+ * + * + * optional int64 if_source_generation_not_match = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifSourceGenerationNotMatch. + */ + @java.lang.Override + public long getIfSourceGenerationNotMatch() { + return ifSourceGenerationNotMatch_; + } + + public static final int IF_SOURCE_METAGENERATION_MATCH_FIELD_NUMBER = 6; + private long ifSourceMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current metageneration matches the given value.
+   * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+   * conditions are mutually exclusive: it's an error for both of them to be set
+   * in the request.
+   * 
+ * + * + * optional int64 if_source_metageneration_match = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifSourceMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceMetagenerationMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current metageneration matches the given value.
+   * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+   * conditions are mutually exclusive: it's an error for both of them to be set
+   * in the request.
+   * 
+ * + * + * optional int64 if_source_metageneration_match = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifSourceMetagenerationMatch. + */ + @java.lang.Override + public long getIfSourceMetagenerationMatch() { + return ifSourceMetagenerationMatch_; + } + + public static final int IF_SOURCE_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 7; + private long ifSourceMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current metageneration does not match the given value.
+   * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+   * conditions are mutually exclusive: it's an error for both of them to be set
+   * in the request.
+   * 
+ * + * + * optional int64 if_source_metageneration_not_match = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifSourceMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceMetagenerationNotMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current metageneration does not match the given value.
+   * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+   * conditions are mutually exclusive: it's an error for both of them to be set
+   * in the request.
+   * 
+ * + * + * optional int64 if_source_metageneration_not_match = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifSourceMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfSourceMetagenerationNotMatch() { + return ifSourceMetagenerationNotMatch_; + } + + public static final int IF_GENERATION_MATCH_FIELD_NUMBER = 8; + private long ifGenerationMatch_ = 0L; + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current generation matches the given value. Setting to 0 makes the
+   * operation succeed only if there are no live versions of the object.
+   * `if_generation_match` and `if_generation_not_match` conditions are mutually
+   * exclusive: it's an error for both of them to be set in the request.
+   * 
+ * + * optional int64 if_generation_match = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current generation matches the given value. Setting to 0 makes the
+   * operation succeed only if there are no live versions of the object.
+   * `if_generation_match` and `if_generation_not_match` conditions are mutually
+   * exclusive: it's an error for both of them to be set in the request.
+   * 
+ * + * optional int64 if_generation_match = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + public static final int IF_GENERATION_NOT_MATCH_FIELD_NUMBER = 9; + private long ifGenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current generation does not match the given value. If no live
+   * object exists, the precondition fails. Setting to 0 makes the operation
+   * succeed only if there is a live version of the object.
+   * `if_generation_match` and `if_generation_not_match` conditions are mutually
+   * exclusive: it's an error for both of them to be set in the request.
+   * 
+ * + * optional int64 if_generation_not_match = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current generation does not match the given value. If no live
+   * object exists, the precondition fails. Setting to 0 makes the operation
+   * succeed only if there is a live version of the object.
+   * `if_generation_match` and `if_generation_not_match` conditions are mutually
+   * exclusive: it's an error for both of them to be set in the request.
+   * 
+ * + * optional int64 if_generation_not_match = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 10; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current metageneration matches the given value.
+   * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+   * mutually exclusive: it's an error for both of them to be set in the
+   * request.
+   * 
+ * + * optional int64 if_metageneration_match = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current metageneration matches the given value.
+   * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+   * mutually exclusive: it's an error for both of them to be set in the
+   * request.
+   * 
+ * + * optional int64 if_metageneration_match = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 11; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current metageneration does not match the given value.
+   * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+   * mutually exclusive: it's an error for both of them to be set in the
+   * request.
+   * 
+ * + * + * optional int64 if_metageneration_not_match = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current metageneration does not match the given value.
+   * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+   * mutually exclusive: it's an error for both of them to be set in the
+   * request.
+   * 
+ * + * + * optional int64 if_metageneration_not_match = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, bucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceObject_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, sourceObject_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationObject_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, destinationObject_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(4, ifSourceGenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(5, ifSourceGenerationNotMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt64(6, ifSourceMetagenerationMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeInt64(7, ifSourceMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeInt64(8, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeInt64(9, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000040) != 0)) { + output.writeInt64(10, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000080) != 0)) { + output.writeInt64(11, ifMetagenerationNotMatch_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int 
getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, bucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceObject_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, sourceObject_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationObject_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, destinationObject_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ifSourceGenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size(5, ifSourceGenerationNotMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size(6, ifSourceMetagenerationMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 7, ifSourceMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(8, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(9, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000040) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(10, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000080) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(11, ifMetagenerationNotMatch_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.storage.v2.MoveObjectRequest)) { + return super.equals(obj); + } + com.google.storage.v2.MoveObjectRequest other = (com.google.storage.v2.MoveObjectRequest) obj; + + if (!getBucket().equals(other.getBucket())) return false; + if (!getSourceObject().equals(other.getSourceObject())) return false; + if (!getDestinationObject().equals(other.getDestinationObject())) return false; + if (hasIfSourceGenerationMatch() != other.hasIfSourceGenerationMatch()) return false; + if (hasIfSourceGenerationMatch()) { + if (getIfSourceGenerationMatch() != other.getIfSourceGenerationMatch()) return false; + } + if (hasIfSourceGenerationNotMatch() != other.hasIfSourceGenerationNotMatch()) return false; + if (hasIfSourceGenerationNotMatch()) { + if (getIfSourceGenerationNotMatch() != other.getIfSourceGenerationNotMatch()) return false; + } + if (hasIfSourceMetagenerationMatch() != other.hasIfSourceMetagenerationMatch()) return false; + if (hasIfSourceMetagenerationMatch()) { + if (getIfSourceMetagenerationMatch() != other.getIfSourceMetagenerationMatch()) return false; + } + if (hasIfSourceMetagenerationNotMatch() != other.hasIfSourceMetagenerationNotMatch()) + return false; + if (hasIfSourceMetagenerationNotMatch()) { + if (getIfSourceMetagenerationNotMatch() != other.getIfSourceMetagenerationNotMatch()) + return false; + } + if (hasIfGenerationMatch() != other.hasIfGenerationMatch()) return false; + if (hasIfGenerationMatch()) { + if (getIfGenerationMatch() != other.getIfGenerationMatch()) return false; + } + if (hasIfGenerationNotMatch() != other.hasIfGenerationNotMatch()) return false; + if (hasIfGenerationNotMatch()) { + if (getIfGenerationNotMatch() != other.getIfGenerationNotMatch()) return false; + } + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != 
other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getBucket().hashCode(); + hash = (37 * hash) + SOURCE_OBJECT_FIELD_NUMBER; + hash = (53 * hash) + getSourceObject().hashCode(); + hash = (37 * hash) + DESTINATION_OBJECT_FIELD_NUMBER; + hash = (53 * hash) + getDestinationObject().hashCode(); + if (hasIfSourceGenerationMatch()) { + hash = (37 * hash) + IF_SOURCE_GENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfSourceGenerationMatch()); + } + if (hasIfSourceGenerationNotMatch()) { + hash = (37 * hash) + IF_SOURCE_GENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfSourceGenerationNotMatch()); + } + if (hasIfSourceMetagenerationMatch()) { + hash = (37 * hash) + IF_SOURCE_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfSourceMetagenerationMatch()); + } + if (hasIfSourceMetagenerationNotMatch()) { + hash = (37 * hash) + IF_SOURCE_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = + (53 * hash) + com.google.protobuf.Internal.hashLong(getIfSourceMetagenerationNotMatch()); + } + if (hasIfGenerationMatch()) { + hash = (37 * hash) + IF_GENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationMatch()); + } + if (hasIfGenerationNotMatch()) { + hash = (37 * hash) + IF_GENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationNotMatch()); + } + if 
(hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.MoveObjectRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.MoveObjectRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.MoveObjectRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.MoveObjectRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.MoveObjectRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.MoveObjectRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.MoveObjectRequest parseFrom(java.io.InputStream input) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.MoveObjectRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.MoveObjectRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.MoveObjectRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.MoveObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.MoveObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.MoveObjectRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for [MoveObject][google.storage.v2.Storage.MoveObject].
+   * 
+ * + * Protobuf type {@code google.storage.v2.MoveObjectRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.MoveObjectRequest) + com.google.storage.v2.MoveObjectRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_MoveObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_MoveObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.MoveObjectRequest.class, + com.google.storage.v2.MoveObjectRequest.Builder.class); + } + + // Construct using com.google.storage.v2.MoveObjectRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + bucket_ = ""; + sourceObject_ = ""; + destinationObject_ = ""; + ifSourceGenerationMatch_ = 0L; + ifSourceGenerationNotMatch_ = 0L; + ifSourceMetagenerationMatch_ = 0L; + ifSourceMetagenerationNotMatch_ = 0L; + ifGenerationMatch_ = 0L; + ifGenerationNotMatch_ = 0L; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_MoveObjectRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.MoveObjectRequest getDefaultInstanceForType() { + return com.google.storage.v2.MoveObjectRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.storage.v2.MoveObjectRequest build() { + com.google.storage.v2.MoveObjectRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.MoveObjectRequest buildPartial() { + com.google.storage.v2.MoveObjectRequest result = + new com.google.storage.v2.MoveObjectRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.MoveObjectRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.bucket_ = bucket_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.sourceObject_ = sourceObject_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.destinationObject_ = destinationObject_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.ifSourceGenerationMatch_ = ifSourceGenerationMatch_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.ifSourceGenerationNotMatch_ = ifSourceGenerationNotMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.ifSourceMetagenerationMatch_ = ifSourceMetagenerationMatch_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.ifSourceMetagenerationNotMatch_ = ifSourceMetagenerationNotMatch_; + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.ifGenerationMatch_ = ifGenerationMatch_; + to_bitField0_ |= 0x00000010; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.ifGenerationNotMatch_ = ifGenerationNotMatch_; + to_bitField0_ |= 0x00000020; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000040; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + 
result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000080; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.MoveObjectRequest) { + return mergeFrom((com.google.storage.v2.MoveObjectRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.MoveObjectRequest other) { + if (other == com.google.storage.v2.MoveObjectRequest.getDefaultInstance()) return this; + if (!other.getBucket().isEmpty()) { + bucket_ = other.bucket_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getSourceObject().isEmpty()) { + sourceObject_ = other.sourceObject_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getDestinationObject().isEmpty()) { + destinationObject_ = other.destinationObject_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasIfSourceGenerationMatch()) { + setIfSourceGenerationMatch(other.getIfSourceGenerationMatch()); + } + if (other.hasIfSourceGenerationNotMatch()) { + setIfSourceGenerationNotMatch(other.getIfSourceGenerationNotMatch()); + } + if (other.hasIfSourceMetagenerationMatch()) { + setIfSourceMetagenerationMatch(other.getIfSourceMetagenerationMatch()); + } + if (other.hasIfSourceMetagenerationNotMatch()) { + setIfSourceMetagenerationNotMatch(other.getIfSourceMetagenerationNotMatch()); + } + if (other.hasIfGenerationMatch()) { + setIfGenerationMatch(other.getIfGenerationMatch()); + } + if (other.hasIfGenerationNotMatch()) { + setIfGenerationNotMatch(other.getIfGenerationNotMatch()); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + 
@java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + bucket_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + sourceObject_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + destinationObject_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + ifSourceGenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 40: + { + ifSourceGenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 48: + { + ifSourceMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000020; + break; + } // case 48 + case 56: + { + ifSourceMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000040; + break; + } // case 56 + case 64: + { + ifGenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000080; + break; + } // case 64 + case 72: + { + ifGenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000100; + break; + } // case 72 + case 80: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000200; + break; + } // case 80 + case 88: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000400; + break; + } // case 88 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object bucket_ = ""; + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bucket to set. + * @return This builder for chaining. + */ + public Builder setBucket(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearBucket() { + bucket_ = getDefaultInstance().getBucket(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for bucket to set. + * @return This builder for chaining. + */ + public Builder setBucketBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object sourceObject_ = ""; + + /** + * + * + *
+     * Required. Name of the source object.
+     * 
+ * + * string source_object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sourceObject. + */ + public java.lang.String getSourceObject() { + java.lang.Object ref = sourceObject_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceObject_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the source object.
+     * 
+ * + * string source_object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sourceObject. + */ + public com.google.protobuf.ByteString getSourceObjectBytes() { + java.lang.Object ref = sourceObject_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceObject_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the source object.
+     * 
+ * + * string source_object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The sourceObject to set. + * @return This builder for chaining. + */ + public Builder setSourceObject(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sourceObject_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the source object.
+     * 
+ * + * string source_object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearSourceObject() { + sourceObject_ = getDefaultInstance().getSourceObject(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the source object.
+     * 
+ * + * string source_object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for sourceObject to set. + * @return This builder for chaining. + */ + public Builder setSourceObjectBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sourceObject_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object destinationObject_ = ""; + + /** + * + * + *
+     * Required. Name of the destination object.
+     * 
+ * + * string destination_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The destinationObject. + */ + public java.lang.String getDestinationObject() { + java.lang.Object ref = destinationObject_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationObject_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the destination object.
+     * 
+ * + * string destination_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for destinationObject. + */ + public com.google.protobuf.ByteString getDestinationObjectBytes() { + java.lang.Object ref = destinationObject_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationObject_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the destination object.
+     * 
+ * + * string destination_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The destinationObject to set. + * @return This builder for chaining. + */ + public Builder setDestinationObject(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + destinationObject_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the destination object.
+     * 
+ * + * string destination_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearDestinationObject() { + destinationObject_ = getDefaultInstance().getDestinationObject(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the destination object.
+     * 
+ * + * string destination_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for destinationObject to set. + * @return This builder for chaining. + */ + public Builder setDestinationObjectBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + destinationObject_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private long ifSourceGenerationMatch_; + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current generation matches the given value. `if_source_generation_match`
+     * and `if_source_generation_not_match` conditions are mutually exclusive:
+     * it's an error for both of them to be set in the request.
+     * 
+ * + * + * optional int64 if_source_generation_match = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifSourceGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceGenerationMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current generation matches the given value. `if_source_generation_match`
+     * and `if_source_generation_not_match` conditions are mutually exclusive:
+     * it's an error for both of them to be set in the request.
+     * 
+ * + * + * optional int64 if_source_generation_match = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifSourceGenerationMatch. + */ + @java.lang.Override + public long getIfSourceGenerationMatch() { + return ifSourceGenerationMatch_; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current generation matches the given value. `if_source_generation_match`
+     * and `if_source_generation_not_match` conditions are mutually exclusive:
+     * it's an error for both of them to be set in the request.
+     * 
+ * + * + * optional int64 if_source_generation_match = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The ifSourceGenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfSourceGenerationMatch(long value) { + + ifSourceGenerationMatch_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current generation matches the given value. `if_source_generation_match`
+     * and `if_source_generation_not_match` conditions are mutually exclusive:
+     * it's an error for both of them to be set in the request.
+     * 
+ * + * + * optional int64 if_source_generation_match = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearIfSourceGenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000008); + ifSourceGenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifSourceGenerationNotMatch_; + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current generation does not match the given value.
+     * `if_source_generation_match` and `if_source_generation_not_match`
+     * conditions are mutually exclusive: it's an error for both of them to be set
+     * in the request.
+     * 
+ * + * + * optional int64 if_source_generation_not_match = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifSourceGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceGenerationNotMatch() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current generation does not match the given value.
+     * `if_source_generation_match` and `if_source_generation_not_match`
+     * conditions are mutually exclusive: it's an error for both of them to be set
+     * in the request.
+     * 
+ * + * + * optional int64 if_source_generation_not_match = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifSourceGenerationNotMatch. + */ + @java.lang.Override + public long getIfSourceGenerationNotMatch() { + return ifSourceGenerationNotMatch_; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current generation does not match the given value.
+     * `if_source_generation_match` and `if_source_generation_not_match`
+     * conditions are mutually exclusive: it's an error for both of them to be set
+     * in the request.
+     * 
+ * + * + * optional int64 if_source_generation_not_match = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The ifSourceGenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfSourceGenerationNotMatch(long value) { + + ifSourceGenerationNotMatch_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current generation does not match the given value.
+     * `if_source_generation_match` and `if_source_generation_not_match`
+     * conditions are mutually exclusive: it's an error for both of them to be set
+     * in the request.
+     * 
+ * + * + * optional int64 if_source_generation_not_match = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearIfSourceGenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000010); + ifSourceGenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long ifSourceMetagenerationMatch_; + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current metageneration matches the given value.
+     * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+     * conditions are mutually exclusive: it's an error for both of them to be set
+     * in the request.
+     * 
+ * + * + * optional int64 if_source_metageneration_match = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifSourceMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceMetagenerationMatch() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current metageneration matches the given value.
+     * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+     * conditions are mutually exclusive: it's an error for both of them to be set
+     * in the request.
+     * 
+ * + * + * optional int64 if_source_metageneration_match = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifSourceMetagenerationMatch. + */ + @java.lang.Override + public long getIfSourceMetagenerationMatch() { + return ifSourceMetagenerationMatch_; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current metageneration matches the given value.
+     * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+     * conditions are mutually exclusive: it's an error for both of them to be set
+     * in the request.
+     * 
+ * + * + * optional int64 if_source_metageneration_match = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The ifSourceMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfSourceMetagenerationMatch(long value) { + + ifSourceMetagenerationMatch_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current metageneration matches the given value.
+     * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+     * conditions are mutually exclusive: it's an error for both of them to be set
+     * in the request.
+     * 
+ * + * + * optional int64 if_source_metageneration_match = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearIfSourceMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000020); + ifSourceMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifSourceMetagenerationNotMatch_; + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current metageneration does not match the given value.
+     * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+     * conditions are mutually exclusive: it's an error for both of them to be set
+     * in the request.
+     * 
+ * + * + * optional int64 if_source_metageneration_not_match = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifSourceMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceMetagenerationNotMatch() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current metageneration does not match the given value.
+     * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+     * conditions are mutually exclusive: it's an error for both of them to be set
+     * in the request.
+     * 
+ * + * + * optional int64 if_source_metageneration_not_match = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifSourceMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfSourceMetagenerationNotMatch() { + return ifSourceMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current metageneration does not match the given value.
+     * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+     * conditions are mutually exclusive: it's an error for both of them to be set
+     * in the request.
+     * 
+ * + * + * optional int64 if_source_metageneration_not_match = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The ifSourceMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfSourceMetagenerationNotMatch(long value) { + + ifSourceMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the source object's
+     * current metageneration does not match the given value.
+     * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+     * conditions are mutually exclusive: it's an error for both of them to be set
+     * in the request.
+     * 
+ * + * + * optional int64 if_source_metageneration_not_match = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearIfSourceMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000040); + ifSourceMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long ifGenerationMatch_; + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current generation matches the given value. Setting to 0 makes the
+     * operation succeed only if there are no live versions of the object.
+     * `if_generation_match` and `if_generation_not_match` conditions are mutually
+     * exclusive: it's an error for both of them to be set in the request.
+     * 
+ * + * optional int64 if_generation_match = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current generation matches the given value. Setting to 0 makes the
+     * operation succeed only if there are no live versions of the object.
+     * `if_generation_match` and `if_generation_not_match` conditions are mutually
+     * exclusive: it's an error for both of them to be set in the request.
+     * 
+ * + * optional int64 if_generation_match = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current generation matches the given value. Setting to 0 makes the
+     * operation succeed only if there are no live versions of the object.
+     * `if_generation_match` and `if_generation_not_match` conditions are mutually
+     * exclusive: it's an error for both of them to be set in the request.
+     * 
+ * + * optional int64 if_generation_match = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The ifGenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationMatch(long value) { + + ifGenerationMatch_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current generation matches the given value. Setting to 0 makes the
+     * operation succeed only if there are no live versions of the object.
+     * `if_generation_match` and `if_generation_not_match` conditions are mutually
+     * exclusive: it's an error for both of them to be set in the request.
+     * 
+ * + * optional int64 if_generation_match = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000080); + ifGenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifGenerationNotMatch_; + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current generation does not match the given value. If no live
+     * object exists, the precondition fails. Setting to 0 makes the operation
+     * succeed only if there is a live version of the object.
+     * `if_generation_match` and `if_generation_not_match` conditions are mutually
+     * exclusive: it's an error for both of them to be set in the request.
+     * 
+ * + * optional int64 if_generation_not_match = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current generation does not match the given value. If no live
+     * object exists, the precondition fails. Setting to 0 makes the operation
+     * succeed only if there is a live version of the object.
+     * `if_generation_match` and `if_generation_not_match` conditions are mutually
+     * exclusive: it's an error for both of them to be set in the request.
+     * 
+ * + * optional int64 if_generation_not_match = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current generation does not match the given value. If no live
+     * object exists, the precondition fails. Setting to 0 makes the operation
+     * succeed only if there is a live version of the object.
+     * `if_generation_match` and `if_generation_not_match` conditions are mutually
+     * exclusive: it's an error for both of them to be set in the request.
+     * 
+ * + * optional int64 if_generation_not_match = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The ifGenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationNotMatch(long value) { + + ifGenerationNotMatch_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current generation does not match the given value. If no live
+     * object exists, the precondition fails. Setting to 0 makes the operation
+     * succeed only if there is a live version of the object.
+     * `if_generation_match` and `if_generation_not_match` conditions are mutually
+     * exclusive: it's an error for both of them to be set in the request.
+     * 
+ * + * optional int64 if_generation_not_match = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000100); + ifGenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current metageneration matches the given value.
+     * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+     * mutually exclusive: it's an error for both of them to be set in the
+     * request.
+     * 
+ * + * optional int64 if_metageneration_match = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000200) != 0); + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current metageneration matches the given value.
+     * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+     * mutually exclusive: it's an error for both of them to be set in the
+     * request.
+     * 
+ * + * optional int64 if_metageneration_match = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current metageneration matches the given value.
+     * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+     * mutually exclusive: it's an error for both of them to be set in the
+     * request.
+     * 
+ * + * optional int64 if_metageneration_match = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current metageneration matches the given value.
+     * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+     * mutually exclusive: it's an error for both of them to be set in the
+     * request.
+     * 
+ * + * optional int64 if_metageneration_match = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000200); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current metageneration does not match the given value.
+     * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+     * mutually exclusive: it's an error for both of them to be set in the
+     * request.
+     * 
+ * + * + * optional int64 if_metageneration_not_match = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000400) != 0); + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current metageneration does not match the given value.
+     * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+     * mutually exclusive: it's an error for both of them to be set in the
+     * request.
+     * 
+ * + * + * optional int64 if_metageneration_not_match = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current metageneration does not match the given value.
+     * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+     * mutually exclusive: it's an error for both of them to be set in the
+     * request.
+     * 
+ * + * + * optional int64 if_metageneration_not_match = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Makes the operation conditional on whether the destination
+     * object's current metageneration does not match the given value.
+     * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+     * mutually exclusive: it's an error for both of them to be set in the
+     * request.
+     * 
+ * + * + * optional int64 if_metageneration_not_match = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000400); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.MoveObjectRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.MoveObjectRequest) + private static final com.google.storage.v2.MoveObjectRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.MoveObjectRequest(); + } + + public static com.google.storage.v2.MoveObjectRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MoveObjectRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.MoveObjectRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/MoveObjectRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/MoveObjectRequestOrBuilder.java new file mode 100644 index 000000000000..2c361a47ea6e --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/MoveObjectRequestOrBuilder.java @@ -0,0 +1,404 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface MoveObjectRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.MoveObjectRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + java.lang.String getBucket(); + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + com.google.protobuf.ByteString getBucketBytes(); + + /** + * + * + *
+   * Required. Name of the source object.
+   * 
+ * + * string source_object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sourceObject. + */ + java.lang.String getSourceObject(); + + /** + * + * + *
+   * Required. Name of the source object.
+   * 
+ * + * string source_object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sourceObject. + */ + com.google.protobuf.ByteString getSourceObjectBytes(); + + /** + * + * + *
+   * Required. Name of the destination object.
+   * 
+ * + * string destination_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The destinationObject. + */ + java.lang.String getDestinationObject(); + + /** + * + * + *
+   * Required. Name of the destination object.
+   * 
+ * + * string destination_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for destinationObject. + */ + com.google.protobuf.ByteString getDestinationObjectBytes(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current generation matches the given value. `if_source_generation_match`
+   * and `if_source_generation_not_match` conditions are mutually exclusive:
+   * it's an error for both of them to be set in the request.
+   * 
+ * + * optional int64 if_source_generation_match = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifSourceGenerationMatch field is set. + */ + boolean hasIfSourceGenerationMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current generation matches the given value. `if_source_generation_match`
+   * and `if_source_generation_not_match` conditions are mutually exclusive:
+   * it's an error for both of them to be set in the request.
+   * 
+ * + * optional int64 if_source_generation_match = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifSourceGenerationMatch. + */ + long getIfSourceGenerationMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current generation does not match the given value.
+   * `if_source_generation_match` and `if_source_generation_not_match`
+   * conditions are mutually exclusive: it's an error for both of them to be set
+   * in the request.
+   * 
+ * + * + * optional int64 if_source_generation_not_match = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifSourceGenerationNotMatch field is set. + */ + boolean hasIfSourceGenerationNotMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current generation does not match the given value.
+   * `if_source_generation_match` and `if_source_generation_not_match`
+   * conditions are mutually exclusive: it's an error for both of them to be set
+   * in the request.
+   * 
+ * + * + * optional int64 if_source_generation_not_match = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifSourceGenerationNotMatch. + */ + long getIfSourceGenerationNotMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current metageneration matches the given value.
+   * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+   * conditions are mutually exclusive: it's an error for both of them to be set
+   * in the request.
+   * 
+ * + * + * optional int64 if_source_metageneration_match = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifSourceMetagenerationMatch field is set. + */ + boolean hasIfSourceMetagenerationMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current metageneration matches the given value.
+   * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+   * conditions are mutually exclusive: it's an error for both of them to be set
+   * in the request.
+   * 
+ * + * + * optional int64 if_source_metageneration_match = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifSourceMetagenerationMatch. + */ + long getIfSourceMetagenerationMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current metageneration does not match the given value.
+   * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+   * conditions are mutually exclusive: it's an error for both of them to be set
+   * in the request.
+   * 
+ * + * + * optional int64 if_source_metageneration_not_match = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifSourceMetagenerationNotMatch field is set. + */ + boolean hasIfSourceMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the source object's
+   * current metageneration does not match the given value.
+   * `if_source_metageneration_match` and `if_source_metageneration_not_match`
+   * conditions are mutually exclusive: it's an error for both of them to be set
+   * in the request.
+   * 
+ * + * + * optional int64 if_source_metageneration_not_match = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifSourceMetagenerationNotMatch. + */ + long getIfSourceMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current generation matches the given value. Setting to 0 makes the
+   * operation succeed only if there are no live versions of the object.
+   * `if_generation_match` and `if_generation_not_match` conditions are mutually
+   * exclusive: it's an error for both of them to be set in the request.
+   * 
+ * + * optional int64 if_generation_match = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the ifGenerationMatch field is set. + */ + boolean hasIfGenerationMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current generation matches the given value. Setting to 0 makes the
+   * operation succeed only if there are no live versions of the object.
+   * `if_generation_match` and `if_generation_not_match` conditions are mutually
+   * exclusive: it's an error for both of them to be set in the request.
+   * 
+ * + * optional int64 if_generation_match = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The ifGenerationMatch. + */ + long getIfGenerationMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current generation does not match the given value. If no live
+   * object exists, the precondition fails. Setting to 0 makes the operation
+   * succeed only if there is a live version of the object.
+   * `if_generation_match` and `if_generation_not_match` conditions are mutually
+   * exclusive: it's an error for both of them to be set in the request.
+   * 
+ * + * optional int64 if_generation_not_match = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifGenerationNotMatch field is set. + */ + boolean hasIfGenerationNotMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current generation does not match the given value. If no live
+   * object exists, the precondition fails. Setting to 0 makes the operation
+   * succeed only if there is a live version of the object.
+   * `if_generation_match` and `if_generation_not_match` conditions are mutually
+   * exclusive: it's an error for both of them to be set in the request.
+   * 
+ * + * optional int64 if_generation_not_match = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifGenerationNotMatch. + */ + long getIfGenerationNotMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current metageneration matches the given value.
+   * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+   * mutually exclusive: it's an error for both of them to be set in the
+   * request.
+   * 
+ * + * optional int64 if_metageneration_match = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current metageneration matches the given value.
+   * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+   * mutually exclusive: it's an error for both of them to be set in the
+   * request.
+   * 
+ * + * optional int64 if_metageneration_match = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current metageneration does not match the given value.
+   * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+   * mutually exclusive: it's an error for both of them to be set in the
+   * request.
+   * 
+ * + * + * optional int64 if_metageneration_not_match = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. Makes the operation conditional on whether the destination
+   * object's current metageneration does not match the given value.
+   * `if_metageneration_match` and `if_metageneration_not_match` conditions are
+   * mutually exclusive: it's an error for both of them to be set in the
+   * request.
+   * 
+ * + * + * optional int64 if_metageneration_not_match = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/Object.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/Object.java new file mode 100644 index 000000000000..88f59388cb76 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/Object.java @@ -0,0 +1,10096 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * An object.
+ * 
+ * + * Protobuf type {@code google.storage.v2.Object} + */ +@com.google.protobuf.Generated +public final class Object extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Object) + ObjectOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Object"); + } + + // Use Object.newBuilder() to construct. + private Object(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Object() { + name_ = ""; + bucket_ = ""; + etag_ = ""; + restoreToken_ = ""; + storageClass_ = ""; + contentEncoding_ = ""; + contentDisposition_ = ""; + cacheControl_ = ""; + acl_ = java.util.Collections.emptyList(); + contentLanguage_ = ""; + contentType_ = ""; + kmsKey_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto.internal_static_google_storage_v2_Object_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 22: + return internalGetMetadata(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Object_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Object.class, com.google.storage.v2.Object.Builder.class); + } + + public interface RetentionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Object.Retention) + 
com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. The mode of the Retention.
+     * 
+ * + * + * .google.storage.v2.Object.Retention.Mode mode = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for mode. + */ + int getModeValue(); + + /** + * + * + *
+     * Optional. The mode of the Retention.
+     * 
+ * + * + * .google.storage.v2.Object.Retention.Mode mode = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mode. + */ + com.google.storage.v2.Object.Retention.Mode getMode(); + + /** + * + * + *
+     * Optional. The timestamp that the object needs to be retained until.
+     * Value cannot be set in the past or more than 100 years in the future.
+     * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retainUntilTime field is set. + */ + boolean hasRetainUntilTime(); + + /** + * + * + *
+     * Optional. The timestamp that the object needs to be retained until.
+     * Value cannot be set in the past or more than 100 years in the future.
+     * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retainUntilTime. + */ + com.google.protobuf.Timestamp getRetainUntilTime(); + + /** + * + * + *
+     * Optional. The timestamp that the object needs to be retained until.
+     * Value cannot be set in the past or more than 100 years in the future.
+     * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.TimestampOrBuilder getRetainUntilTimeOrBuilder(); + } + + /** + * + * + *
+   * Specifies retention parameters of the object. Objects under retention
+   * cannot be deleted or overwritten until their retention expires.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Object.Retention} + */ + public static final class Retention extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Object.Retention) + RetentionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Retention"); + } + + // Use Retention.newBuilder() to construct. + private Retention(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Retention() { + mode_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Object_Retention_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Object_Retention_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Object.Retention.class, + com.google.storage.v2.Object.Retention.Builder.class); + } + + /** + * + * + *
+     * Retention mode values.
+     * 
+ * + * Protobuf enum {@code google.storage.v2.Object.Retention.Mode} + */ + public enum Mode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+       * No specified mode. Object is not under retention.
+       * 
+ * + * MODE_UNSPECIFIED = 0; + */ + MODE_UNSPECIFIED(0), + /** + * + * + *
+       * Retention period might be decreased or increased.
+       * The Retention configuration might be removed.
+       * The mode might be changed to locked.
+       * 
+ * + * UNLOCKED = 1; + */ + UNLOCKED(1), + /** + * + * + *
+       * Retention period might be increased.
+       * The Retention configuration cannot be removed.
+       * The mode cannot be changed.
+       * 
+ * + * LOCKED = 2; + */ + LOCKED(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Mode"); + } + + /** + * + * + *
+       * No specified mode. Object is not under retention.
+       * 
+ * + * MODE_UNSPECIFIED = 0; + */ + public static final int MODE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+       * Retention period might be decreased or increased.
+       * The Retention configuration might be removed.
+       * The mode might be changed to locked.
+       * 
+ * + * UNLOCKED = 1; + */ + public static final int UNLOCKED_VALUE = 1; + + /** + * + * + *
+       * Retention period might be increased.
+       * The Retention configuration cannot be removed.
+       * The mode cannot be changed.
+       * 
+ * + * LOCKED = 2; + */ + public static final int LOCKED_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Mode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Mode forNumber(int value) { + switch (value) { + case 0: + return MODE_UNSPECIFIED; + case 1: + return UNLOCKED; + case 2: + return LOCKED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Mode findValueByNumber(int number) { + return Mode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.storage.v2.Object.Retention.getDescriptor().getEnumTypes().get(0); + } + + private static final Mode[] VALUES = values(); + + public static Mode valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != 
getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Mode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.storage.v2.Object.Retention.Mode) + } + + private int bitField0_; + public static final int MODE_FIELD_NUMBER = 1; + private int mode_ = 0; + + /** + * + * + *
+     * Optional. The mode of the Retention.
+     * 
+ * + * + * .google.storage.v2.Object.Retention.Mode mode = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for mode. + */ + @java.lang.Override + public int getModeValue() { + return mode_; + } + + /** + * + * + *
+     * Optional. The mode of the Retention.
+     * 
+ * + * + * .google.storage.v2.Object.Retention.Mode mode = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mode. + */ + @java.lang.Override + public com.google.storage.v2.Object.Retention.Mode getMode() { + com.google.storage.v2.Object.Retention.Mode result = + com.google.storage.v2.Object.Retention.Mode.forNumber(mode_); + return result == null ? com.google.storage.v2.Object.Retention.Mode.UNRECOGNIZED : result; + } + + public static final int RETAIN_UNTIL_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp retainUntilTime_; + + /** + * + * + *
+     * Optional. The timestamp that the object needs to be retained until.
+     * Value cannot be set in the past or more than 100 years in the future.
+     * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retainUntilTime field is set. + */ + @java.lang.Override + public boolean hasRetainUntilTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Optional. The timestamp that the object needs to be retained until.
+     * Value cannot be set in the past or more than 100 years in the future.
+     * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retainUntilTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getRetainUntilTime() { + return retainUntilTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : retainUntilTime_; + } + + /** + * + * + *
+     * Optional. The timestamp that the object needs to be retained until.
+     * Value cannot be set in the past or more than 100 years in the future.
+     * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getRetainUntilTimeOrBuilder() { + return retainUntilTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : retainUntilTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (mode_ != com.google.storage.v2.Object.Retention.Mode.MODE_UNSPECIFIED.getNumber()) { + output.writeEnum(1, mode_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getRetainUntilTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (mode_ != com.google.storage.v2.Object.Retention.Mode.MODE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, mode_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getRetainUntilTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Object.Retention)) { + return super.equals(obj); + } + com.google.storage.v2.Object.Retention other = (com.google.storage.v2.Object.Retention) obj; + + if (mode_ != other.mode_) return false; + if (hasRetainUntilTime() != other.hasRetainUntilTime()) return false; + if (hasRetainUntilTime()) { + if 
(!getRetainUntilTime().equals(other.getRetainUntilTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + MODE_FIELD_NUMBER; + hash = (53 * hash) + mode_; + if (hasRetainUntilTime()) { + hash = (37 * hash) + RETAIN_UNTIL_TIME_FIELD_NUMBER; + hash = (53 * hash) + getRetainUntilTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Object.Retention parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Object.Retention parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Object.Retention parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Object.Retention parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Object.Retention parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Object.Retention parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Object.Retention parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Object.Retention parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Object.Retention parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Object.Retention parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Object.Retention parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Object.Retention parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Object.Retention prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Specifies retention parameters of the object. Objects under retention
+     * cannot be deleted or overwritten until their retention expires.
+     * 
+ * + * Protobuf type {@code google.storage.v2.Object.Retention} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Object.Retention) + com.google.storage.v2.Object.RetentionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Object_Retention_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Object_Retention_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Object.Retention.class, + com.google.storage.v2.Object.Retention.Builder.class); + } + + // Construct using com.google.storage.v2.Object.Retention.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetRetainUntilTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + mode_ = 0; + retainUntilTime_ = null; + if (retainUntilTimeBuilder_ != null) { + retainUntilTimeBuilder_.dispose(); + retainUntilTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Object_Retention_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Object.Retention getDefaultInstanceForType() { + return com.google.storage.v2.Object.Retention.getDefaultInstance(); + } 
+ + @java.lang.Override + public com.google.storage.v2.Object.Retention build() { + com.google.storage.v2.Object.Retention result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Object.Retention buildPartial() { + com.google.storage.v2.Object.Retention result = + new com.google.storage.v2.Object.Retention(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.Object.Retention result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.mode_ = mode_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.retainUntilTime_ = + retainUntilTimeBuilder_ == null ? retainUntilTime_ : retainUntilTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Object.Retention) { + return mergeFrom((com.google.storage.v2.Object.Retention) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Object.Retention other) { + if (other == com.google.storage.v2.Object.Retention.getDefaultInstance()) return this; + if (other.mode_ != 0) { + setModeValue(other.getModeValue()); + } + if (other.hasRetainUntilTime()) { + mergeRetainUntilTime(other.getRetainUntilTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + 
throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + mode_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + input.readMessage( + internalGetRetainUntilTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int mode_ = 0; + + /** + * + * + *
+       * Optional. The mode of the Retention.
+       * 
+ * + * + * .google.storage.v2.Object.Retention.Mode mode = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for mode. + */ + @java.lang.Override + public int getModeValue() { + return mode_; + } + + /** + * + * + *
+       * Optional. The mode of the Retention.
+       * 
+ * + * + * .google.storage.v2.Object.Retention.Mode mode = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for mode to set. + * @return This builder for chaining. + */ + public Builder setModeValue(int value) { + mode_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The mode of the Retention.
+       * 
+ * + * + * .google.storage.v2.Object.Retention.Mode mode = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mode. + */ + @java.lang.Override + public com.google.storage.v2.Object.Retention.Mode getMode() { + com.google.storage.v2.Object.Retention.Mode result = + com.google.storage.v2.Object.Retention.Mode.forNumber(mode_); + return result == null ? com.google.storage.v2.Object.Retention.Mode.UNRECOGNIZED : result; + } + + /** + * + * + *
+       * Optional. The mode of the Retention.
+       * 
+ * + * + * .google.storage.v2.Object.Retention.Mode mode = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The mode to set. + * @return This builder for chaining. + */ + public Builder setMode(com.google.storage.v2.Object.Retention.Mode value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + mode_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The mode of the Retention.
+       * 
+ * + * + * .google.storage.v2.Object.Retention.Mode mode = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearMode() { + bitField0_ = (bitField0_ & ~0x00000001); + mode_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp retainUntilTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + retainUntilTimeBuilder_; + + /** + * + * + *
+       * Optional. The timestamp that the object needs to be retained until.
+       * Value cannot be set in the past or more than 100 years in the future.
+       * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retainUntilTime field is set. + */ + public boolean hasRetainUntilTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Optional. The timestamp that the object needs to be retained until.
+       * Value cannot be set in the past or more than 100 years in the future.
+       * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retainUntilTime. + */ + public com.google.protobuf.Timestamp getRetainUntilTime() { + if (retainUntilTimeBuilder_ == null) { + return retainUntilTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : retainUntilTime_; + } else { + return retainUntilTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Optional. The timestamp that the object needs to be retained until.
+       * Value cannot be set in the past or more than 100 years in the future.
+       * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetainUntilTime(com.google.protobuf.Timestamp value) { + if (retainUntilTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + retainUntilTime_ = value; + } else { + retainUntilTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The timestamp that the object needs to be retained until.
+       * Value cannot be set in the past or more than 100 years in the future.
+       * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetainUntilTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (retainUntilTimeBuilder_ == null) { + retainUntilTime_ = builderForValue.build(); + } else { + retainUntilTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The timestamp that the object needs to be retained until.
+       * Value cannot be set in the past or more than 100 years in the future.
+       * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeRetainUntilTime(com.google.protobuf.Timestamp value) { + if (retainUntilTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && retainUntilTime_ != null + && retainUntilTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getRetainUntilTimeBuilder().mergeFrom(value); + } else { + retainUntilTime_ = value; + } + } else { + retainUntilTimeBuilder_.mergeFrom(value); + } + if (retainUntilTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Optional. The timestamp that the object needs to be retained until.
+       * Value cannot be set in the past or more than 100 years in the future.
+       * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRetainUntilTime() { + bitField0_ = (bitField0_ & ~0x00000002); + retainUntilTime_ = null; + if (retainUntilTimeBuilder_ != null) { + retainUntilTimeBuilder_.dispose(); + retainUntilTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The timestamp that the object needs to be retained until.
+       * Value cannot be set in the past or more than 100 years in the future.
+       * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Timestamp.Builder getRetainUntilTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetRetainUntilTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Optional. The timestamp that the object needs to be retained until.
+       * Value cannot be set in the past or more than 100 years in the future.
+       * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.TimestampOrBuilder getRetainUntilTimeOrBuilder() { + if (retainUntilTimeBuilder_ != null) { + return retainUntilTimeBuilder_.getMessageOrBuilder(); + } else { + return retainUntilTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : retainUntilTime_; + } + } + + /** + * + * + *
+       * Optional. The timestamp that the object needs to be retained until.
+       * Value cannot be set in the past or more than 100 years in the future.
+       * 
+ * + * + * .google.protobuf.Timestamp retain_until_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetRetainUntilTimeFieldBuilder() { + if (retainUntilTimeBuilder_ == null) { + retainUntilTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getRetainUntilTime(), getParentForChildren(), isClean()); + retainUntilTime_ = null; + } + return retainUntilTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Object.Retention) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Object.Retention) + private static final com.google.storage.v2.Object.Retention DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Object.Retention(); + } + + public static com.google.storage.v2.Object.Retention getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Retention parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Object.Retention getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Immutable. The name of this object. Nearly any sequence of unicode
+   * characters is valid. See
+   * [Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+   * Example: `test.txt`
+   * The `name` field by itself does not uniquely identify a Cloud Storage
+   * object. A Cloud Storage object is uniquely identified by the tuple of
+   * (bucket, object, generation).
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Immutable. The name of this object. Nearly any sequence of unicode
+   * characters is valid. See
+   * [Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+   * Example: `test.txt`
+   * The `name` field by itself does not uniquely identify a Cloud Storage
+   * object. A Cloud Storage object is uniquely identified by the tuple of
+   * (bucket, object, generation).
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BUCKET_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object bucket_ = ""; + + /** + * + * + *
+   * Immutable. The name of the bucket containing this object.
+   * 
+ * + * + * string bucket = 2 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + @java.lang.Override + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } + } + + /** + * + * + *
+   * Immutable. The name of the bucket containing this object.
+   * 
+ * + * + * string bucket = 2 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ETAG_FIELD_NUMBER = 27; + + @SuppressWarnings("serial") + private volatile java.lang.Object etag_ = ""; + + /** + * + * + *
+   * Optional. The `etag` of an object.
+   * If included in the metadata of an update or delete request message, the
+   * operation is only performed if the etag matches that of the live
+   * object.
+   * 
+ * + * string etag = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The etag. + */ + @java.lang.Override + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The `etag` of an object.
+   * If included in the metadata of an update or delete request message, the
+   * operation is only performed if the etag matches that of the live
+   * object.
+   * 
+ * + * string etag = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for etag. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int GENERATION_FIELD_NUMBER = 3; + private long generation_ = 0L; + + /** + * + * + *
+   * Immutable. The content generation of this object. Used for object
+   * versioning.
+   * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + public static final int RESTORE_TOKEN_FIELD_NUMBER = 35; + + @SuppressWarnings("serial") + private volatile java.lang.Object restoreToken_ = ""; + + /** + * + * + *
+   * Output only. Restore token used to differentiate deleted objects with the
+   * same name and generation. This field is output only, and only set for
+   * deleted objects in HNS buckets.
+   * 
+ * + * optional string restore_token = 35 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return Whether the restoreToken field is set. + */ + @java.lang.Override + public boolean hasRestoreToken() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Output only. Restore token used to differentiate deleted objects with the
+   * same name and generation. This field is output only, and only set for
+   * deleted objects in HNS buckets.
+   * 
+ * + * optional string restore_token = 35 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The restoreToken. + */ + @java.lang.Override + public java.lang.String getRestoreToken() { + java.lang.Object ref = restoreToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + restoreToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. Restore token used to differentiate deleted objects with the
+   * same name and generation. This field is output only, and only set for
+   * deleted objects in HNS buckets.
+   * 
+ * + * optional string restore_token = 35 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for restoreToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRestoreTokenBytes() { + java.lang.Object ref = restoreToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + restoreToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int METAGENERATION_FIELD_NUMBER = 4; + private long metageneration_ = 0L; + + /** + * + * + *
+   * Output only. The version of the metadata for this generation of this
+   * object. Used for preconditions and for detecting changes in metadata. A
+   * metageneration number is only meaningful in the context of a particular
+   * generation of a particular object.
+   * 
+ * + * int64 metageneration = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The metageneration. + */ + @java.lang.Override + public long getMetageneration() { + return metageneration_; + } + + public static final int STORAGE_CLASS_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object storageClass_ = ""; + + /** + * + * + *
+   * Optional. Storage class of the object.
+   * 
+ * + * string storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The storageClass. + */ + @java.lang.Override + public java.lang.String getStorageClass() { + java.lang.Object ref = storageClass_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + storageClass_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Storage class of the object.
+   * 
+ * + * string storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for storageClass. + */ + @java.lang.Override + public com.google.protobuf.ByteString getStorageClassBytes() { + java.lang.Object ref = storageClass_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + storageClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SIZE_FIELD_NUMBER = 6; + private long size_ = 0L; + + /** + * + * + *
+   * Output only. Content-Length of the object data in bytes, matching
+   * [RFC 7230 §3.3.2](https://tools.ietf.org/html/rfc7230#section-3.3.2).
+   * 
+ * + * int64 size = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The size. + */ + @java.lang.Override + public long getSize() { + return size_; + } + + public static final int CONTENT_ENCODING_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private volatile java.lang.Object contentEncoding_ = ""; + + /** + * + * + *
+   * Optional. Content-Encoding of the object data, matching
+   * [RFC 7231 §3.1.2.2](https://tools.ietf.org/html/rfc7231#section-3.1.2.2)
+   * 
+ * + * string content_encoding = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The contentEncoding. + */ + @java.lang.Override + public java.lang.String getContentEncoding() { + java.lang.Object ref = contentEncoding_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + contentEncoding_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Content-Encoding of the object data, matching
+   * [RFC 7231 §3.1.2.2](https://tools.ietf.org/html/rfc7231#section-3.1.2.2)
+   * 
+ * + * string content_encoding = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for contentEncoding. + */ + @java.lang.Override + public com.google.protobuf.ByteString getContentEncodingBytes() { + java.lang.Object ref = contentEncoding_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + contentEncoding_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CONTENT_DISPOSITION_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private volatile java.lang.Object contentDisposition_ = ""; + + /** + * + * + *
+   * Optional. Content-Disposition of the object data, matching
+   * [RFC 6266](https://tools.ietf.org/html/rfc6266).
+   * 
+ * + * string content_disposition = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The contentDisposition. + */ + @java.lang.Override + public java.lang.String getContentDisposition() { + java.lang.Object ref = contentDisposition_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + contentDisposition_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Content-Disposition of the object data, matching
+   * [RFC 6266](https://tools.ietf.org/html/rfc6266).
+   * 
+ * + * string content_disposition = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for contentDisposition. + */ + @java.lang.Override + public com.google.protobuf.ByteString getContentDispositionBytes() { + java.lang.Object ref = contentDisposition_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + contentDisposition_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CACHE_CONTROL_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private volatile java.lang.Object cacheControl_ = ""; + + /** + * + * + *
+   * Optional. Cache-Control directive for the object data, matching
+   * [RFC 7234 §5.2](https://tools.ietf.org/html/rfc7234#section-5.2).
+   * If omitted, and the object is accessible to all anonymous users, the
+   * default is `public, max-age=3600`.
+   * 
+ * + * string cache_control = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The cacheControl. + */ + @java.lang.Override + public java.lang.String getCacheControl() { + java.lang.Object ref = cacheControl_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + cacheControl_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Cache-Control directive for the object data, matching
+   * [RFC 7234 §5.2](https://tools.ietf.org/html/rfc7234#section-5.2).
+   * If omitted, and the object is accessible to all anonymous users, the
+   * default is `public, max-age=3600`.
+   * 
+ * + * string cache_control = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for cacheControl. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCacheControlBytes() { + java.lang.Object ref = cacheControl_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + cacheControl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ACL_FIELD_NUMBER = 10; + + @SuppressWarnings("serial") + private java.util.List acl_; + + /** + * + * + *
+   * Optional. Access controls on the object.
+   * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List getAclList() { + return acl_; + } + + /** + * + * + *
+   * Optional. Access controls on the object.
+   * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getAclOrBuilderList() { + return acl_; + } + + /** + * + * + *
+   * Optional. Access controls on the object.
+   * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getAclCount() { + return acl_.size(); + } + + /** + * + * + *
+   * Optional. Access controls on the object.
+   * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectAccessControl getAcl(int index) { + return acl_.get(index); + } + + /** + * + * + *
+   * Optional. Access controls on the object.
+   * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectAccessControlOrBuilder getAclOrBuilder(int index) { + return acl_.get(index); + } + + public static final int CONTENT_LANGUAGE_FIELD_NUMBER = 11; + + @SuppressWarnings("serial") + private volatile java.lang.Object contentLanguage_ = ""; + + /** + * + * + *
+   * Optional. Content-Language of the object data, matching
+   * [RFC 7231 §3.1.3.2](https://tools.ietf.org/html/rfc7231#section-3.1.3.2).
+   * 
+ * + * string content_language = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The contentLanguage. + */ + @java.lang.Override + public java.lang.String getContentLanguage() { + java.lang.Object ref = contentLanguage_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + contentLanguage_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Content-Language of the object data, matching
+   * [RFC 7231 §3.1.3.2](https://tools.ietf.org/html/rfc7231#section-3.1.3.2).
+   * 
+ * + * string content_language = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for contentLanguage. + */ + @java.lang.Override + public com.google.protobuf.ByteString getContentLanguageBytes() { + java.lang.Object ref = contentLanguage_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + contentLanguage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DELETE_TIME_FIELD_NUMBER = 12; + private com.google.protobuf.Timestamp deleteTime_; + + /** + * + * + *
+   * Output only. If this object is noncurrent, this is the time when the object
+   * became noncurrent.
+   * 
+ * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the deleteTime field is set. + */ + @java.lang.Override + public boolean hasDeleteTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Output only. If this object is noncurrent, this is the time when the object
+   * became noncurrent.
+   * 
+ * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The deleteTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getDeleteTime() { + return deleteTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : deleteTime_; + } + + /** + * + * + *
+   * Output only. If this object is noncurrent, this is the time when the object
+   * became noncurrent.
+   * 
+ * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getDeleteTimeOrBuilder() { + return deleteTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : deleteTime_; + } + + public static final int FINALIZE_TIME_FIELD_NUMBER = 36; + private com.google.protobuf.Timestamp finalizeTime_; + + /** + * + * + *
+   * Output only. The time when the object was finalized.
+   * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the finalizeTime field is set. + */ + @java.lang.Override + public boolean hasFinalizeTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Output only. The time when the object was finalized.
+   * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The finalizeTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getFinalizeTime() { + return finalizeTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : finalizeTime_; + } + + /** + * + * + *
+   * Output only. The time when the object was finalized.
+   * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getFinalizeTimeOrBuilder() { + return finalizeTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : finalizeTime_; + } + + public static final int CONTENT_TYPE_FIELD_NUMBER = 13; + + @SuppressWarnings("serial") + private volatile java.lang.Object contentType_ = ""; + + /** + * + * + *
+   * Optional. Content-Type of the object data, matching
+   * [RFC 7231 §3.1.1.5](https://tools.ietf.org/html/rfc7231#section-3.1.1.5).
+   * If an object is stored without a Content-Type, it is served as
+   * `application/octet-stream`.
+   * 
+ * + * string content_type = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The contentType. + */ + @java.lang.Override + public java.lang.String getContentType() { + java.lang.Object ref = contentType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + contentType_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Content-Type of the object data, matching
+   * [RFC 7231 §3.1.1.5](https://tools.ietf.org/html/rfc7231#section-3.1.1.5).
+   * If an object is stored without a Content-Type, it is served as
+   * `application/octet-stream`.
+   * 
+ * + * string content_type = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for contentType. + */ + @java.lang.Override + public com.google.protobuf.ByteString getContentTypeBytes() { + java.lang.Object ref = contentType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + contentType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CREATE_TIME_FIELD_NUMBER = 14; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
+   * Output only. The creation time of the object.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Output only. The creation time of the object.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
+   * Output only. The creation time of the object.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int COMPONENT_COUNT_FIELD_NUMBER = 15; + private int componentCount_ = 0; + + /** + * + * + *
+   * Output only. Number of underlying components that make up this object.
+   * Components are accumulated by compose operations.
+   * 
+ * + * int32 component_count = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The componentCount. + */ + @java.lang.Override + public int getComponentCount() { + return componentCount_; + } + + public static final int CHECKSUMS_FIELD_NUMBER = 16; + private com.google.storage.v2.ObjectChecksums checksums_; + + /** + * + * + *
+   * Output only. Hashes for the data part of this object. This field is used
+   * for output only and is silently ignored if provided in requests. The
+   * checksums of the complete object regardless of data range. If the object is
+   * downloaded in full, the client should compute one of these checksums over
+   * the downloaded object and compare it against the value provided here.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the checksums field is set. + */ + @java.lang.Override + public boolean hasChecksums() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * Output only. Hashes for the data part of this object. This field is used
+   * for output only and is silently ignored if provided in requests. The
+   * checksums of the complete object regardless of data range. If the object is
+   * downloaded in full, the client should compute one of these checksums over
+   * the downloaded object and compare it against the value provided here.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The checksums. + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksums getChecksums() { + return checksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : checksums_; + } + + /** + * + * + *
+   * Output only. Hashes for the data part of this object. This field is used
+   * for output only and is silently ignored if provided in requests. The
+   * checksums of the complete object regardless of data range. If the object is
+   * downloaded in full, the client should compute one of these checksums over
+   * the downloaded object and compare it against the value provided here.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksumsOrBuilder getChecksumsOrBuilder() { + return checksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : checksums_; + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 17; + private com.google.protobuf.Timestamp updateTime_; + + /** + * + * + *
+   * Output only. The modification time of the object metadata.
+   * Set initially to object creation time and then updated whenever any
+   * metadata of the object changes. This includes changes made by a requester,
+   * such as modifying custom metadata, as well as changes made by Cloud Storage
+   * on behalf of a requester, such as changing the storage class based on an
+   * Object Lifecycle Configuration.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + @java.lang.Override + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+   * Output only. The modification time of the object metadata.
+   * Set initially to object creation time and then updated whenever any
+   * metadata of the object changes. This includes changes made by a requester,
+   * such as modifying custom metadata, as well as changes made by Cloud Storage
+   * on behalf of a requester, such as changing the storage class based on an
+   * Object Lifecycle Configuration.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + /** + * + * + *
+   * Output only. The modification time of the object metadata.
+   * Set initially to object creation time and then updated whenever any
+   * metadata of the object changes. This includes changes made by a requester,
+   * such as modifying custom metadata, as well as changes made by Cloud Storage
+   * on behalf of a requester, such as changing the storage class based on an
+   * Object Lifecycle Configuration.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + public static final int KMS_KEY_FIELD_NUMBER = 18; + + @SuppressWarnings("serial") + private volatile java.lang.Object kmsKey_ = ""; + + /** + * + * + *
+   * Optional. Cloud KMS Key used to encrypt this object, if the object is
+   * encrypted by such a key.
+   * 
+ * + * + * string kms_key = 18 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKey. + */ + @java.lang.Override + public java.lang.String getKmsKey() { + java.lang.Object ref = kmsKey_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKey_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Cloud KMS Key used to encrypt this object, if the object is
+   * encrypted by such a key.
+   * 
+ * + * + * string kms_key = 18 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKey. + */ + @java.lang.Override + public com.google.protobuf.ByteString getKmsKeyBytes() { + java.lang.Object ref = kmsKey_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKey_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int UPDATE_STORAGE_CLASS_TIME_FIELD_NUMBER = 19; + private com.google.protobuf.Timestamp updateStorageClassTime_; + + /** + * + * + *
+   * Output only. The time at which the object's storage class was last changed.
+   * When the object is initially created, it is set to `time_created`.
+   * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateStorageClassTime field is set. + */ + @java.lang.Override + public boolean hasUpdateStorageClassTime() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+   * Output only. The time at which the object's storage class was last changed.
+   * When the object is initially created, it is set to `time_created`.
+   * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateStorageClassTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateStorageClassTime() { + return updateStorageClassTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateStorageClassTime_; + } + + /** + * + * + *
+   * Output only. The time at which the object's storage class was last changed.
+   * When the object is initially created, it is set to `time_created`.
+   * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpdateStorageClassTimeOrBuilder() { + return updateStorageClassTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateStorageClassTime_; + } + + public static final int TEMPORARY_HOLD_FIELD_NUMBER = 20; + private boolean temporaryHold_ = false; + + /** + * + * + *
+   * Optional. Whether an object is under temporary hold. While this flag is set
+   * to true, the object is protected against deletion and overwrites.  A common
+   * use case of this flag is regulatory investigations where objects need to be
+   * retained while the investigation is ongoing. Note that unlike event-based
+   * hold, temporary hold does not impact retention expiration time of an
+   * object.
+   * 
+ * + * bool temporary_hold = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The temporaryHold. + */ + @java.lang.Override + public boolean getTemporaryHold() { + return temporaryHold_; + } + + public static final int RETENTION_EXPIRE_TIME_FIELD_NUMBER = 21; + private com.google.protobuf.Timestamp retentionExpireTime_; + + /** + * + * + *
+   * Optional. A server-determined value that specifies the earliest time that
+   * the object's retention period expires. Note 1: This field is not provided
+   * for objects with an active event-based hold, since retention expiration is
+   * unknown until the hold is removed. Note 2: This value can be provided even
+   * when temporary hold is set (so that the user can reason about policy
+   * without having to first unset the temporary hold).
+   * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionExpireTime field is set. + */ + @java.lang.Override + public boolean hasRetentionExpireTime() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+   * Optional. A server-determined value that specifies the earliest time that
+   * the object's retention period expires. Note 1: This field is not provided
+   * for objects with an active event-based hold, since retention expiration is
+   * unknown until the hold is removed. Note 2: This value can be provided even
+   * when temporary hold is set (so that the user can reason about policy
+   * without having to first unset the temporary hold).
+   * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionExpireTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getRetentionExpireTime() { + return retentionExpireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : retentionExpireTime_; + } + + /** + * + * + *
+   * Optional. A server-determined value that specifies the earliest time that
+   * the object's retention period expires. Note 1: This field is not provided
+   * for objects with an active event-based hold, since retention expiration is
+   * unknown until the hold is removed. Note 2: This value can be provided even
+   * when temporary hold is set (so that the user can reason about policy
+   * without having to first unset the temporary hold).
+   * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getRetentionExpireTimeOrBuilder() { + return retentionExpireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : retentionExpireTime_; + } + + public static final int METADATA_FIELD_NUMBER = 22; + + private static final class MetadataDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Object_MetadataEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField metadata_; + + private com.google.protobuf.MapField internalGetMetadata() { + if (metadata_ == null) { + return com.google.protobuf.MapField.emptyMapField(MetadataDefaultEntryHolder.defaultEntry); + } + return metadata_; + } + + public int getMetadataCount() { + return internalGetMetadata().getMap().size(); + } + + /** + * + * + *
+   * Optional. User-provided metadata, in key/value pairs.
+   * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public boolean containsMetadata(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetMetadata().getMap().containsKey(key); + } + + /** Use {@link #getMetadataMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getMetadata() { + return getMetadataMap(); + } + + /** + * + * + *
+   * Optional. User-provided metadata, in key/value pairs.
+   * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.util.Map getMetadataMap() { + return internalGetMetadata().getMap(); + } + + /** + * + * + *
+   * Optional. User-provided metadata, in key/value pairs.
+   * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public /* nullable */ java.lang.String getMetadataOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetMetadata().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+   * Optional. User-provided metadata, in key/value pairs.
+   * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.lang.String getMetadataOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetMetadata().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int CONTEXTS_FIELD_NUMBER = 38; + private com.google.storage.v2.ObjectContexts contexts_; + + /** + * + * + *
+   * Optional. User-defined or system-defined object contexts. Each object
+   * context is a key-payload pair, where the key provides the identification
+   * and the payload holds the associated value and additional metadata.
+   * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the contexts field is set. + */ + @java.lang.Override + public boolean hasContexts() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
+   * Optional. User-defined or system-defined object contexts. Each object
+   * context is a key-payload pair, where the key provides the identification
+   * and the payload holds the associated value and additional metadata.
+   * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The contexts. + */ + @java.lang.Override + public com.google.storage.v2.ObjectContexts getContexts() { + return contexts_ == null + ? com.google.storage.v2.ObjectContexts.getDefaultInstance() + : contexts_; + } + + /** + * + * + *
+   * Optional. User-defined or system-defined object contexts. Each object
+   * context is a key-payload pair, where the key provides the identification
+   * and the payload holds the associated value and additional metadata.
+   * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectContextsOrBuilder getContextsOrBuilder() { + return contexts_ == null + ? com.google.storage.v2.ObjectContexts.getDefaultInstance() + : contexts_; + } + + public static final int EVENT_BASED_HOLD_FIELD_NUMBER = 23; + private boolean eventBasedHold_ = false; + + /** + * + * + *
+   * Whether an object is under event-based hold.
+   * An event-based hold is a way to force the retention of an object until
+   * after some event occurs. Once the hold is released by explicitly setting
+   * this field to `false`, the object becomes subject to any bucket-level
+   * retention policy, except that the retention duration is calculated
+   * from the time the event based hold was lifted, rather than the time the
+   * object was created.
+   *
+   * In a `WriteObject` request, not setting this field implies that the value
+   * should be taken from the parent bucket's `default_event_based_hold` field.
+   * In a response, this field is always set to `true` or `false`.
+   * 
+ * + * optional bool event_based_hold = 23; + * + * @return Whether the eventBasedHold field is set. + */ + @java.lang.Override + public boolean hasEventBasedHold() { + return ((bitField0_ & 0x00000200) != 0); + } + + /** + * + * + *
+   * Whether an object is under event-based hold.
+   * An event-based hold is a way to force the retention of an object until
+   * after some event occurs. Once the hold is released by explicitly setting
+   * this field to `false`, the object becomes subject to any bucket-level
+   * retention policy, except that the retention duration is calculated
+   * from the time the event based hold was lifted, rather than the time the
+   * object was created.
+   *
+   * In a `WriteObject` request, not setting this field implies that the value
+   * should be taken from the parent bucket's `default_event_based_hold` field.
+   * In a response, this field is always set to `true` or `false`.
+   * 
+ * + * optional bool event_based_hold = 23; + * + * @return The eventBasedHold. + */ + @java.lang.Override + public boolean getEventBasedHold() { + return eventBasedHold_; + } + + public static final int OWNER_FIELD_NUMBER = 24; + private com.google.storage.v2.Owner owner_; + + /** + * + * + *
+   * Output only. The owner of the object. This is always the uploader of the
+   * object.
+   * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return Whether the owner field is set. + */ + @java.lang.Override + public boolean hasOwner() { + return ((bitField0_ & 0x00000400) != 0); + } + + /** + * + * + *
+   * Output only. The owner of the object. This is always the uploader of the
+   * object.
+   * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The owner. + */ + @java.lang.Override + public com.google.storage.v2.Owner getOwner() { + return owner_ == null ? com.google.storage.v2.Owner.getDefaultInstance() : owner_; + } + + /** + * + * + *
+   * Output only. The owner of the object. This is always the uploader of the
+   * object.
+   * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + @java.lang.Override + public com.google.storage.v2.OwnerOrBuilder getOwnerOrBuilder() { + return owner_ == null ? com.google.storage.v2.Owner.getDefaultInstance() : owner_; + } + + public static final int CUSTOMER_ENCRYPTION_FIELD_NUMBER = 25; + private com.google.storage.v2.CustomerEncryption customerEncryption_; + + /** + * + * + *
+   * Optional. Metadata of customer-supplied encryption key, if the object is
+   * encrypted by such a key.
+   * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customerEncryption field is set. + */ + @java.lang.Override + public boolean hasCustomerEncryption() { + return ((bitField0_ & 0x00000800) != 0); + } + + /** + * + * + *
+   * Optional. Metadata of customer-supplied encryption key, if the object is
+   * encrypted by such a key.
+   * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customerEncryption. + */ + @java.lang.Override + public com.google.storage.v2.CustomerEncryption getCustomerEncryption() { + return customerEncryption_ == null + ? com.google.storage.v2.CustomerEncryption.getDefaultInstance() + : customerEncryption_; + } + + /** + * + * + *
+   * Optional. Metadata of customer-supplied encryption key, if the object is
+   * encrypted by such a key.
+   * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.CustomerEncryptionOrBuilder getCustomerEncryptionOrBuilder() { + return customerEncryption_ == null + ? com.google.storage.v2.CustomerEncryption.getDefaultInstance() + : customerEncryption_; + } + + public static final int CUSTOM_TIME_FIELD_NUMBER = 26; + private com.google.protobuf.Timestamp customTime_; + + /** + * + * + *
+   * Optional. A user-specified timestamp set on an object.
+   * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customTime field is set. + */ + @java.lang.Override + public boolean hasCustomTime() { + return ((bitField0_ & 0x00001000) != 0); + } + + /** + * + * + *
+   * Optional. A user-specified timestamp set on an object.
+   * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCustomTime() { + return customTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : customTime_; + } + + /** + * + * + *
+   * Optional. A user-specified timestamp set on an object.
+   * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCustomTimeOrBuilder() { + return customTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : customTime_; + } + + public static final int SOFT_DELETE_TIME_FIELD_NUMBER = 28; + private com.google.protobuf.Timestamp softDeleteTime_; + + /** + * + * + *
+   * Output only. This is the time when the object became soft-deleted.
+   *
+   * Soft-deleted objects are only accessible if a soft_delete_policy is
+   * enabled. Also see `hard_delete_time`.
+   * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the softDeleteTime field is set. + */ + @java.lang.Override + public boolean hasSoftDeleteTime() { + return ((bitField0_ & 0x00002000) != 0); + } + + /** + * + * + *
+   * Output only. This is the time when the object became soft-deleted.
+   *
+   * Soft-deleted objects are only accessible if a soft_delete_policy is
+   * enabled. Also see `hard_delete_time`.
+   * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The softDeleteTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getSoftDeleteTime() { + return softDeleteTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : softDeleteTime_; + } + + /** + * + * + *
+   * Output only. This is the time when the object became soft-deleted.
+   *
+   * Soft-deleted objects are only accessible if a soft_delete_policy is
+   * enabled. Also see `hard_delete_time`.
+   * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getSoftDeleteTimeOrBuilder() { + return softDeleteTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : softDeleteTime_; + } + + public static final int HARD_DELETE_TIME_FIELD_NUMBER = 29; + private com.google.protobuf.Timestamp hardDeleteTime_; + + /** + * + * + *
+   * Output only. The time when the object is permanently deleted.
+   *
+   * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+   * Otherwise, the object is not accessible.
+   * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the hardDeleteTime field is set. + */ + @java.lang.Override + public boolean hasHardDeleteTime() { + return ((bitField0_ & 0x00004000) != 0); + } + + /** + * + * + *
+   * Output only. The time when the object is permanently deleted.
+   *
+   * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+   * Otherwise, the object is not accessible.
+   * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The hardDeleteTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getHardDeleteTime() { + return hardDeleteTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : hardDeleteTime_; + } + + /** + * + * + *
+   * Output only. The time when the object is permanently deleted.
+   *
+   * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+   * Otherwise, the object is not accessible.
+   * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getHardDeleteTimeOrBuilder() { + return hardDeleteTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : hardDeleteTime_; + } + + public static final int RETENTION_FIELD_NUMBER = 30; + private com.google.storage.v2.Object.Retention retention_; + + /** + * + * + *
+   * Optional. Retention configuration of this object.
+   * Might only be configured if the bucket has object retention enabled.
+   * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retention field is set. + */ + @java.lang.Override + public boolean hasRetention() { + return ((bitField0_ & 0x00008000) != 0); + } + + /** + * + * + *
+   * Optional. Retention configuration of this object.
+   * Might only be configured if the bucket has object retention enabled.
+   * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retention. + */ + @java.lang.Override + public com.google.storage.v2.Object.Retention getRetention() { + return retention_ == null + ? com.google.storage.v2.Object.Retention.getDefaultInstance() + : retention_; + } + + /** + * + * + *
+   * Optional. Retention configuration of this object.
+   * Might only be configured if the bucket has object retention enabled.
+   * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.Object.RetentionOrBuilder getRetentionOrBuilder() { + return retention_ == null + ? com.google.storage.v2.Object.Retention.getDefaultInstance() + : retention_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, bucket_); + } + if (generation_ != 0L) { + output.writeInt64(3, generation_); + } + if (metageneration_ != 0L) { + output.writeInt64(4, metageneration_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(storageClass_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, storageClass_); + } + if (size_ != 0L) { + output.writeInt64(6, size_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(contentEncoding_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 7, contentEncoding_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(contentDisposition_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 8, contentDisposition_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(cacheControl_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 9, cacheControl_); + } + for (int i = 0; i < acl_.size(); i++) { + output.writeMessage(10, acl_.get(i)); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(contentLanguage_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 11, contentLanguage_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(12, getDeleteTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(contentType_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 13, contentType_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(14, getCreateTime()); + } + if (componentCount_ != 0) { + output.writeInt32(15, componentCount_); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeMessage(16, getChecksums()); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeMessage(17, getUpdateTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKey_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 18, kmsKey_); + } + if (((bitField0_ & 0x00000040) != 0)) { + output.writeMessage(19, getUpdateStorageClassTime()); + } + if (temporaryHold_ != false) { + output.writeBool(20, temporaryHold_); + } + if (((bitField0_ & 0x00000080) != 0)) { + output.writeMessage(21, getRetentionExpireTime()); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetMetadata(), MetadataDefaultEntryHolder.defaultEntry, 22); + if (((bitField0_ & 0x00000200) != 0)) { + output.writeBool(23, eventBasedHold_); + } + if (((bitField0_ & 0x00000400) != 0)) { + output.writeMessage(24, getOwner()); + } + if (((bitField0_ & 0x00000800) != 0)) { + output.writeMessage(25, getCustomerEncryption()); + } + if (((bitField0_ & 0x00001000) != 0)) { + output.writeMessage(26, getCustomTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 27, etag_); + } + if (((bitField0_ & 0x00002000) != 0)) { + output.writeMessage(28, getSoftDeleteTime()); + } + if (((bitField0_ & 0x00004000) != 0)) { + output.writeMessage(29, 
getHardDeleteTime()); + } + if (((bitField0_ & 0x00008000) != 0)) { + output.writeMessage(30, getRetention()); + } + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 35, restoreToken_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(36, getFinalizeTime()); + } + if (((bitField0_ & 0x00000100) != 0)) { + output.writeMessage(38, getContexts()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, bucket_); + } + if (generation_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, generation_); + } + if (metageneration_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, metageneration_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(storageClass_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, storageClass_); + } + if (size_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(6, size_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(contentEncoding_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(7, contentEncoding_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(contentDisposition_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(8, contentDisposition_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(cacheControl_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(9, cacheControl_); + } + for (int i = 0; i < acl_.size(); i++) { + size += 
com.google.protobuf.CodedOutputStream.computeMessageSize(10, acl_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(contentLanguage_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(11, contentLanguage_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(12, getDeleteTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(contentType_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(13, contentType_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(14, getCreateTime()); + } + if (componentCount_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(15, componentCount_); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(16, getChecksums()); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(17, getUpdateTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKey_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(18, kmsKey_); + } + if (((bitField0_ & 0x00000040) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(19, getUpdateStorageClassTime()); + } + if (temporaryHold_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(20, temporaryHold_); + } + if (((bitField0_ & 0x00000080) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(21, getRetentionExpireTime()); + } + for (java.util.Map.Entry entry : + internalGetMetadata().getMap().entrySet()) { + com.google.protobuf.MapEntry metadata__ = + MetadataDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(22, metadata__); + } + if 
(((bitField0_ & 0x00000200) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(23, eventBasedHold_); + } + if (((bitField0_ & 0x00000400) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(24, getOwner()); + } + if (((bitField0_ & 0x00000800) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(25, getCustomerEncryption()); + } + if (((bitField0_ & 0x00001000) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(26, getCustomTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(27, etag_); + } + if (((bitField0_ & 0x00002000) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(28, getSoftDeleteTime()); + } + if (((bitField0_ & 0x00004000) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(29, getHardDeleteTime()); + } + if (((bitField0_ & 0x00008000) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(30, getRetention()); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(35, restoreToken_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(36, getFinalizeTime()); + } + if (((bitField0_ & 0x00000100) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(38, getContexts()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Object)) { + return super.equals(obj); + } + com.google.storage.v2.Object other = (com.google.storage.v2.Object) obj; + + if (!getName().equals(other.getName())) return false; + if (!getBucket().equals(other.getBucket())) return false; + if 
(!getEtag().equals(other.getEtag())) return false; + if (getGeneration() != other.getGeneration()) return false; + if (hasRestoreToken() != other.hasRestoreToken()) return false; + if (hasRestoreToken()) { + if (!getRestoreToken().equals(other.getRestoreToken())) return false; + } + if (getMetageneration() != other.getMetageneration()) return false; + if (!getStorageClass().equals(other.getStorageClass())) return false; + if (getSize() != other.getSize()) return false; + if (!getContentEncoding().equals(other.getContentEncoding())) return false; + if (!getContentDisposition().equals(other.getContentDisposition())) return false; + if (!getCacheControl().equals(other.getCacheControl())) return false; + if (!getAclList().equals(other.getAclList())) return false; + if (!getContentLanguage().equals(other.getContentLanguage())) return false; + if (hasDeleteTime() != other.hasDeleteTime()) return false; + if (hasDeleteTime()) { + if (!getDeleteTime().equals(other.getDeleteTime())) return false; + } + if (hasFinalizeTime() != other.hasFinalizeTime()) return false; + if (hasFinalizeTime()) { + if (!getFinalizeTime().equals(other.getFinalizeTime())) return false; + } + if (!getContentType().equals(other.getContentType())) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (getComponentCount() != other.getComponentCount()) return false; + if (hasChecksums() != other.hasChecksums()) return false; + if (hasChecksums()) { + if (!getChecksums().equals(other.getChecksums())) return false; + } + if (hasUpdateTime() != other.hasUpdateTime()) return false; + if (hasUpdateTime()) { + if (!getUpdateTime().equals(other.getUpdateTime())) return false; + } + if (!getKmsKey().equals(other.getKmsKey())) return false; + if (hasUpdateStorageClassTime() != other.hasUpdateStorageClassTime()) return false; + if (hasUpdateStorageClassTime()) { + if 
(!getUpdateStorageClassTime().equals(other.getUpdateStorageClassTime())) return false; + } + if (getTemporaryHold() != other.getTemporaryHold()) return false; + if (hasRetentionExpireTime() != other.hasRetentionExpireTime()) return false; + if (hasRetentionExpireTime()) { + if (!getRetentionExpireTime().equals(other.getRetentionExpireTime())) return false; + } + if (!internalGetMetadata().equals(other.internalGetMetadata())) return false; + if (hasContexts() != other.hasContexts()) return false; + if (hasContexts()) { + if (!getContexts().equals(other.getContexts())) return false; + } + if (hasEventBasedHold() != other.hasEventBasedHold()) return false; + if (hasEventBasedHold()) { + if (getEventBasedHold() != other.getEventBasedHold()) return false; + } + if (hasOwner() != other.hasOwner()) return false; + if (hasOwner()) { + if (!getOwner().equals(other.getOwner())) return false; + } + if (hasCustomerEncryption() != other.hasCustomerEncryption()) return false; + if (hasCustomerEncryption()) { + if (!getCustomerEncryption().equals(other.getCustomerEncryption())) return false; + } + if (hasCustomTime() != other.hasCustomTime()) return false; + if (hasCustomTime()) { + if (!getCustomTime().equals(other.getCustomTime())) return false; + } + if (hasSoftDeleteTime() != other.hasSoftDeleteTime()) return false; + if (hasSoftDeleteTime()) { + if (!getSoftDeleteTime().equals(other.getSoftDeleteTime())) return false; + } + if (hasHardDeleteTime() != other.hasHardDeleteTime()) return false; + if (hasHardDeleteTime()) { + if (!getHardDeleteTime().equals(other.getHardDeleteTime())) return false; + } + if (hasRetention() != other.hasRetention()) return false; + if (hasRetention()) { + if (!getRetention().equals(other.getRetention())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = 
(19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getBucket().hashCode(); + hash = (37 * hash) + ETAG_FIELD_NUMBER; + hash = (53 * hash) + getEtag().hashCode(); + hash = (37 * hash) + GENERATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getGeneration()); + if (hasRestoreToken()) { + hash = (37 * hash) + RESTORE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getRestoreToken().hashCode(); + } + hash = (37 * hash) + METAGENERATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getMetageneration()); + hash = (37 * hash) + STORAGE_CLASS_FIELD_NUMBER; + hash = (53 * hash) + getStorageClass().hashCode(); + hash = (37 * hash) + SIZE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getSize()); + hash = (37 * hash) + CONTENT_ENCODING_FIELD_NUMBER; + hash = (53 * hash) + getContentEncoding().hashCode(); + hash = (37 * hash) + CONTENT_DISPOSITION_FIELD_NUMBER; + hash = (53 * hash) + getContentDisposition().hashCode(); + hash = (37 * hash) + CACHE_CONTROL_FIELD_NUMBER; + hash = (53 * hash) + getCacheControl().hashCode(); + if (getAclCount() > 0) { + hash = (37 * hash) + ACL_FIELD_NUMBER; + hash = (53 * hash) + getAclList().hashCode(); + } + hash = (37 * hash) + CONTENT_LANGUAGE_FIELD_NUMBER; + hash = (53 * hash) + getContentLanguage().hashCode(); + if (hasDeleteTime()) { + hash = (37 * hash) + DELETE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getDeleteTime().hashCode(); + } + if (hasFinalizeTime()) { + hash = (37 * hash) + FINALIZE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getFinalizeTime().hashCode(); + } + hash = (37 * hash) + CONTENT_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getContentType().hashCode(); + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + hash = (37 * hash) 
+ COMPONENT_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getComponentCount(); + if (hasChecksums()) { + hash = (37 * hash) + CHECKSUMS_FIELD_NUMBER; + hash = (53 * hash) + getChecksums().hashCode(); + } + if (hasUpdateTime()) { + hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getUpdateTime().hashCode(); + } + hash = (37 * hash) + KMS_KEY_FIELD_NUMBER; + hash = (53 * hash) + getKmsKey().hashCode(); + if (hasUpdateStorageClassTime()) { + hash = (37 * hash) + UPDATE_STORAGE_CLASS_TIME_FIELD_NUMBER; + hash = (53 * hash) + getUpdateStorageClassTime().hashCode(); + } + hash = (37 * hash) + TEMPORARY_HOLD_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getTemporaryHold()); + if (hasRetentionExpireTime()) { + hash = (37 * hash) + RETENTION_EXPIRE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getRetentionExpireTime().hashCode(); + } + if (!internalGetMetadata().getMap().isEmpty()) { + hash = (37 * hash) + METADATA_FIELD_NUMBER; + hash = (53 * hash) + internalGetMetadata().hashCode(); + } + if (hasContexts()) { + hash = (37 * hash) + CONTEXTS_FIELD_NUMBER; + hash = (53 * hash) + getContexts().hashCode(); + } + if (hasEventBasedHold()) { + hash = (37 * hash) + EVENT_BASED_HOLD_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEventBasedHold()); + } + if (hasOwner()) { + hash = (37 * hash) + OWNER_FIELD_NUMBER; + hash = (53 * hash) + getOwner().hashCode(); + } + if (hasCustomerEncryption()) { + hash = (37 * hash) + CUSTOMER_ENCRYPTION_FIELD_NUMBER; + hash = (53 * hash) + getCustomerEncryption().hashCode(); + } + if (hasCustomTime()) { + hash = (37 * hash) + CUSTOM_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCustomTime().hashCode(); + } + if (hasSoftDeleteTime()) { + hash = (37 * hash) + SOFT_DELETE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getSoftDeleteTime().hashCode(); + } + if (hasHardDeleteTime()) { + hash = (37 * hash) + HARD_DELETE_TIME_FIELD_NUMBER; + hash = (53 * hash) + 
getHardDeleteTime().hashCode(); + } + if (hasRetention()) { + hash = (37 * hash) + RETENTION_FIELD_NUMBER; + hash = (53 * hash) + getRetention().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Object parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Object parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Object parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Object parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Object parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Object parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Object parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Object parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Object parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Object parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Object parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Object parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Object prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * An object.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Object} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Object) + com.google.storage.v2.ObjectOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto.internal_static_google_storage_v2_Object_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 22: + return internalGetMetadata(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 22: + return internalGetMutableMetadata(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Object_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Object.class, com.google.storage.v2.Object.Builder.class); + } + + // Construct using com.google.storage.v2.Object.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetAclFieldBuilder(); + internalGetDeleteTimeFieldBuilder(); + internalGetFinalizeTimeFieldBuilder(); + internalGetCreateTimeFieldBuilder(); + internalGetChecksumsFieldBuilder(); 
+ internalGetUpdateTimeFieldBuilder(); + internalGetUpdateStorageClassTimeFieldBuilder(); + internalGetRetentionExpireTimeFieldBuilder(); + internalGetContextsFieldBuilder(); + internalGetOwnerFieldBuilder(); + internalGetCustomerEncryptionFieldBuilder(); + internalGetCustomTimeFieldBuilder(); + internalGetSoftDeleteTimeFieldBuilder(); + internalGetHardDeleteTimeFieldBuilder(); + internalGetRetentionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + bitField1_ = 0; + name_ = ""; + bucket_ = ""; + etag_ = ""; + generation_ = 0L; + restoreToken_ = ""; + metageneration_ = 0L; + storageClass_ = ""; + size_ = 0L; + contentEncoding_ = ""; + contentDisposition_ = ""; + cacheControl_ = ""; + if (aclBuilder_ == null) { + acl_ = java.util.Collections.emptyList(); + } else { + acl_ = null; + aclBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000800); + contentLanguage_ = ""; + deleteTime_ = null; + if (deleteTimeBuilder_ != null) { + deleteTimeBuilder_.dispose(); + deleteTimeBuilder_ = null; + } + finalizeTime_ = null; + if (finalizeTimeBuilder_ != null) { + finalizeTimeBuilder_.dispose(); + finalizeTimeBuilder_ = null; + } + contentType_ = ""; + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + componentCount_ = 0; + checksums_ = null; + if (checksumsBuilder_ != null) { + checksumsBuilder_.dispose(); + checksumsBuilder_ = null; + } + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + kmsKey_ = ""; + updateStorageClassTime_ = null; + if (updateStorageClassTimeBuilder_ != null) { + updateStorageClassTimeBuilder_.dispose(); + updateStorageClassTimeBuilder_ = null; + } + temporaryHold_ = false; + retentionExpireTime_ = null; + if (retentionExpireTimeBuilder_ != null) { + retentionExpireTimeBuilder_.dispose(); + retentionExpireTimeBuilder_ = null; + } + 
internalGetMutableMetadata().clear(); + contexts_ = null; + if (contextsBuilder_ != null) { + contextsBuilder_.dispose(); + contextsBuilder_ = null; + } + eventBasedHold_ = false; + owner_ = null; + if (ownerBuilder_ != null) { + ownerBuilder_.dispose(); + ownerBuilder_ = null; + } + customerEncryption_ = null; + if (customerEncryptionBuilder_ != null) { + customerEncryptionBuilder_.dispose(); + customerEncryptionBuilder_ = null; + } + customTime_ = null; + if (customTimeBuilder_ != null) { + customTimeBuilder_.dispose(); + customTimeBuilder_ = null; + } + softDeleteTime_ = null; + if (softDeleteTimeBuilder_ != null) { + softDeleteTimeBuilder_.dispose(); + softDeleteTimeBuilder_ = null; + } + hardDeleteTime_ = null; + if (hardDeleteTimeBuilder_ != null) { + hardDeleteTimeBuilder_.dispose(); + hardDeleteTimeBuilder_ = null; + } + retention_ = null; + if (retentionBuilder_ != null) { + retentionBuilder_.dispose(); + retentionBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto.internal_static_google_storage_v2_Object_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Object getDefaultInstanceForType() { + return com.google.storage.v2.Object.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Object build() { + com.google.storage.v2.Object result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Object buildPartial() { + com.google.storage.v2.Object result = new com.google.storage.v2.Object(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + if (bitField1_ != 0) { + buildPartial1(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.storage.v2.Object result) { + if 
(aclBuilder_ == null) { + if (((bitField0_ & 0x00000800) != 0)) { + acl_ = java.util.Collections.unmodifiableList(acl_); + bitField0_ = (bitField0_ & ~0x00000800); + } + result.acl_ = acl_; + } else { + result.acl_ = aclBuilder_.build(); + } + } + + private void buildPartial0(com.google.storage.v2.Object result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.bucket_ = bucket_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.etag_ = etag_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.generation_ = generation_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.restoreToken_ = restoreToken_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.metageneration_ = metageneration_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.storageClass_ = storageClass_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.size_ = size_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.contentEncoding_ = contentEncoding_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.contentDisposition_ = contentDisposition_; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.cacheControl_ = cacheControl_; + } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.contentLanguage_ = contentLanguage_; + } + if (((from_bitField0_ & 0x00002000) != 0)) { + result.deleteTime_ = deleteTimeBuilder_ == null ? deleteTime_ : deleteTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00004000) != 0)) { + result.finalizeTime_ = + finalizeTimeBuilder_ == null ? 
finalizeTime_ : finalizeTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00008000) != 0)) { + result.contentType_ = contentType_; + } + if (((from_bitField0_ & 0x00010000) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00020000) != 0)) { + result.componentCount_ = componentCount_; + } + if (((from_bitField0_ & 0x00040000) != 0)) { + result.checksums_ = checksumsBuilder_ == null ? checksums_ : checksumsBuilder_.build(); + to_bitField0_ |= 0x00000010; + } + if (((from_bitField0_ & 0x00080000) != 0)) { + result.updateTime_ = updateTimeBuilder_ == null ? updateTime_ : updateTimeBuilder_.build(); + to_bitField0_ |= 0x00000020; + } + if (((from_bitField0_ & 0x00100000) != 0)) { + result.kmsKey_ = kmsKey_; + } + if (((from_bitField0_ & 0x00200000) != 0)) { + result.updateStorageClassTime_ = + updateStorageClassTimeBuilder_ == null + ? updateStorageClassTime_ + : updateStorageClassTimeBuilder_.build(); + to_bitField0_ |= 0x00000040; + } + if (((from_bitField0_ & 0x00400000) != 0)) { + result.temporaryHold_ = temporaryHold_; + } + if (((from_bitField0_ & 0x00800000) != 0)) { + result.retentionExpireTime_ = + retentionExpireTimeBuilder_ == null + ? retentionExpireTime_ + : retentionExpireTimeBuilder_.build(); + to_bitField0_ |= 0x00000080; + } + if (((from_bitField0_ & 0x01000000) != 0)) { + result.metadata_ = internalGetMetadata(); + result.metadata_.makeImmutable(); + } + if (((from_bitField0_ & 0x02000000) != 0)) { + result.contexts_ = contextsBuilder_ == null ? contexts_ : contextsBuilder_.build(); + to_bitField0_ |= 0x00000100; + } + if (((from_bitField0_ & 0x04000000) != 0)) { + result.eventBasedHold_ = eventBasedHold_; + to_bitField0_ |= 0x00000200; + } + if (((from_bitField0_ & 0x08000000) != 0)) { + result.owner_ = ownerBuilder_ == null ? 
owner_ : ownerBuilder_.build(); + to_bitField0_ |= 0x00000400; + } + if (((from_bitField0_ & 0x10000000) != 0)) { + result.customerEncryption_ = + customerEncryptionBuilder_ == null + ? customerEncryption_ + : customerEncryptionBuilder_.build(); + to_bitField0_ |= 0x00000800; + } + if (((from_bitField0_ & 0x20000000) != 0)) { + result.customTime_ = customTimeBuilder_ == null ? customTime_ : customTimeBuilder_.build(); + to_bitField0_ |= 0x00001000; + } + if (((from_bitField0_ & 0x40000000) != 0)) { + result.softDeleteTime_ = + softDeleteTimeBuilder_ == null ? softDeleteTime_ : softDeleteTimeBuilder_.build(); + to_bitField0_ |= 0x00002000; + } + if (((from_bitField0_ & 0x80000000) != 0)) { + result.hardDeleteTime_ = + hardDeleteTimeBuilder_ == null ? hardDeleteTime_ : hardDeleteTimeBuilder_.build(); + to_bitField0_ |= 0x00004000; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartial1(com.google.storage.v2.Object result) { + int from_bitField1_ = bitField1_; + int to_bitField0_ = 0; + if (((from_bitField1_ & 0x00000001) != 0)) { + result.retention_ = retentionBuilder_ == null ? 
retention_ : retentionBuilder_.build(); + to_bitField0_ |= 0x00008000; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Object) { + return mergeFrom((com.google.storage.v2.Object) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Object other) { + if (other == com.google.storage.v2.Object.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getBucket().isEmpty()) { + bucket_ = other.bucket_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getEtag().isEmpty()) { + etag_ = other.etag_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.getGeneration() != 0L) { + setGeneration(other.getGeneration()); + } + if (other.hasRestoreToken()) { + restoreToken_ = other.restoreToken_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (other.getMetageneration() != 0L) { + setMetageneration(other.getMetageneration()); + } + if (!other.getStorageClass().isEmpty()) { + storageClass_ = other.storageClass_; + bitField0_ |= 0x00000040; + onChanged(); + } + if (other.getSize() != 0L) { + setSize(other.getSize()); + } + if (!other.getContentEncoding().isEmpty()) { + contentEncoding_ = other.contentEncoding_; + bitField0_ |= 0x00000100; + onChanged(); + } + if (!other.getContentDisposition().isEmpty()) { + contentDisposition_ = other.contentDisposition_; + bitField0_ |= 0x00000200; + onChanged(); + } + if (!other.getCacheControl().isEmpty()) { + cacheControl_ = other.cacheControl_; + bitField0_ |= 0x00000400; + onChanged(); + } + if (aclBuilder_ == null) { + if (!other.acl_.isEmpty()) { + if (acl_.isEmpty()) { + acl_ = other.acl_; + bitField0_ = (bitField0_ & ~0x00000800); + } else { + ensureAclIsMutable(); + acl_.addAll(other.acl_); + } + onChanged(); + } + } else { + if 
(!other.acl_.isEmpty()) { + if (aclBuilder_.isEmpty()) { + aclBuilder_.dispose(); + aclBuilder_ = null; + acl_ = other.acl_; + bitField0_ = (bitField0_ & ~0x00000800); + aclBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetAclFieldBuilder() + : null; + } else { + aclBuilder_.addAllMessages(other.acl_); + } + } + } + if (!other.getContentLanguage().isEmpty()) { + contentLanguage_ = other.contentLanguage_; + bitField0_ |= 0x00001000; + onChanged(); + } + if (other.hasDeleteTime()) { + mergeDeleteTime(other.getDeleteTime()); + } + if (other.hasFinalizeTime()) { + mergeFinalizeTime(other.getFinalizeTime()); + } + if (!other.getContentType().isEmpty()) { + contentType_ = other.contentType_; + bitField0_ |= 0x00008000; + onChanged(); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.getComponentCount() != 0) { + setComponentCount(other.getComponentCount()); + } + if (other.hasChecksums()) { + mergeChecksums(other.getChecksums()); + } + if (other.hasUpdateTime()) { + mergeUpdateTime(other.getUpdateTime()); + } + if (!other.getKmsKey().isEmpty()) { + kmsKey_ = other.kmsKey_; + bitField0_ |= 0x00100000; + onChanged(); + } + if (other.hasUpdateStorageClassTime()) { + mergeUpdateStorageClassTime(other.getUpdateStorageClassTime()); + } + if (other.getTemporaryHold() != false) { + setTemporaryHold(other.getTemporaryHold()); + } + if (other.hasRetentionExpireTime()) { + mergeRetentionExpireTime(other.getRetentionExpireTime()); + } + internalGetMutableMetadata().mergeFrom(other.internalGetMetadata()); + bitField0_ |= 0x01000000; + if (other.hasContexts()) { + mergeContexts(other.getContexts()); + } + if (other.hasEventBasedHold()) { + setEventBasedHold(other.getEventBasedHold()); + } + if (other.hasOwner()) { + mergeOwner(other.getOwner()); + } + if (other.hasCustomerEncryption()) { + mergeCustomerEncryption(other.getCustomerEncryption()); + } + if (other.hasCustomTime()) { + 
mergeCustomTime(other.getCustomTime()); + } + if (other.hasSoftDeleteTime()) { + mergeSoftDeleteTime(other.getSoftDeleteTime()); + } + if (other.hasHardDeleteTime()) { + mergeHardDeleteTime(other.getHardDeleteTime()); + } + if (other.hasRetention()) { + mergeRetention(other.getRetention()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + bucket_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + generation_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 24 + case 32: + { + metageneration_ = input.readInt64(); + bitField0_ |= 0x00000020; + break; + } // case 32 + case 42: + { + storageClass_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000040; + break; + } // case 42 + case 48: + { + size_ = input.readInt64(); + bitField0_ |= 0x00000080; + break; + } // case 48 + case 58: + { + contentEncoding_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000100; + break; + } // case 58 + case 66: + { + contentDisposition_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000200; + break; + } // case 66 + case 74: + { + cacheControl_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000400; + break; + } // case 74 + case 82: + { + com.google.storage.v2.ObjectAccessControl m = + input.readMessage( + 
com.google.storage.v2.ObjectAccessControl.parser(), extensionRegistry); + if (aclBuilder_ == null) { + ensureAclIsMutable(); + acl_.add(m); + } else { + aclBuilder_.addMessage(m); + } + break; + } // case 82 + case 90: + { + contentLanguage_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00001000; + break; + } // case 90 + case 98: + { + input.readMessage( + internalGetDeleteTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00002000; + break; + } // case 98 + case 106: + { + contentType_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00008000; + break; + } // case 106 + case 114: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00010000; + break; + } // case 114 + case 120: + { + componentCount_ = input.readInt32(); + bitField0_ |= 0x00020000; + break; + } // case 120 + case 130: + { + input.readMessage( + internalGetChecksumsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00040000; + break; + } // case 130 + case 138: + { + input.readMessage( + internalGetUpdateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00080000; + break; + } // case 138 + case 146: + { + kmsKey_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00100000; + break; + } // case 146 + case 154: + { + input.readMessage( + internalGetUpdateStorageClassTimeFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00200000; + break; + } // case 154 + case 160: + { + temporaryHold_ = input.readBool(); + bitField0_ |= 0x00400000; + break; + } // case 160 + case 170: + { + input.readMessage( + internalGetRetentionExpireTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00800000; + break; + } // case 170 + case 178: + { + com.google.protobuf.MapEntry metadata__ = + input.readMessage( + MetadataDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableMetadata() + .getMutableMap() + 
.put(metadata__.getKey(), metadata__.getValue()); + bitField0_ |= 0x01000000; + break; + } // case 178 + case 184: + { + eventBasedHold_ = input.readBool(); + bitField0_ |= 0x04000000; + break; + } // case 184 + case 194: + { + input.readMessage(internalGetOwnerFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x08000000; + break; + } // case 194 + case 202: + { + input.readMessage( + internalGetCustomerEncryptionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x10000000; + break; + } // case 202 + case 210: + { + input.readMessage( + internalGetCustomTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x20000000; + break; + } // case 210 + case 218: + { + etag_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 218 + case 226: + { + input.readMessage( + internalGetSoftDeleteTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x40000000; + break; + } // case 226 + case 234: + { + input.readMessage( + internalGetHardDeleteTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x80000000; + break; + } // case 234 + case 242: + { + input.readMessage( + internalGetRetentionFieldBuilder().getBuilder(), extensionRegistry); + bitField1_ |= 0x00000001; + break; + } // case 242 + case 282: + { + restoreToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 282 + case 290: + { + input.readMessage( + internalGetFinalizeTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00004000; + break; + } // case 290 + case 306: + { + input.readMessage( + internalGetContextsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x02000000; + break; + } // case 306 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + 
throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + private int bitField1_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Immutable. The name of this object. Nearly any sequence of unicode
+     * characters is valid. See
+     * [Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+     * Example: `test.txt`
+     * The `name` field by itself does not uniquely identify a Cloud Storage
+     * object. A Cloud Storage object is uniquely identified by the tuple of
+     * (bucket, object, generation).
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Immutable. The name of this object. Nearly any sequence of unicode
+     * characters is valid. See
+     * [Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+     * Example: `test.txt`
+     * The `name` field by itself does not uniquely identify a Cloud Storage
+     * object. A Cloud Storage object is uniquely identified by the tuple of
+     * (bucket, object, generation).
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Immutable. The name of this object. Nearly any sequence of unicode
+     * characters is valid. See
+     * [Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+     * Example: `test.txt`
+     * The `name` field by itself does not uniquely identify a Cloud Storage
+     * object. A Cloud Storage object is uniquely identified by the tuple of
+     * (bucket, object, generation).
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. The name of this object. Nearly any sequence of unicode
+     * characters is valid. See
+     * [Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+     * Example: `test.txt`
+     * The `name` field by itself does not uniquely identify a Cloud Storage
+     * object. A Cloud Storage object is uniquely identified by the tuple of
+     * (bucket, object, generation).
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. The name of this object. Nearly any sequence of unicode
+     * characters is valid. See
+     * [Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+     * Example: `test.txt`
+     * The `name` field by itself does not uniquely identify a Cloud Storage
+     * object. A Cloud Storage object is uniquely identified by the tuple of
+     * (bucket, object, generation).
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object bucket_ = ""; + + /** + * + * + *
+     * Immutable. The name of the bucket containing this object.
+     * 
+ * + * + * string bucket = 2 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Immutable. The name of the bucket containing this object.
+     * 
+ * + * + * string bucket = 2 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Immutable. The name of the bucket containing this object.
+     * 
+ * + * + * string bucket = 2 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @param value The bucket to set. + * @return This builder for chaining. + */ + public Builder setBucket(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bucket_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. The name of the bucket containing this object.
+     * 
+ * + * + * string bucket = 2 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearBucket() { + bucket_ = getDefaultInstance().getBucket(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. The name of the bucket containing this object.
+     * 
+ * + * + * string bucket = 2 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for bucket to set. + * @return This builder for chaining. + */ + public Builder setBucketBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + bucket_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object etag_ = ""; + + /** + * + * + *
+     * Optional. The `etag` of an object.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the live
+     * object.
+     * 
+ * + * string etag = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The etag. + */ + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The `etag` of an object.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the live
+     * object.
+     * 
+ * + * string etag = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for etag. + */ + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The `etag` of an object.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the live
+     * object.
+     * 
+ * + * string etag = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The etag to set. + * @return This builder for chaining. + */ + public Builder setEtag(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + etag_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The `etag` of an object.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the live
+     * object.
+     * 
+ * + * string etag = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEtag() { + etag_ = getDefaultInstance().getEtag(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The `etag` of an object.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the live
+     * object.
+     * 
+ * + * string etag = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for etag to set. + * @return This builder for chaining. + */ + public Builder setEtagBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + etag_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private long generation_; + + /** + * + * + *
+     * Immutable. The content generation of this object. Used for object
+     * versioning.
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + /** + * + * + *
+     * Immutable. The content generation of this object. Used for object
+     * versioning.
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @param value The generation to set. + * @return This builder for chaining. + */ + public Builder setGeneration(long value) { + + generation_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. The content generation of this object. Used for object
+     * versioning.
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return This builder for chaining. + */ + public Builder clearGeneration() { + bitField0_ = (bitField0_ & ~0x00000008); + generation_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object restoreToken_ = ""; + + /** + * + * + *
+     * Output only. Restore token used to differentiate deleted objects with the
+     * same name and generation. This field is output only, and only set for
+     * deleted objects in HNS buckets.
+     * 
+ * + * optional string restore_token = 35 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return Whether the restoreToken field is set. + */ + public boolean hasRestoreToken() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Output only. Restore token used to differentiate deleted objects with the
+     * same name and generation. This field is output only, and only set for
+     * deleted objects in HNS buckets.
+     * 
+ * + * optional string restore_token = 35 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The restoreToken. + */ + public java.lang.String getRestoreToken() { + java.lang.Object ref = restoreToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + restoreToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. Restore token used to differentiate deleted objects with the
+     * same name and generation. This field is output only, and only set for
+     * deleted objects in HNS buckets.
+     * 
+ * + * optional string restore_token = 35 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for restoreToken. + */ + public com.google.protobuf.ByteString getRestoreTokenBytes() { + java.lang.Object ref = restoreToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + restoreToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. Restore token used to differentiate deleted objects with the
+     * same name and generation. This field is output only, and only set for
+     * deleted objects in HNS buckets.
+     * 
+ * + * optional string restore_token = 35 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The restoreToken to set. + * @return This builder for chaining. + */ + public Builder setRestoreToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + restoreToken_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Restore token used to differentiate deleted objects with the
+     * same name and generation. This field is output only, and only set for
+     * deleted objects in HNS buckets.
+     * 
+ * + * optional string restore_token = 35 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearRestoreToken() { + restoreToken_ = getDefaultInstance().getRestoreToken(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Restore token used to differentiate deleted objects with the
+     * same name and generation. This field is output only, and only set for
+     * deleted objects in HNS buckets.
+     * 
+ * + * optional string restore_token = 35 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for restoreToken to set. + * @return This builder for chaining. + */ + public Builder setRestoreTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + restoreToken_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private long metageneration_; + + /** + * + * + *
+     * Output only. The version of the metadata for this generation of this
+     * object. Used for preconditions and for detecting changes in metadata. A
+     * metageneration number is only meaningful in the context of a particular
+     * generation of a particular object.
+     * 
+ * + * int64 metageneration = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The metageneration. + */ + @java.lang.Override + public long getMetageneration() { + return metageneration_; + } + + /** + * + * + *
+     * Output only. The version of the metadata for this generation of this
+     * object. Used for preconditions and for detecting changes in metadata. A
+     * metageneration number is only meaningful in the context of a particular
+     * generation of a particular object.
+     * 
+ * + * int64 metageneration = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The metageneration to set. + * @return This builder for chaining. + */ + public Builder setMetageneration(long value) { + + metageneration_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The version of the metadata for this generation of this
+     * object. Used for preconditions and for detecting changes in metadata. A
+     * metageneration number is only meaningful in the context of a particular
+     * generation of a particular object.
+     * 
+ * + * int64 metageneration = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearMetageneration() { + bitField0_ = (bitField0_ & ~0x00000020); + metageneration_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object storageClass_ = ""; + + /** + * + * + *
+     * Optional. Storage class of the object.
+     * 
+ * + * string storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The storageClass. + */ + public java.lang.String getStorageClass() { + java.lang.Object ref = storageClass_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + storageClass_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Storage class of the object.
+     * 
+ * + * string storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for storageClass. + */ + public com.google.protobuf.ByteString getStorageClassBytes() { + java.lang.Object ref = storageClass_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + storageClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Storage class of the object.
+     * 
+ * + * string storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The storageClass to set. + * @return This builder for chaining. + */ + public Builder setStorageClass(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + storageClass_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Storage class of the object.
+     * 
+ * + * string storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearStorageClass() { + storageClass_ = getDefaultInstance().getStorageClass(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Storage class of the object.
+     * 
+ * + * string storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for storageClass to set. + * @return This builder for chaining. + */ + public Builder setStorageClassBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + storageClass_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + private long size_; + + /** + * + * + *
+     * Output only. Content-Length of the object data in bytes, matching
+     * [RFC 7230 §3.3.2](https://tools.ietf.org/html/rfc7230#section-3.3.2]).
+     * 
+ * + * int64 size = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The size. + */ + @java.lang.Override + public long getSize() { + return size_; + } + + /** + * + * + *
+     * Output only. Content-Length of the object data in bytes, matching
+     * [RFC 7230 §3.3.2](https://tools.ietf.org/html/rfc7230#section-3.3.2]).
+     * 
+ * + * int64 size = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The size to set. + * @return This builder for chaining. + */ + public Builder setSize(long value) { + + size_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Content-Length of the object data in bytes, matching
+     * [RFC 7230 §3.3.2](https://tools.ietf.org/html/rfc7230#section-3.3.2]).
+     * 
+ * + * int64 size = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearSize() { + bitField0_ = (bitField0_ & ~0x00000080); + size_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object contentEncoding_ = ""; + + /** + * + * + *
+     * Optional. Content-Encoding of the object data, matching
+     * [RFC 7231 §3.1.2.2](https://tools.ietf.org/html/rfc7231#section-3.1.2.2)
+     * 
+ * + * string content_encoding = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The contentEncoding. + */ + public java.lang.String getContentEncoding() { + java.lang.Object ref = contentEncoding_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + contentEncoding_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Content-Encoding of the object data, matching
+     * [RFC 7231 §3.1.2.2](https://tools.ietf.org/html/rfc7231#section-3.1.2.2)
+     * 
+ * + * string content_encoding = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for contentEncoding. + */ + public com.google.protobuf.ByteString getContentEncodingBytes() { + java.lang.Object ref = contentEncoding_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + contentEncoding_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Content-Encoding of the object data, matching
+     * [RFC 7231 §3.1.2.2](https://tools.ietf.org/html/rfc7231#section-3.1.2.2)
+     * 
+ * + * string content_encoding = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The contentEncoding to set. + * @return This builder for chaining. + */ + public Builder setContentEncoding(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + contentEncoding_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Content-Encoding of the object data, matching
+     * [RFC 7231 §3.1.2.2](https://tools.ietf.org/html/rfc7231#section-3.1.2.2)
+     * 
+ * + * string content_encoding = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearContentEncoding() { + contentEncoding_ = getDefaultInstance().getContentEncoding(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Content-Encoding of the object data, matching
+     * [RFC 7231 §3.1.2.2](https://tools.ietf.org/html/rfc7231#section-3.1.2.2)
+     * 
+ * + * string content_encoding = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for contentEncoding to set. + * @return This builder for chaining. + */ + public Builder setContentEncodingBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + contentEncoding_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + private java.lang.Object contentDisposition_ = ""; + + /** + * + * + *
+     * Optional. Content-Disposition of the object data, matching
+     * [RFC 6266](https://tools.ietf.org/html/rfc6266).
+     * 
+ * + * string content_disposition = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The contentDisposition. + */ + public java.lang.String getContentDisposition() { + java.lang.Object ref = contentDisposition_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + contentDisposition_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Content-Disposition of the object data, matching
+     * [RFC 6266](https://tools.ietf.org/html/rfc6266).
+     * 
+ * + * string content_disposition = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for contentDisposition. + */ + public com.google.protobuf.ByteString getContentDispositionBytes() { + java.lang.Object ref = contentDisposition_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + contentDisposition_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Content-Disposition of the object data, matching
+     * [RFC 6266](https://tools.ietf.org/html/rfc6266).
+     * 
+ * + * string content_disposition = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The contentDisposition to set. + * @return This builder for chaining. + */ + public Builder setContentDisposition(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + contentDisposition_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Content-Disposition of the object data, matching
+     * [RFC 6266](https://tools.ietf.org/html/rfc6266).
+     * 
+ * + * string content_disposition = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearContentDisposition() { + contentDisposition_ = getDefaultInstance().getContentDisposition(); + bitField0_ = (bitField0_ & ~0x00000200); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Content-Disposition of the object data, matching
+     * [RFC 6266](https://tools.ietf.org/html/rfc6266).
+     * 
+ * + * string content_disposition = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for contentDisposition to set. + * @return This builder for chaining. + */ + public Builder setContentDispositionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + contentDisposition_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + private java.lang.Object cacheControl_ = ""; + + /** + * + * + *
+     * Optional. Cache-Control directive for the object data, matching
+     * [RFC 7234 §5.2](https://tools.ietf.org/html/rfc7234#section-5.2).
+     * If omitted, and the object is accessible to all anonymous users, the
+     * default is `public, max-age=3600`.
+     * 
+ * + * string cache_control = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The cacheControl. + */ + public java.lang.String getCacheControl() { + java.lang.Object ref = cacheControl_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + cacheControl_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Cache-Control directive for the object data, matching
+     * [RFC 7234 §5.2](https://tools.ietf.org/html/rfc7234#section-5.2).
+     * If omitted, and the object is accessible to all anonymous users, the
+     * default is `public, max-age=3600`.
+     * 
+ * + * string cache_control = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for cacheControl. + */ + public com.google.protobuf.ByteString getCacheControlBytes() { + java.lang.Object ref = cacheControl_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + cacheControl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Cache-Control directive for the object data, matching
+     * [RFC 7234 §5.2](https://tools.ietf.org/html/rfc7234#section-5.2).
+     * If omitted, and the object is accessible to all anonymous users, the
+     * default is `public, max-age=3600`.
+     * 
+ * + * string cache_control = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The cacheControl to set. + * @return This builder for chaining. + */ + public Builder setCacheControl(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + cacheControl_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Cache-Control directive for the object data, matching
+     * [RFC 7234 §5.2](https://tools.ietf.org/html/rfc7234#section-5.2).
+     * If omitted, and the object is accessible to all anonymous users, the
+     * default is `public, max-age=3600`.
+     * 
+ * + * string cache_control = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearCacheControl() { + cacheControl_ = getDefaultInstance().getCacheControl(); + bitField0_ = (bitField0_ & ~0x00000400); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Cache-Control directive for the object data, matching
+     * [RFC 7234 §5.2](https://tools.ietf.org/html/rfc7234#section-5.2).
+     * If omitted, and the object is accessible to all anonymous users, the
+     * default is `public, max-age=3600`.
+     * 
+ * + * string cache_control = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for cacheControl to set. + * @return This builder for chaining. + */ + public Builder setCacheControlBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + cacheControl_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + private java.util.List acl_ = + java.util.Collections.emptyList(); + + private void ensureAclIsMutable() { + if (!((bitField0_ & 0x00000800) != 0)) { + acl_ = new java.util.ArrayList(acl_); + bitField0_ |= 0x00000800; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ObjectAccessControl, + com.google.storage.v2.ObjectAccessControl.Builder, + com.google.storage.v2.ObjectAccessControlOrBuilder> + aclBuilder_; + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getAclList() { + if (aclBuilder_ == null) { + return java.util.Collections.unmodifiableList(acl_); + } else { + return aclBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getAclCount() { + if (aclBuilder_ == null) { + return acl_.size(); + } else { + return aclBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectAccessControl getAcl(int index) { + if (aclBuilder_ == null) { + return acl_.get(index); + } else { + return aclBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAcl(int index, com.google.storage.v2.ObjectAccessControl value) { + if (aclBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAclIsMutable(); + acl_.set(index, value); + onChanged(); + } else { + aclBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAcl( + int index, com.google.storage.v2.ObjectAccessControl.Builder builderForValue) { + if (aclBuilder_ == null) { + ensureAclIsMutable(); + acl_.set(index, builderForValue.build()); + onChanged(); + } else { + aclBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAcl(com.google.storage.v2.ObjectAccessControl value) { + if (aclBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAclIsMutable(); + acl_.add(value); + onChanged(); + } else { + aclBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAcl(int index, com.google.storage.v2.ObjectAccessControl value) { + if (aclBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAclIsMutable(); + acl_.add(index, value); + onChanged(); + } else { + aclBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAcl(com.google.storage.v2.ObjectAccessControl.Builder builderForValue) { + if (aclBuilder_ == null) { + ensureAclIsMutable(); + acl_.add(builderForValue.build()); + onChanged(); + } else { + aclBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAcl( + int index, com.google.storage.v2.ObjectAccessControl.Builder builderForValue) { + if (aclBuilder_ == null) { + ensureAclIsMutable(); + acl_.add(index, builderForValue.build()); + onChanged(); + } else { + aclBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllAcl( + java.lang.Iterable values) { + if (aclBuilder_ == null) { + ensureAclIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, acl_); + onChanged(); + } else { + aclBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearAcl() { + if (aclBuilder_ == null) { + acl_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000800); + onChanged(); + } else { + aclBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeAcl(int index) { + if (aclBuilder_ == null) { + ensureAclIsMutable(); + acl_.remove(index); + onChanged(); + } else { + aclBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectAccessControl.Builder getAclBuilder(int index) { + return internalGetAclFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectAccessControlOrBuilder getAclOrBuilder(int index) { + if (aclBuilder_ == null) { + return acl_.get(index); + } else { + return aclBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getAclOrBuilderList() { + if (aclBuilder_ != null) { + return aclBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(acl_); + } + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectAccessControl.Builder addAclBuilder() { + return internalGetAclFieldBuilder() + .addBuilder(com.google.storage.v2.ObjectAccessControl.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectAccessControl.Builder addAclBuilder(int index) { + return internalGetAclFieldBuilder() + .addBuilder(index, com.google.storage.v2.ObjectAccessControl.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Access controls on the object.
+     * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+     * bucket, requests to set, read, or modify acl is an error.
+     * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getAclBuilderList() { + return internalGetAclFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ObjectAccessControl, + com.google.storage.v2.ObjectAccessControl.Builder, + com.google.storage.v2.ObjectAccessControlOrBuilder> + internalGetAclFieldBuilder() { + if (aclBuilder_ == null) { + aclBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.storage.v2.ObjectAccessControl, + com.google.storage.v2.ObjectAccessControl.Builder, + com.google.storage.v2.ObjectAccessControlOrBuilder>( + acl_, ((bitField0_ & 0x00000800) != 0), getParentForChildren(), isClean()); + acl_ = null; + } + return aclBuilder_; + } + + private java.lang.Object contentLanguage_ = ""; + + /** + * + * + *
+     * Optional. Content-Language of the object data, matching
+     * [RFC 7231 §3.1.3.2](https://tools.ietf.org/html/rfc7231#section-3.1.3.2).
+     * 
+ * + * string content_language = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The contentLanguage. + */ + public java.lang.String getContentLanguage() { + java.lang.Object ref = contentLanguage_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + contentLanguage_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Content-Language of the object data, matching
+     * [RFC 7231 §3.1.3.2](https://tools.ietf.org/html/rfc7231#section-3.1.3.2).
+     * 
+ * + * string content_language = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for contentLanguage. + */ + public com.google.protobuf.ByteString getContentLanguageBytes() { + java.lang.Object ref = contentLanguage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + contentLanguage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Content-Language of the object data, matching
+     * [RFC 7231 §3.1.3.2](https://tools.ietf.org/html/rfc7231#section-3.1.3.2).
+     * 
+ * + * string content_language = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The contentLanguage to set. + * @return This builder for chaining. + */ + public Builder setContentLanguage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + contentLanguage_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Content-Language of the object data, matching
+     * [RFC 7231 §3.1.3.2](https://tools.ietf.org/html/rfc7231#section-3.1.3.2).
+     * 
+ * + * string content_language = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearContentLanguage() { + contentLanguage_ = getDefaultInstance().getContentLanguage(); + bitField0_ = (bitField0_ & ~0x00001000); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Content-Language of the object data, matching
+     * [RFC 7231 §3.1.3.2](https://tools.ietf.org/html/rfc7231#section-3.1.3.2).
+     * 
+ * + * string content_language = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for contentLanguage to set. + * @return This builder for chaining. + */ + public Builder setContentLanguageBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + contentLanguage_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp deleteTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + deleteTimeBuilder_; + + /** + * + * + *
+     * Output only. If this object is noncurrent, this is the time when the object
+     * became noncurrent.
+     * 
+ * + * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the deleteTime field is set. + */ + public boolean hasDeleteTime() { + return ((bitField0_ & 0x00002000) != 0); + } + + /** + * + * + *
+     * Output only. If this object is noncurrent, this is the time when the object
+     * became noncurrent.
+     * 
+ * + * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The deleteTime. + */ + public com.google.protobuf.Timestamp getDeleteTime() { + if (deleteTimeBuilder_ == null) { + return deleteTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : deleteTime_; + } else { + return deleteTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. If this object is noncurrent, this is the time when the object
+     * became noncurrent.
+     * 
+ * + * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setDeleteTime(com.google.protobuf.Timestamp value) { + if (deleteTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + deleteTime_ = value; + } else { + deleteTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. If this object is noncurrent, this is the time when the object
+     * became noncurrent.
+     * 
+ * + * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setDeleteTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (deleteTimeBuilder_ == null) { + deleteTime_ = builderForValue.build(); + } else { + deleteTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. If this object is noncurrent, this is the time when the object
+     * became noncurrent.
+     * 
+ * + * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeDeleteTime(com.google.protobuf.Timestamp value) { + if (deleteTimeBuilder_ == null) { + if (((bitField0_ & 0x00002000) != 0) + && deleteTime_ != null + && deleteTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getDeleteTimeBuilder().mergeFrom(value); + } else { + deleteTime_ = value; + } + } else { + deleteTimeBuilder_.mergeFrom(value); + } + if (deleteTime_ != null) { + bitField0_ |= 0x00002000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. If this object is noncurrent, this is the time when the object
+     * became noncurrent.
+     * 
+ * + * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearDeleteTime() { + bitField0_ = (bitField0_ & ~0x00002000); + deleteTime_ = null; + if (deleteTimeBuilder_ != null) { + deleteTimeBuilder_.dispose(); + deleteTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. If this object is noncurrent, this is the time when the object
+     * became noncurrent.
+     * 
+ * + * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getDeleteTimeBuilder() { + bitField0_ |= 0x00002000; + onChanged(); + return internalGetDeleteTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. If this object is noncurrent, this is the time when the object
+     * became noncurrent.
+     * 
+ * + * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getDeleteTimeOrBuilder() { + if (deleteTimeBuilder_ != null) { + return deleteTimeBuilder_.getMessageOrBuilder(); + } else { + return deleteTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : deleteTime_; + } + } + + /** + * + * + *
+     * Output only. If this object is noncurrent, this is the time when the object
+     * became noncurrent.
+     * 
+ * + * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetDeleteTimeFieldBuilder() { + if (deleteTimeBuilder_ == null) { + deleteTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getDeleteTime(), getParentForChildren(), isClean()); + deleteTime_ = null; + } + return deleteTimeBuilder_; + } + + private com.google.protobuf.Timestamp finalizeTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + finalizeTimeBuilder_; + + /** + * + * + *
+     * Output only. The time when the object was finalized.
+     * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the finalizeTime field is set. + */ + public boolean hasFinalizeTime() { + return ((bitField0_ & 0x00004000) != 0); + } + + /** + * + * + *
+     * Output only. The time when the object was finalized.
+     * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The finalizeTime. + */ + public com.google.protobuf.Timestamp getFinalizeTime() { + if (finalizeTimeBuilder_ == null) { + return finalizeTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : finalizeTime_; + } else { + return finalizeTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The time when the object was finalized.
+     * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setFinalizeTime(com.google.protobuf.Timestamp value) { + if (finalizeTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + finalizeTime_ = value; + } else { + finalizeTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time when the object was finalized.
+     * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setFinalizeTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (finalizeTimeBuilder_ == null) { + finalizeTime_ = builderForValue.build(); + } else { + finalizeTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time when the object was finalized.
+     * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeFinalizeTime(com.google.protobuf.Timestamp value) { + if (finalizeTimeBuilder_ == null) { + if (((bitField0_ & 0x00004000) != 0) + && finalizeTime_ != null + && finalizeTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getFinalizeTimeBuilder().mergeFrom(value); + } else { + finalizeTime_ = value; + } + } else { + finalizeTimeBuilder_.mergeFrom(value); + } + if (finalizeTime_ != null) { + bitField0_ |= 0x00004000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The time when the object was finalized.
+     * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearFinalizeTime() { + bitField0_ = (bitField0_ & ~0x00004000); + finalizeTime_ = null; + if (finalizeTimeBuilder_ != null) { + finalizeTimeBuilder_.dispose(); + finalizeTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time when the object was finalized.
+     * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getFinalizeTimeBuilder() { + bitField0_ |= 0x00004000; + onChanged(); + return internalGetFinalizeTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The time when the object was finalized.
+     * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getFinalizeTimeOrBuilder() { + if (finalizeTimeBuilder_ != null) { + return finalizeTimeBuilder_.getMessageOrBuilder(); + } else { + return finalizeTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : finalizeTime_; + } + } + + /** + * + * + *
+     * Output only. The time when the object was finalized.
+     * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetFinalizeTimeFieldBuilder() { + if (finalizeTimeBuilder_ == null) { + finalizeTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getFinalizeTime(), getParentForChildren(), isClean()); + finalizeTime_ = null; + } + return finalizeTimeBuilder_; + } + + private java.lang.Object contentType_ = ""; + + /** + * + * + *
+     * Optional. Content-Type of the object data, matching
+     * [RFC 7231 §3.1.1.5](https://tools.ietf.org/html/rfc7231#section-3.1.1.5).
+     * If an object is stored without a Content-Type, it is served as
+     * `application/octet-stream`.
+     * 
+ * + * string content_type = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The contentType. + */ + public java.lang.String getContentType() { + java.lang.Object ref = contentType_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + contentType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Content-Type of the object data, matching
+     * [RFC 7231 §3.1.1.5](https://tools.ietf.org/html/rfc7231#section-3.1.1.5).
+     * If an object is stored without a Content-Type, it is served as
+     * `application/octet-stream`.
+     * 
+ * + * string content_type = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for contentType. + */ + public com.google.protobuf.ByteString getContentTypeBytes() { + java.lang.Object ref = contentType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + contentType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Content-Type of the object data, matching
+     * [RFC 7231 §3.1.1.5](https://tools.ietf.org/html/rfc7231#section-3.1.1.5).
+     * If an object is stored without a Content-Type, it is served as
+     * `application/octet-stream`.
+     * 
+ * + * string content_type = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The contentType to set. + * @return This builder for chaining. + */ + public Builder setContentType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + contentType_ = value; + bitField0_ |= 0x00008000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Content-Type of the object data, matching
+     * [RFC 7231 §3.1.1.5](https://tools.ietf.org/html/rfc7231#section-3.1.1.5).
+     * If an object is stored without a Content-Type, it is served as
+     * `application/octet-stream`.
+     * 
+ * + * string content_type = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearContentType() { + contentType_ = getDefaultInstance().getContentType(); + bitField0_ = (bitField0_ & ~0x00008000); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Content-Type of the object data, matching
+     * [RFC 7231 §3.1.1.5](https://tools.ietf.org/html/rfc7231#section-3.1.1.5).
+     * If an object is stored without a Content-Type, it is served as
+     * `application/octet-stream`.
+     * 
+ * + * string content_type = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for contentType to set. + * @return This builder for chaining. + */ + public Builder setContentTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + contentType_ = value; + bitField0_ |= 0x00008000; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
+     * Output only. The creation time of the object.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00010000) != 0); + } + + /** + * + * + *
+     * Output only. The creation time of the object.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The creation time of the object.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00010000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the object.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00010000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the object.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00010000) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00010000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the object.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00010000); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the object.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00010000; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The creation time of the object.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
+     * Output only. The creation time of the object.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private int componentCount_; + + /** + * + * + *
+     * Output only. Number of underlying components that make up this object.
+     * Components are accumulated by compose operations.
+     * 
+ * + * int32 component_count = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The componentCount. + */ + @java.lang.Override + public int getComponentCount() { + return componentCount_; + } + + /** + * + * + *
+     * Output only. Number of underlying components that make up this object.
+     * Components are accumulated by compose operations.
+     * 
+ * + * int32 component_count = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The componentCount to set. + * @return This builder for chaining. + */ + public Builder setComponentCount(int value) { + + componentCount_ = value; + bitField0_ |= 0x00020000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Number of underlying components that make up this object.
+     * Components are accumulated by compose operations.
+     * 
+ * + * int32 component_count = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearComponentCount() { + bitField0_ = (bitField0_ & ~0x00020000); + componentCount_ = 0; + onChanged(); + return this; + } + + private com.google.storage.v2.ObjectChecksums checksums_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + checksumsBuilder_; + + /** + * + * + *
+     * Output only. Hashes for the data part of this object. This field is used
+     * for output only and is silently ignored if provided in requests. The
+     * checksums of the complete object regardless of data range. If the object is
+     * downloaded in full, the client should compute one of these checksums over
+     * the downloaded object and compare it against the value provided here.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the checksums field is set. + */ + public boolean hasChecksums() { + return ((bitField0_ & 0x00040000) != 0); + } + + /** + * + * + *
+     * Output only. Hashes for the data part of this object. This field is used
+     * for output only and is silently ignored if provided in requests. The
+     * checksums of the complete object regardless of data range. If the object is
+     * downloaded in full, the client should compute one of these checksums over
+     * the downloaded object and compare it against the value provided here.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The checksums. + */ + public com.google.storage.v2.ObjectChecksums getChecksums() { + if (checksumsBuilder_ == null) { + return checksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : checksums_; + } else { + return checksumsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. Hashes for the data part of this object. This field is used
+     * for output only and is silently ignored if provided in requests. The
+     * checksums of the complete object regardless of data range. If the object is
+     * downloaded in full, the client should compute one of these checksums over
+     * the downloaded object and compare it against the value provided here.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setChecksums(com.google.storage.v2.ObjectChecksums value) { + if (checksumsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + checksums_ = value; + } else { + checksumsBuilder_.setMessage(value); + } + bitField0_ |= 0x00040000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Hashes for the data part of this object. This field is used
+     * for output only and is silently ignored if provided in requests. The
+     * checksums of the complete object regardless of data range. If the object is
+     * downloaded in full, the client should compute one of these checksums over
+     * the downloaded object and compare it against the value provided here.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setChecksums(com.google.storage.v2.ObjectChecksums.Builder builderForValue) { + if (checksumsBuilder_ == null) { + checksums_ = builderForValue.build(); + } else { + checksumsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00040000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Hashes for the data part of this object. This field is used
+     * for output only and is silently ignored if provided in requests. The
+     * checksums of the complete object regardless of data range. If the object is
+     * downloaded in full, the client should compute one of these checksums over
+     * the downloaded object and compare it against the value provided here.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeChecksums(com.google.storage.v2.ObjectChecksums value) { + if (checksumsBuilder_ == null) { + if (((bitField0_ & 0x00040000) != 0) + && checksums_ != null + && checksums_ != com.google.storage.v2.ObjectChecksums.getDefaultInstance()) { + getChecksumsBuilder().mergeFrom(value); + } else { + checksums_ = value; + } + } else { + checksumsBuilder_.mergeFrom(value); + } + if (checksums_ != null) { + bitField0_ |= 0x00040000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. Hashes for the data part of this object. This field is used
+     * for output only and is silently ignored if provided in requests. The
+     * checksums of the complete object regardless of data range. If the object is
+     * downloaded in full, the client should compute one of these checksums over
+     * the downloaded object and compare it against the value provided here.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearChecksums() { + bitField0_ = (bitField0_ & ~0x00040000); + checksums_ = null; + if (checksumsBuilder_ != null) { + checksumsBuilder_.dispose(); + checksumsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Hashes for the data part of this object. This field is used
+     * for output only and is silently ignored if provided in requests. The
+     * checksums of the complete object regardless of data range. If the object is
+     * downloaded in full, the client should compute one of these checksums over
+     * the downloaded object and compare it against the value provided here.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.v2.ObjectChecksums.Builder getChecksumsBuilder() { + bitField0_ |= 0x00040000; + onChanged(); + return internalGetChecksumsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Hashes for the data part of this object. This field is used
+     * for output only and is silently ignored if provided in requests. The
+     * checksums of the complete object regardless of data range. If the object is
+     * downloaded in full, the client should compute one of these checksums over
+     * the downloaded object and compare it against the value provided here.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.v2.ObjectChecksumsOrBuilder getChecksumsOrBuilder() { + if (checksumsBuilder_ != null) { + return checksumsBuilder_.getMessageOrBuilder(); + } else { + return checksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : checksums_; + } + } + + /** + * + * + *
+     * Output only. Hashes for the data part of this object. This field is used
+     * for output only and is silently ignored if provided in requests. The
+     * checksums of the complete object regardless of data range. If the object is
+     * downloaded in full, the client should compute one of these checksums over
+     * the downloaded object and compare it against the value provided here.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + internalGetChecksumsFieldBuilder() { + if (checksumsBuilder_ == null) { + checksumsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder>( + getChecksums(), getParentForChildren(), isClean()); + checksums_ = null; + } + return checksumsBuilder_; + } + + private com.google.protobuf.Timestamp updateTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateTimeBuilder_; + + /** + * + * + *
+     * Output only. The modification time of the object metadata.
+     * Set initially to object creation time and then updated whenever any
+     * metadata of the object changes. This includes changes made by a requester,
+     * such as modifying custom metadata, as well as changes made by Cloud Storage
+     * on behalf of a requester, such as changing the storage class based on an
+     * Object Lifecycle Configuration.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00080000) != 0); + } + + /** + * + * + *
+     * Output only. The modification time of the object metadata.
+     * Set initially to object creation time and then updated whenever any
+     * metadata of the object changes. This includes changes made by a requester,
+     * such as modifying custom metadata, as well as changes made by Cloud Storage
+     * on behalf of a requester, such as changing the storage class based on an
+     * Object Lifecycle Configuration.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The modification time of the object metadata.
+     * Set initially to object creation time and then updated whenever any
+     * metadata of the object changes. This includes changes made by a requester,
+     * such as modifying custom metadata, as well as changes made by Cloud Storage
+     * on behalf of a requester, such as changing the storage class based on an
+     * Object Lifecycle Configuration.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + } else { + updateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00080000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the object metadata.
+     * Set initially to object creation time and then updated whenever any
+     * metadata of the object changes. This includes changes made by a requester,
+     * such as modifying custom metadata, as well as changes made by Cloud Storage
+     * on behalf of a requester, such as changing the storage class based on an
+     * Object Lifecycle Configuration.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00080000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the object metadata.
+     * Set initially to object creation time and then updated whenever any
+     * metadata of the object changes. This includes changes made by a requester,
+     * such as modifying custom metadata, as well as changes made by Cloud Storage
+     * on behalf of a requester, such as changing the storage class based on an
+     * Object Lifecycle Configuration.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (((bitField0_ & 0x00080000) != 0) + && updateTime_ != null + && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateTimeBuilder().mergeFrom(value); + } else { + updateTime_ = value; + } + } else { + updateTimeBuilder_.mergeFrom(value); + } + if (updateTime_ != null) { + bitField0_ |= 0x00080000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the object metadata.
+     * Set initially to object creation time and then updated whenever any
+     * metadata of the object changes. This includes changes made by a requester,
+     * such as modifying custom metadata, as well as changes made by Cloud Storage
+     * on behalf of a requester, such as changing the storage class based on an
+     * Object Lifecycle Configuration.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateTime() { + bitField0_ = (bitField0_ & ~0x00080000); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The modification time of the object metadata.
+     * Set initially to object creation time and then updated whenever any
+     * metadata of the object changes. This includes changes made by a requester,
+     * such as modifying custom metadata, as well as changes made by Cloud Storage
+     * on behalf of a requester, such as changing the storage class based on an
+     * Object Lifecycle Configuration.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + bitField0_ |= 0x00080000; + onChanged(); + return internalGetUpdateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The modification time of the object metadata.
+     * Set initially to object creation time and then updated whenever any
+     * metadata of the object changes. This includes changes made by a requester,
+     * such as modifying custom metadata, as well as changes made by Cloud Storage
+     * on behalf of a requester, such as changing the storage class based on an
+     * Object Lifecycle Configuration.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } + } + + /** + * + * + *
+     * Output only. The modification time of the object metadata.
+     * Set initially to object creation time and then updated whenever any
+     * metadata of the object changes. This includes changes made by a requester,
+     * such as modifying custom metadata, as well as changes made by Cloud Storage
+     * on behalf of a requester, such as changing the storage class based on an
+     * Object Lifecycle Configuration.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetUpdateTimeFieldBuilder() { + if (updateTimeBuilder_ == null) { + updateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpdateTime(), getParentForChildren(), isClean()); + updateTime_ = null; + } + return updateTimeBuilder_; + } + + private java.lang.Object kmsKey_ = ""; + + /** + * + * + *
+     * Optional. Cloud KMS Key used to encrypt this object, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * string kms_key = 18 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKey. + */ + public java.lang.String getKmsKey() { + java.lang.Object ref = kmsKey_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKey_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Cloud KMS Key used to encrypt this object, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * string kms_key = 18 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKey. + */ + public com.google.protobuf.ByteString getKmsKeyBytes() { + java.lang.Object ref = kmsKey_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKey_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Cloud KMS Key used to encrypt this object, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * string kms_key = 18 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The kmsKey to set. + * @return This builder for chaining. + */ + public Builder setKmsKey(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + kmsKey_ = value; + bitField0_ |= 0x00100000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Cloud KMS Key used to encrypt this object, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * string kms_key = 18 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearKmsKey() { + kmsKey_ = getDefaultInstance().getKmsKey(); + bitField0_ = (bitField0_ & ~0x00100000); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Cloud KMS Key used to encrypt this object, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * string kms_key = 18 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for kmsKey to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + kmsKey_ = value; + bitField0_ |= 0x00100000; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp updateStorageClassTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateStorageClassTimeBuilder_; + + /** + * + * + *
+     * Output only. The time at which the object's storage class was last changed.
+     * When the object is initially created, it is set to `time_created`.
+     * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateStorageClassTime field is set. + */ + public boolean hasUpdateStorageClassTime() { + return ((bitField0_ & 0x00200000) != 0); + } + + /** + * + * + *
+     * Output only. The time at which the object's storage class was last changed.
+     * When the object is initially created, it is set to `time_created`.
+     * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateStorageClassTime. + */ + public com.google.protobuf.Timestamp getUpdateStorageClassTime() { + if (updateStorageClassTimeBuilder_ == null) { + return updateStorageClassTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateStorageClassTime_; + } else { + return updateStorageClassTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The time at which the object's storage class was last changed.
+     * When the object is initially created, it is set to `time_created`.
+     * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateStorageClassTime(com.google.protobuf.Timestamp value) { + if (updateStorageClassTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateStorageClassTime_ = value; + } else { + updateStorageClassTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00200000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time at which the object's storage class was last changed.
+     * When the object is initially created, it is set to `time_created`.
+     * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateStorageClassTime( + com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateStorageClassTimeBuilder_ == null) { + updateStorageClassTime_ = builderForValue.build(); + } else { + updateStorageClassTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00200000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time at which the object's storage class was last changed.
+     * When the object is initially created, it is set to `time_created`.
+     * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateStorageClassTime(com.google.protobuf.Timestamp value) { + if (updateStorageClassTimeBuilder_ == null) { + if (((bitField0_ & 0x00200000) != 0) + && updateStorageClassTime_ != null + && updateStorageClassTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateStorageClassTimeBuilder().mergeFrom(value); + } else { + updateStorageClassTime_ = value; + } + } else { + updateStorageClassTimeBuilder_.mergeFrom(value); + } + if (updateStorageClassTime_ != null) { + bitField0_ |= 0x00200000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The time at which the object's storage class was last changed.
+     * When the object is initially created, it is set to `time_created`.
+     * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateStorageClassTime() { + bitField0_ = (bitField0_ & ~0x00200000); + updateStorageClassTime_ = null; + if (updateStorageClassTimeBuilder_ != null) { + updateStorageClassTimeBuilder_.dispose(); + updateStorageClassTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time at which the object's storage class was last changed.
+     * When the object is initially created, it is set to `time_created`.
+     * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateStorageClassTimeBuilder() { + bitField0_ |= 0x00200000; + onChanged(); + return internalGetUpdateStorageClassTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The time at which the object's storage class was last changed.
+     * When the object is initially created, it is set to `time_created`.
+     * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateStorageClassTimeOrBuilder() { + if (updateStorageClassTimeBuilder_ != null) { + return updateStorageClassTimeBuilder_.getMessageOrBuilder(); + } else { + return updateStorageClassTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateStorageClassTime_; + } + } + + /** + * + * + *
+     * Output only. The time at which the object's storage class was last changed.
+     * When the object is initially created, it is set to `time_created`.
+     * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetUpdateStorageClassTimeFieldBuilder() { + if (updateStorageClassTimeBuilder_ == null) { + updateStorageClassTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpdateStorageClassTime(), getParentForChildren(), isClean()); + updateStorageClassTime_ = null; + } + return updateStorageClassTimeBuilder_; + } + + private boolean temporaryHold_; + + /** + * + * + *
+     * Optional. Whether an object is under temporary hold. While this flag is set
+     * to true, the object is protected against deletion and overwrites.  A common
+     * use case of this flag is regulatory investigations where objects need to be
+     * retained while the investigation is ongoing. Note that unlike event-based
+     * hold, temporary hold does not impact retention expiration time of an
+     * object.
+     * 
+ * + * bool temporary_hold = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The temporaryHold. + */ + @java.lang.Override + public boolean getTemporaryHold() { + return temporaryHold_; + } + + /** + * + * + *
+     * Optional. Whether an object is under temporary hold. While this flag is set
+     * to true, the object is protected against deletion and overwrites.  A common
+     * use case of this flag is regulatory investigations where objects need to be
+     * retained while the investigation is ongoing. Note that unlike event-based
+     * hold, temporary hold does not impact retention expiration time of an
+     * object.
+     * 
+ * + * bool temporary_hold = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The temporaryHold to set. + * @return This builder for chaining. + */ + public Builder setTemporaryHold(boolean value) { + + temporaryHold_ = value; + bitField0_ |= 0x00400000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Whether an object is under temporary hold. While this flag is set
+     * to true, the object is protected against deletion and overwrites.  A common
+     * use case of this flag is regulatory investigations where objects need to be
+     * retained while the investigation is ongoing. Note that unlike event-based
+     * hold, temporary hold does not impact retention expiration time of an
+     * object.
+     * 
+ * + * bool temporary_hold = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearTemporaryHold() { + bitField0_ = (bitField0_ & ~0x00400000); + temporaryHold_ = false; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp retentionExpireTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + retentionExpireTimeBuilder_; + + /** + * + * + *
+     * Optional. A server-determined value that specifies the earliest time that
+     * the object's retention period expires. Note 1: This field is not provided
+     * for objects with an active event-based hold, since retention expiration is
+     * unknown until the hold is removed. Note 2: This value can be provided even
+     * when temporary hold is set (so that the user can reason about policy
+     * without having to first unset the temporary hold).
+     * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionExpireTime field is set. + */ + public boolean hasRetentionExpireTime() { + return ((bitField0_ & 0x00800000) != 0); + } + + /** + * + * + *
+     * Optional. A server-determined value that specifies the earliest time that
+     * the object's retention period expires. Note 1: This field is not provided
+     * for objects with an active event-based hold, since retention expiration is
+     * unknown until the hold is removed. Note 2: This value can be provided even
+     * when temporary hold is set (so that the user can reason about policy
+     * without having to first unset the temporary hold).
+     * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionExpireTime. + */ + public com.google.protobuf.Timestamp getRetentionExpireTime() { + if (retentionExpireTimeBuilder_ == null) { + return retentionExpireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : retentionExpireTime_; + } else { + return retentionExpireTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A server-determined value that specifies the earliest time that
+     * the object's retention period expires. Note 1: This field is not provided
+     * for objects with an active event-based hold, since retention expiration is
+     * unknown until the hold is removed. Note 2: This value can be provided even
+     * when temporary hold is set (so that the user can reason about policy
+     * without having to first unset the temporary hold).
+     * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetentionExpireTime(com.google.protobuf.Timestamp value) { + if (retentionExpireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + retentionExpireTime_ = value; + } else { + retentionExpireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00800000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A server-determined value that specifies the earliest time that
+     * the object's retention period expires. Note 1: This field is not provided
+     * for objects with an active event-based hold, since retention expiration is
+     * unknown until the hold is removed. Note 2: This value can be provided even
+     * when temporary hold is set (so that the user can reason about policy
+     * without having to first unset the temporary hold).
+     * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetentionExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (retentionExpireTimeBuilder_ == null) { + retentionExpireTime_ = builderForValue.build(); + } else { + retentionExpireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00800000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A server-determined value that specifies the earliest time that
+     * the object's retention period expires. Note 1: This field is not provided
+     * for objects with an active event-based hold, since retention expiration is
+     * unknown until the hold is removed. Note 2: This value can be provided even
+     * when temporary hold is set (so that the user can reason about policy
+     * without having to first unset the temporary hold).
+     * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeRetentionExpireTime(com.google.protobuf.Timestamp value) { + if (retentionExpireTimeBuilder_ == null) { + if (((bitField0_ & 0x00800000) != 0) + && retentionExpireTime_ != null + && retentionExpireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getRetentionExpireTimeBuilder().mergeFrom(value); + } else { + retentionExpireTime_ = value; + } + } else { + retentionExpireTimeBuilder_.mergeFrom(value); + } + if (retentionExpireTime_ != null) { + bitField0_ |= 0x00800000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A server-determined value that specifies the earliest time that
+     * the object's retention period expires. Note 1: This field is not provided
+     * for objects with an active event-based hold, since retention expiration is
+     * unknown until the hold is removed. Note 2: This value can be provided even
+     * when temporary hold is set (so that the user can reason about policy
+     * without having to first unset the temporary hold).
+     * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRetentionExpireTime() { + bitField0_ = (bitField0_ & ~0x00800000); + retentionExpireTime_ = null; + if (retentionExpireTimeBuilder_ != null) { + retentionExpireTimeBuilder_.dispose(); + retentionExpireTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A server-determined value that specifies the earliest time that
+     * the object's retention period expires. Note 1: This field is not provided
+     * for objects with an active event-based hold, since retention expiration is
+     * unknown until the hold is removed. Note 2: This value can be provided even
+     * when temporary hold is set (so that the user can reason about policy
+     * without having to first unset the temporary hold).
+     * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Timestamp.Builder getRetentionExpireTimeBuilder() { + bitField0_ |= 0x00800000; + onChanged(); + return internalGetRetentionExpireTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A server-determined value that specifies the earliest time that
+     * the object's retention period expires. Note 1: This field is not provided
+     * for objects with an active event-based hold, since retention expiration is
+     * unknown until the hold is removed. Note 2: This value can be provided even
+     * when temporary hold is set (so that the user can reason about policy
+     * without having to first unset the temporary hold).
+     * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.TimestampOrBuilder getRetentionExpireTimeOrBuilder() { + if (retentionExpireTimeBuilder_ != null) { + return retentionExpireTimeBuilder_.getMessageOrBuilder(); + } else { + return retentionExpireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : retentionExpireTime_; + } + } + + /** + * + * + *
+     * Optional. A server-determined value that specifies the earliest time that
+     * the object's retention period expires. Note 1: This field is not provided
+     * for objects with an active event-based hold, since retention expiration is
+     * unknown until the hold is removed. Note 2: This value can be provided even
+     * when temporary hold is set (so that the user can reason about policy
+     * without having to first unset the temporary hold).
+     * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetRetentionExpireTimeFieldBuilder() { + if (retentionExpireTimeBuilder_ == null) { + retentionExpireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getRetentionExpireTime(), getParentForChildren(), isClean()); + retentionExpireTime_ = null; + } + return retentionExpireTimeBuilder_; + } + + private com.google.protobuf.MapField metadata_; + + private com.google.protobuf.MapField internalGetMetadata() { + if (metadata_ == null) { + return com.google.protobuf.MapField.emptyMapField(MetadataDefaultEntryHolder.defaultEntry); + } + return metadata_; + } + + private com.google.protobuf.MapField + internalGetMutableMetadata() { + if (metadata_ == null) { + metadata_ = + com.google.protobuf.MapField.newMapField(MetadataDefaultEntryHolder.defaultEntry); + } + if (!metadata_.isMutable()) { + metadata_ = metadata_.copy(); + } + bitField0_ |= 0x01000000; + onChanged(); + return metadata_; + } + + public int getMetadataCount() { + return internalGetMetadata().getMap().size(); + } + + /** + * + * + *
+     * Optional. User-provided metadata, in key/value pairs.
+     * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsMetadata(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetMetadata().getMap().containsKey(key); + } + + /** Use {@link #getMetadataMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getMetadata() { + return getMetadataMap(); + } + + /** + * + * + *
+     * Optional. User-provided metadata, in key/value pairs.
+     * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getMetadataMap() { + return internalGetMetadata().getMap(); + } + + /** + * + * + *
+     * Optional. User-provided metadata, in key/value pairs.
+     * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getMetadataOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetMetadata().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+     * Optional. User-provided metadata, in key/value pairs.
+     * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getMetadataOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetMetadata().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearMetadata() { + bitField0_ = (bitField0_ & ~0x01000000); + internalGetMutableMetadata().getMutableMap().clear(); + return this; + } + + /** + * + * + *
+     * Optional. User-provided metadata, in key/value pairs.
+     * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeMetadata(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableMetadata().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableMetadata() { + bitField0_ |= 0x01000000; + return internalGetMutableMetadata().getMutableMap(); + } + + /** + * + * + *
+     * Optional. User-provided metadata, in key/value pairs.
+     * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putMetadata(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableMetadata().getMutableMap().put(key, value); + bitField0_ |= 0x01000000; + return this; + } + + /** + * + * + *
+     * Optional. User-provided metadata, in key/value pairs.
+     * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllMetadata(java.util.Map values) { + internalGetMutableMetadata().getMutableMap().putAll(values); + bitField0_ |= 0x01000000; + return this; + } + + private com.google.storage.v2.ObjectContexts contexts_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectContexts, + com.google.storage.v2.ObjectContexts.Builder, + com.google.storage.v2.ObjectContextsOrBuilder> + contextsBuilder_; + + /** + * + * + *
+     * Optional. User-defined or system-defined object contexts. Each object
+     * context is a key-payload pair, where the key provides the identification
+     * and the payload holds the associated value and additional metadata.
+     * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the contexts field is set. + */ + public boolean hasContexts() { + return ((bitField0_ & 0x02000000) != 0); + } + + /** + * + * + *
+     * Optional. User-defined or system-defined object contexts. Each object
+     * context is a key-payload pair, where the key provides the identification
+     * and the payload holds the associated value and additional metadata.
+     * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The contexts. + */ + public com.google.storage.v2.ObjectContexts getContexts() { + if (contextsBuilder_ == null) { + return contexts_ == null + ? com.google.storage.v2.ObjectContexts.getDefaultInstance() + : contexts_; + } else { + return contextsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. User-defined or system-defined object contexts. Each object
+     * context is a key-payload pair, where the key provides the identification
+     * and the payload holds the associated value and additional metadata.
+     * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setContexts(com.google.storage.v2.ObjectContexts value) { + if (contextsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + contexts_ = value; + } else { + contextsBuilder_.setMessage(value); + } + bitField0_ |= 0x02000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. User-defined or system-defined object contexts. Each object
+     * context is a key-payload pair, where the key provides the identification
+     * and the payload holds the associated value and additional metadata.
+     * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setContexts(com.google.storage.v2.ObjectContexts.Builder builderForValue) { + if (contextsBuilder_ == null) { + contexts_ = builderForValue.build(); + } else { + contextsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x02000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. User-defined or system-defined object contexts. Each object
+     * context is a key-payload pair, where the key provides the identification
+     * and the payload holds the associated value and additional metadata.
+     * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeContexts(com.google.storage.v2.ObjectContexts value) { + if (contextsBuilder_ == null) { + if (((bitField0_ & 0x02000000) != 0) + && contexts_ != null + && contexts_ != com.google.storage.v2.ObjectContexts.getDefaultInstance()) { + getContextsBuilder().mergeFrom(value); + } else { + contexts_ = value; + } + } else { + contextsBuilder_.mergeFrom(value); + } + if (contexts_ != null) { + bitField0_ |= 0x02000000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. User-defined or system-defined object contexts. Each object
+     * context is a key-payload pair, where the key provides the identification
+     * and the payload holds the associated value and additional metadata.
+     * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearContexts() { + bitField0_ = (bitField0_ & ~0x02000000); + contexts_ = null; + if (contextsBuilder_ != null) { + contextsBuilder_.dispose(); + contextsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. User-defined or system-defined object contexts. Each object
+     * context is a key-payload pair, where the key provides the identification
+     * and the payload holds the associated value and additional metadata.
+     * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectContexts.Builder getContextsBuilder() { + bitField0_ |= 0x02000000; + onChanged(); + return internalGetContextsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. User-defined or system-defined object contexts. Each object
+     * context is a key-payload pair, where the key provides the identification
+     * and the payload holds the associated value and additional metadata.
+     * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectContextsOrBuilder getContextsOrBuilder() { + if (contextsBuilder_ != null) { + return contextsBuilder_.getMessageOrBuilder(); + } else { + return contexts_ == null + ? com.google.storage.v2.ObjectContexts.getDefaultInstance() + : contexts_; + } + } + + /** + * + * + *
+     * Optional. User-defined or system-defined object contexts. Each object
+     * context is a key-payload pair, where the key provides the identification
+     * and the payload holds the associated value and additional metadata.
+     * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectContexts, + com.google.storage.v2.ObjectContexts.Builder, + com.google.storage.v2.ObjectContextsOrBuilder> + internalGetContextsFieldBuilder() { + if (contextsBuilder_ == null) { + contextsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectContexts, + com.google.storage.v2.ObjectContexts.Builder, + com.google.storage.v2.ObjectContextsOrBuilder>( + getContexts(), getParentForChildren(), isClean()); + contexts_ = null; + } + return contextsBuilder_; + } + + private boolean eventBasedHold_; + + /** + * + * + *
+     * Whether an object is under event-based hold.
+     * An event-based hold is a way to force the retention of an object until
+     * after some event occurs. Once the hold is released by explicitly setting
+     * this field to `false`, the object becomes subject to any bucket-level
+     * retention policy, except that the retention duration is calculated
+     * from the time the event based hold was lifted, rather than the time the
+     * object was created.
+     *
+     * In a `WriteObject` request, not setting this field implies that the value
+     * should be taken from the parent bucket's `default_event_based_hold` field.
+     * In a response, this field is always set to `true` or `false`.
+     * 
+ * + * optional bool event_based_hold = 23; + * + * @return Whether the eventBasedHold field is set. + */ + @java.lang.Override + public boolean hasEventBasedHold() { + return ((bitField0_ & 0x04000000) != 0); + } + + /** + * + * + *
+     * Whether an object is under event-based hold.
+     * An event-based hold is a way to force the retention of an object until
+     * after some event occurs. Once the hold is released by explicitly setting
+     * this field to `false`, the object becomes subject to any bucket-level
+     * retention policy, except that the retention duration is calculated
+     * from the time the event based hold was lifted, rather than the time the
+     * object was created.
+     *
+     * In a `WriteObject` request, not setting this field implies that the value
+     * should be taken from the parent bucket's `default_event_based_hold` field.
+     * In a response, this field is always set to `true` or `false`.
+     * 
+ * + * optional bool event_based_hold = 23; + * + * @return The eventBasedHold. + */ + @java.lang.Override + public boolean getEventBasedHold() { + return eventBasedHold_; + } + + /** + * + * + *
+     * Whether an object is under event-based hold.
+     * An event-based hold is a way to force the retention of an object until
+     * after some event occurs. Once the hold is released by explicitly setting
+     * this field to `false`, the object becomes subject to any bucket-level
+     * retention policy, except that the retention duration is calculated
+     * from the time the event based hold was lifted, rather than the time the
+     * object was created.
+     *
+     * In a `WriteObject` request, not setting this field implies that the value
+     * should be taken from the parent bucket's `default_event_based_hold` field.
+     * In a response, this field is always set to `true` or `false`.
+     * 
+ * + * optional bool event_based_hold = 23; + * + * @param value The eventBasedHold to set. + * @return This builder for chaining. + */ + public Builder setEventBasedHold(boolean value) { + + eventBasedHold_ = value; + bitField0_ |= 0x04000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Whether an object is under event-based hold.
+     * An event-based hold is a way to force the retention of an object until
+     * after some event occurs. Once the hold is released by explicitly setting
+     * this field to `false`, the object becomes subject to any bucket-level
+     * retention policy, except that the retention duration is calculated
+     * from the time the event based hold was lifted, rather than the time the
+     * object was created.
+     *
+     * In a `WriteObject` request, not setting this field implies that the value
+     * should be taken from the parent bucket's `default_event_based_hold` field.
+     * In a response, this field is always set to `true` or `false`.
+     * 
+ * + * optional bool event_based_hold = 23; + * + * @return This builder for chaining. + */ + public Builder clearEventBasedHold() { + bitField0_ = (bitField0_ & ~0x04000000); + eventBasedHold_ = false; + onChanged(); + return this; + } + + private com.google.storage.v2.Owner owner_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Owner, + com.google.storage.v2.Owner.Builder, + com.google.storage.v2.OwnerOrBuilder> + ownerBuilder_; + + /** + * + * + *
+     * Output only. The owner of the object. This is always the uploader of the
+     * object.
+     * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the owner field is set. + */ + public boolean hasOwner() { + return ((bitField0_ & 0x08000000) != 0); + } + + /** + * + * + *
+     * Output only. The owner of the object. This is always the uploader of the
+     * object.
+     * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The owner. + */ + public com.google.storage.v2.Owner getOwner() { + if (ownerBuilder_ == null) { + return owner_ == null ? com.google.storage.v2.Owner.getDefaultInstance() : owner_; + } else { + return ownerBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The owner of the object. This is always the uploader of the
+     * object.
+     * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setOwner(com.google.storage.v2.Owner value) { + if (ownerBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + owner_ = value; + } else { + ownerBuilder_.setMessage(value); + } + bitField0_ |= 0x08000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The owner of the object. This is always the uploader of the
+     * object.
+     * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setOwner(com.google.storage.v2.Owner.Builder builderForValue) { + if (ownerBuilder_ == null) { + owner_ = builderForValue.build(); + } else { + ownerBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x08000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The owner of the object. This is always the uploader of the
+     * object.
+     * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeOwner(com.google.storage.v2.Owner value) { + if (ownerBuilder_ == null) { + if (((bitField0_ & 0x08000000) != 0) + && owner_ != null + && owner_ != com.google.storage.v2.Owner.getDefaultInstance()) { + getOwnerBuilder().mergeFrom(value); + } else { + owner_ = value; + } + } else { + ownerBuilder_.mergeFrom(value); + } + if (owner_ != null) { + bitField0_ |= 0x08000000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The owner of the object. This is always the uploader of the
+     * object.
+     * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearOwner() { + bitField0_ = (bitField0_ & ~0x08000000); + owner_ = null; + if (ownerBuilder_ != null) { + ownerBuilder_.dispose(); + ownerBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The owner of the object. This is always the uploader of the
+     * object.
+     * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.v2.Owner.Builder getOwnerBuilder() { + bitField0_ |= 0x08000000; + onChanged(); + return internalGetOwnerFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The owner of the object. This is always the uploader of the
+     * object.
+     * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.storage.v2.OwnerOrBuilder getOwnerOrBuilder() { + if (ownerBuilder_ != null) { + return ownerBuilder_.getMessageOrBuilder(); + } else { + return owner_ == null ? com.google.storage.v2.Owner.getDefaultInstance() : owner_; + } + } + + /** + * + * + *
+     * Output only. The owner of the object. This is always the uploader of the
+     * object.
+     * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Owner, + com.google.storage.v2.Owner.Builder, + com.google.storage.v2.OwnerOrBuilder> + internalGetOwnerFieldBuilder() { + if (ownerBuilder_ == null) { + ownerBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Owner, + com.google.storage.v2.Owner.Builder, + com.google.storage.v2.OwnerOrBuilder>( + getOwner(), getParentForChildren(), isClean()); + owner_ = null; + } + return ownerBuilder_; + } + + private com.google.storage.v2.CustomerEncryption customerEncryption_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CustomerEncryption, + com.google.storage.v2.CustomerEncryption.Builder, + com.google.storage.v2.CustomerEncryptionOrBuilder> + customerEncryptionBuilder_; + + /** + * + * + *
+     * Optional. Metadata of customer-supplied encryption key, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customerEncryption field is set. + */ + public boolean hasCustomerEncryption() { + return ((bitField0_ & 0x10000000) != 0); + } + + /** + * + * + *
+     * Optional. Metadata of customer-supplied encryption key, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customerEncryption. + */ + public com.google.storage.v2.CustomerEncryption getCustomerEncryption() { + if (customerEncryptionBuilder_ == null) { + return customerEncryption_ == null + ? com.google.storage.v2.CustomerEncryption.getDefaultInstance() + : customerEncryption_; + } else { + return customerEncryptionBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Metadata of customer-supplied encryption key, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCustomerEncryption(com.google.storage.v2.CustomerEncryption value) { + if (customerEncryptionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + customerEncryption_ = value; + } else { + customerEncryptionBuilder_.setMessage(value); + } + bitField0_ |= 0x10000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Metadata of customer-supplied encryption key, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCustomerEncryption( + com.google.storage.v2.CustomerEncryption.Builder builderForValue) { + if (customerEncryptionBuilder_ == null) { + customerEncryption_ = builderForValue.build(); + } else { + customerEncryptionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x10000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Metadata of customer-supplied encryption key, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCustomerEncryption(com.google.storage.v2.CustomerEncryption value) { + if (customerEncryptionBuilder_ == null) { + if (((bitField0_ & 0x10000000) != 0) + && customerEncryption_ != null + && customerEncryption_ + != com.google.storage.v2.CustomerEncryption.getDefaultInstance()) { + getCustomerEncryptionBuilder().mergeFrom(value); + } else { + customerEncryption_ = value; + } + } else { + customerEncryptionBuilder_.mergeFrom(value); + } + if (customerEncryption_ != null) { + bitField0_ |= 0x10000000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Metadata of customer-supplied encryption key, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCustomerEncryption() { + bitField0_ = (bitField0_ & ~0x10000000); + customerEncryption_ = null; + if (customerEncryptionBuilder_ != null) { + customerEncryptionBuilder_.dispose(); + customerEncryptionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Metadata of customer-supplied encryption key, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CustomerEncryption.Builder getCustomerEncryptionBuilder() { + bitField0_ |= 0x10000000; + onChanged(); + return internalGetCustomerEncryptionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Metadata of customer-supplied encryption key, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CustomerEncryptionOrBuilder getCustomerEncryptionOrBuilder() { + if (customerEncryptionBuilder_ != null) { + return customerEncryptionBuilder_.getMessageOrBuilder(); + } else { + return customerEncryption_ == null + ? com.google.storage.v2.CustomerEncryption.getDefaultInstance() + : customerEncryption_; + } + } + + /** + * + * + *
+     * Optional. Metadata of customer-supplied encryption key, if the object is
+     * encrypted by such a key.
+     * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CustomerEncryption, + com.google.storage.v2.CustomerEncryption.Builder, + com.google.storage.v2.CustomerEncryptionOrBuilder> + internalGetCustomerEncryptionFieldBuilder() { + if (customerEncryptionBuilder_ == null) { + customerEncryptionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CustomerEncryption, + com.google.storage.v2.CustomerEncryption.Builder, + com.google.storage.v2.CustomerEncryptionOrBuilder>( + getCustomerEncryption(), getParentForChildren(), isClean()); + customerEncryption_ = null; + } + return customerEncryptionBuilder_; + } + + private com.google.protobuf.Timestamp customTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + customTimeBuilder_; + + /** + * + * + *
+     * Optional. A user-specified timestamp set on an object.
+     * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customTime field is set. + */ + public boolean hasCustomTime() { + return ((bitField0_ & 0x20000000) != 0); + } + + /** + * + * + *
+     * Optional. A user-specified timestamp set on an object.
+     * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customTime. + */ + public com.google.protobuf.Timestamp getCustomTime() { + if (customTimeBuilder_ == null) { + return customTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : customTime_; + } else { + return customTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A user-specified timestamp set on an object.
+     * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCustomTime(com.google.protobuf.Timestamp value) { + if (customTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + customTime_ = value; + } else { + customTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x20000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A user-specified timestamp set on an object.
+     * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCustomTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (customTimeBuilder_ == null) { + customTime_ = builderForValue.build(); + } else { + customTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x20000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A user-specified timestamp set on an object.
+     * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCustomTime(com.google.protobuf.Timestamp value) { + if (customTimeBuilder_ == null) { + if (((bitField0_ & 0x20000000) != 0) + && customTime_ != null + && customTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCustomTimeBuilder().mergeFrom(value); + } else { + customTime_ = value; + } + } else { + customTimeBuilder_.mergeFrom(value); + } + if (customTime_ != null) { + bitField0_ |= 0x20000000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A user-specified timestamp set on an object.
+     * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCustomTime() { + bitField0_ = (bitField0_ & ~0x20000000); + customTime_ = null; + if (customTimeBuilder_ != null) { + customTimeBuilder_.dispose(); + customTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A user-specified timestamp set on an object.
+     * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Timestamp.Builder getCustomTimeBuilder() { + bitField0_ |= 0x20000000; + onChanged(); + return internalGetCustomTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A user-specified timestamp set on an object.
+     * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCustomTimeOrBuilder() { + if (customTimeBuilder_ != null) { + return customTimeBuilder_.getMessageOrBuilder(); + } else { + return customTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : customTime_; + } + } + + /** + * + * + *
+     * Optional. A user-specified timestamp set on an object.
+     * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCustomTimeFieldBuilder() { + if (customTimeBuilder_ == null) { + customTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCustomTime(), getParentForChildren(), isClean()); + customTime_ = null; + } + return customTimeBuilder_; + } + + private com.google.protobuf.Timestamp softDeleteTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + softDeleteTimeBuilder_; + + /** + * + * + *
+     * Output only. This is the time when the object became soft-deleted.
+     *
+     * Soft-deleted objects are only accessible if a soft_delete_policy is
+     * enabled. Also see `hard_delete_time`.
+     * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the softDeleteTime field is set. + */ + public boolean hasSoftDeleteTime() { + return ((bitField0_ & 0x40000000) != 0); + } + + /** + * + * + *
+     * Output only. This is the time when the object became soft-deleted.
+     *
+     * Soft-deleted objects are only accessible if a soft_delete_policy is
+     * enabled. Also see `hard_delete_time`.
+     * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The softDeleteTime. + */ + public com.google.protobuf.Timestamp getSoftDeleteTime() { + if (softDeleteTimeBuilder_ == null) { + return softDeleteTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : softDeleteTime_; + } else { + return softDeleteTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. This is the time when the object became soft-deleted.
+     *
+     * Soft-deleted objects are only accessible if a soft_delete_policy is
+     * enabled. Also see `hard_delete_time`.
+     * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setSoftDeleteTime(com.google.protobuf.Timestamp value) { + if (softDeleteTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + softDeleteTime_ = value; + } else { + softDeleteTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x40000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. This is the time when the object became soft-deleted.
+     *
+     * Soft-deleted objects are only accessible if a soft_delete_policy is
+     * enabled. Also see `hard_delete_time`.
+     * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setSoftDeleteTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (softDeleteTimeBuilder_ == null) { + softDeleteTime_ = builderForValue.build(); + } else { + softDeleteTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x40000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. This is the time when the object became soft-deleted.
+     *
+     * Soft-deleted objects are only accessible if a soft_delete_policy is
+     * enabled. Also see `hard_delete_time`.
+     * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeSoftDeleteTime(com.google.protobuf.Timestamp value) { + if (softDeleteTimeBuilder_ == null) { + if (((bitField0_ & 0x40000000) != 0) + && softDeleteTime_ != null + && softDeleteTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getSoftDeleteTimeBuilder().mergeFrom(value); + } else { + softDeleteTime_ = value; + } + } else { + softDeleteTimeBuilder_.mergeFrom(value); + } + if (softDeleteTime_ != null) { + bitField0_ |= 0x40000000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. This is the time when the object became soft-deleted.
+     *
+     * Soft-deleted objects are only accessible if a soft_delete_policy is
+     * enabled. Also see `hard_delete_time`.
+     * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearSoftDeleteTime() { + bitField0_ = (bitField0_ & ~0x40000000); + softDeleteTime_ = null; + if (softDeleteTimeBuilder_ != null) { + softDeleteTimeBuilder_.dispose(); + softDeleteTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. This is the time when the object became soft-deleted.
+     *
+     * Soft-deleted objects are only accessible if a soft_delete_policy is
+     * enabled. Also see `hard_delete_time`.
+     * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getSoftDeleteTimeBuilder() { + bitField0_ |= 0x40000000; + onChanged(); + return internalGetSoftDeleteTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. This is the time when the object became soft-deleted.
+     *
+     * Soft-deleted objects are only accessible if a soft_delete_policy is
+     * enabled. Also see `hard_delete_time`.
+     * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getSoftDeleteTimeOrBuilder() { + if (softDeleteTimeBuilder_ != null) { + return softDeleteTimeBuilder_.getMessageOrBuilder(); + } else { + return softDeleteTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : softDeleteTime_; + } + } + + /** + * + * + *
+     * Output only. This is the time when the object became soft-deleted.
+     *
+     * Soft-deleted objects are only accessible if a soft_delete_policy is
+     * enabled. Also see `hard_delete_time`.
+     * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetSoftDeleteTimeFieldBuilder() { + if (softDeleteTimeBuilder_ == null) { + softDeleteTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getSoftDeleteTime(), getParentForChildren(), isClean()); + softDeleteTime_ = null; + } + return softDeleteTimeBuilder_; + } + + private com.google.protobuf.Timestamp hardDeleteTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + hardDeleteTimeBuilder_; + + /** + * + * + *
+     * Output only. The time when the object is permanently deleted.
+     *
+     * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+     * Otherwise, the object is not accessible.
+     * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the hardDeleteTime field is set. + */ + public boolean hasHardDeleteTime() { + return ((bitField0_ & 0x80000000) != 0); + } + + /** + * + * + *
+     * Output only. The time when the object is permanently deleted.
+     *
+     * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+     * Otherwise, the object is not accessible.
+     * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The hardDeleteTime. + */ + public com.google.protobuf.Timestamp getHardDeleteTime() { + if (hardDeleteTimeBuilder_ == null) { + return hardDeleteTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : hardDeleteTime_; + } else { + return hardDeleteTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The time when the object is permanently deleted.
+     *
+     * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+     * Otherwise, the object is not accessible.
+     * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setHardDeleteTime(com.google.protobuf.Timestamp value) { + if (hardDeleteTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hardDeleteTime_ = value; + } else { + hardDeleteTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x80000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time when the object is permanently deleted.
+     *
+     * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+     * Otherwise, the object is not accessible.
+     * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setHardDeleteTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (hardDeleteTimeBuilder_ == null) { + hardDeleteTime_ = builderForValue.build(); + } else { + hardDeleteTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x80000000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time when the object is permanently deleted.
+     *
+     * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+     * Otherwise, the object is not accessible.
+     * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeHardDeleteTime(com.google.protobuf.Timestamp value) { + if (hardDeleteTimeBuilder_ == null) { + if (((bitField0_ & 0x80000000) != 0) + && hardDeleteTime_ != null + && hardDeleteTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getHardDeleteTimeBuilder().mergeFrom(value); + } else { + hardDeleteTime_ = value; + } + } else { + hardDeleteTimeBuilder_.mergeFrom(value); + } + if (hardDeleteTime_ != null) { + bitField0_ |= 0x80000000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The time when the object is permanently deleted.
+     *
+     * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+     * Otherwise, the object is not accessible.
+     * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearHardDeleteTime() { + bitField0_ = (bitField0_ & ~0x80000000); + hardDeleteTime_ = null; + if (hardDeleteTimeBuilder_ != null) { + hardDeleteTimeBuilder_.dispose(); + hardDeleteTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time when the object is permanently deleted.
+     *
+     * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+     * Otherwise, the object is not accessible.
+     * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getHardDeleteTimeBuilder() { + bitField0_ |= 0x80000000; + onChanged(); + return internalGetHardDeleteTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The time when the object is permanently deleted.
+     *
+     * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+     * Otherwise, the object is not accessible.
+     * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getHardDeleteTimeOrBuilder() { + if (hardDeleteTimeBuilder_ != null) { + return hardDeleteTimeBuilder_.getMessageOrBuilder(); + } else { + return hardDeleteTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : hardDeleteTime_; + } + } + + /** + * + * + *
+     * Output only. The time when the object is permanently deleted.
+     *
+     * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+     * Otherwise, the object is not accessible.
+     * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetHardDeleteTimeFieldBuilder() { + if (hardDeleteTimeBuilder_ == null) { + hardDeleteTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getHardDeleteTime(), getParentForChildren(), isClean()); + hardDeleteTime_ = null; + } + return hardDeleteTimeBuilder_; + } + + private com.google.storage.v2.Object.Retention retention_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object.Retention, + com.google.storage.v2.Object.Retention.Builder, + com.google.storage.v2.Object.RetentionOrBuilder> + retentionBuilder_; + + /** + * + * + *
+     * Optional. Retention configuration of this object.
+     * Might only be configured if the bucket has object retention enabled.
+     * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retention field is set. + */ + public boolean hasRetention() { + return ((bitField1_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Optional. Retention configuration of this object.
+     * Might only be configured if the bucket has object retention enabled.
+     * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retention. + */ + public com.google.storage.v2.Object.Retention getRetention() { + if (retentionBuilder_ == null) { + return retention_ == null + ? com.google.storage.v2.Object.Retention.getDefaultInstance() + : retention_; + } else { + return retentionBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Retention configuration of this object.
+     * Might only be configured if the bucket has object retention enabled.
+     * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetention(com.google.storage.v2.Object.Retention value) { + if (retentionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + retention_ = value; + } else { + retentionBuilder_.setMessage(value); + } + bitField1_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Retention configuration of this object.
+     * Might only be configured if the bucket has object retention enabled.
+     * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetention(com.google.storage.v2.Object.Retention.Builder builderForValue) { + if (retentionBuilder_ == null) { + retention_ = builderForValue.build(); + } else { + retentionBuilder_.setMessage(builderForValue.build()); + } + bitField1_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Retention configuration of this object.
+     * Might only be configured if the bucket has object retention enabled.
+     * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeRetention(com.google.storage.v2.Object.Retention value) { + if (retentionBuilder_ == null) { + if (((bitField1_ & 0x00000001) != 0) + && retention_ != null + && retention_ != com.google.storage.v2.Object.Retention.getDefaultInstance()) { + getRetentionBuilder().mergeFrom(value); + } else { + retention_ = value; + } + } else { + retentionBuilder_.mergeFrom(value); + } + if (retention_ != null) { + bitField1_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Retention configuration of this object.
+     * Might only be configured if the bucket has object retention enabled.
+     * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRetention() { + bitField1_ = (bitField1_ & ~0x00000001); + retention_ = null; + if (retentionBuilder_ != null) { + retentionBuilder_.dispose(); + retentionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Retention configuration of this object.
+     * Might only be configured if the bucket has object retention enabled.
+     * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Object.Retention.Builder getRetentionBuilder() { + bitField1_ |= 0x00000001; + onChanged(); + return internalGetRetentionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Retention configuration of this object.
+     * Might only be configured if the bucket has object retention enabled.
+     * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Object.RetentionOrBuilder getRetentionOrBuilder() { + if (retentionBuilder_ != null) { + return retentionBuilder_.getMessageOrBuilder(); + } else { + return retention_ == null + ? com.google.storage.v2.Object.Retention.getDefaultInstance() + : retention_; + } + } + + /** + * + * + *
+     * Optional. Retention configuration of this object.
+     * Might only be configured if the bucket has object retention enabled.
+     * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object.Retention, + com.google.storage.v2.Object.Retention.Builder, + com.google.storage.v2.Object.RetentionOrBuilder> + internalGetRetentionFieldBuilder() { + if (retentionBuilder_ == null) { + retentionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object.Retention, + com.google.storage.v2.Object.Retention.Builder, + com.google.storage.v2.Object.RetentionOrBuilder>( + getRetention(), getParentForChildren(), isClean()); + retention_ = null; + } + return retentionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Object) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Object) + private static final com.google.storage.v2.Object DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Object(); + } + + public static com.google.storage.v2.Object getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Object parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + 
public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Object getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectAccessControl.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectAccessControl.java new file mode 100644 index 000000000000..6e64b37703d0 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectAccessControl.java @@ -0,0 +1,2410 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * An access-control entry.
+ * 
+ * + * Protobuf type {@code google.storage.v2.ObjectAccessControl} + */ +@com.google.protobuf.Generated +public final class ObjectAccessControl extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ObjectAccessControl) + ObjectAccessControlOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ObjectAccessControl"); + } + + // Use ObjectAccessControl.newBuilder() to construct. + private ObjectAccessControl(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ObjectAccessControl() { + role_ = ""; + id_ = ""; + entity_ = ""; + entityAlt_ = ""; + entityId_ = ""; + etag_ = ""; + email_ = ""; + domain_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectAccessControl_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectAccessControl_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ObjectAccessControl.class, + com.google.storage.v2.ObjectAccessControl.Builder.class); + } + + private int bitField0_; + public static final int ROLE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object role_ = ""; + + /** + * + * + *
+   * Optional. The access permission for the entity. One of the following
+   * values:
+   * * `READER`
+   * * `WRITER`
+   * * `OWNER`
+   * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The role. + */ + @java.lang.Override + public java.lang.String getRole() { + java.lang.Object ref = role_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + role_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The access permission for the entity. One of the following
+   * values:
+   * * `READER`
+   * * `WRITER`
+   * * `OWNER`
+   * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for role. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRoleBytes() { + java.lang.Object ref = role_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + role_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object id_ = ""; + + /** + * + * + *
+   * Optional. The ID of the access-control entry.
+   * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The id. + */ + @java.lang.Override + public java.lang.String getId() { + java.lang.Object ref = id_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + id_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The ID of the access-control entry.
+   * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for id. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIdBytes() { + java.lang.Object ref = id_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + id_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENTITY_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object entity_ = ""; + + /** + * + * + *
+   * Optional. The entity holding the permission, in one of the following forms:
+   * * `user-{userid}`
+   * * `user-{email}`
+   * * `group-{groupid}`
+   * * `group-{email}`
+   * * `domain-{domain}`
+   * * `project-{team}-{projectnumber}`
+   * * `project-{team}-{projectid}`
+   * * `allUsers`
+   * * `allAuthenticatedUsers`
+   * Examples:
+   * * The user `liz@example.com` would be `user-liz@example.com`.
+   * * The group `example@googlegroups.com` would be
+   * `group-example@googlegroups.com`.
+   * * All members of the Google Apps for Business domain `example.com` would be
+   * `domain-example.com`.
+   * For project entities, `project-{team}-{projectnumber}` format is
+   * returned in the response.
+   * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entity. + */ + @java.lang.Override + public java.lang.String getEntity() { + java.lang.Object ref = entity_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entity_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The entity holding the permission, in one of the following forms:
+   * * `user-{userid}`
+   * * `user-{email}`
+   * * `group-{groupid}`
+   * * `group-{email}`
+   * * `domain-{domain}`
+   * * `project-{team}-{projectnumber}`
+   * * `project-{team}-{projectid}`
+   * * `allUsers`
+   * * `allAuthenticatedUsers`
+   * Examples:
+   * * The user `liz@example.com` would be `user-liz@example.com`.
+   * * The group `example@googlegroups.com` would be
+   * `group-example@googlegroups.com`.
+   * * All members of the Google Apps for Business domain `example.com` would be
+   * `domain-example.com`.
+   * For project entities, `project-{team}-{projectnumber}` format is
+   * returned in the response.
+   * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entity. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEntityBytes() { + java.lang.Object ref = entity_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entity_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENTITY_ALT_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private volatile java.lang.Object entityAlt_ = ""; + + /** + * + * + *
+   * Output only. The alternative entity format, if exists. For project
+   * entities, `project-{team}-{projectid}` format is returned in the response.
+   * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The entityAlt. + */ + @java.lang.Override + public java.lang.String getEntityAlt() { + java.lang.Object ref = entityAlt_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entityAlt_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The alternative entity format, if exists. For project
+   * entities, `project-{team}-{projectid}` format is returned in the response.
+   * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for entityAlt. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEntityAltBytes() { + java.lang.Object ref = entityAlt_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entityAlt_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENTITY_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object entityId_ = ""; + + /** + * + * + *
+   * Optional. The ID for the entity, if any.
+   * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entityId. + */ + @java.lang.Override + public java.lang.String getEntityId() { + java.lang.Object ref = entityId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entityId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The ID for the entity, if any.
+   * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entityId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEntityIdBytes() { + java.lang.Object ref = entityId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entityId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ETAG_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private volatile java.lang.Object etag_ = ""; + + /** + * + * + *
+   * Optional. The etag of the ObjectAccessControl.
+   * If included in the metadata of an update or delete request message, the
+   * operation is only performed if the etag matches that of the live
+   * object's ObjectAccessControl.
+   * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The etag. + */ + @java.lang.Override + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The etag of the ObjectAccessControl.
+   * If included in the metadata of an update or delete request message, the
+   * operation is only performed if the etag matches that of the live
+   * object's ObjectAccessControl.
+   * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for etag. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EMAIL_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object email_ = ""; + + /** + * + * + *
+   * Optional. The email address associated with the entity, if any.
+   * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The email. + */ + @java.lang.Override + public java.lang.String getEmail() { + java.lang.Object ref = email_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + email_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The email address associated with the entity, if any.
+   * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for email. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEmailBytes() { + java.lang.Object ref = email_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + email_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DOMAIN_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object domain_ = ""; + + /** + * + * + *
+   * Optional. The domain associated with the entity, if any.
+   * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The domain. + */ + @java.lang.Override + public java.lang.String getDomain() { + java.lang.Object ref = domain_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + domain_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The domain associated with the entity, if any.
+   * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for domain. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDomainBytes() { + java.lang.Object ref = domain_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + domain_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_TEAM_FIELD_NUMBER = 7; + private com.google.storage.v2.ProjectTeam projectTeam_; + + /** + * + * + *
+   * Optional. The project team associated with the entity, if any.
+   * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the projectTeam field is set. + */ + @java.lang.Override + public boolean hasProjectTeam() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. The project team associated with the entity, if any.
+   * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The projectTeam. + */ + @java.lang.Override + public com.google.storage.v2.ProjectTeam getProjectTeam() { + return projectTeam_ == null + ? com.google.storage.v2.ProjectTeam.getDefaultInstance() + : projectTeam_; + } + + /** + * + * + *
+   * Optional. The project team associated with the entity, if any.
+   * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ProjectTeamOrBuilder getProjectTeamOrBuilder() { + return projectTeam_ == null + ? com.google.storage.v2.ProjectTeam.getDefaultInstance() + : projectTeam_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(role_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, role_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(id_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, id_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entity_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, entity_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entityId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, entityId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(email_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, email_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(domain_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, domain_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(7, getProjectTeam()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 8, etag_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entityAlt_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 9, entityAlt_); + } + 
getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(role_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, role_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(id_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, id_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entity_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, entity_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entityId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, entityId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(email_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, email_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(domain_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, domain_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getProjectTeam()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(8, etag_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entityAlt_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(9, entityAlt_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ObjectAccessControl)) { + return super.equals(obj); + } + com.google.storage.v2.ObjectAccessControl other = + (com.google.storage.v2.ObjectAccessControl) obj; + + if (!getRole().equals(other.getRole())) return false; + if 
(!getId().equals(other.getId())) return false; + if (!getEntity().equals(other.getEntity())) return false; + if (!getEntityAlt().equals(other.getEntityAlt())) return false; + if (!getEntityId().equals(other.getEntityId())) return false; + if (!getEtag().equals(other.getEtag())) return false; + if (!getEmail().equals(other.getEmail())) return false; + if (!getDomain().equals(other.getDomain())) return false; + if (hasProjectTeam() != other.hasProjectTeam()) return false; + if (hasProjectTeam()) { + if (!getProjectTeam().equals(other.getProjectTeam())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ROLE_FIELD_NUMBER; + hash = (53 * hash) + getRole().hashCode(); + hash = (37 * hash) + ID_FIELD_NUMBER; + hash = (53 * hash) + getId().hashCode(); + hash = (37 * hash) + ENTITY_FIELD_NUMBER; + hash = (53 * hash) + getEntity().hashCode(); + hash = (37 * hash) + ENTITY_ALT_FIELD_NUMBER; + hash = (53 * hash) + getEntityAlt().hashCode(); + hash = (37 * hash) + ENTITY_ID_FIELD_NUMBER; + hash = (53 * hash) + getEntityId().hashCode(); + hash = (37 * hash) + ETAG_FIELD_NUMBER; + hash = (53 * hash) + getEtag().hashCode(); + hash = (37 * hash) + EMAIL_FIELD_NUMBER; + hash = (53 * hash) + getEmail().hashCode(); + hash = (37 * hash) + DOMAIN_FIELD_NUMBER; + hash = (53 * hash) + getDomain().hashCode(); + if (hasProjectTeam()) { + hash = (37 * hash) + PROJECT_TEAM_FIELD_NUMBER; + hash = (53 * hash) + getProjectTeam().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ObjectAccessControl parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + 
+ public static com.google.storage.v2.ObjectAccessControl parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectAccessControl parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectAccessControl parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectAccessControl parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectAccessControl parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectAccessControl parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ObjectAccessControl parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ObjectAccessControl parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static 
com.google.storage.v2.ObjectAccessControl parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ObjectAccessControl parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ObjectAccessControl parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ObjectAccessControl prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * An access-control entry.
+   * 
+ * + * Protobuf type {@code google.storage.v2.ObjectAccessControl} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ObjectAccessControl) + com.google.storage.v2.ObjectAccessControlOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectAccessControl_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectAccessControl_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ObjectAccessControl.class, + com.google.storage.v2.ObjectAccessControl.Builder.class); + } + + // Construct using com.google.storage.v2.ObjectAccessControl.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetProjectTeamFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + role_ = ""; + id_ = ""; + entity_ = ""; + entityAlt_ = ""; + entityId_ = ""; + etag_ = ""; + email_ = ""; + domain_ = ""; + projectTeam_ = null; + if (projectTeamBuilder_ != null) { + projectTeamBuilder_.dispose(); + projectTeamBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectAccessControl_descriptor; + } + + @java.lang.Override + public 
com.google.storage.v2.ObjectAccessControl getDefaultInstanceForType() { + return com.google.storage.v2.ObjectAccessControl.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ObjectAccessControl build() { + com.google.storage.v2.ObjectAccessControl result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ObjectAccessControl buildPartial() { + com.google.storage.v2.ObjectAccessControl result = + new com.google.storage.v2.ObjectAccessControl(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ObjectAccessControl result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.role_ = role_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.id_ = id_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.entity_ = entity_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.entityAlt_ = entityAlt_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.entityId_ = entityId_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.etag_ = etag_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.email_ = email_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.domain_ = domain_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000100) != 0)) { + result.projectTeam_ = + projectTeamBuilder_ == null ? 
projectTeam_ : projectTeamBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ObjectAccessControl) { + return mergeFrom((com.google.storage.v2.ObjectAccessControl) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ObjectAccessControl other) { + if (other == com.google.storage.v2.ObjectAccessControl.getDefaultInstance()) return this; + if (!other.getRole().isEmpty()) { + role_ = other.role_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getId().isEmpty()) { + id_ = other.id_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getEntity().isEmpty()) { + entity_ = other.entity_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getEntityAlt().isEmpty()) { + entityAlt_ = other.entityAlt_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (!other.getEntityId().isEmpty()) { + entityId_ = other.entityId_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (!other.getEtag().isEmpty()) { + etag_ = other.etag_; + bitField0_ |= 0x00000020; + onChanged(); + } + if (!other.getEmail().isEmpty()) { + email_ = other.email_; + bitField0_ |= 0x00000040; + onChanged(); + } + if (!other.getDomain().isEmpty()) { + domain_ = other.domain_; + bitField0_ |= 0x00000080; + onChanged(); + } + if (other.hasProjectTeam()) { + mergeProjectTeam(other.getProjectTeam()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { 
+ boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + role_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + id_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + entity_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + entityId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 34 + case 42: + { + email_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000040; + break; + } // case 42 + case 50: + { + domain_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000080; + break; + } // case 50 + case 58: + { + input.readMessage( + internalGetProjectTeamFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000100; + break; + } // case 58 + case 66: + { + etag_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 66 + case 74: + { + entityAlt_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 74 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object role_ = ""; + + /** + * + * + *
+     * Optional. The access permission for the entity. One of the following
+     * values:
+     * * `READER`
+     * * `WRITER`
+     * * `OWNER`
+     * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The role. + */ + public java.lang.String getRole() { + java.lang.Object ref = role_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + role_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The access permission for the entity. One of the following
+     * values:
+     * * `READER`
+     * * `WRITER`
+     * * `OWNER`
+     * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for role. + */ + public com.google.protobuf.ByteString getRoleBytes() { + java.lang.Object ref = role_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + role_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The access permission for the entity. One of the following
+     * values:
+     * * `READER`
+     * * `WRITER`
+     * * `OWNER`
+     * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The role to set. + * @return This builder for chaining. + */ + public Builder setRole(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + role_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The access permission for the entity. One of the following
+     * values:
+     * * `READER`
+     * * `WRITER`
+     * * `OWNER`
+     * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearRole() { + role_ = getDefaultInstance().getRole(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The access permission for the entity. One of the following
+     * values:
+     * * `READER`
+     * * `WRITER`
+     * * `OWNER`
+     * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for role to set. + * @return This builder for chaining. + */ + public Builder setRoleBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + role_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object id_ = ""; + + /** + * + * + *
+     * Optional. The ID of the access-control entry.
+     * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The id. + */ + public java.lang.String getId() { + java.lang.Object ref = id_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + id_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The ID of the access-control entry.
+     * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for id. + */ + public com.google.protobuf.ByteString getIdBytes() { + java.lang.Object ref = id_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + id_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The ID of the access-control entry.
+     * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The id to set. + * @return This builder for chaining. + */ + public Builder setId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + id_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID of the access-control entry.
+     * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearId() { + id_ = getDefaultInstance().getId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID of the access-control entry.
+     * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for id to set. + * @return This builder for chaining. + */ + public Builder setIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + id_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object entity_ = ""; + + /** + * + * + *
+     * Optional. The entity holding the permission, in one of the following forms:
+     * * `user-{userid}`
+     * * `user-{email}`
+     * * `group-{groupid}`
+     * * `group-{email}`
+     * * `domain-{domain}`
+     * * `project-{team}-{projectnumber}`
+     * * `project-{team}-{projectid}`
+     * * `allUsers`
+     * * `allAuthenticatedUsers`
+     * Examples:
+     * * The user `liz@example.com` would be `user-liz@example.com`.
+     * * The group `example@googlegroups.com` would be
+     * `group-example@googlegroups.com`.
+     * * All members of the Google Apps for Business domain `example.com` would be
+     * `domain-example.com`.
+     * For project entities, `project-{team}-{projectnumber}` format is
+     * returned in the response.
+     * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entity. + */ + public java.lang.String getEntity() { + java.lang.Object ref = entity_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entity_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The entity holding the permission, in one of the following forms:
+     * * `user-{userid}`
+     * * `user-{email}`
+     * * `group-{groupid}`
+     * * `group-{email}`
+     * * `domain-{domain}`
+     * * `project-{team}-{projectnumber}`
+     * * `project-{team}-{projectid}`
+     * * `allUsers`
+     * * `allAuthenticatedUsers`
+     * Examples:
+     * * The user `liz@example.com` would be `user-liz@example.com`.
+     * * The group `example@googlegroups.com` would be
+     * `group-example@googlegroups.com`.
+     * * All members of the Google Apps for Business domain `example.com` would be
+     * `domain-example.com`.
+     * For project entities, `project-{team}-{projectnumber}` format is
+     * returned in the response.
+     * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entity. + */ + public com.google.protobuf.ByteString getEntityBytes() { + java.lang.Object ref = entity_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entity_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The entity holding the permission, in one of the following forms:
+     * * `user-{userid}`
+     * * `user-{email}`
+     * * `group-{groupid}`
+     * * `group-{email}`
+     * * `domain-{domain}`
+     * * `project-{team}-{projectnumber}`
+     * * `project-{team}-{projectid}`
+     * * `allUsers`
+     * * `allAuthenticatedUsers`
+     * Examples:
+     * * The user `liz@example.com` would be `user-liz@example.com`.
+     * * The group `example@googlegroups.com` would be
+     * `group-example@googlegroups.com`.
+     * * All members of the Google Apps for Business domain `example.com` would be
+     * `domain-example.com`.
+     * For project entities, `project-{team}-{projectnumber}` format is
+     * returned in the response.
+     * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The entity to set. + * @return This builder for chaining. + */ + public Builder setEntity(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + entity_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The entity holding the permission, in one of the following forms:
+     * * `user-{userid}`
+     * * `user-{email}`
+     * * `group-{groupid}`
+     * * `group-{email}`
+     * * `domain-{domain}`
+     * * `project-{team}-{projectnumber}`
+     * * `project-{team}-{projectid}`
+     * * `allUsers`
+     * * `allAuthenticatedUsers`
+     * Examples:
+     * * The user `liz@example.com` would be `user-liz@example.com`.
+     * * The group `example@googlegroups.com` would be
+     * `group-example@googlegroups.com`.
+     * * All members of the Google Apps for Business domain `example.com` would be
+     * `domain-example.com`.
+     * For project entities, `project-{team}-{projectnumber}` format is
+     * returned in the response.
+     * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEntity() { + entity_ = getDefaultInstance().getEntity(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The entity holding the permission, in one of the following forms:
+     * * `user-{userid}`
+     * * `user-{email}`
+     * * `group-{groupid}`
+     * * `group-{email}`
+     * * `domain-{domain}`
+     * * `project-{team}-{projectnumber}`
+     * * `project-{team}-{projectid}`
+     * * `allUsers`
+     * * `allAuthenticatedUsers`
+     * Examples:
+     * * The user `liz@example.com` would be `user-liz@example.com`.
+     * * The group `example@googlegroups.com` would be
+     * `group-example@googlegroups.com`.
+     * * All members of the Google Apps for Business domain `example.com` would be
+     * `domain-example.com`.
+     * For project entities, `project-{team}-{projectnumber}` format is
+     * returned in the response.
+     * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for entity to set. + * @return This builder for chaining. + */ + public Builder setEntityBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + entity_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object entityAlt_ = ""; + + /** + * + * + *
+   * Output only. The alternative entity format, if it exists. For project
+     * entities, `project-{team}-{projectid}` format is returned in the response.
+     * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The entityAlt. + */ + public java.lang.String getEntityAlt() { + java.lang.Object ref = entityAlt_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entityAlt_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+   * Output only. The alternative entity format, if it exists. For project
+     * entities, `project-{team}-{projectid}` format is returned in the response.
+     * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for entityAlt. + */ + public com.google.protobuf.ByteString getEntityAltBytes() { + java.lang.Object ref = entityAlt_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entityAlt_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+   * Output only. The alternative entity format, if it exists. For project
+     * entities, `project-{team}-{projectid}` format is returned in the response.
+     * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The entityAlt to set. + * @return This builder for chaining. + */ + public Builder setEntityAlt(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + entityAlt_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+   * Output only. The alternative entity format, if it exists. For project
+     * entities, `project-{team}-{projectid}` format is returned in the response.
+     * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearEntityAlt() { + entityAlt_ = getDefaultInstance().getEntityAlt(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+   * Output only. The alternative entity format, if it exists. For project
+     * entities, `project-{team}-{projectid}` format is returned in the response.
+     * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for entityAlt to set. + * @return This builder for chaining. + */ + public Builder setEntityAltBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + entityAlt_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private java.lang.Object entityId_ = ""; + + /** + * + * + *
+     * Optional. The ID for the entity, if any.
+     * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entityId. + */ + public java.lang.String getEntityId() { + java.lang.Object ref = entityId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entityId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The ID for the entity, if any.
+     * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entityId. + */ + public com.google.protobuf.ByteString getEntityIdBytes() { + java.lang.Object ref = entityId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entityId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The ID for the entity, if any.
+     * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The entityId to set. + * @return This builder for chaining. + */ + public Builder setEntityId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + entityId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID for the entity, if any.
+     * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEntityId() { + entityId_ = getDefaultInstance().getEntityId(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID for the entity, if any.
+     * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for entityId to set. + * @return This builder for chaining. + */ + public Builder setEntityIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + entityId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private java.lang.Object etag_ = ""; + + /** + * + * + *
+     * Optional. The etag of the ObjectAccessControl.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the live
+     * object's ObjectAccessControl.
+     * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The etag. + */ + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The etag of the ObjectAccessControl.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the live
+     * object's ObjectAccessControl.
+     * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for etag. + */ + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The etag of the ObjectAccessControl.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the live
+     * object's ObjectAccessControl.
+     * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The etag to set. + * @return This builder for chaining. + */ + public Builder setEtag(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + etag_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The etag of the ObjectAccessControl.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the live
+     * object's ObjectAccessControl.
+     * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEtag() { + etag_ = getDefaultInstance().getEtag(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The etag of the ObjectAccessControl.
+     * If included in the metadata of an update or delete request message, the
+     * operation is only performed if the etag matches that of the live
+     * object's ObjectAccessControl.
+     * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for etag to set. + * @return This builder for chaining. + */ + public Builder setEtagBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + etag_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private java.lang.Object email_ = ""; + + /** + * + * + *
+     * Optional. The email address associated with the entity, if any.
+     * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The email. + */ + public java.lang.String getEmail() { + java.lang.Object ref = email_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + email_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The email address associated with the entity, if any.
+     * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for email. + */ + public com.google.protobuf.ByteString getEmailBytes() { + java.lang.Object ref = email_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + email_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The email address associated with the entity, if any.
+     * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The email to set. + * @return This builder for chaining. + */ + public Builder setEmail(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + email_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The email address associated with the entity, if any.
+     * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEmail() { + email_ = getDefaultInstance().getEmail(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The email address associated with the entity, if any.
+     * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for email to set. + * @return This builder for chaining. + */ + public Builder setEmailBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + email_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + private java.lang.Object domain_ = ""; + + /** + * + * + *
+     * Optional. The domain associated with the entity, if any.
+     * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The domain. + */ + public java.lang.String getDomain() { + java.lang.Object ref = domain_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + domain_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The domain associated with the entity, if any.
+     * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for domain. + */ + public com.google.protobuf.ByteString getDomainBytes() { + java.lang.Object ref = domain_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + domain_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The domain associated with the entity, if any.
+     * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The domain to set. + * @return This builder for chaining. + */ + public Builder setDomain(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + domain_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The domain associated with the entity, if any.
+     * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDomain() { + domain_ = getDefaultInstance().getDomain(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The domain associated with the entity, if any.
+     * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for domain to set. + * @return This builder for chaining. + */ + public Builder setDomainBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + domain_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + private com.google.storage.v2.ProjectTeam projectTeam_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ProjectTeam, + com.google.storage.v2.ProjectTeam.Builder, + com.google.storage.v2.ProjectTeamOrBuilder> + projectTeamBuilder_; + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the projectTeam field is set. + */ + public boolean hasProjectTeam() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The projectTeam. + */ + public com.google.storage.v2.ProjectTeam getProjectTeam() { + if (projectTeamBuilder_ == null) { + return projectTeam_ == null + ? com.google.storage.v2.ProjectTeam.getDefaultInstance() + : projectTeam_; + } else { + return projectTeamBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setProjectTeam(com.google.storage.v2.ProjectTeam value) { + if (projectTeamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + projectTeam_ = value; + } else { + projectTeamBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setProjectTeam(com.google.storage.v2.ProjectTeam.Builder builderForValue) { + if (projectTeamBuilder_ == null) { + projectTeam_ = builderForValue.build(); + } else { + projectTeamBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeProjectTeam(com.google.storage.v2.ProjectTeam value) { + if (projectTeamBuilder_ == null) { + if (((bitField0_ & 0x00000100) != 0) + && projectTeam_ != null + && projectTeam_ != com.google.storage.v2.ProjectTeam.getDefaultInstance()) { + getProjectTeamBuilder().mergeFrom(value); + } else { + projectTeam_ = value; + } + } else { + projectTeamBuilder_.mergeFrom(value); + } + if (projectTeam_ != null) { + bitField0_ |= 0x00000100; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearProjectTeam() { + bitField0_ = (bitField0_ & ~0x00000100); + projectTeam_ = null; + if (projectTeamBuilder_ != null) { + projectTeamBuilder_.dispose(); + projectTeamBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ProjectTeam.Builder getProjectTeamBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return internalGetProjectTeamFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ProjectTeamOrBuilder getProjectTeamOrBuilder() { + if (projectTeamBuilder_ != null) { + return projectTeamBuilder_.getMessageOrBuilder(); + } else { + return projectTeam_ == null + ? com.google.storage.v2.ProjectTeam.getDefaultInstance() + : projectTeam_; + } + } + + /** + * + * + *
+     * Optional. The project team associated with the entity, if any.
+     * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ProjectTeam, + com.google.storage.v2.ProjectTeam.Builder, + com.google.storage.v2.ProjectTeamOrBuilder> + internalGetProjectTeamFieldBuilder() { + if (projectTeamBuilder_ == null) { + projectTeamBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ProjectTeam, + com.google.storage.v2.ProjectTeam.Builder, + com.google.storage.v2.ProjectTeamOrBuilder>( + getProjectTeam(), getParentForChildren(), isClean()); + projectTeam_ = null; + } + return projectTeamBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ObjectAccessControl) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ObjectAccessControl) + private static final com.google.storage.v2.ObjectAccessControl DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ObjectAccessControl(); + } + + public static com.google.storage.v2.ObjectAccessControl getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ObjectAccessControl parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ObjectAccessControl getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectAccessControlOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectAccessControlOrBuilder.java new file mode 100644 index 000000000000..d6271fac7723 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectAccessControlOrBuilder.java @@ -0,0 +1,329 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ObjectAccessControlOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ObjectAccessControl) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. The access permission for the entity. One of the following
+   * values:
+   * * `READER`
+   * * `WRITER`
+   * * `OWNER`
+   * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The role. + */ + java.lang.String getRole(); + + /** + * + * + *
+   * Optional. The access permission for the entity. One of the following
+   * values:
+   * * `READER`
+   * * `WRITER`
+   * * `OWNER`
+   * 
+ * + * string role = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for role. + */ + com.google.protobuf.ByteString getRoleBytes(); + + /** + * + * + *
+   * Optional. The ID of the access-control entry.
+   * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The id. + */ + java.lang.String getId(); + + /** + * + * + *
+   * Optional. The ID of the access-control entry.
+   * 
+ * + * string id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for id. + */ + com.google.protobuf.ByteString getIdBytes(); + + /** + * + * + *
+   * Optional. The entity holding the permission, in one of the following forms:
+   * * `user-{userid}`
+   * * `user-{email}`
+   * * `group-{groupid}`
+   * * `group-{email}`
+   * * `domain-{domain}`
+   * * `project-{team}-{projectnumber}`
+   * * `project-{team}-{projectid}`
+   * * `allUsers`
+   * * `allAuthenticatedUsers`
+   * Examples:
+   * * The user `liz@example.com` would be `user-liz@example.com`.
+   * * The group `example@googlegroups.com` would be
+   * `group-example@googlegroups.com`.
+   * * All members of the Google Apps for Business domain `example.com` would be
+   * `domain-example.com`.
+   * For project entities, `project-{team}-{projectnumber}` format is
+   * returned in the response.
+   * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entity. + */ + java.lang.String getEntity(); + + /** + * + * + *
+   * Optional. The entity holding the permission, in one of the following forms:
+   * * `user-{userid}`
+   * * `user-{email}`
+   * * `group-{groupid}`
+   * * `group-{email}`
+   * * `domain-{domain}`
+   * * `project-{team}-{projectnumber}`
+   * * `project-{team}-{projectid}`
+   * * `allUsers`
+   * * `allAuthenticatedUsers`
+   * Examples:
+   * * The user `liz@example.com` would be `user-liz@example.com`.
+   * * The group `example@googlegroups.com` would be
+   * `group-example@googlegroups.com`.
+   * * All members of the Google Apps for Business domain `example.com` would be
+   * `domain-example.com`.
+   * For project entities, `project-{team}-{projectnumber}` format is
+   * returned in the response.
+   * 
+ * + * string entity = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entity. + */ + com.google.protobuf.ByteString getEntityBytes(); + + /** + * + * + *
+   * Output only. The alternative entity format, if exists. For project
+   * entities, `project-{team}-{projectid}` format is returned in the response.
+   * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The entityAlt. + */ + java.lang.String getEntityAlt(); + + /** + * + * + *
+   * Output only. The alternative entity format, if exists. For project
+   * entities, `project-{team}-{projectid}` format is returned in the response.
+   * 
+ * + * string entity_alt = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for entityAlt. + */ + com.google.protobuf.ByteString getEntityAltBytes(); + + /** + * + * + *
+   * Optional. The ID for the entity, if any.
+   * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entityId. + */ + java.lang.String getEntityId(); + + /** + * + * + *
+   * Optional. The ID for the entity, if any.
+   * 
+ * + * string entity_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entityId. + */ + com.google.protobuf.ByteString getEntityIdBytes(); + + /** + * + * + *
+   * Optional. The etag of the ObjectAccessControl.
+   * If included in the metadata of an update or delete request message, the
+   * operation is only performed if the etag matches that of the live
+   * object's ObjectAccessControl.
+   * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The etag. + */ + java.lang.String getEtag(); + + /** + * + * + *
+   * Optional. The etag of the ObjectAccessControl.
+   * If included in the metadata of an update or delete request message, the
+   * operation is only performed if the etag matches that of the live
+   * object's ObjectAccessControl.
+   * 
+ * + * string etag = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for etag. + */ + com.google.protobuf.ByteString getEtagBytes(); + + /** + * + * + *
+   * Optional. The email address associated with the entity, if any.
+   * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The email. + */ + java.lang.String getEmail(); + + /** + * + * + *
+   * Optional. The email address associated with the entity, if any.
+   * 
+ * + * string email = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for email. + */ + com.google.protobuf.ByteString getEmailBytes(); + + /** + * + * + *
+   * Optional. The domain associated with the entity, if any.
+   * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The domain. + */ + java.lang.String getDomain(); + + /** + * + * + *
+   * Optional. The domain associated with the entity, if any.
+   * 
+ * + * string domain = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for domain. + */ + com.google.protobuf.ByteString getDomainBytes(); + + /** + * + * + *
+   * Optional. The project team associated with the entity, if any.
+   * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the projectTeam field is set. + */ + boolean hasProjectTeam(); + + /** + * + * + *
+   * Optional. The project team associated with the entity, if any.
+   * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The projectTeam. + */ + com.google.storage.v2.ProjectTeam getProjectTeam(); + + /** + * + * + *
+   * Optional. The project team associated with the entity, if any.
+   * 
+ * + * + * .google.storage.v2.ProjectTeam project_team = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ProjectTeamOrBuilder getProjectTeamOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectChecksums.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectChecksums.java new file mode 100644 index 000000000000..c0e60ebb606e --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectChecksums.java @@ -0,0 +1,680 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Message used for storing full (not subrange) object checksums.
+ * 
+ * + * Protobuf type {@code google.storage.v2.ObjectChecksums} + */ +@com.google.protobuf.Generated +public final class ObjectChecksums extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ObjectChecksums) + ObjectChecksumsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ObjectChecksums"); + } + + // Use ObjectChecksums.newBuilder() to construct. + private ObjectChecksums(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ObjectChecksums() { + md5Hash_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectChecksums_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectChecksums_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ObjectChecksums.class, + com.google.storage.v2.ObjectChecksums.Builder.class); + } + + private int bitField0_; + public static final int CRC32C_FIELD_NUMBER = 1; + private int crc32C_ = 0; + + /** + * + * + *
+   * CRC32C digest of the object data. Computed by the Cloud Storage service for
+   * all written objects.
+   * If set in a WriteObjectRequest, service validates that the stored
+   * object matches this checksum.
+   * 
+ * + * optional fixed32 crc32c = 1; + * + * @return Whether the crc32c field is set. + */ + @java.lang.Override + public boolean hasCrc32C() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * CRC32C digest of the object data. Computed by the Cloud Storage service for
+   * all written objects.
+   * If set in a WriteObjectRequest, service validates that the stored
+   * object matches this checksum.
+   * 
+ * + * optional fixed32 crc32c = 1; + * + * @return The crc32c. + */ + @java.lang.Override + public int getCrc32C() { + return crc32C_; + } + + public static final int MD5_HASH_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString md5Hash_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * Optional. 128 bit MD5 hash of the object data. For more information about
+   * using the MD5 hash, see [Data validation and change
+   * detection](https://cloud.google.com/storage/docs/data-validation). Not all
+   * objects provide an MD5 hash. For example, composite objects provide only
+   * crc32c hashes. This value is equivalent to running `cat object.txt |
+   * openssl md5 -binary`
+   * 
+ * + * bytes md5_hash = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The md5Hash. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMd5Hash() { + return md5Hash_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeFixed32(1, crc32C_); + } + if (!md5Hash_.isEmpty()) { + output.writeBytes(2, md5Hash_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeFixed32Size(1, crc32C_); + } + if (!md5Hash_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(2, md5Hash_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ObjectChecksums)) { + return super.equals(obj); + } + com.google.storage.v2.ObjectChecksums other = (com.google.storage.v2.ObjectChecksums) obj; + + if (hasCrc32C() != other.hasCrc32C()) return false; + if (hasCrc32C()) { + if (getCrc32C() != other.getCrc32C()) return false; + } + if (!getMd5Hash().equals(other.getMd5Hash())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = 
(19 * hash) + getDescriptor().hashCode(); + if (hasCrc32C()) { + hash = (37 * hash) + CRC32C_FIELD_NUMBER; + hash = (53 * hash) + getCrc32C(); + } + hash = (37 * hash) + MD5_HASH_FIELD_NUMBER; + hash = (53 * hash) + getMd5Hash().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ObjectChecksums parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectChecksums parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectChecksums parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectChecksums parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectChecksums parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectChecksums parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectChecksums parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.storage.v2.ObjectChecksums parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ObjectChecksums parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ObjectChecksums parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ObjectChecksums parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ObjectChecksums parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ObjectChecksums prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Message used for storing full (not subrange) object checksums.
+   * 
+ * + * Protobuf type {@code google.storage.v2.ObjectChecksums} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ObjectChecksums) + com.google.storage.v2.ObjectChecksumsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectChecksums_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectChecksums_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ObjectChecksums.class, + com.google.storage.v2.ObjectChecksums.Builder.class); + } + + // Construct using com.google.storage.v2.ObjectChecksums.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + crc32C_ = 0; + md5Hash_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectChecksums_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ObjectChecksums getDefaultInstanceForType() { + return com.google.storage.v2.ObjectChecksums.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ObjectChecksums build() { + com.google.storage.v2.ObjectChecksums result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ObjectChecksums buildPartial() { + 
com.google.storage.v2.ObjectChecksums result = + new com.google.storage.v2.ObjectChecksums(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ObjectChecksums result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.crc32C_ = crc32C_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.md5Hash_ = md5Hash_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ObjectChecksums) { + return mergeFrom((com.google.storage.v2.ObjectChecksums) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ObjectChecksums other) { + if (other == com.google.storage.v2.ObjectChecksums.getDefaultInstance()) return this; + if (other.hasCrc32C()) { + setCrc32C(other.getCrc32C()); + } + if (!other.getMd5Hash().isEmpty()) { + setMd5Hash(other.getMd5Hash()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 13: + { + crc32C_ = input.readFixed32(); + bitField0_ |= 0x00000001; + break; + } // case 13 + case 18: + { + md5Hash_ = input.readBytes(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { 
+ done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int crc32C_; + + /** + * + * + *
+     * CRC32C digest of the object data. Computed by the Cloud Storage service for
+     * all written objects.
+     * If set in a WriteObjectRequest, service validates that the stored
+     * object matches this checksum.
+     * 
+ * + * optional fixed32 crc32c = 1; + * + * @return Whether the crc32c field is set. + */ + @java.lang.Override + public boolean hasCrc32C() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * CRC32C digest of the object data. Computed by the Cloud Storage service for
+     * all written objects.
+     * If set in a WriteObjectRequest, service validates that the stored
+     * object matches this checksum.
+     * 
+ * + * optional fixed32 crc32c = 1; + * + * @return The crc32c. + */ + @java.lang.Override + public int getCrc32C() { + return crc32C_; + } + + /** + * + * + *
+     * CRC32C digest of the object data. Computed by the Cloud Storage service for
+     * all written objects.
+     * If set in a WriteObjectRequest, service validates that the stored
+     * object matches this checksum.
+     * 
+ * + * optional fixed32 crc32c = 1; + * + * @param value The crc32c to set. + * @return This builder for chaining. + */ + public Builder setCrc32C(int value) { + + crc32C_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * CRC32C digest of the object data. Computed by the Cloud Storage service for
+     * all written objects.
+     * If set in a WriteObjectRequest, service validates that the stored
+     * object matches this checksum.
+     * 
+ * + * optional fixed32 crc32c = 1; + * + * @return This builder for chaining. + */ + public Builder clearCrc32C() { + bitField0_ = (bitField0_ & ~0x00000001); + crc32C_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString md5Hash_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * Optional. 128 bit MD5 hash of the object data. For more information about
+     * using the MD5 hash, see [Data validation and change
+     * detection](https://cloud.google.com/storage/docs/data-validation). Not all
+     * objects provide an MD5 hash. For example, composite objects provide only
+     * crc32c hashes. This value is equivalent to running `cat object.txt |
+     * openssl md5 -binary`
+     * 
+ * + * bytes md5_hash = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The md5Hash. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMd5Hash() { + return md5Hash_; + } + + /** + * + * + *
+     * Optional. 128 bit MD5 hash of the object data. For more information about
+     * using the MD5 hash, see [Data validation and change
+     * detection](https://cloud.google.com/storage/docs/data-validation). Not all
+     * objects provide an MD5 hash. For example, composite objects provide only
+     * crc32c hashes. This value is equivalent to running `cat object.txt |
+     * openssl md5 -binary`
+     * 
+ * + * bytes md5_hash = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The md5Hash to set. + * @return This builder for chaining. + */ + public Builder setMd5Hash(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + md5Hash_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. 128 bit MD5 hash of the object data. For more information about
+     * using the MD5 hash, see [Data validation and change
+     * detection](https://cloud.google.com/storage/docs/data-validation). Not all
+     * objects provide an MD5 hash. For example, composite objects provide only
+     * crc32c hashes. This value is equivalent to running `cat object.txt |
+     * openssl md5 -binary`
+     * 
+ * + * bytes md5_hash = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearMd5Hash() { + bitField0_ = (bitField0_ & ~0x00000002); + md5Hash_ = getDefaultInstance().getMd5Hash(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ObjectChecksums) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ObjectChecksums) + private static final com.google.storage.v2.ObjectChecksums DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ObjectChecksums(); + } + + public static com.google.storage.v2.ObjectChecksums getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ObjectChecksums parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ObjectChecksums getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectChecksumsOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectChecksumsOrBuilder.java new file mode 100644 index 000000000000..7d744b635555 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectChecksumsOrBuilder.java @@ -0,0 +1,78 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ObjectChecksumsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ObjectChecksums) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * CRC32C digest of the object data. Computed by the Cloud Storage service for
+   * all written objects.
+   * If set in a WriteObjectRequest, service validates that the stored
+   * object matches this checksum.
+   * 
+ * + * optional fixed32 crc32c = 1; + * + * @return Whether the crc32c field is set. + */ + boolean hasCrc32C(); + + /** + * + * + *
+   * CRC32C digest of the object data. Computed by the Cloud Storage service for
+   * all written objects.
+   * If set in a WriteObjectRequest, service validates that the stored
+   * object matches this checksum.
+   * 
+ * + * optional fixed32 crc32c = 1; + * + * @return The crc32c. + */ + int getCrc32C(); + + /** + * + * + *
+   * Optional. 128 bit MD5 hash of the object data. For more information about
+   * using the MD5 hash, see [Data validation and change
+   * detection](https://cloud.google.com/storage/docs/data-validation). Not all
+   * objects provide an MD5 hash. For example, composite objects provide only
+   * crc32c hashes. This value is equivalent to running `cat object.txt |
+   * openssl md5 -binary`
+   * 
+ * + * bytes md5_hash = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The md5Hash. + */ + com.google.protobuf.ByteString getMd5Hash(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectContexts.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectContexts.java new file mode 100644 index 000000000000..ccd83b49654c --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectContexts.java @@ -0,0 +1,900 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * All contexts of an object grouped by type.
+ * 
+ * + * Protobuf type {@code google.storage.v2.ObjectContexts} + */ +@com.google.protobuf.Generated +public final class ObjectContexts extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ObjectContexts) + ObjectContextsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ObjectContexts"); + } + + // Use ObjectContexts.newBuilder() to construct. + private ObjectContexts(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ObjectContexts() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectContexts_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 1: + return internalGetCustom(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectContexts_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ObjectContexts.class, + com.google.storage.v2.ObjectContexts.Builder.class); + } + + public static final int CUSTOM_FIELD_NUMBER = 1; + + private static final class CustomDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, com.google.storage.v2.ObjectCustomContextPayload> + defaultEntry = + com.google.protobuf.MapEntry + . 
+ newDefaultInstance( + com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectContexts_CustomEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.MESSAGE, + com.google.storage.v2.ObjectCustomContextPayload.getDefaultInstance()); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField< + java.lang.String, com.google.storage.v2.ObjectCustomContextPayload> + custom_; + + private com.google.protobuf.MapField< + java.lang.String, com.google.storage.v2.ObjectCustomContextPayload> + internalGetCustom() { + if (custom_ == null) { + return com.google.protobuf.MapField.emptyMapField(CustomDefaultEntryHolder.defaultEntry); + } + return custom_; + } + + public int getCustomCount() { + return internalGetCustom().getMap().size(); + } + + /** + * + * + *
+   * Optional. User-defined object contexts.
+   * The maximum key or value size is `256` characters.
+   * The maximum number of entries is `50`.
+   * The maximum total serialized size of all entries is `25KiB`.
+   * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsCustom(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetCustom().getMap().containsKey(key); + } + + /** Use {@link #getCustomMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map + getCustom() { + return getCustomMap(); + } + + /** + * + * + *
+   * Optional. User-defined object contexts.
+   * The maximum key or value size is `256` characters.
+   * The maximum number of entries is `50`.
+   * The maximum total serialized size of all entries is `25KiB`.
+   * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map + getCustomMap() { + return internalGetCustom().getMap(); + } + + /** + * + * + *
+   * Optional. User-defined object contexts.
+   * The maximum key or value size is `256` characters.
+   * The maximum number of entries is `50`.
+   * The maximum total serialized size of all entries is `25KiB`.
+   * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ com.google.storage.v2.ObjectCustomContextPayload getCustomOrDefault( + java.lang.String key, + /* nullable */ + com.google.storage.v2.ObjectCustomContextPayload defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetCustom().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+   * Optional. User-defined object contexts.
+   * The maximum key or value size is `256` characters.
+   * The maximum number of entries is `50`.
+   * The maximum total serialized size of all entries is `25KiB`.
+   * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectCustomContextPayload getCustomOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetCustom().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetCustom(), CustomDefaultEntryHolder.defaultEntry, 1); + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (java.util.Map.Entry + entry : internalGetCustom().getMap().entrySet()) { + com.google.protobuf.MapEntry< + java.lang.String, com.google.storage.v2.ObjectCustomContextPayload> + custom__ = + CustomDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, custom__); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ObjectContexts)) { + return super.equals(obj); + } + com.google.storage.v2.ObjectContexts other = 
(com.google.storage.v2.ObjectContexts) obj; + + if (!internalGetCustom().equals(other.internalGetCustom())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (!internalGetCustom().getMap().isEmpty()) { + hash = (37 * hash) + CUSTOM_FIELD_NUMBER; + hash = (53 * hash) + internalGetCustom().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ObjectContexts parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectContexts parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectContexts parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectContexts parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectContexts parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectContexts parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectContexts parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ObjectContexts parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ObjectContexts parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ObjectContexts parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ObjectContexts parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ObjectContexts parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ObjectContexts prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public 
Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * All contexts of an object grouped by type.
+   * 
+ * + * Protobuf type {@code google.storage.v2.ObjectContexts} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ObjectContexts) + com.google.storage.v2.ObjectContextsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectContexts_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 1: + return internalGetCustom(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 1: + return internalGetMutableCustom(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectContexts_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ObjectContexts.class, + com.google.storage.v2.ObjectContexts.Builder.class); + } + + // Construct using com.google.storage.v2.ObjectContexts.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + internalGetMutableCustom().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + 
.internal_static_google_storage_v2_ObjectContexts_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ObjectContexts getDefaultInstanceForType() { + return com.google.storage.v2.ObjectContexts.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ObjectContexts build() { + com.google.storage.v2.ObjectContexts result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ObjectContexts buildPartial() { + com.google.storage.v2.ObjectContexts result = new com.google.storage.v2.ObjectContexts(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ObjectContexts result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.custom_ = internalGetCustom().build(CustomDefaultEntryHolder.defaultEntry); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ObjectContexts) { + return mergeFrom((com.google.storage.v2.ObjectContexts) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ObjectContexts other) { + if (other == com.google.storage.v2.ObjectContexts.getDefaultInstance()) return this; + internalGetMutableCustom().mergeFrom(other.internalGetCustom()); + bitField0_ |= 0x00000001; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + 
try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.protobuf.MapEntry< + java.lang.String, com.google.storage.v2.ObjectCustomContextPayload> + custom__ = + input.readMessage( + CustomDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableCustom() + .ensureBuilderMap() + .put(custom__.getKey(), custom__.getValue()); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private static final class CustomConverter + implements com.google.protobuf.MapFieldBuilder.Converter< + java.lang.String, + com.google.storage.v2.ObjectCustomContextPayloadOrBuilder, + com.google.storage.v2.ObjectCustomContextPayload> { + @java.lang.Override + public com.google.storage.v2.ObjectCustomContextPayload build( + com.google.storage.v2.ObjectCustomContextPayloadOrBuilder val) { + if (val instanceof com.google.storage.v2.ObjectCustomContextPayload) { + return (com.google.storage.v2.ObjectCustomContextPayload) val; + } + return ((com.google.storage.v2.ObjectCustomContextPayload.Builder) val).build(); + } + + @java.lang.Override + public com.google.protobuf.MapEntry< + java.lang.String, com.google.storage.v2.ObjectCustomContextPayload> + defaultEntry() { + return CustomDefaultEntryHolder.defaultEntry; + } + } + ; + + private static final CustomConverter customConverter = new CustomConverter(); + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.storage.v2.ObjectCustomContextPayloadOrBuilder, + com.google.storage.v2.ObjectCustomContextPayload, + 
com.google.storage.v2.ObjectCustomContextPayload.Builder> + custom_; + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.storage.v2.ObjectCustomContextPayloadOrBuilder, + com.google.storage.v2.ObjectCustomContextPayload, + com.google.storage.v2.ObjectCustomContextPayload.Builder> + internalGetCustom() { + if (custom_ == null) { + return new com.google.protobuf.MapFieldBuilder<>(customConverter); + } + return custom_; + } + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.storage.v2.ObjectCustomContextPayloadOrBuilder, + com.google.storage.v2.ObjectCustomContextPayload, + com.google.storage.v2.ObjectCustomContextPayload.Builder> + internalGetMutableCustom() { + if (custom_ == null) { + custom_ = new com.google.protobuf.MapFieldBuilder<>(customConverter); + } + bitField0_ |= 0x00000001; + onChanged(); + return custom_; + } + + public int getCustomCount() { + return internalGetCustom().ensureBuilderMap().size(); + } + + /** + * + * + *
+     * Optional. User-defined object contexts.
+     * The maximum key or value size is `256` characters.
+     * The maximum number of entries is `50`.
+     * The maximum total serialized size of all entries is `25KiB`.
+     * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsCustom(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetCustom().ensureBuilderMap().containsKey(key); + } + + /** Use {@link #getCustomMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map + getCustom() { + return getCustomMap(); + } + + /** + * + * + *
+     * Optional. User-defined object contexts.
+     * The maximum key or value size is `256` characters.
+     * The maximum number of entries is `50`.
+     * The maximum total serialized size of all entries is `25KiB`.
+     * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map + getCustomMap() { + return internalGetCustom().getImmutableMap(); + } + + /** + * + * + *
+     * Optional. User-defined object contexts.
+     * The maximum key or value size is `256` characters.
+     * The maximum number of entries is `50`.
+     * The maximum total serialized size of all entries is `25KiB`.
+     * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ com.google.storage.v2.ObjectCustomContextPayload getCustomOrDefault( + java.lang.String key, + /* nullable */ + com.google.storage.v2.ObjectCustomContextPayload defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map + map = internalGetMutableCustom().ensureBuilderMap(); + return map.containsKey(key) ? customConverter.build(map.get(key)) : defaultValue; + } + + /** + * + * + *
+     * Optional. User-defined object contexts.
+     * The maximum key or value size is `256` characters.
+     * The maximum number of entries is `50`.
+     * The maximum total serialized size of all entries is `25KiB`.
+     * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectCustomContextPayload getCustomOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map + map = internalGetMutableCustom().ensureBuilderMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return customConverter.build(map.get(key)); + } + + public Builder clearCustom() { + bitField0_ = (bitField0_ & ~0x00000001); + internalGetMutableCustom().clear(); + return this; + } + + /** + * + * + *
+     * Optional. User-defined object contexts.
+     * The maximum key or value size is `256` characters.
+     * The maximum number of entries is `50`.
+     * The maximum total serialized size of all entries is `25KiB`.
+     * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeCustom(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableCustom().ensureBuilderMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map + getMutableCustom() { + bitField0_ |= 0x00000001; + return internalGetMutableCustom().ensureMessageMap(); + } + + /** + * + * + *
+     * Optional. User-defined object contexts.
+     * The maximum key or value size is `256` characters.
+     * The maximum number of entries is `50`.
+     * The maximum total serialized size of all entries is `25KiB`.
+     * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putCustom( + java.lang.String key, com.google.storage.v2.ObjectCustomContextPayload value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableCustom().ensureBuilderMap().put(key, value); + bitField0_ |= 0x00000001; + return this; + } + + /** + * + * + *
+     * Optional. User-defined object contexts.
+     * The maximum key or value size is `256` characters.
+     * The maximum number of entries is `50`.
+     * The maximum total serialized size of all entries is `25KiB`.
+     * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllCustom( + java.util.Map values) { + for (java.util.Map.Entry + e : values.entrySet()) { + if (e.getKey() == null || e.getValue() == null) { + throw new NullPointerException(); + } + } + internalGetMutableCustom().ensureBuilderMap().putAll(values); + bitField0_ |= 0x00000001; + return this; + } + + /** + * + * + *
+     * Optional. User-defined object contexts.
+     * The maximum key or value size is `256` characters.
+     * The maximum number of entries is `50`.
+     * The maximum total serialized size of all entries is `25KiB`.
+     * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectCustomContextPayload.Builder putCustomBuilderIfAbsent( + java.lang.String key) { + java.util.Map + builderMap = internalGetMutableCustom().ensureBuilderMap(); + com.google.storage.v2.ObjectCustomContextPayloadOrBuilder entry = builderMap.get(key); + if (entry == null) { + entry = com.google.storage.v2.ObjectCustomContextPayload.newBuilder(); + builderMap.put(key, entry); + } + if (entry instanceof com.google.storage.v2.ObjectCustomContextPayload) { + entry = ((com.google.storage.v2.ObjectCustomContextPayload) entry).toBuilder(); + builderMap.put(key, entry); + } + return (com.google.storage.v2.ObjectCustomContextPayload.Builder) entry; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ObjectContexts) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ObjectContexts) + private static final com.google.storage.v2.ObjectContexts DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ObjectContexts(); + } + + public static com.google.storage.v2.ObjectContexts getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ObjectContexts parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + 
throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ObjectContexts getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectContextsOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectContextsOrBuilder.java new file mode 100644 index 000000000000..ce825834ce28 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectContextsOrBuilder.java @@ -0,0 +1,116 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ObjectContextsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ObjectContexts) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. User-defined object contexts.
+   * The maximum key or value size is `256` characters.
+   * The maximum number of entries is `50`.
+   * The maximum total serialized size of all entries is `25KiB`.
+   * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getCustomCount(); + + /** + * + * + *
+   * Optional. User-defined object contexts.
+   * The maximum key or value size is `256` characters.
+   * The maximum number of entries is `50`.
+   * The maximum total serialized size of all entries is `25KiB`.
+   * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsCustom(java.lang.String key); + + /** Use {@link #getCustomMap()} instead. */ + @java.lang.Deprecated + java.util.Map getCustom(); + + /** + * + * + *
+   * Optional. User-defined object contexts.
+   * The maximum key or value size is `256` characters.
+   * The maximum number of entries is `50`.
+   * The maximum total serialized size of all entries is `25KiB`.
+   * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getCustomMap(); + + /** + * + * + *
+   * Optional. User-defined object contexts.
+   * The maximum key or value size is `256` characters.
+   * The maximum number of entries is `50`.
+   * The maximum total serialized size of all entries is `25KiB`.
+   * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + /* nullable */ + com.google.storage.v2.ObjectCustomContextPayload getCustomOrDefault( + java.lang.String key, + /* nullable */ + com.google.storage.v2.ObjectCustomContextPayload defaultValue); + + /** + * + * + *
+   * Optional. User-defined object contexts.
+   * The maximum key or value size is `256` characters.
+   * The maximum number of entries is `50`.
+   * The maximum total serialized size of all entries is `25KiB`.
+   * 
+ * + * + * map<string, .google.storage.v2.ObjectCustomContextPayload> custom = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ObjectCustomContextPayload getCustomOrThrow(java.lang.String key); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectCustomContextPayload.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectCustomContextPayload.java new file mode 100644 index 000000000000..f6601e957919 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectCustomContextPayload.java @@ -0,0 +1,1203 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * The payload of a single user-defined object context.
+ * 
+ * + * Protobuf type {@code google.storage.v2.ObjectCustomContextPayload} + */ +@com.google.protobuf.Generated +public final class ObjectCustomContextPayload extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ObjectCustomContextPayload) + ObjectCustomContextPayloadOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ObjectCustomContextPayload"); + } + + // Use ObjectCustomContextPayload.newBuilder() to construct. + private ObjectCustomContextPayload(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ObjectCustomContextPayload() { + value_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectCustomContextPayload_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectCustomContextPayload_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ObjectCustomContextPayload.class, + com.google.storage.v2.ObjectCustomContextPayload.Builder.class); + } + + private int bitField0_; + public static final int VALUE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object value_ = ""; + + /** + * + * + *
+   * Required. The value of the object context.
+   * 
+ * + * string value = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The value. + */ + @java.lang.Override + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + value_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The value of the object context.
+   * 
+ * + * string value = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for value. + */ + @java.lang.Override + public com.google.protobuf.ByteString getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CREATE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
+   * Output only. The time at which the object context was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Output only. The time at which the object context was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
+   * Output only. The time at which the object context was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp updateTime_; + + /** + * + * + *
+   * Output only. The time at which the object context was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + @java.lang.Override + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Output only. The time at which the object context was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + /** + * + * + *
+   * Output only. The time at which the object context was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(value_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, value_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getUpdateTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(value_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, value_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getUpdateTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ObjectCustomContextPayload)) { + return super.equals(obj); + } + com.google.storage.v2.ObjectCustomContextPayload other = 
+ (com.google.storage.v2.ObjectCustomContextPayload) obj; + + if (!getValue().equals(other.getValue())) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasUpdateTime() != other.hasUpdateTime()) return false; + if (hasUpdateTime()) { + if (!getUpdateTime().equals(other.getUpdateTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + getValue().hashCode(); + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasUpdateTime()) { + hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getUpdateTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ObjectCustomContextPayload parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectCustomContextPayload parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectCustomContextPayload parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectCustomContextPayload parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectCustomContextPayload parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectCustomContextPayload parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectCustomContextPayload parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ObjectCustomContextPayload parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ObjectCustomContextPayload parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ObjectCustomContextPayload parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ObjectCustomContextPayload parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.storage.v2.ObjectCustomContextPayload parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ObjectCustomContextPayload prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * The payload of a single user-defined object context.
+   * 
+ * + * Protobuf type {@code google.storage.v2.ObjectCustomContextPayload} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ObjectCustomContextPayload) + com.google.storage.v2.ObjectCustomContextPayloadOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectCustomContextPayload_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectCustomContextPayload_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ObjectCustomContextPayload.class, + com.google.storage.v2.ObjectCustomContextPayload.Builder.class); + } + + // Construct using com.google.storage.v2.ObjectCustomContextPayload.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCreateTimeFieldBuilder(); + internalGetUpdateTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + value_ = ""; + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + 
.internal_static_google_storage_v2_ObjectCustomContextPayload_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ObjectCustomContextPayload getDefaultInstanceForType() { + return com.google.storage.v2.ObjectCustomContextPayload.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ObjectCustomContextPayload build() { + com.google.storage.v2.ObjectCustomContextPayload result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ObjectCustomContextPayload buildPartial() { + com.google.storage.v2.ObjectCustomContextPayload result = + new com.google.storage.v2.ObjectCustomContextPayload(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ObjectCustomContextPayload result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.value_ = value_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.updateTime_ = updateTimeBuilder_ == null ? 
updateTime_ : updateTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ObjectCustomContextPayload) { + return mergeFrom((com.google.storage.v2.ObjectCustomContextPayload) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ObjectCustomContextPayload other) { + if (other == com.google.storage.v2.ObjectCustomContextPayload.getDefaultInstance()) + return this; + if (!other.getValue().isEmpty()) { + value_ = other.value_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasUpdateTime()) { + mergeUpdateTime(other.getUpdateTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + value_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetUpdateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } 
+ break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object value_ = ""; + + /** + * + * + *
+     * Required. The value of the object context.
+     * 
+ * + * string value = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The value. + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + value_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The value of the object context.
+     * 
+ * + * string value = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for value. + */ + public com.google.protobuf.ByteString getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The value of the object context.
+     * 
+ * + * string value = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The value to set. + * @return This builder for chaining. + */ + public Builder setValue(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + value_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The value of the object context.
+     * 
+ * + * string value = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearValue() { + value_ = getDefaultInstance().getValue(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The value of the object context.
+     * 
+ * + * string value = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for value to set. + * @return This builder for chaining. + */ + public Builder setValueBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + value_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
+     * Output only. The time at which the object context was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Output only. The time at which the object context was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The time at which the object context was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time at which the object context was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time at which the object context was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The time at which the object context was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000002); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time at which the object context was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The time at which the object context was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
+     * Output only. The time at which the object context was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp updateTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateTimeBuilder_; + + /** + * + * + *
+     * Output only. The time at which the object context was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Output only. The time at which the object context was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The time at which the object context was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + } else { + updateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time at which the object context was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time at which the object context was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && updateTime_ != null + && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateTimeBuilder().mergeFrom(value); + } else { + updateTime_ = value; + } + } else { + updateTimeBuilder_.mergeFrom(value); + } + if (updateTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The time at which the object context was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateTime() { + bitField0_ = (bitField0_ & ~0x00000004); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The time at which the object context was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetUpdateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The time at which the object context was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } + } + + /** + * + * + *
+     * Output only. The time at which the object context was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetUpdateTimeFieldBuilder() { + if (updateTimeBuilder_ == null) { + updateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpdateTime(), getParentForChildren(), isClean()); + updateTime_ = null; + } + return updateTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ObjectCustomContextPayload) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ObjectCustomContextPayload) + private static final com.google.storage.v2.ObjectCustomContextPayload DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ObjectCustomContextPayload(); + } + + public static com.google.storage.v2.ObjectCustomContextPayload getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ObjectCustomContextPayload parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ObjectCustomContextPayload getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectCustomContextPayloadOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectCustomContextPayloadOrBuilder.java new file mode 100644 index 000000000000..b9e5ab855cfc --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectCustomContextPayloadOrBuilder.java @@ -0,0 +1,134 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ObjectCustomContextPayloadOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ObjectCustomContextPayload) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The value of the object context.
+   * 
+ * + * string value = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The value. + */ + java.lang.String getValue(); + + /** + * + * + *
+   * Required. The value of the object context.
+   * 
+ * + * string value = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for value. + */ + com.google.protobuf.ByteString getValueBytes(); + + /** + * + * + *
+   * Output only. The time at which the object context was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
+   * Output only. The time at which the object context was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
+   * Output only. The time at which the object context was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. The time at which the object context was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + boolean hasUpdateTime(); + + /** + * + * + *
+   * Output only. The time at which the object context was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + com.google.protobuf.Timestamp getUpdateTime(); + + /** + * + * + *
+   * Output only. The time at which the object context was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectOrBuilder.java new file mode 100644 index 000000000000..3b893123e8a7 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectOrBuilder.java @@ -0,0 +1,1305 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ObjectOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Object) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Immutable. The name of this object. Nearly any sequence of unicode
+   * characters is valid. See
+   * [Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+   * Example: `test.txt`
+   * The `name` field by itself does not uniquely identify a Cloud Storage
+   * object. A Cloud Storage object is uniquely identified by the tuple of
+   * (bucket, object, generation).
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Immutable. The name of this object. Nearly any sequence of unicode
+   * characters is valid. See
+   * [Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+   * Example: `test.txt`
+   * The `name` field by itself does not uniquely identify a Cloud Storage
+   * object. A Cloud Storage object is uniquely identified by the tuple of
+   * (bucket, object, generation).
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Immutable. The name of the bucket containing this object.
+   * 
+ * + * + * string bucket = 2 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + java.lang.String getBucket(); + + /** + * + * + *
+   * Immutable. The name of the bucket containing this object.
+   * 
+ * + * + * string bucket = 2 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + com.google.protobuf.ByteString getBucketBytes(); + + /** + * + * + *
+   * Optional. The `etag` of an object.
+   * If included in the metadata of an update or delete request message, the
+   * operation is only performed if the etag matches that of the live
+   * object.
+   * 
+ * + * string etag = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The etag. + */ + java.lang.String getEtag(); + + /** + * + * + *
+   * Optional. The `etag` of an object.
+   * If included in the metadata of an update or delete request message, the
+   * operation is only performed if the etag matches that of the live
+   * object.
+   * 
+ * + * string etag = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for etag. + */ + com.google.protobuf.ByteString getEtagBytes(); + + /** + * + * + *
+   * Immutable. The content generation of this object. Used for object
+   * versioning.
+   * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * @return The generation. + */ + long getGeneration(); + + /** + * + * + *
+   * Output only. Restore token used to differentiate deleted objects with the
+   * same name and generation. This field is output only, and only set for
+   * deleted objects in HNS buckets.
+   * 
+ * + * optional string restore_token = 35 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return Whether the restoreToken field is set. + */ + boolean hasRestoreToken(); + + /** + * + * + *
+   * Output only. Restore token used to differentiate deleted objects with the
+   * same name and generation. This field is output only, and only set for
+   * deleted objects in HNS buckets.
+   * 
+ * + * optional string restore_token = 35 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The restoreToken. + */ + java.lang.String getRestoreToken(); + + /** + * + * + *
+   * Output only. Restore token used to differentiate deleted objects with the
+   * same name and generation. This field is output only, and only set for
+   * deleted objects in HNS buckets.
+   * 
+ * + * optional string restore_token = 35 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for restoreToken. + */ + com.google.protobuf.ByteString getRestoreTokenBytes(); + + /** + * + * + *
+   * Output only. The version of the metadata for this generation of this
+   * object. Used for preconditions and for detecting changes in metadata. A
+   * metageneration number is only meaningful in the context of a particular
+   * generation of a particular object.
+   * 
+ * + * int64 metageneration = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The metageneration. + */ + long getMetageneration(); + + /** + * + * + *
+   * Optional. Storage class of the object.
+   * 
+ * + * string storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The storageClass. + */ + java.lang.String getStorageClass(); + + /** + * + * + *
+   * Optional. Storage class of the object.
+   * 
+ * + * string storage_class = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for storageClass. + */ + com.google.protobuf.ByteString getStorageClassBytes(); + + /** + * + * + *
+   * Output only. Content-Length of the object data in bytes, matching
+   * [RFC 7230 §3.3.2](https://tools.ietf.org/html/rfc7230#section-3.3.2).
+   * 
+ * + * int64 size = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The size. + */ + long getSize(); + + /** + * + * + *
+   * Optional. Content-Encoding of the object data, matching
+   * [RFC 7231 §3.1.2.2](https://tools.ietf.org/html/rfc7231#section-3.1.2.2)
+   * 
+ * + * string content_encoding = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The contentEncoding. + */ + java.lang.String getContentEncoding(); + + /** + * + * + *
+   * Optional. Content-Encoding of the object data, matching
+   * [RFC 7231 §3.1.2.2](https://tools.ietf.org/html/rfc7231#section-3.1.2.2)
+   * 
+ * + * string content_encoding = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for contentEncoding. + */ + com.google.protobuf.ByteString getContentEncodingBytes(); + + /** + * + * + *
+   * Optional. Content-Disposition of the object data, matching
+   * [RFC 6266](https://tools.ietf.org/html/rfc6266).
+   * 
+ * + * string content_disposition = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The contentDisposition. + */ + java.lang.String getContentDisposition(); + + /** + * + * + *
+   * Optional. Content-Disposition of the object data, matching
+   * [RFC 6266](https://tools.ietf.org/html/rfc6266).
+   * 
+ * + * string content_disposition = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for contentDisposition. + */ + com.google.protobuf.ByteString getContentDispositionBytes(); + + /** + * + * + *
+   * Optional. Cache-Control directive for the object data, matching
+   * [RFC 7234 §5.2](https://tools.ietf.org/html/rfc7234#section-5.2).
+   * If omitted, and the object is accessible to all anonymous users, the
+   * default is `public, max-age=3600`.
+   * 
+ * + * string cache_control = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The cacheControl. + */ + java.lang.String getCacheControl(); + + /** + * + * + *
+   * Optional. Cache-Control directive for the object data, matching
+   * [RFC 7234 §5.2](https://tools.ietf.org/html/rfc7234#section-5.2).
+   * If omitted, and the object is accessible to all anonymous users, the
+   * default is `public, max-age=3600`.
+   * 
+ * + * string cache_control = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for cacheControl. + */ + com.google.protobuf.ByteString getCacheControlBytes(); + + /** + * + * + *
+   * Optional. Access controls on the object.
+   * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getAclList(); + + /** + * + * + *
+   * Optional. Access controls on the object.
+   * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ObjectAccessControl getAcl(int index); + + /** + * + * + *
+   * Optional. Access controls on the object.
+   * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getAclCount(); + + /** + * + * + *
+   * Optional. Access controls on the object.
+   * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getAclOrBuilderList(); + + /** + * + * + *
+   * Optional. Access controls on the object.
+   * If `iam_config.uniform_bucket_level_access` is enabled on the parent
+   * bucket, requests to set, read, or modify acl is an error.
+   * 
+ * + * + * repeated .google.storage.v2.ObjectAccessControl acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ObjectAccessControlOrBuilder getAclOrBuilder(int index); + + /** + * + * + *
+   * Optional. Content-Language of the object data, matching
+   * [RFC 7231 §3.1.3.2](https://tools.ietf.org/html/rfc7231#section-3.1.3.2).
+   * 
+ * + * string content_language = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The contentLanguage. + */ + java.lang.String getContentLanguage(); + + /** + * + * + *
+   * Optional. Content-Language of the object data, matching
+   * [RFC 7231 §3.1.3.2](https://tools.ietf.org/html/rfc7231#section-3.1.3.2).
+   * 
+ * + * string content_language = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for contentLanguage. + */ + com.google.protobuf.ByteString getContentLanguageBytes(); + + /** + * + * + *
+   * Output only. If this object is noncurrent, this is the time when the object
+   * became noncurrent.
+   * 
+ * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the deleteTime field is set. + */ + boolean hasDeleteTime(); + + /** + * + * + *
+   * Output only. If this object is noncurrent, this is the time when the object
+   * became noncurrent.
+   * 
+ * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The deleteTime. + */ + com.google.protobuf.Timestamp getDeleteTime(); + + /** + * + * + *
+   * Output only. If this object is noncurrent, this is the time when the object
+   * became noncurrent.
+   * 
+ * + * .google.protobuf.Timestamp delete_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getDeleteTimeOrBuilder(); + + /** + * + * + *
+   * Output only. The time when the object was finalized.
+   * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the finalizeTime field is set. + */ + boolean hasFinalizeTime(); + + /** + * + * + *
+   * Output only. The time when the object was finalized.
+   * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The finalizeTime. + */ + com.google.protobuf.Timestamp getFinalizeTime(); + + /** + * + * + *
+   * Output only. The time when the object was finalized.
+   * 
+ * + * + * .google.protobuf.Timestamp finalize_time = 36 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getFinalizeTimeOrBuilder(); + + /** + * + * + *
+   * Optional. Content-Type of the object data, matching
+   * [RFC 7231 §3.1.1.5](https://tools.ietf.org/html/rfc7231#section-3.1.1.5).
+   * If an object is stored without a Content-Type, it is served as
+   * `application/octet-stream`.
+   * 
+ * + * string content_type = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The contentType. + */ + java.lang.String getContentType(); + + /** + * + * + *
+   * Optional. Content-Type of the object data, matching
+   * [RFC 7231 §3.1.1.5](https://tools.ietf.org/html/rfc7231#section-3.1.1.5).
+   * If an object is stored without a Content-Type, it is served as
+   * `application/octet-stream`.
+   * 
+ * + * string content_type = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for contentType. + */ + com.google.protobuf.ByteString getContentTypeBytes(); + + /** + * + * + *
+   * Output only. The creation time of the object.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the object.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the object.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. Number of underlying components that make up this object.
+   * Components are accumulated by compose operations.
+   * 
+ * + * int32 component_count = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The componentCount. + */ + int getComponentCount(); + + /** + * + * + *
+   * Output only. Hashes for the data part of this object. This field is used
+   * for output only and is silently ignored if provided in requests. The
+   * checksums of the complete object regardless of data range. If the object is
+   * downloaded in full, the client should compute one of these checksums over
+   * the downloaded object and compare it against the value provided here.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the checksums field is set. + */ + boolean hasChecksums(); + + /** + * + * + *
+   * Output only. Hashes for the data part of this object. This field is used
+   * for output only and is silently ignored if provided in requests. The
+   * checksums of the complete object regardless of data range. If the object is
+   * downloaded in full, the client should compute one of these checksums over
+   * the downloaded object and compare it against the value provided here.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The checksums. + */ + com.google.storage.v2.ObjectChecksums getChecksums(); + + /** + * + * + *
+   * Output only. Hashes for the data part of this object. This field is used
+   * for output only and is silently ignored if provided in requests. The
+   * checksums of the complete object regardless of data range. If the object is
+   * downloaded in full, the client should compute one of these checksums over
+   * the downloaded object and compare it against the value provided here.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums checksums = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.storage.v2.ObjectChecksumsOrBuilder getChecksumsOrBuilder(); + + /** + * + * + *
+   * Output only. The modification time of the object metadata.
+   * Set initially to object creation time and then updated whenever any
+   * metadata of the object changes. This includes changes made by a requester,
+   * such as modifying custom metadata, as well as changes made by Cloud Storage
+   * on behalf of a requester, such as changing the storage class based on an
+   * Object Lifecycle Configuration.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + boolean hasUpdateTime(); + + /** + * + * + *
+   * Output only. The modification time of the object metadata.
+   * Set initially to object creation time and then updated whenever any
+   * metadata of the object changes. This includes changes made by a requester,
+   * such as modifying custom metadata, as well as changes made by Cloud Storage
+   * on behalf of a requester, such as changing the storage class based on an
+   * Object Lifecycle Configuration.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + com.google.protobuf.Timestamp getUpdateTime(); + + /** + * + * + *
+   * Output only. The modification time of the object metadata.
+   * Set initially to object creation time and then updated whenever any
+   * metadata of the object changes. This includes changes made by a requester,
+   * such as modifying custom metadata, as well as changes made by Cloud Storage
+   * on behalf of a requester, such as changing the storage class based on an
+   * Object Lifecycle Configuration.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); + + /** + * + * + *
+   * Optional. Cloud KMS Key used to encrypt this object, if the object is
+   * encrypted by such a key.
+   * 
+ * + * + * string kms_key = 18 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKey. + */ + java.lang.String getKmsKey(); + + /** + * + * + *
+   * Optional. Cloud KMS Key used to encrypt this object, if the object is
+   * encrypted by such a key.
+   * 
+ * + * + * string kms_key = 18 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKey. + */ + com.google.protobuf.ByteString getKmsKeyBytes(); + + /** + * + * + *
+   * Output only. The time at which the object's storage class was last changed.
+   * When the object is initially created, it is set to `time_created`.
+   * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateStorageClassTime field is set. + */ + boolean hasUpdateStorageClassTime(); + + /** + * + * + *
+   * Output only. The time at which the object's storage class was last changed.
+   * When the object is initially created, it is set to `time_created`.
+   * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateStorageClassTime. + */ + com.google.protobuf.Timestamp getUpdateStorageClassTime(); + + /** + * + * + *
+   * Output only. The time at which the object's storage class was last changed.
+   * When the object is initially created, it is set to `time_created`.
+   * 
+ * + * + * .google.protobuf.Timestamp update_storage_class_time = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateStorageClassTimeOrBuilder(); + + /** + * + * + *
+   * Optional. Whether an object is under temporary hold. While this flag is set
+   * to true, the object is protected against deletion and overwrites.  A common
+   * use case of this flag is regulatory investigations where objects need to be
+   * retained while the investigation is ongoing. Note that unlike event-based
+   * hold, temporary hold does not impact retention expiration time of an
+   * object.
+   * 
+ * + * bool temporary_hold = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The temporaryHold. + */ + boolean getTemporaryHold(); + + /** + * + * + *
+   * Optional. A server-determined value that specifies the earliest time that
+   * the object's retention period expires. Note 1: This field is not provided
+   * for objects with an active event-based hold, since retention expiration is
+   * unknown until the hold is removed. Note 2: This value can be provided even
+   * when temporary hold is set (so that the user can reason about policy
+   * without having to first unset the temporary hold).
+   * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionExpireTime field is set. + */ + boolean hasRetentionExpireTime(); + + /** + * + * + *
+   * Optional. A server-determined value that specifies the earliest time that
+   * the object's retention period expires. Note 1: This field is not provided
+   * for objects with an active event-based hold, since retention expiration is
+   * unknown until the hold is removed. Note 2: This value can be provided even
+   * when temporary hold is set (so that the user can reason about policy
+   * without having to first unset the temporary hold).
+   * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionExpireTime. + */ + com.google.protobuf.Timestamp getRetentionExpireTime(); + + /** + * + * + *
+   * Optional. A server-determined value that specifies the earliest time that
+   * the object's retention period expires. Note 1: This field is not provided
+   * for objects with an active event-based hold, since retention expiration is
+   * unknown until the hold is removed. Note 2: This value can be provided even
+   * when temporary hold is set (so that the user can reason about policy
+   * without having to first unset the temporary hold).
+   * 
+ * + * + * .google.protobuf.Timestamp retention_expire_time = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.TimestampOrBuilder getRetentionExpireTimeOrBuilder(); + + /** + * + * + *
+   * Optional. User-provided metadata, in key/value pairs.
+   * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + */ + int getMetadataCount(); + + /** + * + * + *
+   * Optional. User-provided metadata, in key/value pairs.
+   * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + */ + boolean containsMetadata(java.lang.String key); + + /** Use {@link #getMetadataMap()} instead. */ + @java.lang.Deprecated + java.util.Map getMetadata(); + + /** + * + * + *
+   * Optional. User-provided metadata, in key/value pairs.
+   * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + */ + java.util.Map getMetadataMap(); + + /** + * + * + *
+   * Optional. User-provided metadata, in key/value pairs.
+   * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + */ + /* nullable */ + java.lang.String getMetadataOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
+   * Optional. User-provided metadata, in key/value pairs.
+   * 
+ * + * map<string, string> metadata = 22 [(.google.api.field_behavior) = OPTIONAL]; + */ + java.lang.String getMetadataOrThrow(java.lang.String key); + + /** + * + * + *
+   * Optional. User-defined or system-defined object contexts. Each object
+   * context is a key-payload pair, where the key provides the identification
+   * and the payload holds the associated value and additional metadata.
+   * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the contexts field is set. + */ + boolean hasContexts(); + + /** + * + * + *
+   * Optional. User-defined or system-defined object contexts. Each object
+   * context is a key-payload pair, where the key provides the identification
+   * and the payload holds the associated value and additional metadata.
+   * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The contexts. + */ + com.google.storage.v2.ObjectContexts getContexts(); + + /** + * + * + *
+   * Optional. User-defined or system-defined object contexts. Each object
+   * context is a key-payload pair, where the key provides the identification
+   * and the payload holds the associated value and additional metadata.
+   * 
+ * + * + * .google.storage.v2.ObjectContexts contexts = 38 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ObjectContextsOrBuilder getContextsOrBuilder(); + + /** + * + * + *
+   * Whether an object is under event-based hold.
+   * An event-based hold is a way to force the retention of an object until
+   * after some event occurs. Once the hold is released by explicitly setting
+   * this field to `false`, the object becomes subject to any bucket-level
+   * retention policy, except that the retention duration is calculated
+   * from the time the event based hold was lifted, rather than the time the
+   * object was created.
+   *
+   * In a `WriteObject` request, not setting this field implies that the value
+   * should be taken from the parent bucket's `default_event_based_hold` field.
+   * In a response, this field is always set to `true` or `false`.
+   * 
+ * + * optional bool event_based_hold = 23; + * + * @return Whether the eventBasedHold field is set. + */ + boolean hasEventBasedHold(); + + /** + * + * + *
+   * Whether an object is under event-based hold.
+   * An event-based hold is a way to force the retention of an object until
+   * after some event occurs. Once the hold is released by explicitly setting
+   * this field to `false`, the object becomes subject to any bucket-level
+   * retention policy, except that the retention duration is calculated
+   * from the time the event based hold was lifted, rather than the time the
+   * object was created.
+   *
+   * In a `WriteObject` request, not setting this field implies that the value
+   * should be taken from the parent bucket's `default_event_based_hold` field.
+   * In a response, this field is always set to `true` or `false`.
+   * 
+ * + * optional bool event_based_hold = 23; + * + * @return The eventBasedHold. + */ + boolean getEventBasedHold(); + + /** + * + * + *
+   * Output only. The owner of the object. This is always the uploader of the
+   * object.
+   * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return Whether the owner field is set. + */ + boolean hasOwner(); + + /** + * + * + *
+   * Output only. The owner of the object. This is always the uploader of the
+   * object.
+   * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The owner. + */ + com.google.storage.v2.Owner getOwner(); + + /** + * + * + *
+   * Output only. The owner of the object. This is always the uploader of the
+   * object.
+   * 
+ * + * .google.storage.v2.Owner owner = 24 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + com.google.storage.v2.OwnerOrBuilder getOwnerOrBuilder(); + + /** + * + * + *
+   * Optional. Metadata of customer-supplied encryption key, if the object is
+   * encrypted by such a key.
+   * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customerEncryption field is set. + */ + boolean hasCustomerEncryption(); + + /** + * + * + *
+   * Optional. Metadata of customer-supplied encryption key, if the object is
+   * encrypted by such a key.
+   * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customerEncryption. + */ + com.google.storage.v2.CustomerEncryption getCustomerEncryption(); + + /** + * + * + *
+   * Optional. Metadata of customer-supplied encryption key, if the object is
+   * encrypted by such a key.
+   * 
+ * + * + * .google.storage.v2.CustomerEncryption customer_encryption = 25 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.CustomerEncryptionOrBuilder getCustomerEncryptionOrBuilder(); + + /** + * + * + *
+   * Optional. A user-specified timestamp set on an object.
+   * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the customTime field is set. + */ + boolean hasCustomTime(); + + /** + * + * + *
+   * Optional. A user-specified timestamp set on an object.
+   * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The customTime. + */ + com.google.protobuf.Timestamp getCustomTime(); + + /** + * + * + *
+   * Optional. A user-specified timestamp set on an object.
+   * 
+ * + * .google.protobuf.Timestamp custom_time = 26 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.TimestampOrBuilder getCustomTimeOrBuilder(); + + /** + * + * + *
+   * Output only. This is the time when the object became soft-deleted.
+   *
+   * Soft-deleted objects are only accessible if a soft_delete_policy is
+   * enabled. Also see `hard_delete_time`.
+   * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the softDeleteTime field is set. + */ + boolean hasSoftDeleteTime(); + + /** + * + * + *
+   * Output only. This is the time when the object became soft-deleted.
+   *
+   * Soft-deleted objects are only accessible if a soft_delete_policy is
+   * enabled. Also see `hard_delete_time`.
+   * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The softDeleteTime. + */ + com.google.protobuf.Timestamp getSoftDeleteTime(); + + /** + * + * + *
+   * Output only. This is the time when the object became soft-deleted.
+   *
+   * Soft-deleted objects are only accessible if a soft_delete_policy is
+   * enabled. Also see `hard_delete_time`.
+   * 
+ * + * + * optional .google.protobuf.Timestamp soft_delete_time = 28 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getSoftDeleteTimeOrBuilder(); + + /** + * + * + *
+   * Output only. The time when the object is permanently deleted.
+   *
+   * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+   * Otherwise, the object is not accessible.
+   * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the hardDeleteTime field is set. + */ + boolean hasHardDeleteTime(); + + /** + * + * + *
+   * Output only. The time when the object is permanently deleted.
+   *
+   * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+   * Otherwise, the object is not accessible.
+   * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The hardDeleteTime. + */ + com.google.protobuf.Timestamp getHardDeleteTime(); + + /** + * + * + *
+   * Output only. The time when the object is permanently deleted.
+   *
+   * Only set when an object becomes soft-deleted with a `soft_delete_policy`.
+   * Otherwise, the object is not accessible.
+   * 
+ * + * + * optional .google.protobuf.Timestamp hard_delete_time = 29 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getHardDeleteTimeOrBuilder(); + + /** + * + * + *
+   * Optional. Retention configuration of this object.
+   * Might only be configured if the bucket has object retention enabled.
+   * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retention field is set. + */ + boolean hasRetention(); + + /** + * + * + *
+   * Optional. Retention configuration of this object.
+   * Might only be configured if the bucket has object retention enabled.
+   * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retention. + */ + com.google.storage.v2.Object.Retention getRetention(); + + /** + * + * + *
+   * Optional. Retention configuration of this object.
+   * Might only be configured if the bucket has object retention enabled.
+   * 
+ * + * + * .google.storage.v2.Object.Retention retention = 30 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.Object.RetentionOrBuilder getRetentionOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectRangeData.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectRangeData.java new file mode 100644 index 000000000000..4c9dbf251d58 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectRangeData.java @@ -0,0 +1,1119 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Contains data and metadata for a range of an object.
+ * 
+ * + * Protobuf type {@code google.storage.v2.ObjectRangeData} + */ +@com.google.protobuf.Generated +public final class ObjectRangeData extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ObjectRangeData) + ObjectRangeDataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ObjectRangeData"); + } + + // Use ObjectRangeData.newBuilder() to construct. + private ObjectRangeData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ObjectRangeData() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectRangeData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectRangeData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ObjectRangeData.class, + com.google.storage.v2.ObjectRangeData.Builder.class); + } + + private int bitField0_; + public static final int CHECKSUMMED_DATA_FIELD_NUMBER = 1; + private com.google.storage.v2.ChecksummedData checksummedData_; + + /** + * + * + *
+   * A portion of the data for the object.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + * + * @return Whether the checksummedData field is set. + */ + @java.lang.Override + public boolean hasChecksummedData() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * A portion of the data for the object.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + * + * @return The checksummedData. + */ + @java.lang.Override + public com.google.storage.v2.ChecksummedData getChecksummedData() { + return checksummedData_ == null + ? com.google.storage.v2.ChecksummedData.getDefaultInstance() + : checksummedData_; + } + + /** + * + * + *
+   * A portion of the data for the object.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + @java.lang.Override + public com.google.storage.v2.ChecksummedDataOrBuilder getChecksummedDataOrBuilder() { + return checksummedData_ == null + ? com.google.storage.v2.ChecksummedData.getDefaultInstance() + : checksummedData_; + } + + public static final int READ_RANGE_FIELD_NUMBER = 2; + private com.google.storage.v2.ReadRange readRange_; + + /** + * + * + *
+   * The `ReadRange` describes the content being returned with `read_id` set to
+   * the corresponding `ReadObjectRequest` in the stream. Multiple
+   * `ObjectRangeData` messages might have the same read_id but increasing
+   * offsets. `ReadObjectResponse` messages with the same `read_id` are
+   * guaranteed to be delivered in increasing offset order.
+   * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + * + * @return Whether the readRange field is set. + */ + @java.lang.Override + public boolean hasReadRange() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * The `ReadRange` describes the content being returned with `read_id` set to
+   * the corresponding `ReadObjectRequest` in the stream. Multiple
+   * `ObjectRangeData` messages might have the same read_id but increasing
+   * offsets. `ReadObjectResponse` messages with the same `read_id` are
+   * guaranteed to be delivered in increasing offset order.
+   * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + * + * @return The readRange. + */ + @java.lang.Override + public com.google.storage.v2.ReadRange getReadRange() { + return readRange_ == null ? com.google.storage.v2.ReadRange.getDefaultInstance() : readRange_; + } + + /** + * + * + *
+   * The `ReadRange` describes the content being returned with `read_id` set to
+   * the corresponding `ReadObjectRequest` in the stream. Multiple
+   * `ObjectRangeData` messages might have the same read_id but increasing
+   * offsets. `ReadObjectResponse` messages with the same `read_id` are
+   * guaranteed to be delivered in increasing offset order.
+   * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + */ + @java.lang.Override + public com.google.storage.v2.ReadRangeOrBuilder getReadRangeOrBuilder() { + return readRange_ == null ? com.google.storage.v2.ReadRange.getDefaultInstance() : readRange_; + } + + public static final int RANGE_END_FIELD_NUMBER = 3; + private boolean rangeEnd_ = false; + + /** + * + * + *
+   * If set, indicates there are no more bytes to read for the given ReadRange.
+   * 
+ * + * bool range_end = 3; + * + * @return The rangeEnd. + */ + @java.lang.Override + public boolean getRangeEnd() { + return rangeEnd_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getChecksummedData()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getReadRange()); + } + if (rangeEnd_ != false) { + output.writeBool(3, rangeEnd_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getChecksummedData()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getReadRange()); + } + if (rangeEnd_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, rangeEnd_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ObjectRangeData)) { + return super.equals(obj); + } + com.google.storage.v2.ObjectRangeData other = (com.google.storage.v2.ObjectRangeData) obj; + + if (hasChecksummedData() != other.hasChecksummedData()) return false; + if (hasChecksummedData()) { + if (!getChecksummedData().equals(other.getChecksummedData())) return false; + } + if (hasReadRange() != other.hasReadRange()) return false; + 
if (hasReadRange()) { + if (!getReadRange().equals(other.getReadRange())) return false; + } + if (getRangeEnd() != other.getRangeEnd()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasChecksummedData()) { + hash = (37 * hash) + CHECKSUMMED_DATA_FIELD_NUMBER; + hash = (53 * hash) + getChecksummedData().hashCode(); + } + if (hasReadRange()) { + hash = (37 * hash) + READ_RANGE_FIELD_NUMBER; + hash = (53 * hash) + getReadRange().hashCode(); + } + hash = (37 * hash) + RANGE_END_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getRangeEnd()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ObjectRangeData parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectRangeData parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectRangeData parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectRangeData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectRangeData parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ObjectRangeData parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ObjectRangeData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ObjectRangeData parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ObjectRangeData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ObjectRangeData parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ObjectRangeData parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ObjectRangeData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder 
newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ObjectRangeData prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Contains data and metadata for a range of an object.
+   * 
+ * + * Protobuf type {@code google.storage.v2.ObjectRangeData} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ObjectRangeData) + com.google.storage.v2.ObjectRangeDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectRangeData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectRangeData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ObjectRangeData.class, + com.google.storage.v2.ObjectRangeData.Builder.class); + } + + // Construct using com.google.storage.v2.ObjectRangeData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetChecksummedDataFieldBuilder(); + internalGetReadRangeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + checksummedData_ = null; + if (checksummedDataBuilder_ != null) { + checksummedDataBuilder_.dispose(); + checksummedDataBuilder_ = null; + } + readRange_ = null; + if (readRangeBuilder_ != null) { + readRangeBuilder_.dispose(); + readRangeBuilder_ = null; + } + rangeEnd_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ObjectRangeData_descriptor; + } + + 
@java.lang.Override + public com.google.storage.v2.ObjectRangeData getDefaultInstanceForType() { + return com.google.storage.v2.ObjectRangeData.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ObjectRangeData build() { + com.google.storage.v2.ObjectRangeData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ObjectRangeData buildPartial() { + com.google.storage.v2.ObjectRangeData result = + new com.google.storage.v2.ObjectRangeData(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ObjectRangeData result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.checksummedData_ = + checksummedDataBuilder_ == null ? checksummedData_ : checksummedDataBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.readRange_ = readRangeBuilder_ == null ? 
readRange_ : readRangeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.rangeEnd_ = rangeEnd_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ObjectRangeData) { + return mergeFrom((com.google.storage.v2.ObjectRangeData) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ObjectRangeData other) { + if (other == com.google.storage.v2.ObjectRangeData.getDefaultInstance()) return this; + if (other.hasChecksummedData()) { + mergeChecksummedData(other.getChecksummedData()); + } + if (other.hasReadRange()) { + mergeReadRange(other.getReadRange()); + } + if (other.getRangeEnd() != false) { + setRangeEnd(other.getRangeEnd()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetChecksummedDataFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetReadRangeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + rangeEnd_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an 
endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.v2.ChecksummedData checksummedData_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ChecksummedData, + com.google.storage.v2.ChecksummedData.Builder, + com.google.storage.v2.ChecksummedDataOrBuilder> + checksummedDataBuilder_; + + /** + * + * + *
+     * A portion of the data for the object.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + * + * @return Whether the checksummedData field is set. + */ + public boolean hasChecksummedData() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * A portion of the data for the object.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + * + * @return The checksummedData. + */ + public com.google.storage.v2.ChecksummedData getChecksummedData() { + if (checksummedDataBuilder_ == null) { + return checksummedData_ == null + ? com.google.storage.v2.ChecksummedData.getDefaultInstance() + : checksummedData_; + } else { + return checksummedDataBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * A portion of the data for the object.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + public Builder setChecksummedData(com.google.storage.v2.ChecksummedData value) { + if (checksummedDataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + checksummedData_ = value; + } else { + checksummedDataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * A portion of the data for the object.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + public Builder setChecksummedData( + com.google.storage.v2.ChecksummedData.Builder builderForValue) { + if (checksummedDataBuilder_ == null) { + checksummedData_ = builderForValue.build(); + } else { + checksummedDataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * A portion of the data for the object.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + public Builder mergeChecksummedData(com.google.storage.v2.ChecksummedData value) { + if (checksummedDataBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && checksummedData_ != null + && checksummedData_ != com.google.storage.v2.ChecksummedData.getDefaultInstance()) { + getChecksummedDataBuilder().mergeFrom(value); + } else { + checksummedData_ = value; + } + } else { + checksummedDataBuilder_.mergeFrom(value); + } + if (checksummedData_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * A portion of the data for the object.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + public Builder clearChecksummedData() { + bitField0_ = (bitField0_ & ~0x00000001); + checksummedData_ = null; + if (checksummedDataBuilder_ != null) { + checksummedDataBuilder_.dispose(); + checksummedDataBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * A portion of the data for the object.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + public com.google.storage.v2.ChecksummedData.Builder getChecksummedDataBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetChecksummedDataFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * A portion of the data for the object.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + public com.google.storage.v2.ChecksummedDataOrBuilder getChecksummedDataOrBuilder() { + if (checksummedDataBuilder_ != null) { + return checksummedDataBuilder_.getMessageOrBuilder(); + } else { + return checksummedData_ == null + ? com.google.storage.v2.ChecksummedData.getDefaultInstance() + : checksummedData_; + } + } + + /** + * + * + *
+     * A portion of the data for the object.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ChecksummedData, + com.google.storage.v2.ChecksummedData.Builder, + com.google.storage.v2.ChecksummedDataOrBuilder> + internalGetChecksummedDataFieldBuilder() { + if (checksummedDataBuilder_ == null) { + checksummedDataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ChecksummedData, + com.google.storage.v2.ChecksummedData.Builder, + com.google.storage.v2.ChecksummedDataOrBuilder>( + getChecksummedData(), getParentForChildren(), isClean()); + checksummedData_ = null; + } + return checksummedDataBuilder_; + } + + private com.google.storage.v2.ReadRange readRange_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ReadRange, + com.google.storage.v2.ReadRange.Builder, + com.google.storage.v2.ReadRangeOrBuilder> + readRangeBuilder_; + + /** + * + * + *
+     * The `ReadRange` describes the content being returned with `read_id` set to
+     * the corresponding `ReadObjectRequest` in the stream. Multiple
+     * `ObjectRangeData` messages might have the same read_id but increasing
+     * offsets. `ReadObjectResponse` messages with the same `read_id` are
+     * guaranteed to be delivered in increasing offset order.
+     * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + * + * @return Whether the readRange field is set. + */ + public boolean hasReadRange() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * The `ReadRange` describes the content being returned with `read_id` set to
+     * the corresponding `ReadObjectRequest` in the stream. Multiple
+     * `ObjectRangeData` messages might have the same read_id but increasing
+     * offsets. `ReadObjectResponse` messages with the same `read_id` are
+     * guaranteed to be delivered in increasing offset order.
+     * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + * + * @return The readRange. + */ + public com.google.storage.v2.ReadRange getReadRange() { + if (readRangeBuilder_ == null) { + return readRange_ == null + ? com.google.storage.v2.ReadRange.getDefaultInstance() + : readRange_; + } else { + return readRangeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * The `ReadRange` describes the content being returned with `read_id` set to
+     * the corresponding `ReadObjectRequest` in the stream. Multiple
+     * `ObjectRangeData` messages might have the same read_id but increasing
+     * offsets. `ReadObjectResponse` messages with the same `read_id` are
+     * guaranteed to be delivered in increasing offset order.
+     * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + */ + public Builder setReadRange(com.google.storage.v2.ReadRange value) { + if (readRangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readRange_ = value; + } else { + readRangeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The `ReadRange` describes the content being returned with `read_id` set to
+     * the corresponding `ReadObjectRequest` in the stream. Multiple
+     * `ObjectRangeData` messages might have the same read_id but increasing
+     * offsets. `ReadObjectResponse` messages with the same `read_id` are
+     * guaranteed to be delivered in increasing offset order.
+     * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + */ + public Builder setReadRange(com.google.storage.v2.ReadRange.Builder builderForValue) { + if (readRangeBuilder_ == null) { + readRange_ = builderForValue.build(); + } else { + readRangeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The `ReadRange` describes the content being returned with `read_id` set to
+     * the corresponding `ReadObjectRequest` in the stream. Multiple
+     * `ObjectRangeData` messages might have the same read_id but increasing
+     * offsets. `ReadObjectResponse` messages with the same `read_id` are
+     * guaranteed to be delivered in increasing offset order.
+     * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + */ + public Builder mergeReadRange(com.google.storage.v2.ReadRange value) { + if (readRangeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && readRange_ != null + && readRange_ != com.google.storage.v2.ReadRange.getDefaultInstance()) { + getReadRangeBuilder().mergeFrom(value); + } else { + readRange_ = value; + } + } else { + readRangeBuilder_.mergeFrom(value); + } + if (readRange_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * The `ReadRange` describes the content being returned with `read_id` set to
+     * the corresponding `ReadObjectRequest` in the stream. Multiple
+     * `ObjectRangeData` messages might have the same read_id but increasing
+     * offsets. `ReadObjectResponse` messages with the same `read_id` are
+     * guaranteed to be delivered in increasing offset order.
+     * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + */ + public Builder clearReadRange() { + bitField0_ = (bitField0_ & ~0x00000002); + readRange_ = null; + if (readRangeBuilder_ != null) { + readRangeBuilder_.dispose(); + readRangeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * The `ReadRange` describes the content being returned with `read_id` set to
+     * the corresponding `ReadObjectRequest` in the stream. Multiple
+     * `ObjectRangeData` messages might have the same read_id but increasing
+     * offsets. `ReadObjectResponse` messages with the same `read_id` are
+     * guaranteed to be delivered in increasing offset order.
+     * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + */ + public com.google.storage.v2.ReadRange.Builder getReadRangeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetReadRangeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The `ReadRange` describes the content being returned with `read_id` set to
+     * the corresponding `ReadObjectRequest` in the stream. Multiple
+     * `ObjectRangeData` messages might have the same read_id but increasing
+     * offsets. `ReadObjectResponse` messages with the same `read_id` are
+     * guaranteed to be delivered in increasing offset order.
+     * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + */ + public com.google.storage.v2.ReadRangeOrBuilder getReadRangeOrBuilder() { + if (readRangeBuilder_ != null) { + return readRangeBuilder_.getMessageOrBuilder(); + } else { + return readRange_ == null + ? com.google.storage.v2.ReadRange.getDefaultInstance() + : readRange_; + } + } + + /** + * + * + *
+     * The `ReadRange` describes the content being returned with `read_id` set to
+     * the corresponding `ReadObjectRequest` in the stream. Multiple
+     * `ObjectRangeData` messages might have the same read_id but increasing
+     * offsets. `ReadObjectResponse` messages with the same `read_id` are
+     * guaranteed to be delivered in increasing offset order.
+     * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ReadRange, + com.google.storage.v2.ReadRange.Builder, + com.google.storage.v2.ReadRangeOrBuilder> + internalGetReadRangeFieldBuilder() { + if (readRangeBuilder_ == null) { + readRangeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ReadRange, + com.google.storage.v2.ReadRange.Builder, + com.google.storage.v2.ReadRangeOrBuilder>( + getReadRange(), getParentForChildren(), isClean()); + readRange_ = null; + } + return readRangeBuilder_; + } + + private boolean rangeEnd_; + + /** + * + * + *
+     * If set, indicates there are no more bytes to read for the given ReadRange.
+     * 
+ * + * bool range_end = 3; + * + * @return The rangeEnd. + */ + @java.lang.Override + public boolean getRangeEnd() { + return rangeEnd_; + } + + /** + * + * + *
+     * If set, indicates there are no more bytes to read for the given ReadRange.
+     * 
+ * + * bool range_end = 3; + * + * @param value The rangeEnd to set. + * @return This builder for chaining. + */ + public Builder setRangeEnd(boolean value) { + + rangeEnd_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * If set, indicates there are no more bytes to read for the given ReadRange.
+     * 
+ * + * bool range_end = 3; + * + * @return This builder for chaining. + */ + public Builder clearRangeEnd() { + bitField0_ = (bitField0_ & ~0x00000004); + rangeEnd_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ObjectRangeData) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ObjectRangeData) + private static final com.google.storage.v2.ObjectRangeData DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ObjectRangeData(); + } + + public static com.google.storage.v2.ObjectRangeData getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ObjectRangeData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ObjectRangeData getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectRangeDataOrBuilder.java 
b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectRangeDataOrBuilder.java new file mode 100644 index 000000000000..9afb5105b084 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ObjectRangeDataOrBuilder.java @@ -0,0 +1,127 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ObjectRangeDataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ObjectRangeData) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * A portion of the data for the object.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + * + * @return Whether the checksummedData field is set. + */ + boolean hasChecksummedData(); + + /** + * + * + *
+   * A portion of the data for the object.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + * + * @return The checksummedData. + */ + com.google.storage.v2.ChecksummedData getChecksummedData(); + + /** + * + * + *
+   * A portion of the data for the object.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + com.google.storage.v2.ChecksummedDataOrBuilder getChecksummedDataOrBuilder(); + + /** + * + * + *
+   * The `ReadRange` describes the content being returned with `read_id` set to
+   * the corresponding `ReadObjectRequest` in the stream. Multiple
+   * `ObjectRangeData` messages might have the same read_id but increasing
+   * offsets. `ReadObjectResponse` messages with the same `read_id` are
+   * guaranteed to be delivered in increasing offset order.
+   * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + * + * @return Whether the readRange field is set. + */ + boolean hasReadRange(); + + /** + * + * + *
+   * The `ReadRange` describes the content being returned with `read_id` set to
+   * the corresponding `ReadObjectRequest` in the stream. Multiple
+   * `ObjectRangeData` messages might have the same read_id but increasing
+   * offsets. `ReadObjectResponse` messages with the same `read_id` are
+   * guaranteed to be delivered in increasing offset order.
+   * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + * + * @return The readRange. + */ + com.google.storage.v2.ReadRange getReadRange(); + + /** + * + * + *
+   * The `ReadRange` describes the content being returned with `read_id` set to
+   * the corresponding `ReadObjectRequest` in the stream. Multiple
+   * `ObjectRangeData` messages might have the same read_id but increasing
+   * offsets. `ReadObjectResponse` messages with the same `read_id` are
+   * guaranteed to be delivered in increasing offset order.
+   * 
+ * + * .google.storage.v2.ReadRange read_range = 2; + */ + com.google.storage.v2.ReadRangeOrBuilder getReadRangeOrBuilder(); + + /** + * + * + *
+   * If set, indicates there are no more bytes to read for the given ReadRange.
+   * 
+ * + * bool range_end = 3; + * + * @return The rangeEnd. + */ + boolean getRangeEnd(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/Owner.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/Owner.java new file mode 100644 index 000000000000..41d1b1cba853 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/Owner.java @@ -0,0 +1,776 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * The owner of a specific resource.
+ * 
+ * + * Protobuf type {@code google.storage.v2.Owner} + */ +@com.google.protobuf.Generated +public final class Owner extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.Owner) + OwnerOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Owner"); + } + + // Use Owner.newBuilder() to construct. + private Owner(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Owner() { + entity_ = ""; + entityId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto.internal_static_google_storage_v2_Owner_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Owner_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Owner.class, com.google.storage.v2.Owner.Builder.class); + } + + public static final int ENTITY_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object entity_ = ""; + + /** + * + * + *
+   * Optional. The entity, in the form `user-`*userId*.
+   * 
+ * + * string entity = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entity. + */ + @java.lang.Override + public java.lang.String getEntity() { + java.lang.Object ref = entity_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entity_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The entity, in the form `user-`*userId*.
+   * 
+ * + * string entity = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entity. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEntityBytes() { + java.lang.Object ref = entity_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entity_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENTITY_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object entityId_ = ""; + + /** + * + * + *
+   * Optional. The ID for the entity.
+   * 
+ * + * string entity_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entityId. + */ + @java.lang.Override + public java.lang.String getEntityId() { + java.lang.Object ref = entityId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entityId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The ID for the entity.
+   * 
+ * + * string entity_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entityId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEntityIdBytes() { + java.lang.Object ref = entityId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entityId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entity_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, entity_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entityId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, entityId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entity_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, entity_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entityId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, entityId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.Owner)) { + return super.equals(obj); + } + com.google.storage.v2.Owner other = 
(com.google.storage.v2.Owner) obj; + + if (!getEntity().equals(other.getEntity())) return false; + if (!getEntityId().equals(other.getEntityId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENTITY_FIELD_NUMBER; + hash = (53 * hash) + getEntity().hashCode(); + hash = (37 * hash) + ENTITY_ID_FIELD_NUMBER; + hash = (53 * hash) + getEntityId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.Owner parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Owner parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Owner parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Owner parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Owner parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.Owner parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.Owner parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Owner parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Owner parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Owner parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.Owner parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.Owner parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.Owner prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * The owner of a specific resource.
+   * 
+ * + * Protobuf type {@code google.storage.v2.Owner} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.Owner) + com.google.storage.v2.OwnerOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto.internal_static_google_storage_v2_Owner_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_Owner_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.Owner.class, com.google.storage.v2.Owner.Builder.class); + } + + // Construct using com.google.storage.v2.Owner.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + entity_ = ""; + entityId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto.internal_static_google_storage_v2_Owner_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.Owner getDefaultInstanceForType() { + return com.google.storage.v2.Owner.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.Owner build() { + com.google.storage.v2.Owner result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.Owner buildPartial() { + com.google.storage.v2.Owner result = new com.google.storage.v2.Owner(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void 
buildPartial0(com.google.storage.v2.Owner result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.entity_ = entity_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.entityId_ = entityId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.Owner) { + return mergeFrom((com.google.storage.v2.Owner) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.Owner other) { + if (other == com.google.storage.v2.Owner.getDefaultInstance()) return this; + if (!other.getEntity().isEmpty()) { + entity_ = other.entity_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getEntityId().isEmpty()) { + entityId_ = other.entityId_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + entity_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + entityId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + 
onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object entity_ = ""; + + /** + * + * + *
+     * Optional. The entity, in the form `user-`*userId*.
+     * 
+ * + * string entity = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entity. + */ + public java.lang.String getEntity() { + java.lang.Object ref = entity_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entity_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The entity, in the form `user-`*userId*.
+     * 
+ * + * string entity = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entity. + */ + public com.google.protobuf.ByteString getEntityBytes() { + java.lang.Object ref = entity_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entity_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The entity, in the form `user-`*userId*.
+     * 
+ * + * string entity = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The entity to set. + * @return This builder for chaining. + */ + public Builder setEntity(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + entity_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The entity, in the form `user-`*userId*.
+     * 
+ * + * string entity = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEntity() { + entity_ = getDefaultInstance().getEntity(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The entity, in the form `user-`*userId*.
+     * 
+ * + * string entity = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for entity to set. + * @return This builder for chaining. + */ + public Builder setEntityBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + entity_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object entityId_ = ""; + + /** + * + * + *
+     * Optional. The ID for the entity.
+     * 
+ * + * string entity_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entityId. + */ + public java.lang.String getEntityId() { + java.lang.Object ref = entityId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entityId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The ID for the entity.
+     * 
+ * + * string entity_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entityId. + */ + public com.google.protobuf.ByteString getEntityIdBytes() { + java.lang.Object ref = entityId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entityId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The ID for the entity.
+     * 
+ * + * string entity_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The entityId to set. + * @return This builder for chaining. + */ + public Builder setEntityId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + entityId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID for the entity.
+     * 
+ * + * string entity_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEntityId() { + entityId_ = getDefaultInstance().getEntityId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The ID for the entity.
+     * 
+ * + * string entity_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for entityId to set. + * @return This builder for chaining. + */ + public Builder setEntityIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + entityId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.Owner) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.Owner) + private static final com.google.storage.v2.Owner DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.Owner(); + } + + public static com.google.storage.v2.Owner getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Owner parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.Owner getDefaultInstanceForType() { + return 
DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/OwnerOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/OwnerOrBuilder.java new file mode 100644 index 000000000000..4c73e7a3dc8c --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/OwnerOrBuilder.java @@ -0,0 +1,80 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface OwnerOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.Owner) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. The entity, in the form `user-`*userId*.
+   * 
+ * + * string entity = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entity. + */ + java.lang.String getEntity(); + + /** + * + * + *
+   * Optional. The entity, in the form `user-`*userId*.
+   * 
+ * + * string entity = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entity. + */ + com.google.protobuf.ByteString getEntityBytes(); + + /** + * + * + *
+   * Optional. The ID for the entity.
+   * 
+ * + * string entity_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The entityId. + */ + java.lang.String getEntityId(); + + /** + * + * + *
+   * Optional. The ID for the entity.
+   * 
+ * + * string entity_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for entityId. + */ + com.google.protobuf.ByteString getEntityIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ProjectName.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ProjectName.java new file mode 100644 index 000000000000..813ee0879415 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ProjectName.java @@ -0,0 +1,168 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.storage.v2; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class ProjectName implements ResourceName { + private static final PathTemplate PROJECT = + PathTemplate.createWithoutUrlEncoding("projects/{project}"); + private volatile Map fieldValuesMap; + private final String project; + + @Deprecated + protected ProjectName() { + project = null; + } + + private ProjectName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + } + + public String getProject() { + return project; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static ProjectName of(String project) { + return newBuilder().setProject(project).build(); + } + + public static String format(String project) { + return newBuilder().setProject(project).build().toString(); + } + + public static ProjectName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT.validatedMatch( + formattedString, "ProjectName.parse: formattedString not in valid format"); + return of(matchMap.get("project")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (ProjectName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + 
fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT.instantiate("project", project); + } + + @Override + public boolean equals(java.lang.Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + ProjectName that = ((ProjectName) o); + return Objects.equals(this.project, that.project); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + return h; + } + + /** Builder for projects/{project}. */ + public static class Builder { + private String project; + + protected Builder() {} + + public String getProject() { + return project; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + private Builder(ProjectName projectName) { + this.project = projectName.project; + } + + public ProjectName build() { + return new ProjectName(this); + } + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ProjectTeam.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ProjectTeam.java new file mode 100644 index 000000000000..c1edcd6418e2 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ProjectTeam.java @@ -0,0 +1,781 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Represents the Viewers, Editors, or Owners of a given project.
+ * 
+ * + * Protobuf type {@code google.storage.v2.ProjectTeam} + */ +@com.google.protobuf.Generated +public final class ProjectTeam extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ProjectTeam) + ProjectTeamOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ProjectTeam"); + } + + // Use ProjectTeam.newBuilder() to construct. + private ProjectTeam(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ProjectTeam() { + projectNumber_ = ""; + team_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ProjectTeam_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ProjectTeam_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ProjectTeam.class, + com.google.storage.v2.ProjectTeam.Builder.class); + } + + public static final int PROJECT_NUMBER_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectNumber_ = ""; + + /** + * + * + *
+   * Optional. The project number.
+   * 
+ * + * string project_number = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The projectNumber. + */ + @java.lang.Override + public java.lang.String getProjectNumber() { + java.lang.Object ref = projectNumber_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectNumber_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The project number.
+   * 
+ * + * string project_number = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for projectNumber. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectNumberBytes() { + java.lang.Object ref = projectNumber_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectNumber_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TEAM_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object team_ = ""; + + /** + * + * + *
+   * Optional. The team.
+   * 
+ * + * string team = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The team. + */ + @java.lang.Override + public java.lang.String getTeam() { + java.lang.Object ref = team_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + team_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The team.
+   * 
+ * + * string team = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for team. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTeamBytes() { + java.lang.Object ref = team_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + team_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectNumber_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectNumber_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(team_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, team_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectNumber_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectNumber_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(team_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, team_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ProjectTeam)) { + return super.equals(obj); + } + com.google.storage.v2.ProjectTeam other 
= (com.google.storage.v2.ProjectTeam) obj; + + if (!getProjectNumber().equals(other.getProjectNumber())) return false; + if (!getTeam().equals(other.getTeam())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_NUMBER_FIELD_NUMBER; + hash = (53 * hash) + getProjectNumber().hashCode(); + hash = (37 * hash) + TEAM_FIELD_NUMBER; + hash = (53 * hash) + getTeam().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ProjectTeam parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ProjectTeam parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ProjectTeam parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ProjectTeam parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ProjectTeam parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ProjectTeam parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ProjectTeam parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ProjectTeam parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ProjectTeam parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ProjectTeam parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ProjectTeam parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ProjectTeam parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ProjectTeam prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Represents the Viewers, Editors, or Owners of a given project.
+   * 
+ * + * Protobuf type {@code google.storage.v2.ProjectTeam} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ProjectTeam) + com.google.storage.v2.ProjectTeamOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ProjectTeam_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ProjectTeam_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ProjectTeam.class, + com.google.storage.v2.ProjectTeam.Builder.class); + } + + // Construct using com.google.storage.v2.ProjectTeam.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectNumber_ = ""; + team_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ProjectTeam_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ProjectTeam getDefaultInstanceForType() { + return com.google.storage.v2.ProjectTeam.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ProjectTeam build() { + com.google.storage.v2.ProjectTeam result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ProjectTeam buildPartial() { + com.google.storage.v2.ProjectTeam result = new com.google.storage.v2.ProjectTeam(this); + if 
(bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ProjectTeam result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectNumber_ = projectNumber_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.team_ = team_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ProjectTeam) { + return mergeFrom((com.google.storage.v2.ProjectTeam) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ProjectTeam other) { + if (other == com.google.storage.v2.ProjectTeam.getDefaultInstance()) return this; + if (!other.getProjectNumber().isEmpty()) { + projectNumber_ = other.projectNumber_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getTeam().isEmpty()) { + team_ = other.team_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectNumber_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + team_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch 
(tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectNumber_ = ""; + + /** + * + * + *
+     * Optional. The project number.
+     * 
+ * + * string project_number = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The projectNumber. + */ + public java.lang.String getProjectNumber() { + java.lang.Object ref = projectNumber_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectNumber_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The project number.
+     * 
+ * + * string project_number = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for projectNumber. + */ + public com.google.protobuf.ByteString getProjectNumberBytes() { + java.lang.Object ref = projectNumber_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectNumber_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The project number.
+     * 
+ * + * string project_number = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The projectNumber to set. + * @return This builder for chaining. + */ + public Builder setProjectNumber(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectNumber_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The project number.
+     * 
+ * + * string project_number = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearProjectNumber() { + projectNumber_ = getDefaultInstance().getProjectNumber(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The project number.
+     * 
+ * + * string project_number = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for projectNumber to set. + * @return This builder for chaining. + */ + public Builder setProjectNumberBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectNumber_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object team_ = ""; + + /** + * + * + *
+     * Optional. The team.
+     * 
+ * + * string team = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The team. + */ + public java.lang.String getTeam() { + java.lang.Object ref = team_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + team_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The team.
+     * 
+ * + * string team = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for team. + */ + public com.google.protobuf.ByteString getTeamBytes() { + java.lang.Object ref = team_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + team_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The team.
+     * 
+ * + * string team = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The team to set. + * @return This builder for chaining. + */ + public Builder setTeam(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + team_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The team.
+     * 
+ * + * string team = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearTeam() { + team_ = getDefaultInstance().getTeam(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The team.
+     * 
+ * + * string team = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for team to set. + * @return This builder for chaining. + */ + public Builder setTeamBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + team_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ProjectTeam) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ProjectTeam) + private static final com.google.storage.v2.ProjectTeam DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ProjectTeam(); + } + + public static com.google.storage.v2.ProjectTeam getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProjectTeam parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ProjectTeam getDefaultInstanceForType() { 
+ return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ProjectTeamOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ProjectTeamOrBuilder.java new file mode 100644 index 000000000000..6bfb30f0b5f1 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ProjectTeamOrBuilder.java @@ -0,0 +1,80 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ProjectTeamOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ProjectTeam) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. The project number.
+   * 
+ * + * string project_number = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The projectNumber. + */ + java.lang.String getProjectNumber(); + + /** + * + * + *
+   * Optional. The project number.
+   * 
+ * + * string project_number = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for projectNumber. + */ + com.google.protobuf.ByteString getProjectNumberBytes(); + + /** + * + * + *
+   * Optional. The team.
+   * 
+ * + * string team = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The team. + */ + java.lang.String getTeam(); + + /** + * + * + *
+   * Optional. The team.
+   * 
+ * + * string team = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for team. + */ + com.google.protobuf.ByteString getTeamBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/QueryWriteStatusRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/QueryWriteStatusRequest.java new file mode 100644 index 000000000000..d34115291c68 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/QueryWriteStatusRequest.java @@ -0,0 +1,946 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request object for
+ * [QueryWriteStatus][google.storage.v2.Storage.QueryWriteStatus].
+ * 
+ * + * Protobuf type {@code google.storage.v2.QueryWriteStatusRequest} + */ +@com.google.protobuf.Generated +public final class QueryWriteStatusRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.QueryWriteStatusRequest) + QueryWriteStatusRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "QueryWriteStatusRequest"); + } + + // Use QueryWriteStatusRequest.newBuilder() to construct. + private QueryWriteStatusRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private QueryWriteStatusRequest() { + uploadId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_QueryWriteStatusRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_QueryWriteStatusRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.QueryWriteStatusRequest.class, + com.google.storage.v2.QueryWriteStatusRequest.Builder.class); + } + + private int bitField0_; + public static final int UPLOAD_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object uploadId_ = ""; + + /** + * + * + *
+   * Required. The name of the resume token for the object whose write status is
+   * being requested.
+   * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The uploadId. + */ + @java.lang.Override + public java.lang.String getUploadId() { + java.lang.Object ref = uploadId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + uploadId_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the resume token for the object whose write status is
+   * being requested.
+   * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for uploadId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getUploadIdBytes() { + java.lang.Object ref = uploadId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + uploadId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER = 2; + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + @java.lang.Override + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(uploadId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, uploadId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getCommonObjectRequestParams()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(uploadId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, uploadId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, getCommonObjectRequestParams()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.QueryWriteStatusRequest)) { + return super.equals(obj); + } + com.google.storage.v2.QueryWriteStatusRequest other = + (com.google.storage.v2.QueryWriteStatusRequest) obj; + + 
if (!getUploadId().equals(other.getUploadId())) return false; + if (hasCommonObjectRequestParams() != other.hasCommonObjectRequestParams()) return false; + if (hasCommonObjectRequestParams()) { + if (!getCommonObjectRequestParams().equals(other.getCommonObjectRequestParams())) + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + UPLOAD_ID_FIELD_NUMBER; + hash = (53 * hash) + getUploadId().hashCode(); + if (hasCommonObjectRequestParams()) { + hash = (37 * hash) + COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getCommonObjectRequestParams().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.QueryWriteStatusRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.QueryWriteStatusRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.QueryWriteStatusRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.QueryWriteStatusRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.QueryWriteStatusRequest 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.QueryWriteStatusRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.QueryWriteStatusRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.QueryWriteStatusRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.QueryWriteStatusRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.QueryWriteStatusRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.QueryWriteStatusRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.QueryWriteStatusRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + 
PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.QueryWriteStatusRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request object for
+   * [QueryWriteStatus][google.storage.v2.Storage.QueryWriteStatus].
+   * 
+ * + * Protobuf type {@code google.storage.v2.QueryWriteStatusRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.QueryWriteStatusRequest) + com.google.storage.v2.QueryWriteStatusRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_QueryWriteStatusRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_QueryWriteStatusRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.QueryWriteStatusRequest.class, + com.google.storage.v2.QueryWriteStatusRequest.Builder.class); + } + + // Construct using com.google.storage.v2.QueryWriteStatusRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCommonObjectRequestParamsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + uploadId_ = ""; + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_QueryWriteStatusRequest_descriptor; + } + + @java.lang.Override + public 
com.google.storage.v2.QueryWriteStatusRequest getDefaultInstanceForType() { + return com.google.storage.v2.QueryWriteStatusRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.QueryWriteStatusRequest build() { + com.google.storage.v2.QueryWriteStatusRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.QueryWriteStatusRequest buildPartial() { + com.google.storage.v2.QueryWriteStatusRequest result = + new com.google.storage.v2.QueryWriteStatusRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.QueryWriteStatusRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.uploadId_ = uploadId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.commonObjectRequestParams_ = + commonObjectRequestParamsBuilder_ == null + ? 
commonObjectRequestParams_ + : commonObjectRequestParamsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.QueryWriteStatusRequest) { + return mergeFrom((com.google.storage.v2.QueryWriteStatusRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.QueryWriteStatusRequest other) { + if (other == com.google.storage.v2.QueryWriteStatusRequest.getDefaultInstance()) return this; + if (!other.getUploadId().isEmpty()) { + uploadId_ = other.uploadId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasCommonObjectRequestParams()) { + mergeCommonObjectRequestParams(other.getCommonObjectRequestParams()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + uploadId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object uploadId_ = ""; + + /** + * + * + *
+     * Required. The name of the resume token for the object whose write status is
+     * being requested.
+     * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The uploadId. + */ + public java.lang.String getUploadId() { + java.lang.Object ref = uploadId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + uploadId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the resume token for the object whose write status is
+     * being requested.
+     * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for uploadId. + */ + public com.google.protobuf.ByteString getUploadIdBytes() { + java.lang.Object ref = uploadId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + uploadId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the resume token for the object whose write status is
+     * being requested.
+     * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The uploadId to set. + * @return This builder for chaining. + */ + public Builder setUploadId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + uploadId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the resume token for the object whose write status is
+     * being requested.
+     * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearUploadId() { + uploadId_ = getDefaultInstance().getUploadId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the resume token for the object whose write status is
+     * being requested.
+     * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for uploadId to set. + * @return This builder for chaining. + */ + public Builder setUploadIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + uploadId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + commonObjectRequestParamsBuilder_; + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + if (commonObjectRequestParamsBuilder_ == null) { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } else { + return commonObjectRequestParamsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonObjectRequestParams_ = value; + } else { + commonObjectRequestParamsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams.Builder builderForValue) { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParams_ = builderForValue.build(); + } else { + commonObjectRequestParamsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && commonObjectRequestParams_ != null + && commonObjectRequestParams_ + != com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance()) { + getCommonObjectRequestParamsBuilder().mergeFrom(value); + } else { + commonObjectRequestParams_ = value; + } + } else { + commonObjectRequestParamsBuilder_.mergeFrom(value); + } + if (commonObjectRequestParams_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCommonObjectRequestParams() { + bitField0_ = (bitField0_ & ~0x00000002); + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParams.Builder + getCommonObjectRequestParamsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + if (commonObjectRequestParamsBuilder_ != null) { + return commonObjectRequestParamsBuilder_.getMessageOrBuilder(); + } else { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + internalGetCommonObjectRequestParamsFieldBuilder() { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParamsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder>( + getCommonObjectRequestParams(), getParentForChildren(), isClean()); + commonObjectRequestParams_ = null; + } + return commonObjectRequestParamsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.QueryWriteStatusRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.QueryWriteStatusRequest) + private static final com.google.storage.v2.QueryWriteStatusRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.QueryWriteStatusRequest(); + } + + public static com.google.storage.v2.QueryWriteStatusRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public QueryWriteStatusRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.QueryWriteStatusRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/QueryWriteStatusRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/QueryWriteStatusRequestOrBuilder.java new file mode 100644 index 000000000000..b5d9bc22dc32 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/QueryWriteStatusRequestOrBuilder.java @@ -0,0 +1,102 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface QueryWriteStatusRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.QueryWriteStatusRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the resume token for the object whose write status is
+   * being requested.
+   * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The uploadId. + */ + java.lang.String getUploadId(); + + /** + * + * + *
+   * Required. The name of the resume token for the object whose write status is
+   * being requested.
+   * 
+ * + * string upload_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for uploadId. + */ + com.google.protobuf.ByteString getUploadIdBytes(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + boolean hasCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.CommonObjectRequestParamsOrBuilder getCommonObjectRequestParamsOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/QueryWriteStatusResponse.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/QueryWriteStatusResponse.java new file mode 100644 index 000000000000..31e9b8a65877 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/QueryWriteStatusResponse.java @@ -0,0 +1,961 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Response object for
+ * [QueryWriteStatus][google.storage.v2.Storage.QueryWriteStatus].
+ * 
+ * + * Protobuf type {@code google.storage.v2.QueryWriteStatusResponse} + */ +@com.google.protobuf.Generated +public final class QueryWriteStatusResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.QueryWriteStatusResponse) + QueryWriteStatusResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "QueryWriteStatusResponse"); + } + + // Use QueryWriteStatusResponse.newBuilder() to construct. + private QueryWriteStatusResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private QueryWriteStatusResponse() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_QueryWriteStatusResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_QueryWriteStatusResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.QueryWriteStatusResponse.class, + com.google.storage.v2.QueryWriteStatusResponse.Builder.class); + } + + private int writeStatusCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object writeStatus_; + + public enum WriteStatusCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + PERSISTED_SIZE(1), + RESOURCE(2), + WRITESTATUS_NOT_SET(0); + private final int value; + + private WriteStatusCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. 
+ * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static WriteStatusCase valueOf(int value) { + return forNumber(value); + } + + public static WriteStatusCase forNumber(int value) { + switch (value) { + case 1: + return PERSISTED_SIZE; + case 2: + return RESOURCE; + case 0: + return WRITESTATUS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public WriteStatusCase getWriteStatusCase() { + return WriteStatusCase.forNumber(writeStatusCase_); + } + + public static final int PERSISTED_SIZE_FIELD_NUMBER = 1; + + /** + * + * + *
+   * The total number of bytes that have been processed for the given object
+   * from all `WriteObject` calls. This is the correct value for the
+   * 'write_offset' field to use when resuming the `WriteObject` operation.
+   * Only set if the upload has not finalized.
+   * 
+ * + * int64 persisted_size = 1; + * + * @return Whether the persistedSize field is set. + */ + @java.lang.Override + public boolean hasPersistedSize() { + return writeStatusCase_ == 1; + } + + /** + * + * + *
+   * The total number of bytes that have been processed for the given object
+   * from all `WriteObject` calls. This is the correct value for the
+   * 'write_offset' field to use when resuming the `WriteObject` operation.
+   * Only set if the upload has not finalized.
+   * 
+ * + * int64 persisted_size = 1; + * + * @return The persistedSize. + */ + @java.lang.Override + public long getPersistedSize() { + if (writeStatusCase_ == 1) { + return (java.lang.Long) writeStatus_; + } + return 0L; + } + + public static final int RESOURCE_FIELD_NUMBER = 2; + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return Whether the resource field is set. + */ + @java.lang.Override + public boolean hasResource() { + return writeStatusCase_ == 2; + } + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return The resource. + */ + @java.lang.Override + public com.google.storage.v2.Object getResource() { + if (writeStatusCase_ == 2) { + return (com.google.storage.v2.Object) writeStatus_; + } + return com.google.storage.v2.Object.getDefaultInstance(); + } + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder() { + if (writeStatusCase_ == 2) { + return (com.google.storage.v2.Object) writeStatus_; + } + return com.google.storage.v2.Object.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (writeStatusCase_ == 1) { + output.writeInt64(1, (long) ((java.lang.Long) writeStatus_)); + } + if (writeStatusCase_ == 2) { + output.writeMessage(2, (com.google.storage.v2.Object) writeStatus_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (writeStatusCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 1, (long) ((java.lang.Long) writeStatus_)); + } + if (writeStatusCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.storage.v2.Object) writeStatus_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.QueryWriteStatusResponse)) { + return super.equals(obj); + } + com.google.storage.v2.QueryWriteStatusResponse other = + (com.google.storage.v2.QueryWriteStatusResponse) obj; + + if (!getWriteStatusCase().equals(other.getWriteStatusCase())) return false; + switch (writeStatusCase_) { + case 1: + if (getPersistedSize() != other.getPersistedSize()) return 
false; + break; + case 2: + if (!getResource().equals(other.getResource())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (writeStatusCase_) { + case 1: + hash = (37 * hash) + PERSISTED_SIZE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getPersistedSize()); + break; + case 2: + hash = (37 * hash) + RESOURCE_FIELD_NUMBER; + hash = (53 * hash) + getResource().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.QueryWriteStatusResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.QueryWriteStatusResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.QueryWriteStatusResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.QueryWriteStatusResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.QueryWriteStatusResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } 
+ + public static com.google.storage.v2.QueryWriteStatusResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.QueryWriteStatusResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.QueryWriteStatusResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.QueryWriteStatusResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.QueryWriteStatusResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.QueryWriteStatusResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.QueryWriteStatusResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return 
newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.QueryWriteStatusResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response object for
+   * [QueryWriteStatus][google.storage.v2.Storage.QueryWriteStatus].
+   * 
+ * + * Protobuf type {@code google.storage.v2.QueryWriteStatusResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.QueryWriteStatusResponse) + com.google.storage.v2.QueryWriteStatusResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_QueryWriteStatusResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_QueryWriteStatusResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.QueryWriteStatusResponse.class, + com.google.storage.v2.QueryWriteStatusResponse.Builder.class); + } + + // Construct using com.google.storage.v2.QueryWriteStatusResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (resourceBuilder_ != null) { + resourceBuilder_.clear(); + } + writeStatusCase_ = 0; + writeStatus_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_QueryWriteStatusResponse_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.QueryWriteStatusResponse getDefaultInstanceForType() { + return com.google.storage.v2.QueryWriteStatusResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.QueryWriteStatusResponse build() { + com.google.storage.v2.QueryWriteStatusResponse result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.QueryWriteStatusResponse buildPartial() { + com.google.storage.v2.QueryWriteStatusResponse result = + new com.google.storage.v2.QueryWriteStatusResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.QueryWriteStatusResponse result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs(com.google.storage.v2.QueryWriteStatusResponse result) { + result.writeStatusCase_ = writeStatusCase_; + result.writeStatus_ = this.writeStatus_; + if (writeStatusCase_ == 2 && resourceBuilder_ != null) { + result.writeStatus_ = resourceBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.QueryWriteStatusResponse) { + return mergeFrom((com.google.storage.v2.QueryWriteStatusResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.QueryWriteStatusResponse other) { + if (other == com.google.storage.v2.QueryWriteStatusResponse.getDefaultInstance()) return this; + switch (other.getWriteStatusCase()) { + case PERSISTED_SIZE: + { + setPersistedSize(other.getPersistedSize()); + break; + } + case RESOURCE: + { + mergeResource(other.getResource()); + break; + } + case WRITESTATUS_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + 
} + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + writeStatus_ = input.readInt64(); + writeStatusCase_ = 1; + break; + } // case 8 + case 18: + { + input.readMessage( + internalGetResourceFieldBuilder().getBuilder(), extensionRegistry); + writeStatusCase_ = 2; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int writeStatusCase_ = 0; + private java.lang.Object writeStatus_; + + public WriteStatusCase getWriteStatusCase() { + return WriteStatusCase.forNumber(writeStatusCase_); + } + + public Builder clearWriteStatus() { + writeStatusCase_ = 0; + writeStatus_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + * + * + *
+     * The total number of bytes that have been processed for the given object
+     * from all `WriteObject` calls. This is the correct value for the
+     * 'write_offset' field to use when resuming the `WriteObject` operation.
+     * Only set if the upload has not finalized.
+     * 
+ * + * int64 persisted_size = 1; + * + * @return Whether the persistedSize field is set. + */ + public boolean hasPersistedSize() { + return writeStatusCase_ == 1; + } + + /** + * + * + *
+     * The total number of bytes that have been processed for the given object
+     * from all `WriteObject` calls. This is the correct value for the
+     * 'write_offset' field to use when resuming the `WriteObject` operation.
+     * Only set if the upload has not finalized.
+     * 
+ * + * int64 persisted_size = 1; + * + * @return The persistedSize. + */ + public long getPersistedSize() { + if (writeStatusCase_ == 1) { + return (java.lang.Long) writeStatus_; + } + return 0L; + } + + /** + * + * + *
+     * The total number of bytes that have been processed for the given object
+     * from all `WriteObject` calls. This is the correct value for the
+     * 'write_offset' field to use when resuming the `WriteObject` operation.
+     * Only set if the upload has not finalized.
+     * 
+ * + * int64 persisted_size = 1; + * + * @param value The persistedSize to set. + * @return This builder for chaining. + */ + public Builder setPersistedSize(long value) { + + writeStatusCase_ = 1; + writeStatus_ = value; + onChanged(); + return this; + } + + /** + * + * + *
+     * The total number of bytes that have been processed for the given object
+     * from all `WriteObject` calls. This is the correct value for the
+     * 'write_offset' field to use when resuming the `WriteObject` operation.
+     * Only set if the upload has not finalized.
+     * 
+ * + * int64 persisted_size = 1; + * + * @return This builder for chaining. + */ + public Builder clearPersistedSize() { + if (writeStatusCase_ == 1) { + writeStatusCase_ = 0; + writeStatus_ = null; + onChanged(); + } + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + resourceBuilder_; + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return Whether the resource field is set. + */ + @java.lang.Override + public boolean hasResource() { + return writeStatusCase_ == 2; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return The resource. + */ + @java.lang.Override + public com.google.storage.v2.Object getResource() { + if (resourceBuilder_ == null) { + if (writeStatusCase_ == 2) { + return (com.google.storage.v2.Object) writeStatus_; + } + return com.google.storage.v2.Object.getDefaultInstance(); + } else { + if (writeStatusCase_ == 2) { + return resourceBuilder_.getMessage(); + } + return com.google.storage.v2.Object.getDefaultInstance(); + } + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public Builder setResource(com.google.storage.v2.Object value) { + if (resourceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writeStatus_ = value; + onChanged(); + } else { + resourceBuilder_.setMessage(value); + } + writeStatusCase_ = 2; + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public Builder setResource(com.google.storage.v2.Object.Builder builderForValue) { + if (resourceBuilder_ == null) { + writeStatus_ = builderForValue.build(); + onChanged(); + } else { + resourceBuilder_.setMessage(builderForValue.build()); + } + writeStatusCase_ = 2; + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public Builder mergeResource(com.google.storage.v2.Object value) { + if (resourceBuilder_ == null) { + if (writeStatusCase_ == 2 + && writeStatus_ != com.google.storage.v2.Object.getDefaultInstance()) { + writeStatus_ = + com.google.storage.v2.Object.newBuilder((com.google.storage.v2.Object) writeStatus_) + .mergeFrom(value) + .buildPartial(); + } else { + writeStatus_ = value; + } + onChanged(); + } else { + if (writeStatusCase_ == 2) { + resourceBuilder_.mergeFrom(value); + } else { + resourceBuilder_.setMessage(value); + } + } + writeStatusCase_ = 2; + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public Builder clearResource() { + if (resourceBuilder_ == null) { + if (writeStatusCase_ == 2) { + writeStatusCase_ = 0; + writeStatus_ = null; + onChanged(); + } + } else { + if (writeStatusCase_ == 2) { + writeStatusCase_ = 0; + writeStatus_ = null; + } + resourceBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public com.google.storage.v2.Object.Builder getResourceBuilder() { + return internalGetResourceFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder() { + if ((writeStatusCase_ == 2) && (resourceBuilder_ != null)) { + return resourceBuilder_.getMessageOrBuilder(); + } else { + if (writeStatusCase_ == 2) { + return (com.google.storage.v2.Object) writeStatus_; + } + return com.google.storage.v2.Object.getDefaultInstance(); + } + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + internalGetResourceFieldBuilder() { + if (resourceBuilder_ == null) { + if (!(writeStatusCase_ == 2)) { + writeStatus_ = com.google.storage.v2.Object.getDefaultInstance(); + } + resourceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder>( + (com.google.storage.v2.Object) writeStatus_, getParentForChildren(), isClean()); + writeStatus_ = null; + } + writeStatusCase_ = 2; + onChanged(); + return resourceBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.QueryWriteStatusResponse) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.QueryWriteStatusResponse) + private static final com.google.storage.v2.QueryWriteStatusResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.QueryWriteStatusResponse(); + } + + public static com.google.storage.v2.QueryWriteStatusResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public QueryWriteStatusResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw 
new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.QueryWriteStatusResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/QueryWriteStatusResponseOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/QueryWriteStatusResponseOrBuilder.java new file mode 100644 index 000000000000..fda3b384bf11 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/QueryWriteStatusResponseOrBuilder.java @@ -0,0 +1,102 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface QueryWriteStatusResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.QueryWriteStatusResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The total number of bytes that have been processed for the given object
+   * from all `WriteObject` calls. This is the correct value for the
+   * 'write_offset' field to use when resuming the `WriteObject` operation.
+   * Only set if the upload has not finalized.
+   * 
+ * + * int64 persisted_size = 1; + * + * @return Whether the persistedSize field is set. + */ + boolean hasPersistedSize(); + + /** + * + * + *
+   * The total number of bytes that have been processed for the given object
+   * from all `WriteObject` calls. This is the correct value for the
+   * 'write_offset' field to use when resuming the `WriteObject` operation.
+   * Only set if the upload has not finalized.
+   * 
+ * + * int64 persisted_size = 1; + * + * @return The persistedSize. + */ + long getPersistedSize(); + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return Whether the resource field is set. + */ + boolean hasResource(); + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return The resource. + */ + com.google.storage.v2.Object getResource(); + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + */ + com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder(); + + com.google.storage.v2.QueryWriteStatusResponse.WriteStatusCase getWriteStatusCase(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadObjectRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadObjectRequest.java new file mode 100644 index 000000000000..0d65ff5cc863 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadObjectRequest.java @@ -0,0 +1,2397 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for [ReadObject][google.storage.v2.Storage.ReadObject].
+ * 
+ * + * Protobuf type {@code google.storage.v2.ReadObjectRequest} + */ +@com.google.protobuf.Generated +public final class ReadObjectRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ReadObjectRequest) + ReadObjectRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReadObjectRequest"); + } + + // Use ReadObjectRequest.newBuilder() to construct. + private ReadObjectRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ReadObjectRequest() { + bucket_ = ""; + object_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ReadObjectRequest.class, + com.google.storage.v2.ReadObjectRequest.Builder.class); + } + + private int bitField0_; + public static final int BUCKET_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object bucket_ = ""; + + /** + * + * + *
+   * Required. The name of the bucket containing the object to read.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + @java.lang.Override + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the bucket containing the object to read.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OBJECT_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object object_ = ""; + + /** + * + * + *
+   * Required. The name of the object to read.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + @java.lang.Override + public java.lang.String getObject() { + java.lang.Object ref = object_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + object_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the object to read.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + @java.lang.Override + public com.google.protobuf.ByteString getObjectBytes() { + java.lang.Object ref = object_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + object_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int GENERATION_FIELD_NUMBER = 3; + private long generation_ = 0L; + + /** + * + * + *
+   * Optional. If present, selects a specific revision of this object (as
+   * opposed to the latest version, the default).
+   * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + public static final int READ_OFFSET_FIELD_NUMBER = 4; + private long readOffset_ = 0L; + + /** + * + * + *
+   * Optional. The offset for the first byte to return in the read, relative to
+   * the start of the object.
+   *
+   * A negative `read_offset` value is interpreted as the number of bytes
+   * back from the end of the object to be returned. For example, if an object's
+   * length is `15` bytes, a `ReadObjectRequest` with `read_offset` = `-5` and
+   * `read_limit` = `3` would return bytes `10` through `12` of the object.
+   * Requesting a negative offset with magnitude larger than the size of the
+   * object returns the entire object.
+   * 
+ * + * int64 read_offset = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The readOffset. + */ + @java.lang.Override + public long getReadOffset() { + return readOffset_; + } + + public static final int READ_LIMIT_FIELD_NUMBER = 5; + private long readLimit_ = 0L; + + /** + * + * + *
+   * Optional. The maximum number of `data` bytes the server is allowed to
+   * return in the sum of all `Object` messages. A `read_limit` of zero
+   * indicates that there is no limit, and a negative `read_limit` causes an
+   * error.
+   *
+   * If the stream returns fewer bytes than allowed by the `read_limit` and no
+   * error occurred, the stream includes all data from the `read_offset` to the
+   * end of the resource.
+   * 
+ * + * int64 read_limit = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The readLimit. + */ + @java.lang.Override + public long getReadLimit() { + return readLimit_; + } + + public static final int IF_GENERATION_MATCH_FIELD_NUMBER = 6; + private long ifGenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 6; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 6; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + public static final int IF_GENERATION_NOT_MATCH_FIELD_NUMBER = 7; + private long ifGenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 7; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 7; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 8; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 8; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 8; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 9; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 9; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 9; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER = 10; + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + @java.lang.Override + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + public static final int READ_MASK_FIELD_NUMBER = 12; + private com.google.protobuf.FieldMask readMask_; + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * The `checksummed_data` field and its children are always present.
+   * If no mask is specified, it defaults to all fields except
+   * `metadata.owner` and `metadata.acl`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + * + * @return Whether the readMask field is set. + */ + @java.lang.Override + public boolean hasReadMask() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * The `checksummed_data` field and its children are always present.
+   * If no mask is specified, it defaults to all fields except
+   * `metadata.owner` and `metadata.acl`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + * + * @return The readMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getReadMask() { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * The `checksummed_data` field and its children are always present.
+   * If no mask is specified, it defaults to all fields except
+   * `metadata.owner` and `metadata.acl`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder() { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, bucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(object_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, object_); + } + if (generation_ != 0L) { + output.writeInt64(3, generation_); + } + if (readOffset_ != 0L) { + output.writeInt64(4, readOffset_); + } + if (readLimit_ != 0L) { + output.writeInt64(5, readLimit_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(6, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(7, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt64(8, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeInt64(9, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeMessage(10, getCommonObjectRequestParams()); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeMessage(12, getReadMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(1, bucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(object_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, object_); + } + if (generation_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, generation_); + } + if (readOffset_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, readOffset_); + } + if (readLimit_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(5, readLimit_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(6, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(7, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(8, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(9, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 10, getCommonObjectRequestParams()); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(12, getReadMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ReadObjectRequest)) { + return super.equals(obj); + } + com.google.storage.v2.ReadObjectRequest other = (com.google.storage.v2.ReadObjectRequest) obj; + + if (!getBucket().equals(other.getBucket())) return false; + if (!getObject().equals(other.getObject())) return false; + if (getGeneration() != other.getGeneration()) return false; + if 
(getReadOffset() != other.getReadOffset()) return false; + if (getReadLimit() != other.getReadLimit()) return false; + if (hasIfGenerationMatch() != other.hasIfGenerationMatch()) return false; + if (hasIfGenerationMatch()) { + if (getIfGenerationMatch() != other.getIfGenerationMatch()) return false; + } + if (hasIfGenerationNotMatch() != other.hasIfGenerationNotMatch()) return false; + if (hasIfGenerationNotMatch()) { + if (getIfGenerationNotMatch() != other.getIfGenerationNotMatch()) return false; + } + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (hasCommonObjectRequestParams() != other.hasCommonObjectRequestParams()) return false; + if (hasCommonObjectRequestParams()) { + if (!getCommonObjectRequestParams().equals(other.getCommonObjectRequestParams())) + return false; + } + if (hasReadMask() != other.hasReadMask()) return false; + if (hasReadMask()) { + if (!getReadMask().equals(other.getReadMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getBucket().hashCode(); + hash = (37 * hash) + OBJECT_FIELD_NUMBER; + hash = (53 * hash) + getObject().hashCode(); + hash = (37 * hash) + GENERATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getGeneration()); + hash = (37 * hash) + READ_OFFSET_FIELD_NUMBER; + hash = (53 * hash) + 
com.google.protobuf.Internal.hashLong(getReadOffset()); + hash = (37 * hash) + READ_LIMIT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getReadLimit()); + if (hasIfGenerationMatch()) { + hash = (37 * hash) + IF_GENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationMatch()); + } + if (hasIfGenerationNotMatch()) { + hash = (37 * hash) + IF_GENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationNotMatch()); + } + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + if (hasCommonObjectRequestParams()) { + hash = (37 * hash) + COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getCommonObjectRequestParams().hashCode(); + } + if (hasReadMask()) { + hash = (37 * hash) + READ_MASK_FIELD_NUMBER; + hash = (53 * hash) + getReadMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ReadObjectRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ReadObjectRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ReadObjectRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ReadObjectRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ReadObjectRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ReadObjectRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ReadObjectRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ReadObjectRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ReadObjectRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ReadObjectRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ReadObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ReadObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ReadObjectRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for [ReadObject][google.storage.v2.Storage.ReadObject].
+   * 
+ * + * Protobuf type {@code google.storage.v2.ReadObjectRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ReadObjectRequest) + com.google.storage.v2.ReadObjectRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ReadObjectRequest.class, + com.google.storage.v2.ReadObjectRequest.Builder.class); + } + + // Construct using com.google.storage.v2.ReadObjectRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCommonObjectRequestParamsFieldBuilder(); + internalGetReadMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + bucket_ = ""; + object_ = ""; + generation_ = 0L; + readOffset_ = 0L; + readLimit_ = 0L; + ifGenerationMatch_ = 0L; + ifGenerationNotMatch_ = 0L; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + readMask_ = null; + if (readMaskBuilder_ != null) { + readMaskBuilder_.dispose(); + readMaskBuilder_ = null; + } 
+ return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadObjectRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ReadObjectRequest getDefaultInstanceForType() { + return com.google.storage.v2.ReadObjectRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ReadObjectRequest build() { + com.google.storage.v2.ReadObjectRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ReadObjectRequest buildPartial() { + com.google.storage.v2.ReadObjectRequest result = + new com.google.storage.v2.ReadObjectRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ReadObjectRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.bucket_ = bucket_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.object_ = object_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.generation_ = generation_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.readOffset_ = readOffset_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.readLimit_ = readLimit_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000020) != 0)) { + result.ifGenerationMatch_ = ifGenerationMatch_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.ifGenerationNotMatch_ = ifGenerationNotMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + 
result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.commonObjectRequestParams_ = + commonObjectRequestParamsBuilder_ == null + ? commonObjectRequestParams_ + : commonObjectRequestParamsBuilder_.build(); + to_bitField0_ |= 0x00000010; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.readMask_ = readMaskBuilder_ == null ? readMask_ : readMaskBuilder_.build(); + to_bitField0_ |= 0x00000020; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ReadObjectRequest) { + return mergeFrom((com.google.storage.v2.ReadObjectRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ReadObjectRequest other) { + if (other == com.google.storage.v2.ReadObjectRequest.getDefaultInstance()) return this; + if (!other.getBucket().isEmpty()) { + bucket_ = other.bucket_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getObject().isEmpty()) { + object_ = other.object_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getGeneration() != 0L) { + setGeneration(other.getGeneration()); + } + if (other.getReadOffset() != 0L) { + setReadOffset(other.getReadOffset()); + } + if (other.getReadLimit() != 0L) { + setReadLimit(other.getReadLimit()); + } + if (other.hasIfGenerationMatch()) { + setIfGenerationMatch(other.getIfGenerationMatch()); + } + if (other.hasIfGenerationNotMatch()) { + setIfGenerationNotMatch(other.getIfGenerationNotMatch()); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (other.hasCommonObjectRequestParams()) { + 
mergeCommonObjectRequestParams(other.getCommonObjectRequestParams()); + } + if (other.hasReadMask()) { + mergeReadMask(other.getReadMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + bucket_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + object_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + generation_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: + { + readOffset_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 40: + { + readLimit_ = input.readInt64(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 48: + { + ifGenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000020; + break; + } // case 48 + case 56: + { + ifGenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000040; + break; + } // case 56 + case 64: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000080; + break; + } // case 64 + case 72: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000100; + break; + } // case 72 + case 82: + { + input.readMessage( + internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000200; + break; + } // case 82 + case 98: + { + input.readMessage( + internalGetReadMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 
0x00000400; + break; + } // case 98 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object bucket_ = ""; + + /** + * + * + *
+     * Required. The name of the bucket containing the object to read.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the bucket containing the object to read.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the bucket containing the object to read.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bucket to set. + * @return This builder for chaining. + */ + public Builder setBucket(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the bucket containing the object to read.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearBucket() { + bucket_ = getDefaultInstance().getBucket(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the bucket containing the object to read.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for bucket to set. + * @return This builder for chaining. + */ + public Builder setBucketBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object object_ = ""; + + /** + * + * + *
+     * Required. The name of the object to read.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + public java.lang.String getObject() { + java.lang.Object ref = object_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + object_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the object to read.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + public com.google.protobuf.ByteString getObjectBytes() { + java.lang.Object ref = object_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + object_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the object to read.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The object to set. + * @return This builder for chaining. + */ + public Builder setObject(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + object_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the object to read.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearObject() { + object_ = getDefaultInstance().getObject(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the object to read.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for object to set. + * @return This builder for chaining. + */ + public Builder setObjectBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + object_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private long generation_; + + /** + * + * + *
+     * Optional. If present, selects a specific revision of this object (as
+     * opposed to the latest version, the default).
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + /** + * + * + *
+     * Optional. If present, selects a specific revision of this object (as
+     * opposed to the latest version, the default).
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The generation to set. + * @return This builder for chaining. + */ + public Builder setGeneration(long value) { + + generation_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If present, selects a specific revision of this object (as
+     * opposed to the latest version, the default).
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearGeneration() { + bitField0_ = (bitField0_ & ~0x00000004); + generation_ = 0L; + onChanged(); + return this; + } + + private long readOffset_; + + /** + * + * + *
+     * Optional. The offset for the first byte to return in the read, relative to
+     * the start of the object.
+     *
+     * A negative `read_offset` value is interpreted as the number of bytes
+     * back from the end of the object to be returned. For example, if an object's
+     * length is `15` bytes, a `ReadObjectRequest` with `read_offset` = `-5` and
+     * `read_limit` = `3` would return bytes `10` through `12` of the object.
+     * Requesting a negative offset with magnitude larger than the size of the
+     * object returns the entire object.
+     * 
+ * + * int64 read_offset = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The readOffset. + */ + @java.lang.Override + public long getReadOffset() { + return readOffset_; + } + + /** + * + * + *
+     * Optional. The offset for the first byte to return in the read, relative to
+     * the start of the object.
+     *
+     * A negative `read_offset` value is interpreted as the number of bytes
+     * back from the end of the object to be returned. For example, if an object's
+     * length is `15` bytes, a `ReadObjectRequest` with `read_offset` = `-5` and
+     * `read_limit` = `3` would return bytes `10` through `12` of the object.
+     * Requesting a negative offset with magnitude larger than the size of the
+     * object returns the entire object.
+     * 
+ * + * int64 read_offset = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The readOffset to set. + * @return This builder for chaining. + */ + public Builder setReadOffset(long value) { + + readOffset_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The offset for the first byte to return in the read, relative to
+     * the start of the object.
+     *
+     * A negative `read_offset` value is interpreted as the number of bytes
+     * back from the end of the object to be returned. For example, if an object's
+     * length is `15` bytes, a `ReadObjectRequest` with `read_offset` = `-5` and
+     * `read_limit` = `3` would return bytes `10` through `12` of the object.
+     * Requesting a negative offset with magnitude larger than the size of the
+     * object returns the entire object.
+     * 
+ * + * int64 read_offset = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearReadOffset() { + bitField0_ = (bitField0_ & ~0x00000008); + readOffset_ = 0L; + onChanged(); + return this; + } + + private long readLimit_; + + /** + * + * + *
+     * Optional. The maximum number of `data` bytes the server is allowed to
+     * return in the sum of all `Object` messages. A `read_limit` of zero
+     * indicates that there is no limit, and a negative `read_limit` causes an
+     * error.
+     *
+     * If the stream returns fewer bytes than allowed by the `read_limit` and no
+     * error occurred, the stream includes all data from the `read_offset` to the
+     * end of the resource.
+     * 
+ * + * int64 read_limit = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The readLimit. + */ + @java.lang.Override + public long getReadLimit() { + return readLimit_; + } + + /** + * + * + *
+     * Optional. The maximum number of `data` bytes the server is allowed to
+     * return in the sum of all `Object` messages. A `read_limit` of zero
+     * indicates that there is no limit, and a negative `read_limit` causes an
+     * error.
+     *
+     * If the stream returns fewer bytes than allowed by the `read_limit` and no
+     * error occurred, the stream includes all data from the `read_offset` to the
+     * end of the resource.
+     * 
+ * + * int64 read_limit = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The readLimit to set. + * @return This builder for chaining. + */ + public Builder setReadLimit(long value) { + + readLimit_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The maximum number of `data` bytes the server is allowed to
+     * return in the sum of all `Object` messages. A `read_limit` of zero
+     * indicates that there is no limit, and a negative `read_limit` causes an
+     * error.
+     *
+     * If the stream returns fewer bytes than allowed by the `read_limit` and no
+     * error occurred, the stream includes all data from the `read_offset` to the
+     * end of the resource.
+     * 
+ * + * int64 read_limit = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearReadLimit() { + bitField0_ = (bitField0_ & ~0x00000010); + readLimit_ = 0L; + onChanged(); + return this; + } + + private long ifGenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 6; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 6; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 6; + * + * @param value The ifGenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationMatch(long value) { + + ifGenerationMatch_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 6; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000020); + ifGenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifGenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 7; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 7; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 7; + * + * @param value The ifGenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationNotMatch(long value) { + + ifGenerationNotMatch_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 7; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000040); + ifGenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 8; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 8; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 8; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 8; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000080); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 9; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 9; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 9; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 9; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000100); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + commonObjectRequestParamsBuilder_; + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000200) != 0); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + if (commonObjectRequestParamsBuilder_ == null) { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } else { + return commonObjectRequestParamsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonObjectRequestParams_ = value; + } else { + commonObjectRequestParamsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams.Builder builderForValue) { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParams_ = builderForValue.build(); + } else { + commonObjectRequestParamsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (((bitField0_ & 0x00000200) != 0) + && commonObjectRequestParams_ != null + && commonObjectRequestParams_ + != com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance()) { + getCommonObjectRequestParamsBuilder().mergeFrom(value); + } else { + commonObjectRequestParams_ = value; + } + } else { + commonObjectRequestParamsBuilder_.mergeFrom(value); + } + if (commonObjectRequestParams_ != null) { + bitField0_ |= 0x00000200; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCommonObjectRequestParams() { + bitField0_ = (bitField0_ & ~0x00000200); + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParams.Builder + getCommonObjectRequestParamsBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + if (commonObjectRequestParamsBuilder_ != null) { + return commonObjectRequestParamsBuilder_.getMessageOrBuilder(); + } else { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + internalGetCommonObjectRequestParamsFieldBuilder() { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParamsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder>( + getCommonObjectRequestParams(), getParentForChildren(), isClean()); + commonObjectRequestParams_ = null; + } + return commonObjectRequestParamsBuilder_; + } + + private com.google.protobuf.FieldMask readMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + readMaskBuilder_; + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + * + * @return Whether the readMask field is set. + */ + public boolean hasReadMask() { + return ((bitField0_ & 0x00000400) != 0); + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + * + * @return The readMask. + */ + public com.google.protobuf.FieldMask getReadMask() { + if (readMaskBuilder_ == null) { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } else { + return readMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + */ + public Builder setReadMask(com.google.protobuf.FieldMask value) { + if (readMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readMask_ = value; + } else { + readMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + */ + public Builder setReadMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (readMaskBuilder_ == null) { + readMask_ = builderForValue.build(); + } else { + readMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + */ + public Builder mergeReadMask(com.google.protobuf.FieldMask value) { + if (readMaskBuilder_ == null) { + if (((bitField0_ & 0x00000400) != 0) + && readMask_ != null + && readMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getReadMaskBuilder().mergeFrom(value); + } else { + readMask_ = value; + } + } else { + readMaskBuilder_.mergeFrom(value); + } + if (readMask_ != null) { + bitField0_ |= 0x00000400; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + */ + public Builder clearReadMask() { + bitField0_ = (bitField0_ & ~0x00000400); + readMask_ = null; + if (readMaskBuilder_ != null) { + readMaskBuilder_.dispose(); + readMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + */ + public com.google.protobuf.FieldMask.Builder getReadMaskBuilder() { + bitField0_ |= 0x00000400; + onChanged(); + return internalGetReadMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + */ + public com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder() { + if (readMaskBuilder_ != null) { + return readMaskBuilder_.getMessageOrBuilder(); + } else { + return readMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : readMask_; + } + } + + /** + * + * + *
+     * Mask specifying which fields to read.
+     * The `checksummed_data` field and its children are always present.
+     * If no mask is specified, it defaults to all fields except `metadata.
+     * owner` and `metadata.acl`.
+     * `*` might be used to mean "all fields".
+     * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetReadMaskFieldBuilder() { + if (readMaskBuilder_ == null) { + readMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getReadMask(), getParentForChildren(), isClean()); + readMask_ = null; + } + return readMaskBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ReadObjectRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ReadObjectRequest) + private static final com.google.storage.v2.ReadObjectRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ReadObjectRequest(); + } + + public static com.google.storage.v2.ReadObjectRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadObjectRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser 
parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ReadObjectRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadObjectRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadObjectRequestOrBuilder.java new file mode 100644 index 000000000000..6159080ed944 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadObjectRequestOrBuilder.java @@ -0,0 +1,352 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ReadObjectRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ReadObjectRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the bucket containing the object to read.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + java.lang.String getBucket(); + + /** + * + * + *
+   * Required. The name of the bucket containing the object to read.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + com.google.protobuf.ByteString getBucketBytes(); + + /** + * + * + *
+   * Required. The name of the object to read.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + java.lang.String getObject(); + + /** + * + * + *
+   * Required. The name of the object to read.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + com.google.protobuf.ByteString getObjectBytes(); + + /** + * + * + *
+   * Optional. If present, selects a specific revision of this object (as
+   * opposed to the latest version, the default).
+   * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The generation. + */ + long getGeneration(); + + /** + * + * + *
+   * Optional. The offset for the first byte to return in the read, relative to
+   * the start of the object.
+   *
+   * A negative `read_offset` value is interpreted as the number of bytes
+   * back from the end of the object to be returned. For example, if an object's
+   * length is `15` bytes, a `ReadObjectRequest` with `read_offset` = `-5` and
+   * `read_limit` = `3` would return bytes `10` through `12` of the object.
+   * Requesting a negative offset with magnitude larger than the size of the
+   * object returns the entire object.
+   * 
+ * + * int64 read_offset = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The readOffset. + */ + long getReadOffset(); + + /** + * + * + *
+   * Optional. The maximum number of `data` bytes the server is allowed to
+   * return in the sum of all `Object` messages. A `read_limit` of zero
+   * indicates that there is no limit, and a negative `read_limit` causes an
+   * error.
+   *
+   * If the stream returns fewer bytes than allowed by the `read_limit` and no
+   * error occurred, the stream includes all data from the `read_offset` to the
+   * end of the resource.
+   * 
+ * + * int64 read_limit = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The readLimit. + */ + long getReadLimit(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 6; + * + * @return Whether the ifGenerationMatch field is set. + */ + boolean hasIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 6; + * + * @return The ifGenerationMatch. + */ + long getIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 7; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + boolean hasIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 7; + * + * @return The ifGenerationNotMatch. + */ + long getIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 8; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 8; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 9; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 9; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + boolean hasCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.CommonObjectRequestParamsOrBuilder getCommonObjectRequestParamsOrBuilder(); + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * The `checksummed_data` field and its children are always present.
+   * If no mask is specified, it defaults to all fields except `metadata.
+   * owner` and `metadata.acl`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + * + * @return Whether the readMask field is set. + */ + boolean hasReadMask(); + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * The `checksummed_data` field and its children are always present.
+   * If no mask is specified, it defaults to all fields except `metadata.
+   * owner` and `metadata.acl`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + * + * @return The readMask. + */ + com.google.protobuf.FieldMask getReadMask(); + + /** + * + * + *
+   * Mask specifying which fields to read.
+   * The `checksummed_data` field and its children are always present.
+   * If no mask is specified, it defaults to all fields except `metadata.
+   * owner` and `metadata.acl`.
+   * `*` might be used to mean "all fields".
+   * 
+ * + * optional .google.protobuf.FieldMask read_mask = 12; + */ + com.google.protobuf.FieldMaskOrBuilder getReadMaskOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadObjectResponse.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadObjectResponse.java new file mode 100644 index 000000000000..b727c0272049 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadObjectResponse.java @@ -0,0 +1,1632 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Response message for [ReadObject][google.storage.v2.Storage.ReadObject].
+ * 
+ * + * Protobuf type {@code google.storage.v2.ReadObjectResponse} + */ +@com.google.protobuf.Generated +public final class ReadObjectResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ReadObjectResponse) + ReadObjectResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReadObjectResponse"); + } + + // Use ReadObjectResponse.newBuilder() to construct. + private ReadObjectResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ReadObjectResponse() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadObjectResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadObjectResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ReadObjectResponse.class, + com.google.storage.v2.ReadObjectResponse.Builder.class); + } + + private int bitField0_; + public static final int CHECKSUMMED_DATA_FIELD_NUMBER = 1; + private com.google.storage.v2.ChecksummedData checksummedData_; + + /** + * + * + *
+   * A portion of the data for the object. The service might leave `data`
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + * + * @return Whether the checksummedData field is set. + */ + @java.lang.Override + public boolean hasChecksummedData() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * A portion of the data for the object. The service might leave `data`
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + * + * @return The checksummedData. + */ + @java.lang.Override + public com.google.storage.v2.ChecksummedData getChecksummedData() { + return checksummedData_ == null + ? com.google.storage.v2.ChecksummedData.getDefaultInstance() + : checksummedData_; + } + + /** + * + * + *
+   * A portion of the data for the object. The service might leave `data`
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + @java.lang.Override + public com.google.storage.v2.ChecksummedDataOrBuilder getChecksummedDataOrBuilder() { + return checksummedData_ == null + ? com.google.storage.v2.ChecksummedData.getDefaultInstance() + : checksummedData_; + } + + public static final int OBJECT_CHECKSUMS_FIELD_NUMBER = 2; + private com.google.storage.v2.ObjectChecksums objectChecksums_; + + /** + * + * + *
+   * The checksums of the complete object. If the object is downloaded in full,
+   * the client should compute one of these checksums over the downloaded object
+   * and compare it against the value provided here.
+   * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + * + * @return Whether the objectChecksums field is set. + */ + @java.lang.Override + public boolean hasObjectChecksums() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * The checksums of the complete object. If the object is downloaded in full,
+   * the client should compute one of these checksums over the downloaded object
+   * and compare it against the value provided here.
+   * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + * + * @return The objectChecksums. + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksums getObjectChecksums() { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + + /** + * + * + *
+   * The checksums of the complete object. If the object is downloaded in full,
+   * the client should compute one of these checksums over the downloaded object
+   * and compare it against the value provided here.
+   * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder() { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + + public static final int CONTENT_RANGE_FIELD_NUMBER = 3; + private com.google.storage.v2.ContentRange contentRange_; + + /** + * + * + *
+   * If `read_offset` and or `read_limit` is specified on the
+   * `ReadObjectRequest`, `ContentRange` is populated on the first
+   * `ReadObjectResponse` message of the read stream.
+   * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + * + * @return Whether the contentRange field is set. + */ + @java.lang.Override + public boolean hasContentRange() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * If `read_offset` and or `read_limit` is specified on the
+   * `ReadObjectRequest`, `ContentRange` is populated on the first
+   * `ReadObjectResponse` message of the read stream.
+   * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + * + * @return The contentRange. + */ + @java.lang.Override + public com.google.storage.v2.ContentRange getContentRange() { + return contentRange_ == null + ? com.google.storage.v2.ContentRange.getDefaultInstance() + : contentRange_; + } + + /** + * + * + *
+   * If `read_offset` and or `read_limit` is specified on the
+   * `ReadObjectRequest`, `ContentRange` is populated on the first
+   * `ReadObjectResponse` message of the read stream.
+   * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + */ + @java.lang.Override + public com.google.storage.v2.ContentRangeOrBuilder getContentRangeOrBuilder() { + return contentRange_ == null + ? com.google.storage.v2.ContentRange.getDefaultInstance() + : contentRange_; + } + + public static final int METADATA_FIELD_NUMBER = 4; + private com.google.storage.v2.Object metadata_; + + /** + * + * + *
+   * Metadata of the object whose media is being returned.
+   * Only populated in the first response in the stream.
+   * 
+ * + * .google.storage.v2.Object metadata = 4; + * + * @return Whether the metadata field is set. + */ + @java.lang.Override + public boolean hasMetadata() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Metadata of the object whose media is being returned.
+   * Only populated in the first response in the stream.
+   * 
+ * + * .google.storage.v2.Object metadata = 4; + * + * @return The metadata. + */ + @java.lang.Override + public com.google.storage.v2.Object getMetadata() { + return metadata_ == null ? com.google.storage.v2.Object.getDefaultInstance() : metadata_; + } + + /** + * + * + *
+   * Metadata of the object whose media is being returned.
+   * Only populated in the first response in the stream.
+   * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getMetadataOrBuilder() { + return metadata_ == null ? com.google.storage.v2.Object.getDefaultInstance() : metadata_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getChecksummedData()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getObjectChecksums()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(3, getContentRange()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(4, getMetadata()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getChecksummedData()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getObjectChecksums()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getContentRange()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getMetadata()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.storage.v2.ReadObjectResponse)) { + return super.equals(obj); + } + com.google.storage.v2.ReadObjectResponse other = (com.google.storage.v2.ReadObjectResponse) obj; + + if (hasChecksummedData() != other.hasChecksummedData()) return false; + if (hasChecksummedData()) { + if (!getChecksummedData().equals(other.getChecksummedData())) return false; + } + if (hasObjectChecksums() != other.hasObjectChecksums()) return false; + if (hasObjectChecksums()) { + if (!getObjectChecksums().equals(other.getObjectChecksums())) return false; + } + if (hasContentRange() != other.hasContentRange()) return false; + if (hasContentRange()) { + if (!getContentRange().equals(other.getContentRange())) return false; + } + if (hasMetadata() != other.hasMetadata()) return false; + if (hasMetadata()) { + if (!getMetadata().equals(other.getMetadata())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasChecksummedData()) { + hash = (37 * hash) + CHECKSUMMED_DATA_FIELD_NUMBER; + hash = (53 * hash) + getChecksummedData().hashCode(); + } + if (hasObjectChecksums()) { + hash = (37 * hash) + OBJECT_CHECKSUMS_FIELD_NUMBER; + hash = (53 * hash) + getObjectChecksums().hashCode(); + } + if (hasContentRange()) { + hash = (37 * hash) + CONTENT_RANGE_FIELD_NUMBER; + hash = (53 * hash) + getContentRange().hashCode(); + } + if (hasMetadata()) { + hash = (37 * hash) + METADATA_FIELD_NUMBER; + hash = (53 * hash) + getMetadata().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ReadObjectResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.storage.v2.ReadObjectResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ReadObjectResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ReadObjectResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ReadObjectResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ReadObjectResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ReadObjectResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ReadObjectResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ReadObjectResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static 
com.google.storage.v2.ReadObjectResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ReadObjectResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ReadObjectResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ReadObjectResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for [ReadObject][google.storage.v2.Storage.ReadObject].
+   * 
+ * + * Protobuf type {@code google.storage.v2.ReadObjectResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ReadObjectResponse) + com.google.storage.v2.ReadObjectResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadObjectResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadObjectResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ReadObjectResponse.class, + com.google.storage.v2.ReadObjectResponse.Builder.class); + } + + // Construct using com.google.storage.v2.ReadObjectResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetChecksummedDataFieldBuilder(); + internalGetObjectChecksumsFieldBuilder(); + internalGetContentRangeFieldBuilder(); + internalGetMetadataFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + checksummedData_ = null; + if (checksummedDataBuilder_ != null) { + checksummedDataBuilder_.dispose(); + checksummedDataBuilder_ = null; + } + objectChecksums_ = null; + if (objectChecksumsBuilder_ != null) { + objectChecksumsBuilder_.dispose(); + objectChecksumsBuilder_ = null; + } + contentRange_ = null; + if (contentRangeBuilder_ != null) { + contentRangeBuilder_.dispose(); + contentRangeBuilder_ = null; + } + metadata_ 
= null; + if (metadataBuilder_ != null) { + metadataBuilder_.dispose(); + metadataBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadObjectResponse_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ReadObjectResponse getDefaultInstanceForType() { + return com.google.storage.v2.ReadObjectResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ReadObjectResponse build() { + com.google.storage.v2.ReadObjectResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ReadObjectResponse buildPartial() { + com.google.storage.v2.ReadObjectResponse result = + new com.google.storage.v2.ReadObjectResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ReadObjectResponse result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.checksummedData_ = + checksummedDataBuilder_ == null ? checksummedData_ : checksummedDataBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.objectChecksums_ = + objectChecksumsBuilder_ == null ? objectChecksums_ : objectChecksumsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.contentRange_ = + contentRangeBuilder_ == null ? contentRange_ : contentRangeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.metadata_ = metadataBuilder_ == null ? 
metadata_ : metadataBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ReadObjectResponse) { + return mergeFrom((com.google.storage.v2.ReadObjectResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ReadObjectResponse other) { + if (other == com.google.storage.v2.ReadObjectResponse.getDefaultInstance()) return this; + if (other.hasChecksummedData()) { + mergeChecksummedData(other.getChecksummedData()); + } + if (other.hasObjectChecksums()) { + mergeObjectChecksums(other.getObjectChecksums()); + } + if (other.hasContentRange()) { + mergeContentRange(other.getContentRange()); + } + if (other.hasMetadata()) { + mergeMetadata(other.getMetadata()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetChecksummedDataFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetObjectChecksumsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetContentRangeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + 
input.readMessage( + internalGetMetadataFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.v2.ChecksummedData checksummedData_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ChecksummedData, + com.google.storage.v2.ChecksummedData.Builder, + com.google.storage.v2.ChecksummedDataOrBuilder> + checksummedDataBuilder_; + + /** + * + * + *
+     * A portion of the data for the object. The service might leave `data`
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + * + * @return Whether the checksummedData field is set. + */ + public boolean hasChecksummedData() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * A portion of the data for the object. The service might leave `data`
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + * + * @return The checksummedData. + */ + public com.google.storage.v2.ChecksummedData getChecksummedData() { + if (checksummedDataBuilder_ == null) { + return checksummedData_ == null + ? com.google.storage.v2.ChecksummedData.getDefaultInstance() + : checksummedData_; + } else { + return checksummedDataBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * A portion of the data for the object. The service might leave `data`
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + public Builder setChecksummedData(com.google.storage.v2.ChecksummedData value) { + if (checksummedDataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + checksummedData_ = value; + } else { + checksummedDataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * A portion of the data for the object. The service might leave `data`
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + public Builder setChecksummedData( + com.google.storage.v2.ChecksummedData.Builder builderForValue) { + if (checksummedDataBuilder_ == null) { + checksummedData_ = builderForValue.build(); + } else { + checksummedDataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * A portion of the data for the object. The service might leave `data`
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + public Builder mergeChecksummedData(com.google.storage.v2.ChecksummedData value) { + if (checksummedDataBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && checksummedData_ != null + && checksummedData_ != com.google.storage.v2.ChecksummedData.getDefaultInstance()) { + getChecksummedDataBuilder().mergeFrom(value); + } else { + checksummedData_ = value; + } + } else { + checksummedDataBuilder_.mergeFrom(value); + } + if (checksummedData_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * A portion of the data for the object. The service might leave `data`
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + public Builder clearChecksummedData() { + bitField0_ = (bitField0_ & ~0x00000001); + checksummedData_ = null; + if (checksummedDataBuilder_ != null) { + checksummedDataBuilder_.dispose(); + checksummedDataBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * A portion of the data for the object. The service might leave `data`
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + public com.google.storage.v2.ChecksummedData.Builder getChecksummedDataBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetChecksummedDataFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * A portion of the data for the object. The service might leave `data`
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + public com.google.storage.v2.ChecksummedDataOrBuilder getChecksummedDataOrBuilder() { + if (checksummedDataBuilder_ != null) { + return checksummedDataBuilder_.getMessageOrBuilder(); + } else { + return checksummedData_ == null + ? com.google.storage.v2.ChecksummedData.getDefaultInstance() + : checksummedData_; + } + } + + /** + * + * + *
+     * A portion of the data for the object. The service might leave `data`
+     * empty for any given `ReadResponse`. This enables the service to inform the
+     * client that the request is still live while it is running an operation to
+     * generate more data.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ChecksummedData, + com.google.storage.v2.ChecksummedData.Builder, + com.google.storage.v2.ChecksummedDataOrBuilder> + internalGetChecksummedDataFieldBuilder() { + if (checksummedDataBuilder_ == null) { + checksummedDataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ChecksummedData, + com.google.storage.v2.ChecksummedData.Builder, + com.google.storage.v2.ChecksummedDataOrBuilder>( + getChecksummedData(), getParentForChildren(), isClean()); + checksummedData_ = null; + } + return checksummedDataBuilder_; + } + + private com.google.storage.v2.ObjectChecksums objectChecksums_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + objectChecksumsBuilder_; + + /** + * + * + *
+     * The checksums of the complete object. If the object is downloaded in full,
+     * the client should compute one of these checksums over the downloaded object
+     * and compare it against the value provided here.
+     * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + * + * @return Whether the objectChecksums field is set. + */ + public boolean hasObjectChecksums() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * The checksums of the complete object. If the object is downloaded in full,
+     * the client should compute one of these checksums over the downloaded object
+     * and compare it against the value provided here.
+     * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + * + * @return The objectChecksums. + */ + public com.google.storage.v2.ObjectChecksums getObjectChecksums() { + if (objectChecksumsBuilder_ == null) { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } else { + return objectChecksumsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * The checksums of the complete object. If the object is downloaded in full,
+     * the client should compute one of these checksums over the downloaded object
+     * and compare it against the value provided here.
+     * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + */ + public Builder setObjectChecksums(com.google.storage.v2.ObjectChecksums value) { + if (objectChecksumsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + objectChecksums_ = value; + } else { + objectChecksumsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The checksums of the complete object. If the object is downloaded in full,
+     * the client should compute one of these checksums over the downloaded object
+     * and compare it against the value provided here.
+     * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + */ + public Builder setObjectChecksums( + com.google.storage.v2.ObjectChecksums.Builder builderForValue) { + if (objectChecksumsBuilder_ == null) { + objectChecksums_ = builderForValue.build(); + } else { + objectChecksumsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The checksums of the complete object. If the object is downloaded in full,
+     * the client should compute one of these checksums over the downloaded object
+     * and compare it against the value provided here.
+     * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + */ + public Builder mergeObjectChecksums(com.google.storage.v2.ObjectChecksums value) { + if (objectChecksumsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && objectChecksums_ != null + && objectChecksums_ != com.google.storage.v2.ObjectChecksums.getDefaultInstance()) { + getObjectChecksumsBuilder().mergeFrom(value); + } else { + objectChecksums_ = value; + } + } else { + objectChecksumsBuilder_.mergeFrom(value); + } + if (objectChecksums_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * The checksums of the complete object. If the object is downloaded in full,
+     * the client should compute one of these checksums over the downloaded object
+     * and compare it against the value provided here.
+     * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + */ + public Builder clearObjectChecksums() { + bitField0_ = (bitField0_ & ~0x00000002); + objectChecksums_ = null; + if (objectChecksumsBuilder_ != null) { + objectChecksumsBuilder_.dispose(); + objectChecksumsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * The checksums of the complete object. If the object is downloaded in full,
+     * the client should compute one of these checksums over the downloaded object
+     * and compare it against the value provided here.
+     * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + */ + public com.google.storage.v2.ObjectChecksums.Builder getObjectChecksumsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetObjectChecksumsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The checksums of the complete object. If the object is downloaded in full,
+     * the client should compute one of these checksums over the downloaded object
+     * and compare it against the value provided here.
+     * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + */ + public com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder() { + if (objectChecksumsBuilder_ != null) { + return objectChecksumsBuilder_.getMessageOrBuilder(); + } else { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + } + + /** + * + * + *
+     * The checksums of the complete object. If the object is downloaded in full,
+     * the client should compute one of these checksums over the downloaded object
+     * and compare it against the value provided here.
+     * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + internalGetObjectChecksumsFieldBuilder() { + if (objectChecksumsBuilder_ == null) { + objectChecksumsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder>( + getObjectChecksums(), getParentForChildren(), isClean()); + objectChecksums_ = null; + } + return objectChecksumsBuilder_; + } + + private com.google.storage.v2.ContentRange contentRange_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ContentRange, + com.google.storage.v2.ContentRange.Builder, + com.google.storage.v2.ContentRangeOrBuilder> + contentRangeBuilder_; + + /** + * + * + *
+     * If `read_offset` and or `read_limit` is specified on the
+     * `ReadObjectRequest`, `ContentRange` is populated on the first
+     * `ReadObjectResponse` message of the read stream.
+     * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + * + * @return Whether the contentRange field is set. + */ + public boolean hasContentRange() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * If `read_offset` and or `read_limit` is specified on the
+     * `ReadObjectRequest`, `ContentRange` is populated on the first
+     * `ReadObjectResponse` message of the read stream.
+     * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + * + * @return The contentRange. + */ + public com.google.storage.v2.ContentRange getContentRange() { + if (contentRangeBuilder_ == null) { + return contentRange_ == null + ? com.google.storage.v2.ContentRange.getDefaultInstance() + : contentRange_; + } else { + return contentRangeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * If `read_offset` and or `read_limit` is specified on the
+     * `ReadObjectRequest`, `ContentRange` is populated on the first
+     * `ReadObjectResponse` message of the read stream.
+     * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + */ + public Builder setContentRange(com.google.storage.v2.ContentRange value) { + if (contentRangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + contentRange_ = value; + } else { + contentRangeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * If `read_offset` and or `read_limit` is specified on the
+     * `ReadObjectRequest`, `ContentRange` is populated on the first
+     * `ReadObjectResponse` message of the read stream.
+     * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + */ + public Builder setContentRange(com.google.storage.v2.ContentRange.Builder builderForValue) { + if (contentRangeBuilder_ == null) { + contentRange_ = builderForValue.build(); + } else { + contentRangeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * If `read_offset` and or `read_limit` is specified on the
+     * `ReadObjectRequest`, `ContentRange` is populated on the first
+     * `ReadObjectResponse` message of the read stream.
+     * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + */ + public Builder mergeContentRange(com.google.storage.v2.ContentRange value) { + if (contentRangeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && contentRange_ != null + && contentRange_ != com.google.storage.v2.ContentRange.getDefaultInstance()) { + getContentRangeBuilder().mergeFrom(value); + } else { + contentRange_ = value; + } + } else { + contentRangeBuilder_.mergeFrom(value); + } + if (contentRange_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * If `read_offset` and or `read_limit` is specified on the
+     * `ReadObjectRequest`, `ContentRange` is populated on the first
+     * `ReadObjectResponse` message of the read stream.
+     * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + */ + public Builder clearContentRange() { + bitField0_ = (bitField0_ & ~0x00000004); + contentRange_ = null; + if (contentRangeBuilder_ != null) { + contentRangeBuilder_.dispose(); + contentRangeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * If `read_offset` and or `read_limit` is specified on the
+     * `ReadObjectRequest`, `ContentRange` is populated on the first
+     * `ReadObjectResponse` message of the read stream.
+     * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + */ + public com.google.storage.v2.ContentRange.Builder getContentRangeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetContentRangeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * If `read_offset` and or `read_limit` is specified on the
+     * `ReadObjectRequest`, `ContentRange` is populated on the first
+     * `ReadObjectResponse` message of the read stream.
+     * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + */ + public com.google.storage.v2.ContentRangeOrBuilder getContentRangeOrBuilder() { + if (contentRangeBuilder_ != null) { + return contentRangeBuilder_.getMessageOrBuilder(); + } else { + return contentRange_ == null + ? com.google.storage.v2.ContentRange.getDefaultInstance() + : contentRange_; + } + } + + /** + * + * + *
+     * If `read_offset` and or `read_limit` is specified on the
+     * `ReadObjectRequest`, `ContentRange` is populated on the first
+     * `ReadObjectResponse` message of the read stream.
+     * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ContentRange, + com.google.storage.v2.ContentRange.Builder, + com.google.storage.v2.ContentRangeOrBuilder> + internalGetContentRangeFieldBuilder() { + if (contentRangeBuilder_ == null) { + contentRangeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ContentRange, + com.google.storage.v2.ContentRange.Builder, + com.google.storage.v2.ContentRangeOrBuilder>( + getContentRange(), getParentForChildren(), isClean()); + contentRange_ = null; + } + return contentRangeBuilder_; + } + + private com.google.storage.v2.Object metadata_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + metadataBuilder_; + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + * + * @return Whether the metadata field is set. + */ + public boolean hasMetadata() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + * + * @return The metadata. + */ + public com.google.storage.v2.Object getMetadata() { + if (metadataBuilder_ == null) { + return metadata_ == null ? com.google.storage.v2.Object.getDefaultInstance() : metadata_; + } else { + return metadataBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + public Builder setMetadata(com.google.storage.v2.Object value) { + if (metadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + metadata_ = value; + } else { + metadataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + public Builder setMetadata(com.google.storage.v2.Object.Builder builderForValue) { + if (metadataBuilder_ == null) { + metadata_ = builderForValue.build(); + } else { + metadataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + public Builder mergeMetadata(com.google.storage.v2.Object value) { + if (metadataBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && metadata_ != null + && metadata_ != com.google.storage.v2.Object.getDefaultInstance()) { + getMetadataBuilder().mergeFrom(value); + } else { + metadata_ = value; + } + } else { + metadataBuilder_.mergeFrom(value); + } + if (metadata_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + public Builder clearMetadata() { + bitField0_ = (bitField0_ & ~0x00000008); + metadata_ = null; + if (metadataBuilder_ != null) { + metadataBuilder_.dispose(); + metadataBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + public com.google.storage.v2.Object.Builder getMetadataBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetMetadataFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + public com.google.storage.v2.ObjectOrBuilder getMetadataOrBuilder() { + if (metadataBuilder_ != null) { + return metadataBuilder_.getMessageOrBuilder(); + } else { + return metadata_ == null ? com.google.storage.v2.Object.getDefaultInstance() : metadata_; + } + } + + /** + * + * + *
+     * Metadata of the object whose media is being returned.
+     * Only populated in the first response in the stream.
+     * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + internalGetMetadataFieldBuilder() { + if (metadataBuilder_ == null) { + metadataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder>( + getMetadata(), getParentForChildren(), isClean()); + metadata_ = null; + } + return metadataBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ReadObjectResponse) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ReadObjectResponse) + private static final com.google.storage.v2.ReadObjectResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ReadObjectResponse(); + } + + public static com.google.storage.v2.ReadObjectResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadObjectResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + 
return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ReadObjectResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadObjectResponseOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadObjectResponseOrBuilder.java new file mode 100644 index 000000000000..936ccd5b467f --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadObjectResponseOrBuilder.java @@ -0,0 +1,200 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ReadObjectResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ReadObjectResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * A portion of the data for the object. The service might leave `data`
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + * + * @return Whether the checksummedData field is set. + */ + boolean hasChecksummedData(); + + /** + * + * + *
+   * A portion of the data for the object. The service might leave `data`
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + * + * @return The checksummedData. + */ + com.google.storage.v2.ChecksummedData getChecksummedData(); + + /** + * + * + *
+   * A portion of the data for the object. The service might leave `data`
+   * empty for any given `ReadResponse`. This enables the service to inform the
+   * client that the request is still live while it is running an operation to
+   * generate more data.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 1; + */ + com.google.storage.v2.ChecksummedDataOrBuilder getChecksummedDataOrBuilder(); + + /** + * + * + *
+   * The checksums of the complete object. If the object is downloaded in full,
+   * the client should compute one of these checksums over the downloaded object
+   * and compare it against the value provided here.
+   * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + * + * @return Whether the objectChecksums field is set. + */ + boolean hasObjectChecksums(); + + /** + * + * + *
+   * The checksums of the complete object. If the object is downloaded in full,
+   * the client should compute one of these checksums over the downloaded object
+   * and compare it against the value provided here.
+   * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + * + * @return The objectChecksums. + */ + com.google.storage.v2.ObjectChecksums getObjectChecksums(); + + /** + * + * + *
+   * The checksums of the complete object. If the object is downloaded in full,
+   * the client should compute one of these checksums over the downloaded object
+   * and compare it against the value provided here.
+   * 
+ * + * .google.storage.v2.ObjectChecksums object_checksums = 2; + */ + com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder(); + + /** + * + * + *
+   * If `read_offset` and or `read_limit` is specified on the
+   * `ReadObjectRequest`, `ContentRange` is populated on the first
+   * `ReadObjectResponse` message of the read stream.
+   * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + * + * @return Whether the contentRange field is set. + */ + boolean hasContentRange(); + + /** + * + * + *
+   * If `read_offset` and or `read_limit` is specified on the
+   * `ReadObjectRequest`, `ContentRange` is populated on the first
+   * `ReadObjectResponse` message of the read stream.
+   * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + * + * @return The contentRange. + */ + com.google.storage.v2.ContentRange getContentRange(); + + /** + * + * + *
+   * If `read_offset` and or `read_limit` is specified on the
+   * `ReadObjectRequest`, `ContentRange` is populated on the first
+   * `ReadObjectResponse` message of the read stream.
+   * 
+ * + * .google.storage.v2.ContentRange content_range = 3; + */ + com.google.storage.v2.ContentRangeOrBuilder getContentRangeOrBuilder(); + + /** + * + * + *
+   * Metadata of the object whose media is being returned.
+   * Only populated in the first response in the stream.
+   * 
+ * + * .google.storage.v2.Object metadata = 4; + * + * @return Whether the metadata field is set. + */ + boolean hasMetadata(); + + /** + * + * + *
+   * Metadata of the object whose media is being returned.
+   * Only populated in the first response in the stream.
+   * 
+ * + * .google.storage.v2.Object metadata = 4; + * + * @return The metadata. + */ + com.google.storage.v2.Object getMetadata(); + + /** + * + * + *
+   * Metadata of the object whose media is being returned.
+   * Only populated in the first response in the stream.
+   * 
+ * + * .google.storage.v2.Object metadata = 4; + */ + com.google.storage.v2.ObjectOrBuilder getMetadataOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadRange.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadRange.java new file mode 100644 index 000000000000..ba607d4e6bd2 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadRange.java @@ -0,0 +1,763 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Describes a range of bytes to read in a `BidiReadObjectRanges` request.
+ * 
+ * + * Protobuf type {@code google.storage.v2.ReadRange} + */ +@com.google.protobuf.Generated +public final class ReadRange extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ReadRange) + ReadRangeOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReadRange"); + } + + // Use ReadRange.newBuilder() to construct. + private ReadRange(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ReadRange() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadRange_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadRange_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ReadRange.class, com.google.storage.v2.ReadRange.Builder.class); + } + + public static final int READ_OFFSET_FIELD_NUMBER = 1; + private long readOffset_ = 0L; + + /** + * + * + *
+   * Required. The offset for the first byte to return in the read, relative to
+   * the start of the object.
+   *
+   * A negative read_offset value is interpreted as the number of bytes
+   * back from the end of the object to be returned. For example, if an object's
+   * length is 15 bytes, a `ReadObjectRequest` with `read_offset` = -5 and
+   * `read_length` = 3 would return bytes 10 through 12 of the object.
+   * Requesting a negative offset with magnitude larger than the size of the
+   * object is equivalent to `read_offset` = 0. A `read_offset` larger than the
+   * size of the object results in an `OutOfRange` error.
+   * 
+ * + * int64 read_offset = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The readOffset. + */ + @java.lang.Override + public long getReadOffset() { + return readOffset_; + } + + public static final int READ_LENGTH_FIELD_NUMBER = 2; + private long readLength_ = 0L; + + /** + * + * + *
+   * Optional. The maximum number of data bytes the server is allowed to return
+   * across all response messages with the same `read_id`. A `read_length` of
+   * zero indicates to read until the resource end, and a negative `read_length`
+   * causes an `OutOfRange` error. If the stream returns fewer bytes than
+   * allowed by the `read_length` and no error occurred, the stream includes all
+   * data from the `read_offset` to the resource end.
+   * 
+ * + * int64 read_length = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The readLength. + */ + @java.lang.Override + public long getReadLength() { + return readLength_; + } + + public static final int READ_ID_FIELD_NUMBER = 3; + private long readId_ = 0L; + + /** + * + * + *
+   * Required. Read identifier provided by the client. When the client issues
+   * more than one outstanding `ReadRange` on the same stream, responses can be
+   * mapped back to their corresponding requests using this value. Clients must
+   * ensure that all outstanding requests have different read_id values. The
+   * server might close the stream with an error if this condition is not met.
+   * 
+ * + * int64 read_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The readId. + */ + @java.lang.Override + public long getReadId() { + return readId_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (readOffset_ != 0L) { + output.writeInt64(1, readOffset_); + } + if (readLength_ != 0L) { + output.writeInt64(2, readLength_); + } + if (readId_ != 0L) { + output.writeInt64(3, readId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (readOffset_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, readOffset_); + } + if (readLength_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, readLength_); + } + if (readId_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, readId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ReadRange)) { + return super.equals(obj); + } + com.google.storage.v2.ReadRange other = (com.google.storage.v2.ReadRange) obj; + + if (getReadOffset() != other.getReadOffset()) return false; + if (getReadLength() != other.getReadLength()) return false; + if (getReadId() != other.getReadId()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if 
(memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + READ_OFFSET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getReadOffset()); + hash = (37 * hash) + READ_LENGTH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getReadLength()); + hash = (37 * hash) + READ_ID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getReadId()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ReadRange parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ReadRange parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ReadRange parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ReadRange parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ReadRange parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ReadRange parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.storage.v2.ReadRange parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ReadRange parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ReadRange parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ReadRange parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ReadRange parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ReadRange parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ReadRange prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Describes a range of bytes to read in a `BidiReadObjectRanges` request.
+   * 
+ * + * Protobuf type {@code google.storage.v2.ReadRange} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ReadRange) + com.google.storage.v2.ReadRangeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadRange_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadRange_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ReadRange.class, com.google.storage.v2.ReadRange.Builder.class); + } + + // Construct using com.google.storage.v2.ReadRange.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + readOffset_ = 0L; + readLength_ = 0L; + readId_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadRange_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ReadRange getDefaultInstanceForType() { + return com.google.storage.v2.ReadRange.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ReadRange build() { + com.google.storage.v2.ReadRange result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ReadRange buildPartial() { + com.google.storage.v2.ReadRange result = new com.google.storage.v2.ReadRange(this); + if (bitField0_ != 
0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ReadRange result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.readOffset_ = readOffset_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.readLength_ = readLength_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.readId_ = readId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ReadRange) { + return mergeFrom((com.google.storage.v2.ReadRange) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ReadRange other) { + if (other == com.google.storage.v2.ReadRange.getDefaultInstance()) return this; + if (other.getReadOffset() != 0L) { + setReadOffset(other.getReadOffset()); + } + if (other.getReadLength() != 0L) { + setReadLength(other.getReadLength()); + } + if (other.getReadId() != 0L) { + setReadId(other.getReadId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + readOffset_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: + { + readLength_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + readId_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long readOffset_; + + /** + * + * + *
+     * Required. The offset for the first byte to return in the read, relative to
+     * the start of the object.
+     *
+     * A negative read_offset value is interpreted as the number of bytes
+     * back from the end of the object to be returned. For example, if an object's
+     * length is 15 bytes, a `ReadObjectRequest` with `read_offset` = -5 and
+     * `read_length` = 3 would return bytes 10 through 12 of the object.
+     * Requesting a negative offset with magnitude larger than the size of the
+     * object is equivalent to `read_offset` = 0. A `read_offset` larger than the
+     * size of the object results in an `OutOfRange` error.
+     * 
+ * + * int64 read_offset = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The readOffset. + */ + @java.lang.Override + public long getReadOffset() { + return readOffset_; + } + + /** + * + * + *
+     * Required. The offset for the first byte to return in the read, relative to
+     * the start of the object.
+     *
+     * A negative read_offset value is interpreted as the number of bytes
+     * back from the end of the object to be returned. For example, if an object's
+     * length is 15 bytes, a `ReadObjectRequest` with `read_offset` = -5 and
+     * `read_length` = 3 would return bytes 10 through 12 of the object.
+     * Requesting a negative offset with magnitude larger than the size of the
+     * object is equivalent to `read_offset` = 0. A `read_offset` larger than the
+     * size of the object results in an `OutOfRange` error.
+     * 
+ * + * int64 read_offset = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The readOffset to set. + * @return This builder for chaining. + */ + public Builder setReadOffset(long value) { + + readOffset_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The offset for the first byte to return in the read, relative to
+     * the start of the object.
+     *
+     * A negative read_offset value is interpreted as the number of bytes
+     * back from the end of the object to be returned. For example, if an object's
+     * length is 15 bytes, a `ReadObjectRequest` with `read_offset` = -5 and
+     * `read_length` = 3 would return bytes 10 through 12 of the object.
+     * Requesting a negative offset with magnitude larger than the size of the
+     * object is equivalent to `read_offset` = 0. A `read_offset` larger than the
+     * size of the object results in an `OutOfRange` error.
+     * 
+ * + * int64 read_offset = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearReadOffset() { + bitField0_ = (bitField0_ & ~0x00000001); + readOffset_ = 0L; + onChanged(); + return this; + } + + private long readLength_; + + /** + * + * + *
+     * Optional. The maximum number of data bytes the server is allowed to return
+     * across all response messages with the same `read_id`. A `read_length` of
+     * zero indicates to read until the resource end, and a negative `read_length`
+     * causes an `OutOfRange` error. If the stream returns fewer bytes than
+     * allowed by the `read_length` and no error occurred, the stream includes all
+     * data from the `read_offset` to the resource end.
+     * 
+ * + * int64 read_length = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The readLength. + */ + @java.lang.Override + public long getReadLength() { + return readLength_; + } + + /** + * + * + *
+     * Optional. The maximum number of data bytes the server is allowed to return
+     * across all response messages with the same `read_id`. A `read_length` of
+     * zero indicates to read until the resource end, and a negative `read_length`
+     * causes an `OutOfRange` error. If the stream returns fewer bytes than
+     * allowed by the `read_length` and no error occurred, the stream includes all
+     * data from the `read_offset` to the resource end.
+     * 
+ * + * int64 read_length = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The readLength to set. + * @return This builder for chaining. + */ + public Builder setReadLength(long value) { + + readLength_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The maximum number of data bytes the server is allowed to return
+     * across all response messages with the same `read_id`. A `read_length` of
+     * zero indicates to read until the resource end, and a negative `read_length`
+     * causes an `OutOfRange` error. If the stream returns fewer bytes than
+     * allowed by the `read_length` and no error occurred, the stream includes all
+     * data from the `read_offset` to the resource end.
+     * 
+ * + * int64 read_length = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearReadLength() { + bitField0_ = (bitField0_ & ~0x00000002); + readLength_ = 0L; + onChanged(); + return this; + } + + private long readId_; + + /** + * + * + *
+     * Required. Read identifier provided by the client. When the client issues
+     * more than one outstanding `ReadRange` on the same stream, responses can be
+     * mapped back to their corresponding requests using this value. Clients must
+     * ensure that all outstanding requests have different read_id values. The
+     * server might close the stream with an error if this condition is not met.
+     * 
+ * + * int64 read_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The readId. + */ + @java.lang.Override + public long getReadId() { + return readId_; + } + + /** + * + * + *
+     * Required. Read identifier provided by the client. When the client issues
+     * more than one outstanding `ReadRange` on the same stream, responses can be
+     * mapped back to their corresponding requests using this value. Clients must
+     * ensure that all outstanding requests have different read_id values. The
+     * server might close the stream with an error if this condition is not met.
+     * 
+ * + * int64 read_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The readId to set. + * @return This builder for chaining. + */ + public Builder setReadId(long value) { + + readId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Read identifier provided by the client. When the client issues
+     * more than one outstanding `ReadRange` on the same stream, responses can be
+     * mapped back to their corresponding requests using this value. Clients must
+     * ensure that all outstanding requests have different read_id values. The
+     * server might close the stream with an error if this condition is not met.
+     * 
+ * + * int64 read_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearReadId() { + bitField0_ = (bitField0_ & ~0x00000004); + readId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ReadRange) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ReadRange) + private static final com.google.storage.v2.ReadRange DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ReadRange(); + } + + public static com.google.storage.v2.ReadRange getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadRange parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ReadRange getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadRangeError.java 
b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadRangeError.java new file mode 100644 index 000000000000..0fb8c191be70 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadRangeError.java @@ -0,0 +1,777 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Error extension proto containing details for a single range read
+ * 
+ * + * Protobuf type {@code google.storage.v2.ReadRangeError} + */ +@com.google.protobuf.Generated +public final class ReadRangeError extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ReadRangeError) + ReadRangeErrorOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReadRangeError"); + } + + // Use ReadRangeError.newBuilder() to construct. + private ReadRangeError(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ReadRangeError() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadRangeError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadRangeError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ReadRangeError.class, + com.google.storage.v2.ReadRangeError.Builder.class); + } + + private int bitField0_; + public static final int READ_ID_FIELD_NUMBER = 1; + private long readId_ = 0L; + + /** + * + * + *
+   * The id of the corresponding read_range
+   * 
+ * + * int64 read_id = 1; + * + * @return The readId. + */ + @java.lang.Override + public long getReadId() { + return readId_; + } + + public static final int STATUS_FIELD_NUMBER = 2; + private com.google.rpc.Status status_; + + /** + * + * + *
+   * The status which should be an enum value of [google.rpc.Code].
+   * 
+ * + * .google.rpc.Status status = 2; + * + * @return Whether the status field is set. + */ + @java.lang.Override + public boolean hasStatus() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * The status which should be an enum value of [google.rpc.Code].
+   * 
+ * + * .google.rpc.Status status = 2; + * + * @return The status. + */ + @java.lang.Override + public com.google.rpc.Status getStatus() { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + + /** + * + * + *
+   * The status which should be an enum value of [google.rpc.Code].
+   * 
+ * + * .google.rpc.Status status = 2; + */ + @java.lang.Override + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (readId_ != 0L) { + output.writeInt64(1, readId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getStatus()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (readId_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, readId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStatus()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ReadRangeError)) { + return super.equals(obj); + } + com.google.storage.v2.ReadRangeError other = (com.google.storage.v2.ReadRangeError) obj; + + if (getReadId() != other.getReadId()) return false; + if (hasStatus() != other.hasStatus()) return false; + if (hasStatus()) { + if (!getStatus().equals(other.getStatus())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 
* hash) + getDescriptor().hashCode(); + hash = (37 * hash) + READ_ID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getReadId()); + if (hasStatus()) { + hash = (37 * hash) + STATUS_FIELD_NUMBER; + hash = (53 * hash) + getStatus().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ReadRangeError parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ReadRangeError parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ReadRangeError parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ReadRangeError parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ReadRangeError parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ReadRangeError parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ReadRangeError parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.storage.v2.ReadRangeError parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ReadRangeError parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ReadRangeError parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ReadRangeError parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ReadRangeError parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ReadRangeError prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Error extension proto containing details for a single range read
+   * 
+ * + * Protobuf type {@code google.storage.v2.ReadRangeError} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ReadRangeError) + com.google.storage.v2.ReadRangeErrorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadRangeError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadRangeError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ReadRangeError.class, + com.google.storage.v2.ReadRangeError.Builder.class); + } + + // Construct using com.google.storage.v2.ReadRangeError.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetStatusFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + readId_ = 0L; + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ReadRangeError_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ReadRangeError getDefaultInstanceForType() { + return com.google.storage.v2.ReadRangeError.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.storage.v2.ReadRangeError build() { + com.google.storage.v2.ReadRangeError result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ReadRangeError buildPartial() { + com.google.storage.v2.ReadRangeError result = new com.google.storage.v2.ReadRangeError(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.ReadRangeError result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.readId_ = readId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.status_ = statusBuilder_ == null ? status_ : statusBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ReadRangeError) { + return mergeFrom((com.google.storage.v2.ReadRangeError) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ReadRangeError other) { + if (other == com.google.storage.v2.ReadRangeError.getDefaultInstance()) return this; + if (other.getReadId() != 0L) { + setReadId(other.getReadId()); + } + if (other.hasStatus()) { + mergeStatus(other.getStatus()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = 
input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + readId_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + input.readMessage(internalGetStatusFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long readId_; + + /** + * + * + *
+     * The id of the corresponding read_range
+     * 
+ * + * int64 read_id = 1; + * + * @return The readId. + */ + @java.lang.Override + public long getReadId() { + return readId_; + } + + /** + * + * + *
+     * The id of the corresponding read_range
+     * 
+ * + * int64 read_id = 1; + * + * @param value The readId to set. + * @return This builder for chaining. + */ + public Builder setReadId(long value) { + + readId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The id of the corresponding read_range
+     * 
+ * + * int64 read_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearReadId() { + bitField0_ = (bitField0_ & ~0x00000001); + readId_ = 0L; + onChanged(); + return this; + } + + private com.google.rpc.Status status_; + private com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + statusBuilder_; + + /** + * + * + *
+     * The status which should be an enum value of [google.rpc.Code].
+     * 
+ * + * .google.rpc.Status status = 2; + * + * @return Whether the status field is set. + */ + public boolean hasStatus() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * The status which should be an enum value of [google.rpc.Code].
+     * 
+ * + * .google.rpc.Status status = 2; + * + * @return The status. + */ + public com.google.rpc.Status getStatus() { + if (statusBuilder_ == null) { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } else { + return statusBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * The status which should be an enum value of [google.rpc.Code].
+     * 
+ * + * .google.rpc.Status status = 2; + */ + public Builder setStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + status_ = value; + } else { + statusBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The status which should be an enum value of [google.rpc.Code].
+     * 
+ * + * .google.rpc.Status status = 2; + */ + public Builder setStatus(com.google.rpc.Status.Builder builderForValue) { + if (statusBuilder_ == null) { + status_ = builderForValue.build(); + } else { + statusBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The status which should be an enum value of [google.rpc.Code].
+     * 
+ * + * .google.rpc.Status status = 2; + */ + public Builder mergeStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && status_ != null + && status_ != com.google.rpc.Status.getDefaultInstance()) { + getStatusBuilder().mergeFrom(value); + } else { + status_ = value; + } + } else { + statusBuilder_.mergeFrom(value); + } + if (status_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * The status which should be an enum value of [google.rpc.Code].
+     * 
+ * + * .google.rpc.Status status = 2; + */ + public Builder clearStatus() { + bitField0_ = (bitField0_ & ~0x00000002); + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * The status which should be an enum value of [google.rpc.Code].
+     * 
+ * + * .google.rpc.Status status = 2; + */ + public com.google.rpc.Status.Builder getStatusBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetStatusFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The status which should be an enum value of [google.rpc.Code].
+     * 
+ * + * .google.rpc.Status status = 2; + */ + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + if (statusBuilder_ != null) { + return statusBuilder_.getMessageOrBuilder(); + } else { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + } + + /** + * + * + *
+     * The status which should be an enum value of [google.rpc.Code].
+     * 
+ * + * .google.rpc.Status status = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + internalGetStatusFieldBuilder() { + if (statusBuilder_ == null) { + statusBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, + com.google.rpc.Status.Builder, + com.google.rpc.StatusOrBuilder>(getStatus(), getParentForChildren(), isClean()); + status_ = null; + } + return statusBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ReadRangeError) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ReadRangeError) + private static final com.google.storage.v2.ReadRangeError DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ReadRangeError(); + } + + public static com.google.storage.v2.ReadRangeError getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadRangeError parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ReadRangeError getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadRangeErrorOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadRangeErrorOrBuilder.java new file mode 100644 index 000000000000..c986993c26ea --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadRangeErrorOrBuilder.java @@ -0,0 +1,78 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ReadRangeErrorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ReadRangeError) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The id of the corresponding read_range
+   * 
+ * + * int64 read_id = 1; + * + * @return The readId. + */ + long getReadId(); + + /** + * + * + *
+   * The status which should be an enum value of [google.rpc.Code].
+   * 
+ * + * .google.rpc.Status status = 2; + * + * @return Whether the status field is set. + */ + boolean hasStatus(); + + /** + * + * + *
+   * The status which should be an enum value of [google.rpc.Code].
+   * 
+ * + * .google.rpc.Status status = 2; + * + * @return The status. + */ + com.google.rpc.Status getStatus(); + + /** + * + * + *
+   * The status which should be an enum value of [google.rpc.Code].
+   * 
+ * + * .google.rpc.Status status = 2; + */ + com.google.rpc.StatusOrBuilder getStatusOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadRangeOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadRangeOrBuilder.java new file mode 100644 index 000000000000..d7c2c416ecd7 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ReadRangeOrBuilder.java @@ -0,0 +1,85 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ReadRangeOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ReadRange) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The offset for the first byte to return in the read, relative to
+   * the start of the object.
+   *
+   * A negative read_offset value is interpreted as the number of bytes
+   * back from the end of the object to be returned. For example, if an object's
+   * length is 15 bytes, a `ReadObjectRequest` with `read_offset` = -5 and
+   * `read_length` = 3 would return bytes 10 through 12 of the object.
+   * Requesting a negative offset with magnitude larger than the size of the
+   * object is equivalent to `read_offset` = 0. A `read_offset` larger than the
+   * size of the object results in an `OutOfRange` error.
+   * 
+ * + * int64 read_offset = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The readOffset. + */ + long getReadOffset(); + + /** + * + * + *
+   * Optional. The maximum number of data bytes the server is allowed to return
+   * across all response messages with the same `read_id`. A `read_length` of
+   * zero indicates to read until the resource end, and a negative `read_length`
+   * causes an `OutOfRange` error. If the stream returns fewer bytes than
+   * allowed by the `read_length` and no error occurred, the stream includes all
+   * data from the `read_offset` to the resource end.
+   * 
+ * + * int64 read_length = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The readLength. + */ + long getReadLength(); + + /** + * + * + *
+   * Required. Read identifier provided by the client. When the client issues
+   * more than one outstanding `ReadRange` on the same stream, responses can be
+   * mapped back to their corresponding requests using this value. Clients must
+   * ensure that all outstanding requests have different read_id values. The
+   * server might close the stream with an error if this condition is not met.
+   * 
+ * + * int64 read_id = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The readId. + */ + long getReadId(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RestoreObjectRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RestoreObjectRequest.java new file mode 100644 index 000000000000..be039802ba63 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RestoreObjectRequest.java @@ -0,0 +1,2187 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for
+ * [RestoreObject][google.storage.v2.Storage.RestoreObject].
+ * `bucket`, `object`, and `generation` **must** be set.
+ * 
+ * + * Protobuf type {@code google.storage.v2.RestoreObjectRequest} + */ +@com.google.protobuf.Generated +public final class RestoreObjectRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.RestoreObjectRequest) + RestoreObjectRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RestoreObjectRequest"); + } + + // Use RestoreObjectRequest.newBuilder() to construct. + private RestoreObjectRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RestoreObjectRequest() { + bucket_ = ""; + object_ = ""; + restoreToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RestoreObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RestoreObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.RestoreObjectRequest.class, + com.google.storage.v2.RestoreObjectRequest.Builder.class); + } + + private int bitField0_; + public static final int BUCKET_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object bucket_ = ""; + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + @java.lang.Override + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OBJECT_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object object_ = ""; + + /** + * + * + *
+   * Required. The name of the object to restore.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + @java.lang.Override + public java.lang.String getObject() { + java.lang.Object ref = object_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + object_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the object to restore.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + @java.lang.Override + public com.google.protobuf.ByteString getObjectBytes() { + java.lang.Object ref = object_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + object_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int GENERATION_FIELD_NUMBER = 3; + private long generation_ = 0L; + + /** + * + * + *
+   * Required. The specific revision of the object to restore.
+   * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + public static final int RESTORE_TOKEN_FIELD_NUMBER = 11; + + @SuppressWarnings("serial") + private volatile java.lang.Object restoreToken_ = ""; + + /** + * + * + *
+   * Optional. Restore token used to differentiate soft-deleted objects with the
+   * same name and generation. Only applicable for hierarchical namespace
+   * buckets. This parameter is optional, and is only required in the rare case
+   * when there are multiple soft-deleted objects with the same name and
+   * generation.
+   * 
+ * + * string restore_token = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The restoreToken. + */ + @java.lang.Override + public java.lang.String getRestoreToken() { + java.lang.Object ref = restoreToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + restoreToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Restore token used to differentiate soft-deleted objects with the
+   * same name and generation. Only applicable for hierarchical namespace
+   * buckets. This parameter is optional, and is only required in the rare case
+   * when there are multiple soft-deleted objects with the same name and
+   * generation.
+   * 
+ * + * string restore_token = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for restoreToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRestoreTokenBytes() { + java.lang.Object ref = restoreToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + restoreToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_GENERATION_MATCH_FIELD_NUMBER = 4; + private long ifGenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + public static final int IF_GENERATION_NOT_MATCH_FIELD_NUMBER = 5; + private long ifGenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 6; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 7; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int COPY_SOURCE_ACL_FIELD_NUMBER = 9; + private boolean copySourceAcl_ = false; + + /** + * + * + *
+   * If false or unset, the bucket's default object ACL is used.
+   * If true, copy the source object's access controls.
+   * Return an error if bucket has UBLA enabled.
+   * 
+ * + * optional bool copy_source_acl = 9; + * + * @return Whether the copySourceAcl field is set. + */ + @java.lang.Override + public boolean hasCopySourceAcl() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * If false or unset, the bucket's default object ACL is used.
+   * If true, copy the source object's access controls.
+   * Return an error if bucket has UBLA enabled.
+   * 
+ * + * optional bool copy_source_acl = 9; + * + * @return The copySourceAcl. + */ + @java.lang.Override + public boolean getCopySourceAcl() { + return copySourceAcl_; + } + + public static final int COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER = 8; + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + @java.lang.Override + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, bucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(object_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, object_); + } + if (generation_ != 0L) { + output.writeInt64(3, generation_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(4, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(5, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt64(6, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeInt64(7, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeMessage(8, getCommonObjectRequestParams()); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeBool(9, copySourceAcl_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(restoreToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 11, restoreToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int 
getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(bucket_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, bucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(object_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, object_); + } + if (generation_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, generation_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(5, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(6, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(7, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 8, getCommonObjectRequestParams()); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(9, copySourceAcl_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(restoreToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(11, restoreToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.RestoreObjectRequest)) { + return super.equals(obj); + } + com.google.storage.v2.RestoreObjectRequest other = + (com.google.storage.v2.RestoreObjectRequest) obj; + + if (!getBucket().equals(other.getBucket())) return false; + if 
(!getObject().equals(other.getObject())) return false; + if (getGeneration() != other.getGeneration()) return false; + if (!getRestoreToken().equals(other.getRestoreToken())) return false; + if (hasIfGenerationMatch() != other.hasIfGenerationMatch()) return false; + if (hasIfGenerationMatch()) { + if (getIfGenerationMatch() != other.getIfGenerationMatch()) return false; + } + if (hasIfGenerationNotMatch() != other.hasIfGenerationNotMatch()) return false; + if (hasIfGenerationNotMatch()) { + if (getIfGenerationNotMatch() != other.getIfGenerationNotMatch()) return false; + } + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (hasCopySourceAcl() != other.hasCopySourceAcl()) return false; + if (hasCopySourceAcl()) { + if (getCopySourceAcl() != other.getCopySourceAcl()) return false; + } + if (hasCommonObjectRequestParams() != other.hasCommonObjectRequestParams()) return false; + if (hasCommonObjectRequestParams()) { + if (!getCommonObjectRequestParams().equals(other.getCommonObjectRequestParams())) + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getBucket().hashCode(); + hash = (37 * hash) + OBJECT_FIELD_NUMBER; + hash = (53 * hash) + getObject().hashCode(); + hash = (37 * hash) + GENERATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getGeneration()); + 
hash = (37 * hash) + RESTORE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getRestoreToken().hashCode(); + if (hasIfGenerationMatch()) { + hash = (37 * hash) + IF_GENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationMatch()); + } + if (hasIfGenerationNotMatch()) { + hash = (37 * hash) + IF_GENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationNotMatch()); + } + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + if (hasCopySourceAcl()) { + hash = (37 * hash) + COPY_SOURCE_ACL_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getCopySourceAcl()); + } + if (hasCommonObjectRequestParams()) { + hash = (37 * hash) + COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getCommonObjectRequestParams().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.RestoreObjectRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.RestoreObjectRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.RestoreObjectRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + 
public static com.google.storage.v2.RestoreObjectRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.RestoreObjectRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.RestoreObjectRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.RestoreObjectRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.RestoreObjectRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.RestoreObjectRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.RestoreObjectRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.RestoreObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.RestoreObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.RestoreObjectRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for
+   * [RestoreObject][google.storage.v2.Storage.RestoreObject].
+   * `bucket`, `object`, and `generation` **must** be set.
+   * 
+ * + * Protobuf type {@code google.storage.v2.RestoreObjectRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.RestoreObjectRequest) + com.google.storage.v2.RestoreObjectRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RestoreObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RestoreObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.RestoreObjectRequest.class, + com.google.storage.v2.RestoreObjectRequest.Builder.class); + } + + // Construct using com.google.storage.v2.RestoreObjectRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCommonObjectRequestParamsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + bucket_ = ""; + object_ = ""; + generation_ = 0L; + restoreToken_ = ""; + ifGenerationMatch_ = 0L; + ifGenerationNotMatch_ = 0L; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + copySourceAcl_ = false; + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { 
+ return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RestoreObjectRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.RestoreObjectRequest getDefaultInstanceForType() { + return com.google.storage.v2.RestoreObjectRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.RestoreObjectRequest build() { + com.google.storage.v2.RestoreObjectRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.RestoreObjectRequest buildPartial() { + com.google.storage.v2.RestoreObjectRequest result = + new com.google.storage.v2.RestoreObjectRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.RestoreObjectRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.bucket_ = bucket_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.object_ = object_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.generation_ = generation_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.restoreToken_ = restoreToken_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.ifGenerationMatch_ = ifGenerationMatch_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.ifGenerationNotMatch_ = ifGenerationNotMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.copySourceAcl_ = copySourceAcl_; + to_bitField0_ 
|= 0x00000010; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.commonObjectRequestParams_ = + commonObjectRequestParamsBuilder_ == null + ? commonObjectRequestParams_ + : commonObjectRequestParamsBuilder_.build(); + to_bitField0_ |= 0x00000020; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.RestoreObjectRequest) { + return mergeFrom((com.google.storage.v2.RestoreObjectRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.RestoreObjectRequest other) { + if (other == com.google.storage.v2.RestoreObjectRequest.getDefaultInstance()) return this; + if (!other.getBucket().isEmpty()) { + bucket_ = other.bucket_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getObject().isEmpty()) { + object_ = other.object_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getGeneration() != 0L) { + setGeneration(other.getGeneration()); + } + if (!other.getRestoreToken().isEmpty()) { + restoreToken_ = other.restoreToken_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (other.hasIfGenerationMatch()) { + setIfGenerationMatch(other.getIfGenerationMatch()); + } + if (other.hasIfGenerationNotMatch()) { + setIfGenerationNotMatch(other.getIfGenerationNotMatch()); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (other.hasCopySourceAcl()) { + setCopySourceAcl(other.getCopySourceAcl()); + } + if (other.hasCommonObjectRequestParams()) { + mergeCommonObjectRequestParams(other.getCommonObjectRequestParams()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; 
+ } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + bucket_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + object_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + generation_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: + { + ifGenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000010; + break; + } // case 32 + case 40: + { + ifGenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000020; + break; + } // case 40 + case 48: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000040; + break; + } // case 48 + case 56: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000080; + break; + } // case 56 + case 66: + { + input.readMessage( + internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000200; + break; + } // case 66 + case 72: + { + copySourceAcl_ = input.readBool(); + bitField0_ |= 0x00000100; + break; + } // case 72 + case 90: + { + restoreToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 90 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object bucket_ = ""; 
+ + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + public java.lang.String getBucket() { + java.lang.Object ref = bucket_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bucket_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + public com.google.protobuf.ByteString getBucketBytes() { + java.lang.Object ref = bucket_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + bucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bucket to set. + * @return This builder for chaining. + */ + public Builder setBucket(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearBucket() { + bucket_ = getDefaultInstance().getBucket(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which the object resides.
+     * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for bucket to set. + * @return This builder for chaining. + */ + public Builder setBucketBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + bucket_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object object_ = ""; + + /** + * + * + *
+     * Required. The name of the object to restore.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + public java.lang.String getObject() { + java.lang.Object ref = object_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + object_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the object to restore.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + public com.google.protobuf.ByteString getObjectBytes() { + java.lang.Object ref = object_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + object_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the object to restore.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The object to set. + * @return This builder for chaining. + */ + public Builder setObject(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + object_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the object to restore.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearObject() { + object_ = getDefaultInstance().getObject(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the object to restore.
+     * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for object to set. + * @return This builder for chaining. + */ + public Builder setObjectBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + object_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private long generation_; + + /** + * + * + *
+     * Required. The specific revision of the object to restore.
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The generation. + */ + @java.lang.Override + public long getGeneration() { + return generation_; + } + + /** + * + * + *
+     * Required. The specific revision of the object to restore.
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The generation to set. + * @return This builder for chaining. + */ + public Builder setGeneration(long value) { + + generation_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The specific revision of the object to restore.
+     * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearGeneration() { + bitField0_ = (bitField0_ & ~0x00000004); + generation_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object restoreToken_ = ""; + + /** + * + * + *
+     * Optional. Restore token used to differentiate soft-deleted objects with the
+     * same name and generation. Only applicable for hierarchical namespace
+     * buckets. This parameter is optional, and is only required in the rare case
+     * when there are multiple soft-deleted objects with the same name and
+     * generation.
+     * 
+ * + * string restore_token = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The restoreToken. + */ + public java.lang.String getRestoreToken() { + java.lang.Object ref = restoreToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + restoreToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Restore token used to differentiate soft-deleted objects with the
+     * same name and generation. Only applicable for hierarchical namespace
+     * buckets. This parameter is optional, and is only required in the rare case
+     * when there are multiple soft-deleted objects with the same name and
+     * generation.
+     * 
+ * + * string restore_token = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for restoreToken. + */ + public com.google.protobuf.ByteString getRestoreTokenBytes() { + java.lang.Object ref = restoreToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + restoreToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Restore token used to differentiate soft-deleted objects with the
+     * same name and generation. Only applicable for hierarchical namespace
+     * buckets. This parameter is optional, and is only required in the rare case
+     * when there are multiple soft-deleted objects with the same name and
+     * generation.
+     * 
+ * + * string restore_token = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The restoreToken to set. + * @return This builder for chaining. + */ + public Builder setRestoreToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + restoreToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Restore token used to differentiate soft-deleted objects with the
+     * same name and generation. Only applicable for hierarchical namespace
+     * buckets. This parameter is optional, and is only required in the rare case
+     * when there are multiple soft-deleted objects with the same name and
+     * generation.
+     * 
+ * + * string restore_token = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearRestoreToken() { + restoreToken_ = getDefaultInstance().getRestoreToken(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Restore token used to differentiate soft-deleted objects with the
+     * same name and generation. Only applicable for hierarchical namespace
+     * buckets. This parameter is optional, and is only required in the rare case
+     * when there are multiple soft-deleted objects with the same name and
+     * generation.
+     * 
+ * + * string restore_token = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for restoreToken to set. + * @return This builder for chaining. + */ + public Builder setRestoreTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + restoreToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private long ifGenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 4; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 4; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 4; + * + * @param value The ifGenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationMatch(long value) { + + ifGenerationMatch_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 4; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000010); + ifGenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifGenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @param value The ifGenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationNotMatch(long value) { + + ifGenerationNotMatch_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000020); + ifGenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000040); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000080); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private boolean copySourceAcl_; + + /** + * + * + *
+     * If false or unset, the bucket's default object ACL is used.
+     * If true, copy the source object's access controls.
+     * Return an error if bucket has UBLA enabled.
+     * 
+ * + * optional bool copy_source_acl = 9; + * + * @return Whether the copySourceAcl field is set. + */ + @java.lang.Override + public boolean hasCopySourceAcl() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
+     * If false or unset, the bucket's default object ACL is used.
+     * If true, copy the source object's access controls.
+     * Return an error if bucket has UBLA enabled.
+     * 
+ * + * optional bool copy_source_acl = 9; + * + * @return The copySourceAcl. + */ + @java.lang.Override + public boolean getCopySourceAcl() { + return copySourceAcl_; + } + + /** + * + * + *
+     * If false or unset, the bucket's default object ACL is used.
+     * If true, copy the source object's access controls.
+     * Return an error if bucket has UBLA enabled.
+     * 
+ * + * optional bool copy_source_acl = 9; + * + * @param value The copySourceAcl to set. + * @return This builder for chaining. + */ + public Builder setCopySourceAcl(boolean value) { + + copySourceAcl_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * If false or unset, the bucket's default object ACL is used.
+     * If true, copy the source object's access controls.
+     * Return an error if bucket has UBLA enabled.
+     * 
+ * + * optional bool copy_source_acl = 9; + * + * @return This builder for chaining. + */ + public Builder clearCopySourceAcl() { + bitField0_ = (bitField0_ & ~0x00000100); + copySourceAcl_ = false; + onChanged(); + return this; + } + + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + commonObjectRequestParamsBuilder_; + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000200) != 0); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + if (commonObjectRequestParamsBuilder_ == null) { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } else { + return commonObjectRequestParamsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonObjectRequestParams_ = value; + } else { + commonObjectRequestParamsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams.Builder builderForValue) { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParams_ = builderForValue.build(); + } else { + commonObjectRequestParamsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (((bitField0_ & 0x00000200) != 0) + && commonObjectRequestParams_ != null + && commonObjectRequestParams_ + != com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance()) { + getCommonObjectRequestParamsBuilder().mergeFrom(value); + } else { + commonObjectRequestParams_ = value; + } + } else { + commonObjectRequestParamsBuilder_.mergeFrom(value); + } + if (commonObjectRequestParams_ != null) { + bitField0_ |= 0x00000200; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCommonObjectRequestParams() { + bitField0_ = (bitField0_ & ~0x00000200); + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParams.Builder + getCommonObjectRequestParamsBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + if (commonObjectRequestParamsBuilder_ != null) { + return commonObjectRequestParamsBuilder_.getMessageOrBuilder(); + } else { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + internalGetCommonObjectRequestParamsFieldBuilder() { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParamsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder>( + getCommonObjectRequestParams(), getParentForChildren(), isClean()); + commonObjectRequestParams_ = null; + } + return commonObjectRequestParamsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.RestoreObjectRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.RestoreObjectRequest) + private static final com.google.storage.v2.RestoreObjectRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.RestoreObjectRequest(); + } + + public static com.google.storage.v2.RestoreObjectRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RestoreObjectRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.RestoreObjectRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RestoreObjectRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RestoreObjectRequestOrBuilder.java new file mode 100644 index 000000000000..8fe83fcbd77e --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RestoreObjectRequestOrBuilder.java @@ -0,0 +1,325 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface RestoreObjectRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.RestoreObjectRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bucket. + */ + java.lang.String getBucket(); + + /** + * + * + *
+   * Required. Name of the bucket in which the object resides.
+   * 
+ * + * + * string bucket = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for bucket. + */ + com.google.protobuf.ByteString getBucketBytes(); + + /** + * + * + *
+   * Required. The name of the object to restore.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + java.lang.String getObject(); + + /** + * + * + *
+   * Required. The name of the object to restore.
+   * 
+ * + * string object = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for object. + */ + com.google.protobuf.ByteString getObjectBytes(); + + /** + * + * + *
+   * Required. The specific revision of the object to restore.
+   * 
+ * + * int64 generation = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The generation. + */ + long getGeneration(); + + /** + * + * + *
+   * Optional. Restore token used to differentiate soft-deleted objects with the
+   * same name and generation. Only applicable for hierarchical namespace
+   * buckets. This parameter is optional, and is only required in the rare case
+   * when there are multiple soft-deleted objects with the same name and
+   * generation.
+   * 
+ * + * string restore_token = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The restoreToken. + */ + java.lang.String getRestoreToken(); + + /** + * + * + *
+   * Optional. Restore token used to differentiate soft-deleted objects with the
+   * same name and generation. Only applicable for hierarchical namespace
+   * buckets. This parameter is optional, and is only required in the rare case
+   * when there are multiple soft-deleted objects with the same name and
+   * generation.
+   * 
+ * + * string restore_token = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for restoreToken. + */ + com.google.protobuf.ByteString getRestoreTokenBytes(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return Whether the ifGenerationMatch field is set. + */ + boolean hasIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 4; + * + * @return The ifGenerationMatch. + */ + long getIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + boolean hasIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 5; + * + * @return The ifGenerationNotMatch. + */ + long getIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 6; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 7; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * If false or unset, the bucket's default object ACL is used.
+   * If true, copy the source object's access controls.
+   * Return an error if bucket has UBLA enabled.
+   * 
+ * + * optional bool copy_source_acl = 9; + * + * @return Whether the copySourceAcl field is set. + */ + boolean hasCopySourceAcl(); + + /** + * + * + *
+   * If false or unset, the bucket's default object ACL is used.
+   * If true, copy the source object's access controls.
+   * Return an error if bucket has UBLA enabled.
+   * 
+ * + * optional bool copy_source_acl = 9; + * + * @return The copySourceAcl. + */ + boolean getCopySourceAcl(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + boolean hasCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.CommonObjectRequestParamsOrBuilder getCommonObjectRequestParamsOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RewriteObjectRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RewriteObjectRequest.java new file mode 100644 index 000000000000..7c20c02cb1f1 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RewriteObjectRequest.java @@ -0,0 +1,4770 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for [RewriteObject][google.storage.v2.Storage.RewriteObject].
+ * If the source object is encrypted using a Customer-Supplied Encryption Key
+ * the key information must be provided in the
+ * `copy_source_encryption_algorithm`, `copy_source_encryption_key_bytes`, and
+ * `copy_source_encryption_key_sha256_bytes` fields. If the destination object
+ * should be encrypted the keying information should be provided in the
+ * `encryption_algorithm`, `encryption_key_bytes`, and
+ * `encryption_key_sha256_bytes` fields of the
+ * `common_object_request_params.customer_encryption` field.
+ * 
+ * + * Protobuf type {@code google.storage.v2.RewriteObjectRequest} + */ +@com.google.protobuf.Generated +public final class RewriteObjectRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.RewriteObjectRequest) + RewriteObjectRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RewriteObjectRequest"); + } + + // Use RewriteObjectRequest.newBuilder() to construct. + private RewriteObjectRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RewriteObjectRequest() { + destinationName_ = ""; + destinationBucket_ = ""; + destinationKmsKey_ = ""; + sourceBucket_ = ""; + sourceObject_ = ""; + rewriteToken_ = ""; + destinationPredefinedAcl_ = ""; + copySourceEncryptionAlgorithm_ = ""; + copySourceEncryptionKeyBytes_ = com.google.protobuf.ByteString.EMPTY; + copySourceEncryptionKeySha256Bytes_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RewriteObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RewriteObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.RewriteObjectRequest.class, + com.google.storage.v2.RewriteObjectRequest.Builder.class); + } + + private int bitField0_; + public static final int DESTINATION_NAME_FIELD_NUMBER = 24; + + @SuppressWarnings("serial") + private volatile java.lang.Object destinationName_ = ""; + + /** + 
* + * + *
+   * Required. Immutable. The name of the destination object.
+   * See the
+   * [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+   * Example: `test.txt`
+   * The `name` field by itself does not uniquely identify a Cloud Storage
+   * object. A Cloud Storage object is uniquely identified by the tuple of
+   * (bucket, object, generation).
+   * 
+ * + * + * string destination_name = 24 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The destinationName. + */ + @java.lang.Override + public java.lang.String getDestinationName() { + java.lang.Object ref = destinationName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationName_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Immutable. The name of the destination object.
+   * See the
+   * [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+   * Example: `test.txt`
+   * The `name` field by itself does not uniquely identify a Cloud Storage
+   * object. A Cloud Storage object is uniquely identified by the tuple of
+   * (bucket, object, generation).
+   * 
+ * + * + * string destination_name = 24 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The bytes for destinationName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDestinationNameBytes() { + java.lang.Object ref = destinationName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DESTINATION_BUCKET_FIELD_NUMBER = 25; + + @SuppressWarnings("serial") + private volatile java.lang.Object destinationBucket_ = ""; + + /** + * + * + *
+   * Required. Immutable. The name of the bucket containing the destination
+   * object.
+   * 
+ * + * + * string destination_bucket = 25 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The destinationBucket. + */ + @java.lang.Override + public java.lang.String getDestinationBucket() { + java.lang.Object ref = destinationBucket_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationBucket_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Immutable. The name of the bucket containing the destination
+   * object.
+   * 
+ * + * + * string destination_bucket = 25 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for destinationBucket. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDestinationBucketBytes() { + java.lang.Object ref = destinationBucket_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationBucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DESTINATION_KMS_KEY_FIELD_NUMBER = 27; + + @SuppressWarnings("serial") + private volatile java.lang.Object destinationKmsKey_ = ""; + + /** + * + * + *
+   * Optional. The name of the Cloud KMS key that is used to encrypt the
+   * destination object. The Cloud KMS key must be located in same location as
+   * the object. If the parameter is not specified, the request uses the
+   * destination bucket's default encryption key, if any, or else the
+   * Google-managed encryption key.
+   * 
+ * + * + * string destination_kms_key = 27 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The destinationKmsKey. + */ + @java.lang.Override + public java.lang.String getDestinationKmsKey() { + java.lang.Object ref = destinationKmsKey_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationKmsKey_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The name of the Cloud KMS key that is used to encrypt the
+   * destination object. The Cloud KMS key must be located in same location as
+   * the object. If the parameter is not specified, the request uses the
+   * destination bucket's default encryption key, if any, or else the
+   * Google-managed encryption key.
+   * 
+ * + * + * string destination_kms_key = 27 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for destinationKmsKey. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDestinationKmsKeyBytes() { + java.lang.Object ref = destinationKmsKey_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationKmsKey_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DESTINATION_FIELD_NUMBER = 1; + private com.google.storage.v2.Object destination_; + + /** + * + * + *
+   * Optional. Properties of the destination, post-rewrite object.
+   * The `name`, `bucket` and `kms_key` fields must not be populated (these
+   * values are specified in the `destination_name`, `destination_bucket`, and
+   * `destination_kms_key` fields).
+   * If `destination` is present it is used to construct the destination
+   * object's metadata; otherwise the destination object's metadata is
+   * copied from the source object.
+   * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the destination field is set. + */ + @java.lang.Override + public boolean hasDestination() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. Properties of the destination, post-rewrite object.
+   * The `name`, `bucket` and `kms_key` fields must not be populated (these
+   * values are specified in the `destination_name`, `destination_bucket`, and
+   * `destination_kms_key` fields).
+   * If `destination` is present it is used to construct the destination
+   * object's metadata; otherwise the destination object's metadata is
+   * copied from the source object.
+   * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The destination. + */ + @java.lang.Override + public com.google.storage.v2.Object getDestination() { + return destination_ == null ? com.google.storage.v2.Object.getDefaultInstance() : destination_; + } + + /** + * + * + *
+   * Optional. Properties of the destination, post-rewrite object.
+   * The `name`, `bucket` and `kms_key` fields must not be populated (these
+   * values are specified in the `destination_name`, `destination_bucket`, and
+   * `destination_kms_key` fields).
+   * If `destination` is present it is used to construct the destination
+   * object's metadata; otherwise the destination object's metadata is
+   * copied from the source object.
+   * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getDestinationOrBuilder() { + return destination_ == null ? com.google.storage.v2.Object.getDefaultInstance() : destination_; + } + + public static final int SOURCE_BUCKET_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object sourceBucket_ = ""; + + /** + * + * + *
+   * Required. Name of the bucket in which to find the source object.
+   * 
+ * + * + * string source_bucket = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The sourceBucket. + */ + @java.lang.Override + public java.lang.String getSourceBucket() { + java.lang.Object ref = sourceBucket_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceBucket_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the bucket in which to find the source object.
+   * 
+ * + * + * string source_bucket = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for sourceBucket. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSourceBucketBytes() { + java.lang.Object ref = sourceBucket_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceBucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SOURCE_OBJECT_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object sourceObject_ = ""; + + /** + * + * + *
+   * Required. Name of the source object.
+   * 
+ * + * string source_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sourceObject. + */ + @java.lang.Override + public java.lang.String getSourceObject() { + java.lang.Object ref = sourceObject_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceObject_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the source object.
+   * 
+ * + * string source_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sourceObject. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSourceObjectBytes() { + java.lang.Object ref = sourceObject_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceObject_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SOURCE_GENERATION_FIELD_NUMBER = 4; + private long sourceGeneration_ = 0L; + + /** + * + * + *
+   * Optional. If present, selects a specific revision of the source object (as
+   * opposed to the latest version, the default).
+   * 
+ * + * int64 source_generation = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The sourceGeneration. + */ + @java.lang.Override + public long getSourceGeneration() { + return sourceGeneration_; + } + + public static final int REWRITE_TOKEN_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object rewriteToken_ = ""; + + /** + * + * + *
+   * Optional. Include this field (from the previous rewrite response) on each
+   * rewrite request after the first one, until the rewrite response 'done' flag
+   * is true. Calls that provide a rewriteToken can omit all other request
+   * fields, but if included those fields must match the values provided in the
+   * first rewrite request.
+   * 
+ * + * string rewrite_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The rewriteToken. + */ + @java.lang.Override + public java.lang.String getRewriteToken() { + java.lang.Object ref = rewriteToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rewriteToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Include this field (from the previous rewrite response) on each
+   * rewrite request after the first one, until the rewrite response 'done' flag
+   * is true. Calls that provide a rewriteToken can omit all other request
+   * fields, but if included those fields must match the values provided in the
+   * first rewrite request.
+   * 
+ * + * string rewrite_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for rewriteToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRewriteTokenBytes() { + java.lang.Object ref = rewriteToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rewriteToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DESTINATION_PREDEFINED_ACL_FIELD_NUMBER = 28; + + @SuppressWarnings("serial") + private volatile java.lang.Object destinationPredefinedAcl_ = ""; + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to the destination
+   * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string destination_predefined_acl = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The destinationPredefinedAcl. + */ + @java.lang.Override + public java.lang.String getDestinationPredefinedAcl() { + java.lang.Object ref = destinationPredefinedAcl_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationPredefinedAcl_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to the destination
+   * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string destination_predefined_acl = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for destinationPredefinedAcl. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDestinationPredefinedAclBytes() { + java.lang.Object ref = destinationPredefinedAcl_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationPredefinedAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_GENERATION_MATCH_FIELD_NUMBER = 7; + private long ifGenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 7; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 7; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + public static final int IF_GENERATION_NOT_MATCH_FIELD_NUMBER = 8; + private long ifGenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 8; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 8; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 9; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the destination object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 9; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the destination object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 9; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 10; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the destination object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 10; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the destination object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 10; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int IF_SOURCE_GENERATION_MATCH_FIELD_NUMBER = 11; + private long ifSourceGenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's live
+   * generation matches the given value.
+   * 
+ * + * optional int64 if_source_generation_match = 11; + * + * @return Whether the ifSourceGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceGenerationMatch() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's live
+   * generation matches the given value.
+   * 
+ * + * optional int64 if_source_generation_match = 11; + * + * @return The ifSourceGenerationMatch. + */ + @java.lang.Override + public long getIfSourceGenerationMatch() { + return ifSourceGenerationMatch_; + } + + public static final int IF_SOURCE_GENERATION_NOT_MATCH_FIELD_NUMBER = 12; + private long ifSourceGenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's live
+   * generation does not match the given value.
+   * 
+ * + * optional int64 if_source_generation_not_match = 12; + * + * @return Whether the ifSourceGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceGenerationNotMatch() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's live
+   * generation does not match the given value.
+   * 
+ * + * optional int64 if_source_generation_not_match = 12; + * + * @return The ifSourceGenerationNotMatch. + */ + @java.lang.Override + public long getIfSourceGenerationNotMatch() { + return ifSourceGenerationNotMatch_; + } + + public static final int IF_SOURCE_METAGENERATION_MATCH_FIELD_NUMBER = 13; + private long ifSourceMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_source_metageneration_match = 13; + * + * @return Whether the ifSourceMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceMetagenerationMatch() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_source_metageneration_match = 13; + * + * @return The ifSourceMetagenerationMatch. + */ + @java.lang.Override + public long getIfSourceMetagenerationMatch() { + return ifSourceMetagenerationMatch_; + } + + public static final int IF_SOURCE_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 14; + private long ifSourceMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_source_metageneration_not_match = 14; + * + * @return Whether the ifSourceMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceMetagenerationNotMatch() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_source_metageneration_not_match = 14; + * + * @return The ifSourceMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfSourceMetagenerationNotMatch() { + return ifSourceMetagenerationNotMatch_; + } + + public static final int MAX_BYTES_REWRITTEN_PER_CALL_FIELD_NUMBER = 15; + private long maxBytesRewrittenPerCall_ = 0L; + + /** + * + * + *
+   * Optional. The maximum number of bytes that are rewritten per rewrite
+   * request. Most callers shouldn't need to specify this parameter - it is
+   * primarily in place to support testing. If specified the value must be an
+   * integral multiple of 1 MiB (1048576). Also, this only applies to requests
+   * where the source and destination span locations and/or storage classes.
+   * Finally, this value must not change across rewrite calls else you'll get an
+   * error that the `rewriteToken` is invalid.
+   * 
+ * + * int64 max_bytes_rewritten_per_call = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The maxBytesRewrittenPerCall. + */ + @java.lang.Override + public long getMaxBytesRewrittenPerCall() { + return maxBytesRewrittenPerCall_; + } + + public static final int COPY_SOURCE_ENCRYPTION_ALGORITHM_FIELD_NUMBER = 16; + + @SuppressWarnings("serial") + private volatile java.lang.Object copySourceEncryptionAlgorithm_ = ""; + + /** + * + * + *
+   * Optional. The algorithm used to encrypt the source object, if any. Used if
+   * the source object was encrypted with a Customer-Supplied Encryption Key.
+   * 
+ * + * string copy_source_encryption_algorithm = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The copySourceEncryptionAlgorithm. + */ + @java.lang.Override + public java.lang.String getCopySourceEncryptionAlgorithm() { + java.lang.Object ref = copySourceEncryptionAlgorithm_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + copySourceEncryptionAlgorithm_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The algorithm used to encrypt the source object, if any. Used if
+   * the source object was encrypted with a Customer-Supplied Encryption Key.
+   * 
+ * + * string copy_source_encryption_algorithm = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The bytes for copySourceEncryptionAlgorithm. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCopySourceEncryptionAlgorithmBytes() { + java.lang.Object ref = copySourceEncryptionAlgorithm_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + copySourceEncryptionAlgorithm_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int COPY_SOURCE_ENCRYPTION_KEY_BYTES_FIELD_NUMBER = 21; + private com.google.protobuf.ByteString copySourceEncryptionKeyBytes_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * Optional. The raw bytes (not base64-encoded) AES-256 encryption key used to
+   * encrypt the source object, if it was encrypted with a Customer-Supplied
+   * Encryption Key.
+   * 
+ * + * bytes copy_source_encryption_key_bytes = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The copySourceEncryptionKeyBytes. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCopySourceEncryptionKeyBytes() { + return copySourceEncryptionKeyBytes_; + } + + public static final int COPY_SOURCE_ENCRYPTION_KEY_SHA256_BYTES_FIELD_NUMBER = 22; + private com.google.protobuf.ByteString copySourceEncryptionKeySha256Bytes_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * Optional. The raw bytes (not base64-encoded) SHA256 hash of the encryption
+   * key used to encrypt the source object, if it was encrypted with a
+   * Customer-Supplied Encryption Key.
+   * 
+ * + * + * bytes copy_source_encryption_key_sha256_bytes = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The copySourceEncryptionKeySha256Bytes. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCopySourceEncryptionKeySha256Bytes() { + return copySourceEncryptionKeySha256Bytes_; + } + + public static final int COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER = 19; + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + @java.lang.Override + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000200) != 0); + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + public static final int OBJECT_CHECKSUMS_FIELD_NUMBER = 29; + private com.google.storage.v2.ObjectChecksums objectChecksums_; + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is used to validate
+   * the destination object after rewriting.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + @java.lang.Override + public boolean hasObjectChecksums() { + return ((bitField0_ & 0x00000400) != 0); + } + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is used to validate
+   * the destination object after rewriting.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksums getObjectChecksums() { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is used to validate
+   * the destination object after rewriting.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder() { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getDestination()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceBucket_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, sourceBucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceObject_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, sourceObject_); + } + if (sourceGeneration_ != 0L) { + output.writeInt64(4, sourceGeneration_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(rewriteToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, rewriteToken_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(7, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt64(8, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeInt64(9, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeInt64(10, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeInt64(11, ifSourceGenerationMatch_); + } + if (((bitField0_ & 0x00000040) != 0)) { + output.writeInt64(12, ifSourceGenerationNotMatch_); + } + if (((bitField0_ & 
0x00000080) != 0)) { + output.writeInt64(13, ifSourceMetagenerationMatch_); + } + if (((bitField0_ & 0x00000100) != 0)) { + output.writeInt64(14, ifSourceMetagenerationNotMatch_); + } + if (maxBytesRewrittenPerCall_ != 0L) { + output.writeInt64(15, maxBytesRewrittenPerCall_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(copySourceEncryptionAlgorithm_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 16, copySourceEncryptionAlgorithm_); + } + if (((bitField0_ & 0x00000200) != 0)) { + output.writeMessage(19, getCommonObjectRequestParams()); + } + if (!copySourceEncryptionKeyBytes_.isEmpty()) { + output.writeBytes(21, copySourceEncryptionKeyBytes_); + } + if (!copySourceEncryptionKeySha256Bytes_.isEmpty()) { + output.writeBytes(22, copySourceEncryptionKeySha256Bytes_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationName_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 24, destinationName_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationBucket_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 25, destinationBucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationKmsKey_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 27, destinationKmsKey_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationPredefinedAcl_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 28, destinationPredefinedAcl_); + } + if (((bitField0_ & 0x00000400) != 0)) { + output.writeMessage(29, getObjectChecksums()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getDestination()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceBucket_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(2, sourceBucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceObject_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, sourceObject_); + } + if (sourceGeneration_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, sourceGeneration_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(rewriteToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, rewriteToken_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(7, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(8, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(9, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(10, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(11, ifSourceGenerationMatch_); + } + if (((bitField0_ & 0x00000040) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size(12, ifSourceGenerationNotMatch_); + } + if (((bitField0_ & 0x00000080) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size(13, ifSourceMetagenerationMatch_); + } + if (((bitField0_ & 0x00000100) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 14, ifSourceMetagenerationNotMatch_); + } + if (maxBytesRewrittenPerCall_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(15, maxBytesRewrittenPerCall_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(copySourceEncryptionAlgorithm_)) { + size += + com.google.protobuf.GeneratedMessage.computeStringSize( + 16, 
copySourceEncryptionAlgorithm_); + } + if (((bitField0_ & 0x00000200) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 19, getCommonObjectRequestParams()); + } + if (!copySourceEncryptionKeyBytes_.isEmpty()) { + size += + com.google.protobuf.CodedOutputStream.computeBytesSize(21, copySourceEncryptionKeyBytes_); + } + if (!copySourceEncryptionKeySha256Bytes_.isEmpty()) { + size += + com.google.protobuf.CodedOutputStream.computeBytesSize( + 22, copySourceEncryptionKeySha256Bytes_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationName_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(24, destinationName_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationBucket_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(25, destinationBucket_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationKmsKey_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(27, destinationKmsKey_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationPredefinedAcl_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(28, destinationPredefinedAcl_); + } + if (((bitField0_ & 0x00000400) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(29, getObjectChecksums()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.RewriteObjectRequest)) { + return super.equals(obj); + } + com.google.storage.v2.RewriteObjectRequest other = + (com.google.storage.v2.RewriteObjectRequest) obj; + + if (!getDestinationName().equals(other.getDestinationName())) return false; + if (!getDestinationBucket().equals(other.getDestinationBucket())) return false; + if 
(!getDestinationKmsKey().equals(other.getDestinationKmsKey())) return false; + if (hasDestination() != other.hasDestination()) return false; + if (hasDestination()) { + if (!getDestination().equals(other.getDestination())) return false; + } + if (!getSourceBucket().equals(other.getSourceBucket())) return false; + if (!getSourceObject().equals(other.getSourceObject())) return false; + if (getSourceGeneration() != other.getSourceGeneration()) return false; + if (!getRewriteToken().equals(other.getRewriteToken())) return false; + if (!getDestinationPredefinedAcl().equals(other.getDestinationPredefinedAcl())) return false; + if (hasIfGenerationMatch() != other.hasIfGenerationMatch()) return false; + if (hasIfGenerationMatch()) { + if (getIfGenerationMatch() != other.getIfGenerationMatch()) return false; + } + if (hasIfGenerationNotMatch() != other.hasIfGenerationNotMatch()) return false; + if (hasIfGenerationNotMatch()) { + if (getIfGenerationNotMatch() != other.getIfGenerationNotMatch()) return false; + } + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (hasIfSourceGenerationMatch() != other.hasIfSourceGenerationMatch()) return false; + if (hasIfSourceGenerationMatch()) { + if (getIfSourceGenerationMatch() != other.getIfSourceGenerationMatch()) return false; + } + if (hasIfSourceGenerationNotMatch() != other.hasIfSourceGenerationNotMatch()) return false; + if (hasIfSourceGenerationNotMatch()) { + if (getIfSourceGenerationNotMatch() != other.getIfSourceGenerationNotMatch()) return false; + } + if (hasIfSourceMetagenerationMatch() != other.hasIfSourceMetagenerationMatch()) return false; + 
if (hasIfSourceMetagenerationMatch()) { + if (getIfSourceMetagenerationMatch() != other.getIfSourceMetagenerationMatch()) return false; + } + if (hasIfSourceMetagenerationNotMatch() != other.hasIfSourceMetagenerationNotMatch()) + return false; + if (hasIfSourceMetagenerationNotMatch()) { + if (getIfSourceMetagenerationNotMatch() != other.getIfSourceMetagenerationNotMatch()) + return false; + } + if (getMaxBytesRewrittenPerCall() != other.getMaxBytesRewrittenPerCall()) return false; + if (!getCopySourceEncryptionAlgorithm().equals(other.getCopySourceEncryptionAlgorithm())) + return false; + if (!getCopySourceEncryptionKeyBytes().equals(other.getCopySourceEncryptionKeyBytes())) + return false; + if (!getCopySourceEncryptionKeySha256Bytes() + .equals(other.getCopySourceEncryptionKeySha256Bytes())) return false; + if (hasCommonObjectRequestParams() != other.hasCommonObjectRequestParams()) return false; + if (hasCommonObjectRequestParams()) { + if (!getCommonObjectRequestParams().equals(other.getCommonObjectRequestParams())) + return false; + } + if (hasObjectChecksums() != other.hasObjectChecksums()) return false; + if (hasObjectChecksums()) { + if (!getObjectChecksums().equals(other.getObjectChecksums())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DESTINATION_NAME_FIELD_NUMBER; + hash = (53 * hash) + getDestinationName().hashCode(); + hash = (37 * hash) + DESTINATION_BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getDestinationBucket().hashCode(); + hash = (37 * hash) + DESTINATION_KMS_KEY_FIELD_NUMBER; + hash = (53 * hash) + getDestinationKmsKey().hashCode(); + if (hasDestination()) { + hash = (37 * hash) + DESTINATION_FIELD_NUMBER; + hash = (53 * hash) + getDestination().hashCode(); + } + hash = (37 * 
hash) + SOURCE_BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getSourceBucket().hashCode(); + hash = (37 * hash) + SOURCE_OBJECT_FIELD_NUMBER; + hash = (53 * hash) + getSourceObject().hashCode(); + hash = (37 * hash) + SOURCE_GENERATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getSourceGeneration()); + hash = (37 * hash) + REWRITE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getRewriteToken().hashCode(); + hash = (37 * hash) + DESTINATION_PREDEFINED_ACL_FIELD_NUMBER; + hash = (53 * hash) + getDestinationPredefinedAcl().hashCode(); + if (hasIfGenerationMatch()) { + hash = (37 * hash) + IF_GENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationMatch()); + } + if (hasIfGenerationNotMatch()) { + hash = (37 * hash) + IF_GENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationNotMatch()); + } + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + if (hasIfSourceGenerationMatch()) { + hash = (37 * hash) + IF_SOURCE_GENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfSourceGenerationMatch()); + } + if (hasIfSourceGenerationNotMatch()) { + hash = (37 * hash) + IF_SOURCE_GENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfSourceGenerationNotMatch()); + } + if (hasIfSourceMetagenerationMatch()) { + hash = (37 * hash) + IF_SOURCE_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfSourceMetagenerationMatch()); + } + if (hasIfSourceMetagenerationNotMatch()) { + hash = (37 
* hash) + IF_SOURCE_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = + (53 * hash) + com.google.protobuf.Internal.hashLong(getIfSourceMetagenerationNotMatch()); + } + hash = (37 * hash) + MAX_BYTES_REWRITTEN_PER_CALL_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getMaxBytesRewrittenPerCall()); + hash = (37 * hash) + COPY_SOURCE_ENCRYPTION_ALGORITHM_FIELD_NUMBER; + hash = (53 * hash) + getCopySourceEncryptionAlgorithm().hashCode(); + hash = (37 * hash) + COPY_SOURCE_ENCRYPTION_KEY_BYTES_FIELD_NUMBER; + hash = (53 * hash) + getCopySourceEncryptionKeyBytes().hashCode(); + hash = (37 * hash) + COPY_SOURCE_ENCRYPTION_KEY_SHA256_BYTES_FIELD_NUMBER; + hash = (53 * hash) + getCopySourceEncryptionKeySha256Bytes().hashCode(); + if (hasCommonObjectRequestParams()) { + hash = (37 * hash) + COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getCommonObjectRequestParams().hashCode(); + } + if (hasObjectChecksums()) { + hash = (37 * hash) + OBJECT_CHECKSUMS_FIELD_NUMBER; + hash = (53 * hash) + getObjectChecksums().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.RewriteObjectRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.RewriteObjectRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.RewriteObjectRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.RewriteObjectRequest parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.RewriteObjectRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.RewriteObjectRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.RewriteObjectRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.RewriteObjectRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.RewriteObjectRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.RewriteObjectRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.RewriteObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.RewriteObjectRequest 
parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.RewriteObjectRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for [RewriteObject][google.storage.v2.Storage.RewriteObject].
+   * If the source object is encrypted using a Customer-Supplied Encryption Key
+   * the key information must be provided in the
+   * `copy_source_encryption_algorithm`, `copy_source_encryption_key_bytes`, and
+   * `copy_source_encryption_key_sha256_bytes` fields. If the destination object
+   * should be encrypted the keying information should be provided in the
+   * `encryption_algorithm`, `encryption_key_bytes`, and
+   * `encryption_key_sha256_bytes` fields of the
+   * `common_object_request_params.customer_encryption` field.
+   * 
+ * + * Protobuf type {@code google.storage.v2.RewriteObjectRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.RewriteObjectRequest) + com.google.storage.v2.RewriteObjectRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RewriteObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RewriteObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.RewriteObjectRequest.class, + com.google.storage.v2.RewriteObjectRequest.Builder.class); + } + + // Construct using com.google.storage.v2.RewriteObjectRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetDestinationFieldBuilder(); + internalGetCommonObjectRequestParamsFieldBuilder(); + internalGetObjectChecksumsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + destinationName_ = ""; + destinationBucket_ = ""; + destinationKmsKey_ = ""; + destination_ = null; + if (destinationBuilder_ != null) { + destinationBuilder_.dispose(); + destinationBuilder_ = null; + } + sourceBucket_ = ""; + sourceObject_ = ""; + sourceGeneration_ = 0L; + rewriteToken_ = ""; + destinationPredefinedAcl_ = ""; + ifGenerationMatch_ = 0L; + ifGenerationNotMatch_ = 0L; + ifMetagenerationMatch_ = 0L; + 
ifMetagenerationNotMatch_ = 0L; + ifSourceGenerationMatch_ = 0L; + ifSourceGenerationNotMatch_ = 0L; + ifSourceMetagenerationMatch_ = 0L; + ifSourceMetagenerationNotMatch_ = 0L; + maxBytesRewrittenPerCall_ = 0L; + copySourceEncryptionAlgorithm_ = ""; + copySourceEncryptionKeyBytes_ = com.google.protobuf.ByteString.EMPTY; + copySourceEncryptionKeySha256Bytes_ = com.google.protobuf.ByteString.EMPTY; + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + objectChecksums_ = null; + if (objectChecksumsBuilder_ != null) { + objectChecksumsBuilder_.dispose(); + objectChecksumsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RewriteObjectRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.RewriteObjectRequest getDefaultInstanceForType() { + return com.google.storage.v2.RewriteObjectRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.RewriteObjectRequest build() { + com.google.storage.v2.RewriteObjectRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.RewriteObjectRequest buildPartial() { + com.google.storage.v2.RewriteObjectRequest result = + new com.google.storage.v2.RewriteObjectRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.RewriteObjectRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.destinationName_ = destinationName_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.destinationBucket_ = 
destinationBucket_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.destinationKmsKey_ = destinationKmsKey_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.destination_ = + destinationBuilder_ == null ? destination_ : destinationBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.sourceBucket_ = sourceBucket_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.sourceObject_ = sourceObject_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.sourceGeneration_ = sourceGeneration_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.rewriteToken_ = rewriteToken_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.destinationPredefinedAcl_ = destinationPredefinedAcl_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.ifGenerationMatch_ = ifGenerationMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.ifGenerationNotMatch_ = ifGenerationNotMatch_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000800) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000010; + } + if (((from_bitField0_ & 0x00002000) != 0)) { + result.ifSourceGenerationMatch_ = ifSourceGenerationMatch_; + to_bitField0_ |= 0x00000020; + } + if (((from_bitField0_ & 0x00004000) != 0)) { + result.ifSourceGenerationNotMatch_ = ifSourceGenerationNotMatch_; + to_bitField0_ |= 0x00000040; + } + if (((from_bitField0_ & 0x00008000) != 0)) { + result.ifSourceMetagenerationMatch_ = ifSourceMetagenerationMatch_; + to_bitField0_ |= 0x00000080; + } + if (((from_bitField0_ & 0x00010000) != 0)) { + result.ifSourceMetagenerationNotMatch_ = ifSourceMetagenerationNotMatch_; + to_bitField0_ |= 0x00000100; + } + if 
(((from_bitField0_ & 0x00020000) != 0)) { + result.maxBytesRewrittenPerCall_ = maxBytesRewrittenPerCall_; + } + if (((from_bitField0_ & 0x00040000) != 0)) { + result.copySourceEncryptionAlgorithm_ = copySourceEncryptionAlgorithm_; + } + if (((from_bitField0_ & 0x00080000) != 0)) { + result.copySourceEncryptionKeyBytes_ = copySourceEncryptionKeyBytes_; + } + if (((from_bitField0_ & 0x00100000) != 0)) { + result.copySourceEncryptionKeySha256Bytes_ = copySourceEncryptionKeySha256Bytes_; + } + if (((from_bitField0_ & 0x00200000) != 0)) { + result.commonObjectRequestParams_ = + commonObjectRequestParamsBuilder_ == null + ? commonObjectRequestParams_ + : commonObjectRequestParamsBuilder_.build(); + to_bitField0_ |= 0x00000200; + } + if (((from_bitField0_ & 0x00400000) != 0)) { + result.objectChecksums_ = + objectChecksumsBuilder_ == null ? objectChecksums_ : objectChecksumsBuilder_.build(); + to_bitField0_ |= 0x00000400; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.RewriteObjectRequest) { + return mergeFrom((com.google.storage.v2.RewriteObjectRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.RewriteObjectRequest other) { + if (other == com.google.storage.v2.RewriteObjectRequest.getDefaultInstance()) return this; + if (!other.getDestinationName().isEmpty()) { + destinationName_ = other.destinationName_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getDestinationBucket().isEmpty()) { + destinationBucket_ = other.destinationBucket_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getDestinationKmsKey().isEmpty()) { + destinationKmsKey_ = other.destinationKmsKey_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasDestination()) { + mergeDestination(other.getDestination()); + } + if (!other.getSourceBucket().isEmpty()) { + 
sourceBucket_ = other.sourceBucket_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (!other.getSourceObject().isEmpty()) { + sourceObject_ = other.sourceObject_; + bitField0_ |= 0x00000020; + onChanged(); + } + if (other.getSourceGeneration() != 0L) { + setSourceGeneration(other.getSourceGeneration()); + } + if (!other.getRewriteToken().isEmpty()) { + rewriteToken_ = other.rewriteToken_; + bitField0_ |= 0x00000080; + onChanged(); + } + if (!other.getDestinationPredefinedAcl().isEmpty()) { + destinationPredefinedAcl_ = other.destinationPredefinedAcl_; + bitField0_ |= 0x00000100; + onChanged(); + } + if (other.hasIfGenerationMatch()) { + setIfGenerationMatch(other.getIfGenerationMatch()); + } + if (other.hasIfGenerationNotMatch()) { + setIfGenerationNotMatch(other.getIfGenerationNotMatch()); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (other.hasIfSourceGenerationMatch()) { + setIfSourceGenerationMatch(other.getIfSourceGenerationMatch()); + } + if (other.hasIfSourceGenerationNotMatch()) { + setIfSourceGenerationNotMatch(other.getIfSourceGenerationNotMatch()); + } + if (other.hasIfSourceMetagenerationMatch()) { + setIfSourceMetagenerationMatch(other.getIfSourceMetagenerationMatch()); + } + if (other.hasIfSourceMetagenerationNotMatch()) { + setIfSourceMetagenerationNotMatch(other.getIfSourceMetagenerationNotMatch()); + } + if (other.getMaxBytesRewrittenPerCall() != 0L) { + setMaxBytesRewrittenPerCall(other.getMaxBytesRewrittenPerCall()); + } + if (!other.getCopySourceEncryptionAlgorithm().isEmpty()) { + copySourceEncryptionAlgorithm_ = other.copySourceEncryptionAlgorithm_; + bitField0_ |= 0x00040000; + onChanged(); + } + if (!other.getCopySourceEncryptionKeyBytes().isEmpty()) { + setCopySourceEncryptionKeyBytes(other.getCopySourceEncryptionKeyBytes()); + } + if 
(!other.getCopySourceEncryptionKeySha256Bytes().isEmpty()) { + setCopySourceEncryptionKeySha256Bytes(other.getCopySourceEncryptionKeySha256Bytes()); + } + if (other.hasCommonObjectRequestParams()) { + mergeCommonObjectRequestParams(other.getCommonObjectRequestParams()); + } + if (other.hasObjectChecksums()) { + mergeObjectChecksums(other.getObjectChecksums()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetDestinationFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 10 + case 18: + { + sourceBucket_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 18 + case 26: + { + sourceObject_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 26 + case 32: + { + sourceGeneration_ = input.readInt64(); + bitField0_ |= 0x00000040; + break; + } // case 32 + case 42: + { + rewriteToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000080; + break; + } // case 42 + case 56: + { + ifGenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000200; + break; + } // case 56 + case 64: + { + ifGenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000400; + break; + } // case 64 + case 72: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000800; + break; + } // case 72 + case 80: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00001000; + break; + 
} // case 80 + case 88: + { + ifSourceGenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00002000; + break; + } // case 88 + case 96: + { + ifSourceGenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00004000; + break; + } // case 96 + case 104: + { + ifSourceMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00008000; + break; + } // case 104 + case 112: + { + ifSourceMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00010000; + break; + } // case 112 + case 120: + { + maxBytesRewrittenPerCall_ = input.readInt64(); + bitField0_ |= 0x00020000; + break; + } // case 120 + case 130: + { + copySourceEncryptionAlgorithm_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00040000; + break; + } // case 130 + case 154: + { + input.readMessage( + internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00200000; + break; + } // case 154 + case 170: + { + copySourceEncryptionKeyBytes_ = input.readBytes(); + bitField0_ |= 0x00080000; + break; + } // case 170 + case 178: + { + copySourceEncryptionKeySha256Bytes_ = input.readBytes(); + bitField0_ |= 0x00100000; + break; + } // case 178 + case 194: + { + destinationName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 194 + case 202: + { + destinationBucket_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 202 + case 218: + { + destinationKmsKey_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 218 + case 226: + { + destinationPredefinedAcl_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000100; + break; + } // case 226 + case 234: + { + input.readMessage( + internalGetObjectChecksumsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00400000; + break; + } // case 234 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch 
(tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object destinationName_ = ""; + + /** + * + * + *
+     * Required. Immutable. The name of the destination object.
+     * See the
+     * [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+     * Example: `test.txt`
+     * The `name` field by itself does not uniquely identify a Cloud Storage
+     * object. A Cloud Storage object is uniquely identified by the tuple of
+     * (bucket, object, generation).
+     * 
+ * + * + * string destination_name = 24 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The destinationName. + */ + public java.lang.String getDestinationName() { + java.lang.Object ref = destinationName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Immutable. The name of the destination object.
+     * See the
+     * [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+     * Example: `test.txt`
+     * The `name` field by itself does not uniquely identify a Cloud Storage
+     * object. A Cloud Storage object is uniquely identified by the tuple of
+     * (bucket, object, generation).
+     * 
+ * + * + * string destination_name = 24 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The bytes for destinationName. + */ + public com.google.protobuf.ByteString getDestinationNameBytes() { + java.lang.Object ref = destinationName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Immutable. The name of the destination object.
+     * See the
+     * [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+     * Example: `test.txt`
+     * The `name` field by itself does not uniquely identify a Cloud Storage
+     * object. A Cloud Storage object is uniquely identified by the tuple of
+     * (bucket, object, generation).
+     * 
+ * + * + * string destination_name = 24 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The destinationName to set. + * @return This builder for chaining. + */ + public Builder setDestinationName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + destinationName_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Immutable. The name of the destination object.
+     * See the
+     * [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+     * Example: `test.txt`
+     * The `name` field by itself does not uniquely identify a Cloud Storage
+     * object. A Cloud Storage object is uniquely identified by the tuple of
+     * (bucket, object, generation).
+     * 
+ * + * + * string destination_name = 24 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return This builder for chaining. + */ + public Builder clearDestinationName() { + destinationName_ = getDefaultInstance().getDestinationName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Immutable. The name of the destination object.
+     * See the
+     * [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+     * Example: `test.txt`
+     * The `name` field by itself does not uniquely identify a Cloud Storage
+     * object. A Cloud Storage object is uniquely identified by the tuple of
+     * (bucket, object, generation).
+     * 
+ * + * + * string destination_name = 24 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The bytes for destinationName to set. + * @return This builder for chaining. + */ + public Builder setDestinationNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + destinationName_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object destinationBucket_ = ""; + + /** + * + * + *
+     * Required. Immutable. The name of the bucket containing the destination
+     * object.
+     * 
+ * + * + * string destination_bucket = 25 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The destinationBucket. + */ + public java.lang.String getDestinationBucket() { + java.lang.Object ref = destinationBucket_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationBucket_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Immutable. The name of the bucket containing the destination
+     * object.
+     * 
+ * + * + * string destination_bucket = 25 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for destinationBucket. + */ + public com.google.protobuf.ByteString getDestinationBucketBytes() { + java.lang.Object ref = destinationBucket_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationBucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Immutable. The name of the bucket containing the destination
+     * object.
+     * 
+ * + * + * string destination_bucket = 25 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @param value The destinationBucket to set. + * @return This builder for chaining. + */ + public Builder setDestinationBucket(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + destinationBucket_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Immutable. The name of the bucket containing the destination
+     * object.
+     * 
+ * + * + * string destination_bucket = 25 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearDestinationBucket() { + destinationBucket_ = getDefaultInstance().getDestinationBucket(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Immutable. The name of the bucket containing the destination
+     * object.
+     * 
+ * + * + * string destination_bucket = 25 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for destinationBucket to set. + * @return This builder for chaining. + */ + public Builder setDestinationBucketBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + destinationBucket_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object destinationKmsKey_ = ""; + + /** + * + * + *
+     * Optional. The name of the Cloud KMS key that is used to encrypt the
+     * destination object. The Cloud KMS key must be located in same location as
+     * the object. If the parameter is not specified, the request uses the
+     * destination bucket's default encryption key, if any, or else the
+     * Google-managed encryption key.
+     * 
+ * + * + * string destination_kms_key = 27 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The destinationKmsKey. + */ + public java.lang.String getDestinationKmsKey() { + java.lang.Object ref = destinationKmsKey_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationKmsKey_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The name of the Cloud KMS key that is used to encrypt the
+     * destination object. The Cloud KMS key must be located in same location as
+     * the object. If the parameter is not specified, the request uses the
+     * destination bucket's default encryption key, if any, or else the
+     * Google-managed encryption key.
+     * 
+ * + * + * string destination_kms_key = 27 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for destinationKmsKey. + */ + public com.google.protobuf.ByteString getDestinationKmsKeyBytes() { + java.lang.Object ref = destinationKmsKey_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationKmsKey_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The name of the Cloud KMS key that is used to encrypt the
+     * destination object. The Cloud KMS key must be located in same location as
+     * the object. If the parameter is not specified, the request uses the
+     * destination bucket's default encryption key, if any, or else the
+     * Google-managed encryption key.
+     * 
+ * + * + * string destination_kms_key = 27 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The destinationKmsKey to set. + * @return This builder for chaining. + */ + public Builder setDestinationKmsKey(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + destinationKmsKey_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The name of the Cloud KMS key that is used to encrypt the
+     * destination object. The Cloud KMS key must be located in same location as
+     * the object. If the parameter is not specified, the request uses the
+     * destination bucket's default encryption key, if any, or else the
+     * Google-managed encryption key.
+     * 
+ * + * + * string destination_kms_key = 27 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearDestinationKmsKey() { + destinationKmsKey_ = getDefaultInstance().getDestinationKmsKey(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The name of the Cloud KMS key that is used to encrypt the
+     * destination object. The Cloud KMS key must be located in same location as
+     * the object. If the parameter is not specified, the request uses the
+     * destination bucket's default encryption key, if any, or else the
+     * Google-managed encryption key.
+     * 
+ * + * + * string destination_kms_key = 27 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for destinationKmsKey to set. + * @return This builder for chaining. + */ + public Builder setDestinationKmsKeyBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + destinationKmsKey_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.storage.v2.Object destination_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + destinationBuilder_; + + /** + * + * + *
+     * Optional. Properties of the destination, post-rewrite object.
+     * The `name`, `bucket` and `kms_key` fields must not be populated (these
+     * values are specified in the `destination_name`, `destination_bucket`, and
+     * `destination_kms_key` fields).
+     * If `destination` is present it is used to construct the destination
+     * object's metadata; otherwise the destination object's metadata is
+     * copied from the source object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the destination field is set. + */ + public boolean hasDestination() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Optional. Properties of the destination, post-rewrite object.
+     * The `name`, `bucket` and `kms_key` fields must not be populated (these
+     * values are specified in the `destination_name`, `destination_bucket`, and
+     * `destination_kms_key` fields).
+     * If `destination` is present it is used to construct the destination
+     * object's metadata; otherwise the destination object's metadata is
+     * copied from the source object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The destination. + */ + public com.google.storage.v2.Object getDestination() { + if (destinationBuilder_ == null) { + return destination_ == null + ? com.google.storage.v2.Object.getDefaultInstance() + : destination_; + } else { + return destinationBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Properties of the destination, post-rewrite object.
+     * The `name`, `bucket` and `kms_key` fields must not be populated (these
+     * values are specified in the `destination_name`, `destination_bucket`, and
+     * `destination_kms_key` fields).
+     * If `destination` is present it is used to construct the destination
+     * object's metadata; otherwise the destination object's metadata is
+     * copied from the source object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setDestination(com.google.storage.v2.Object value) { + if (destinationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + destination_ = value; + } else { + destinationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Properties of the destination, post-rewrite object.
+     * The `name`, `bucket` and `kms_key` fields must not be populated (these
+     * values are specified in the `destination_name`, `destination_bucket`, and
+     * `destination_kms_key` fields).
+     * If `destination` is present it is used to construct the destination
+     * object's metadata; otherwise the destination object's metadata is
+     * copied from the source object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setDestination(com.google.storage.v2.Object.Builder builderForValue) { + if (destinationBuilder_ == null) { + destination_ = builderForValue.build(); + } else { + destinationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Properties of the destination, post-rewrite object.
+     * The `name`, `bucket` and `kms_key` fields must not be populated (these
+     * values are specified in the `destination_name`, `destination_bucket`, and
+     * `destination_kms_key` fields).
+     * If `destination` is present it is used to construct the destination
+     * object's metadata; otherwise the destination object's metadata is
+     * copied from the source object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeDestination(com.google.storage.v2.Object value) { + if (destinationBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && destination_ != null + && destination_ != com.google.storage.v2.Object.getDefaultInstance()) { + getDestinationBuilder().mergeFrom(value); + } else { + destination_ = value; + } + } else { + destinationBuilder_.mergeFrom(value); + } + if (destination_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Properties of the destination, post-rewrite object.
+     * The `name`, `bucket` and `kms_key` fields must not be populated (these
+     * values are specified in the `destination_name`, `destination_bucket`, and
+     * `destination_kms_key` fields).
+     * If `destination` is present it is used to construct the destination
+     * object's metadata; otherwise the destination object's metadata is
+     * copied from the source object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearDestination() { + bitField0_ = (bitField0_ & ~0x00000008); + destination_ = null; + if (destinationBuilder_ != null) { + destinationBuilder_.dispose(); + destinationBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Properties of the destination, post-rewrite object.
+     * The `name`, `bucket` and `kms_key` fields must not be populated (these
+     * values are specified in the `destination_name`, `destination_bucket`, and
+     * `destination_kms_key` fields).
+     * If `destination` is present it is used to construct the destination
+     * object's metadata; otherwise the destination object's metadata is
+     * copied from the source object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.Object.Builder getDestinationBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetDestinationFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Properties of the destination, post-rewrite object.
+     * The `name`, `bucket` and `kms_key` fields must not be populated (these
+     * values are specified in the `destination_name`, `destination_bucket`, and
+     * `destination_kms_key` fields).
+     * If `destination` is present it is used to construct the destination
+     * object's metadata; otherwise the destination object's metadata is
+     * copied from the source object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectOrBuilder getDestinationOrBuilder() { + if (destinationBuilder_ != null) { + return destinationBuilder_.getMessageOrBuilder(); + } else { + return destination_ == null + ? com.google.storage.v2.Object.getDefaultInstance() + : destination_; + } + } + + /** + * + * + *
+     * Optional. Properties of the destination, post-rewrite object.
+     * The `name`, `bucket` and `kms_key` fields must not be populated (these
+     * values are specified in the `destination_name`, `destination_bucket`, and
+     * `destination_kms_key` fields).
+     * If `destination` is present it is used to construct the destination
+     * object's metadata; otherwise the destination object's metadata is
+     * copied from the source object.
+     * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + internalGetDestinationFieldBuilder() { + if (destinationBuilder_ == null) { + destinationBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder>( + getDestination(), getParentForChildren(), isClean()); + destination_ = null; + } + return destinationBuilder_; + } + + private java.lang.Object sourceBucket_ = ""; + + /** + * + * + *
+     * Required. Name of the bucket in which to find the source object.
+     * 
+ * + * + * string source_bucket = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The sourceBucket. + */ + public java.lang.String getSourceBucket() { + java.lang.Object ref = sourceBucket_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceBucket_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which to find the source object.
+     * 
+ * + * + * string source_bucket = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for sourceBucket. + */ + public com.google.protobuf.ByteString getSourceBucketBytes() { + java.lang.Object ref = sourceBucket_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceBucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the bucket in which to find the source object.
+     * 
+ * + * + * string source_bucket = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The sourceBucket to set. + * @return This builder for chaining. + */ + public Builder setSourceBucket(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sourceBucket_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which to find the source object.
+     * 
+ * + * + * string source_bucket = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearSourceBucket() { + sourceBucket_ = getDefaultInstance().getSourceBucket(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the bucket in which to find the source object.
+     * 
+ * + * + * string source_bucket = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for sourceBucket to set. + * @return This builder for chaining. + */ + public Builder setSourceBucketBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sourceBucket_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private java.lang.Object sourceObject_ = ""; + + /** + * + * + *
+     * Required. Name of the source object.
+     * 
+ * + * string source_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sourceObject. + */ + public java.lang.String getSourceObject() { + java.lang.Object ref = sourceObject_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceObject_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the source object.
+     * 
+ * + * string source_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sourceObject. + */ + public com.google.protobuf.ByteString getSourceObjectBytes() { + java.lang.Object ref = sourceObject_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceObject_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the source object.
+     * 
+ * + * string source_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The sourceObject to set. + * @return This builder for chaining. + */ + public Builder setSourceObject(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sourceObject_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the source object.
+     * 
+ * + * string source_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearSourceObject() { + sourceObject_ = getDefaultInstance().getSourceObject(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the source object.
+     * 
+ * + * string source_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for sourceObject to set. + * @return This builder for chaining. + */ + public Builder setSourceObjectBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sourceObject_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private long sourceGeneration_; + + /** + * + * + *
+     * Optional. If present, selects a specific revision of the source object (as
+     * opposed to the latest version, the default).
+     * 
+ * + * int64 source_generation = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The sourceGeneration. + */ + @java.lang.Override + public long getSourceGeneration() { + return sourceGeneration_; + } + + /** + * + * + *
+     * Optional. If present, selects a specific revision of the source object (as
+     * opposed to the latest version, the default).
+     * 
+ * + * int64 source_generation = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The sourceGeneration to set. + * @return This builder for chaining. + */ + public Builder setSourceGeneration(long value) { + + sourceGeneration_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If present, selects a specific revision of the source object (as
+     * opposed to the latest version, the default).
+     * 
+ * + * int64 source_generation = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearSourceGeneration() { + bitField0_ = (bitField0_ & ~0x00000040); + sourceGeneration_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object rewriteToken_ = ""; + + /** + * + * + *
+     * Optional. Include this field (from the previous rewrite response) on each
+     * rewrite request after the first one, until the rewrite response 'done' flag
+     * is true. Calls that provide a rewriteToken can omit all other request
+     * fields, but if included those fields must match the values provided in the
+     * first rewrite request.
+     * 
+ * + * string rewrite_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The rewriteToken. + */ + public java.lang.String getRewriteToken() { + java.lang.Object ref = rewriteToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rewriteToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Include this field (from the previous rewrite response) on each
+     * rewrite request after the first one, until the rewrite response 'done' flag
+     * is true. Calls that provide a rewriteToken can omit all other request
+     * fields, but if included those fields must match the values provided in the
+     * first rewrite request.
+     * 
+ * + * string rewrite_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for rewriteToken. + */ + public com.google.protobuf.ByteString getRewriteTokenBytes() { + java.lang.Object ref = rewriteToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rewriteToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Include this field (from the previous rewrite response) on each
+     * rewrite request after the first one, until the rewrite response 'done' flag
+     * is true. Calls that provide a rewriteToken can omit all other request
+     * fields, but if included those fields must match the values provided in the
+     * first rewrite request.
+     * 
+ * + * string rewrite_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The rewriteToken to set. + * @return This builder for chaining. + */ + public Builder setRewriteToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + rewriteToken_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Include this field (from the previous rewrite response) on each
+     * rewrite request after the first one, until the rewrite response 'done' flag
+     * is true. Calls that provide a rewriteToken can omit all other request
+     * fields, but if included those fields must match the values provided in the
+     * first rewrite request.
+     * 
+ * + * string rewrite_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearRewriteToken() { + rewriteToken_ = getDefaultInstance().getRewriteToken(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Include this field (from the previous rewrite response) on each
+     * rewrite request after the first one, until the rewrite response 'done' flag
+     * is true. Calls that provide a rewriteToken can omit all other request
+     * fields, but if included those fields must match the values provided in the
+     * first rewrite request.
+     * 
+ * + * string rewrite_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for rewriteToken to set. + * @return This builder for chaining. + */ + public Builder setRewriteTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + rewriteToken_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + private java.lang.Object destinationPredefinedAcl_ = ""; + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to the destination
+     * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string destination_predefined_acl = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The destinationPredefinedAcl. + */ + public java.lang.String getDestinationPredefinedAcl() { + java.lang.Object ref = destinationPredefinedAcl_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationPredefinedAcl_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to the destination
+     * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string destination_predefined_acl = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The bytes for destinationPredefinedAcl. + */ + public com.google.protobuf.ByteString getDestinationPredefinedAclBytes() { + java.lang.Object ref = destinationPredefinedAcl_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationPredefinedAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to the destination
+     * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string destination_predefined_acl = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The destinationPredefinedAcl to set. + * @return This builder for chaining. + */ + public Builder setDestinationPredefinedAcl(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + destinationPredefinedAcl_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to the destination
+     * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string destination_predefined_acl = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearDestinationPredefinedAcl() { + destinationPredefinedAcl_ = getDefaultInstance().getDestinationPredefinedAcl(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to the destination
+     * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string destination_predefined_acl = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The bytes for destinationPredefinedAcl to set. + * @return This builder for chaining. + */ + public Builder setDestinationPredefinedAclBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + destinationPredefinedAcl_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + private long ifGenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 7; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000200) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 7; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 7; + * + * @param value The ifGenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationMatch(long value) { + + ifGenerationMatch_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 7; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000200); + ifGenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifGenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 8; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000400) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 8; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 8; + * + * @param value The ifGenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationNotMatch(long value) { + + ifGenerationNotMatch_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 8; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000400); + ifGenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the destination object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 9; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000800) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the destination object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 9; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the destination object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 9; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the destination object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 9; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000800); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the destination object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 10; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00001000) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the destination object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 10; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the destination object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 10; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the destination object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 10; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00001000); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long ifSourceGenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's live
+     * generation matches the given value.
+     * 
+ * + * optional int64 if_source_generation_match = 11; + * + * @return Whether the ifSourceGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceGenerationMatch() { + return ((bitField0_ & 0x00002000) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's live
+     * generation matches the given value.
+     * 
+ * + * optional int64 if_source_generation_match = 11; + * + * @return The ifSourceGenerationMatch. + */ + @java.lang.Override + public long getIfSourceGenerationMatch() { + return ifSourceGenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's live
+     * generation matches the given value.
+     * 
+ * + * optional int64 if_source_generation_match = 11; + * + * @param value The ifSourceGenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfSourceGenerationMatch(long value) { + + ifSourceGenerationMatch_ = value; + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's live
+     * generation matches the given value.
+     * 
+ * + * optional int64 if_source_generation_match = 11; + * + * @return This builder for chaining. + */ + public Builder clearIfSourceGenerationMatch() { + bitField0_ = (bitField0_ & ~0x00002000); + ifSourceGenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifSourceGenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's live
+     * generation does not match the given value.
+     * 
+ * + * optional int64 if_source_generation_not_match = 12; + * + * @return Whether the ifSourceGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceGenerationNotMatch() { + return ((bitField0_ & 0x00004000) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's live
+     * generation does not match the given value.
+     * 
+ * + * optional int64 if_source_generation_not_match = 12; + * + * @return The ifSourceGenerationNotMatch. + */ + @java.lang.Override + public long getIfSourceGenerationNotMatch() { + return ifSourceGenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's live
+     * generation does not match the given value.
+     * 
+ * + * optional int64 if_source_generation_not_match = 12; + * + * @param value The ifSourceGenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfSourceGenerationNotMatch(long value) { + + ifSourceGenerationNotMatch_ = value; + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's live
+     * generation does not match the given value.
+     * 
+ * + * optional int64 if_source_generation_not_match = 12; + * + * @return This builder for chaining. + */ + public Builder clearIfSourceGenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00004000); + ifSourceGenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long ifSourceMetagenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_source_metageneration_match = 13; + * + * @return Whether the ifSourceMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceMetagenerationMatch() { + return ((bitField0_ & 0x00008000) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_source_metageneration_match = 13; + * + * @return The ifSourceMetagenerationMatch. + */ + @java.lang.Override + public long getIfSourceMetagenerationMatch() { + return ifSourceMetagenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_source_metageneration_match = 13; + * + * @param value The ifSourceMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfSourceMetagenerationMatch(long value) { + + ifSourceMetagenerationMatch_ = value; + bitField0_ |= 0x00008000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_source_metageneration_match = 13; + * + * @return This builder for chaining. + */ + public Builder clearIfSourceMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00008000); + ifSourceMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifSourceMetagenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_source_metageneration_not_match = 14; + * + * @return Whether the ifSourceMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfSourceMetagenerationNotMatch() { + return ((bitField0_ & 0x00010000) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_source_metageneration_not_match = 14; + * + * @return The ifSourceMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfSourceMetagenerationNotMatch() { + return ifSourceMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_source_metageneration_not_match = 14; + * + * @param value The ifSourceMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfSourceMetagenerationNotMatch(long value) { + + ifSourceMetagenerationNotMatch_ = value; + bitField0_ |= 0x00010000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the source object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_source_metageneration_not_match = 14; + * + * @return This builder for chaining. + */ + public Builder clearIfSourceMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00010000); + ifSourceMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long maxBytesRewrittenPerCall_; + + /** + * + * + *
+     * Optional. The maximum number of bytes that are rewritten per rewrite
+     * request. Most callers shouldn't need to specify this parameter - it is
+     * primarily in place to support testing. If specified the value must be an
+     * integral multiple of 1 MiB (1048576). Also, this only applies to requests
+     * where the source and destination span locations and/or storage classes.
+     * Finally, this value must not change across rewrite calls else you'll get an
+     * error that the `rewriteToken` is invalid.
+     * 
+ * + * int64 max_bytes_rewritten_per_call = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The maxBytesRewrittenPerCall. + */ + @java.lang.Override + public long getMaxBytesRewrittenPerCall() { + return maxBytesRewrittenPerCall_; + } + + /** + * + * + *
+     * Optional. The maximum number of bytes that are rewritten per rewrite
+     * request. Most callers shouldn't need to specify this parameter - it is
+     * primarily in place to support testing. If specified the value must be an
+     * integral multiple of 1 MiB (1048576). Also, this only applies to requests
+     * where the source and destination span locations and/or storage classes.
+     * Finally, this value must not change across rewrite calls else you'll get an
+     * error that the `rewriteToken` is invalid.
+     * 
+ * + * int64 max_bytes_rewritten_per_call = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The maxBytesRewrittenPerCall to set. + * @return This builder for chaining. + */ + public Builder setMaxBytesRewrittenPerCall(long value) { + + maxBytesRewrittenPerCall_ = value; + bitField0_ |= 0x00020000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The maximum number of bytes that are rewritten per rewrite
+     * request. Most callers shouldn't need to specify this parameter - it is
+     * primarily in place to support testing. If specified the value must be an
+     * integral multiple of 1 MiB (1048576). Also, this only applies to requests
+     * where the source and destination span locations and/or storage classes.
+     * Finally, this value must not change across rewrite calls else you'll get an
+     * error that the `rewriteToken` is invalid.
+     * 
+ * + * int64 max_bytes_rewritten_per_call = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearMaxBytesRewrittenPerCall() { + bitField0_ = (bitField0_ & ~0x00020000); + maxBytesRewrittenPerCall_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object copySourceEncryptionAlgorithm_ = ""; + + /** + * + * + *
+     * Optional. The algorithm used to encrypt the source object, if any. Used if
+     * the source object was encrypted with a Customer-Supplied Encryption Key.
+     * 
+ * + * string copy_source_encryption_algorithm = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The copySourceEncryptionAlgorithm. + */ + public java.lang.String getCopySourceEncryptionAlgorithm() { + java.lang.Object ref = copySourceEncryptionAlgorithm_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + copySourceEncryptionAlgorithm_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The algorithm used to encrypt the source object, if any. Used if
+     * the source object was encrypted with a Customer-Supplied Encryption Key.
+     * 
+ * + * string copy_source_encryption_algorithm = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The bytes for copySourceEncryptionAlgorithm. + */ + public com.google.protobuf.ByteString getCopySourceEncryptionAlgorithmBytes() { + java.lang.Object ref = copySourceEncryptionAlgorithm_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + copySourceEncryptionAlgorithm_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The algorithm used to encrypt the source object, if any. Used if
+     * the source object was encrypted with a Customer-Supplied Encryption Key.
+     * 
+ * + * string copy_source_encryption_algorithm = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The copySourceEncryptionAlgorithm to set. + * @return This builder for chaining. + */ + public Builder setCopySourceEncryptionAlgorithm(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + copySourceEncryptionAlgorithm_ = value; + bitField0_ |= 0x00040000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The algorithm used to encrypt the source object, if any. Used if
+     * the source object was encrypted with a Customer-Supplied Encryption Key.
+     * 
+ * + * string copy_source_encryption_algorithm = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearCopySourceEncryptionAlgorithm() { + copySourceEncryptionAlgorithm_ = getDefaultInstance().getCopySourceEncryptionAlgorithm(); + bitField0_ = (bitField0_ & ~0x00040000); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The algorithm used to encrypt the source object, if any. Used if
+     * the source object was encrypted with a Customer-Supplied Encryption Key.
+     * 
+ * + * string copy_source_encryption_algorithm = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The bytes for copySourceEncryptionAlgorithm to set. + * @return This builder for chaining. + */ + public Builder setCopySourceEncryptionAlgorithmBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + copySourceEncryptionAlgorithm_ = value; + bitField0_ |= 0x00040000; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString copySourceEncryptionKeyBytes_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * Optional. The raw bytes (not base64-encoded) AES-256 encryption key used to
+     * encrypt the source object, if it was encrypted with a Customer-Supplied
+     * Encryption Key.
+     * 
+ * + * bytes copy_source_encryption_key_bytes = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The copySourceEncryptionKeyBytes. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCopySourceEncryptionKeyBytes() { + return copySourceEncryptionKeyBytes_; + } + + /** + * + * + *
+     * Optional. The raw bytes (not base64-encoded) AES-256 encryption key used to
+     * encrypt the source object, if it was encrypted with a Customer-Supplied
+     * Encryption Key.
+     * 
+ * + * bytes copy_source_encryption_key_bytes = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The copySourceEncryptionKeyBytes to set. + * @return This builder for chaining. + */ + public Builder setCopySourceEncryptionKeyBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + copySourceEncryptionKeyBytes_ = value; + bitField0_ |= 0x00080000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The raw bytes (not base64-encoded) AES-256 encryption key used to
+     * encrypt the source object, if it was encrypted with a Customer-Supplied
+     * Encryption Key.
+     * 
+ * + * bytes copy_source_encryption_key_bytes = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearCopySourceEncryptionKeyBytes() { + bitField0_ = (bitField0_ & ~0x00080000); + copySourceEncryptionKeyBytes_ = getDefaultInstance().getCopySourceEncryptionKeyBytes(); + onChanged(); + return this; + } + + private com.google.protobuf.ByteString copySourceEncryptionKeySha256Bytes_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * Optional. The raw bytes (not base64-encoded) SHA256 hash of the encryption
+     * key used to encrypt the source object, if it was encrypted with a
+     * Customer-Supplied Encryption Key.
+     * 
+ * + * + * bytes copy_source_encryption_key_sha256_bytes = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The copySourceEncryptionKeySha256Bytes. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCopySourceEncryptionKeySha256Bytes() { + return copySourceEncryptionKeySha256Bytes_; + } + + /** + * + * + *
+     * Optional. The raw bytes (not base64-encoded) SHA256 hash of the encryption
+     * key used to encrypt the source object, if it was encrypted with a
+     * Customer-Supplied Encryption Key.
+     * 
+ * + * + * bytes copy_source_encryption_key_sha256_bytes = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The copySourceEncryptionKeySha256Bytes to set. + * @return This builder for chaining. + */ + public Builder setCopySourceEncryptionKeySha256Bytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + copySourceEncryptionKeySha256Bytes_ = value; + bitField0_ |= 0x00100000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The raw bytes (not base64-encoded) SHA256 hash of the encryption
+     * key used to encrypt the source object, if it was encrypted with a
+     * Customer-Supplied Encryption Key.
+     * 
+ * + * + * bytes copy_source_encryption_key_sha256_bytes = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearCopySourceEncryptionKeySha256Bytes() { + bitField0_ = (bitField0_ & ~0x00100000); + copySourceEncryptionKeySha256Bytes_ = + getDefaultInstance().getCopySourceEncryptionKeySha256Bytes(); + onChanged(); + return this; + } + + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + commonObjectRequestParamsBuilder_; + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00200000) != 0); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + if (commonObjectRequestParamsBuilder_ == null) { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } else { + return commonObjectRequestParamsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonObjectRequestParams_ = value; + } else { + commonObjectRequestParamsBuilder_.setMessage(value); + } + bitField0_ |= 0x00200000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams.Builder builderForValue) { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParams_ = builderForValue.build(); + } else { + commonObjectRequestParamsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00200000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (((bitField0_ & 0x00200000) != 0) + && commonObjectRequestParams_ != null + && commonObjectRequestParams_ + != com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance()) { + getCommonObjectRequestParamsBuilder().mergeFrom(value); + } else { + commonObjectRequestParams_ = value; + } + } else { + commonObjectRequestParamsBuilder_.mergeFrom(value); + } + if (commonObjectRequestParams_ != null) { + bitField0_ |= 0x00200000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCommonObjectRequestParams() { + bitField0_ = (bitField0_ & ~0x00200000); + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParams.Builder + getCommonObjectRequestParamsBuilder() { + bitField0_ |= 0x00200000; + onChanged(); + return internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + if (commonObjectRequestParamsBuilder_ != null) { + return commonObjectRequestParamsBuilder_.getMessageOrBuilder(); + } else { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + internalGetCommonObjectRequestParamsFieldBuilder() { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParamsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder>( + getCommonObjectRequestParams(), getParentForChildren(), isClean()); + commonObjectRequestParams_ = null; + } + return commonObjectRequestParamsBuilder_; + } + + private com.google.storage.v2.ObjectChecksums objectChecksums_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + objectChecksumsBuilder_; + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the destination object after rewriting.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + public boolean hasObjectChecksums() { + return ((bitField0_ & 0x00400000) != 0); + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the destination object after rewriting.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + public com.google.storage.v2.ObjectChecksums getObjectChecksums() { + if (objectChecksumsBuilder_ == null) { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } else { + return objectChecksumsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the destination object after rewriting.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectChecksums(com.google.storage.v2.ObjectChecksums value) { + if (objectChecksumsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + objectChecksums_ = value; + } else { + objectChecksumsBuilder_.setMessage(value); + } + bitField0_ |= 0x00400000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the destination object after rewriting.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectChecksums( + com.google.storage.v2.ObjectChecksums.Builder builderForValue) { + if (objectChecksumsBuilder_ == null) { + objectChecksums_ = builderForValue.build(); + } else { + objectChecksumsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00400000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the destination object after rewriting.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeObjectChecksums(com.google.storage.v2.ObjectChecksums value) { + if (objectChecksumsBuilder_ == null) { + if (((bitField0_ & 0x00400000) != 0) + && objectChecksums_ != null + && objectChecksums_ != com.google.storage.v2.ObjectChecksums.getDefaultInstance()) { + getObjectChecksumsBuilder().mergeFrom(value); + } else { + objectChecksums_ = value; + } + } else { + objectChecksumsBuilder_.mergeFrom(value); + } + if (objectChecksums_ != null) { + bitField0_ |= 0x00400000; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the destination object after rewriting.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearObjectChecksums() { + bitField0_ = (bitField0_ & ~0x00400000); + objectChecksums_ = null; + if (objectChecksumsBuilder_ != null) { + objectChecksumsBuilder_.dispose(); + objectChecksumsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the destination object after rewriting.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectChecksums.Builder getObjectChecksumsBuilder() { + bitField0_ |= 0x00400000; + onChanged(); + return internalGetObjectChecksumsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the destination object after rewriting.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder() { + if (objectChecksumsBuilder_ != null) { + return objectChecksumsBuilder_.getMessageOrBuilder(); + } else { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the destination object after rewriting.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + internalGetObjectChecksumsFieldBuilder() { + if (objectChecksumsBuilder_ == null) { + objectChecksumsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder>( + getObjectChecksums(), getParentForChildren(), isClean()); + objectChecksums_ = null; + } + return objectChecksumsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.RewriteObjectRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.RewriteObjectRequest) + private static final com.google.storage.v2.RewriteObjectRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.RewriteObjectRequest(); + } + + public static com.google.storage.v2.RewriteObjectRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RewriteObjectRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.RewriteObjectRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RewriteObjectRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RewriteObjectRequestOrBuilder.java new file mode 100644 index 000000000000..aa847c05e92f --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RewriteObjectRequestOrBuilder.java @@ -0,0 +1,736 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface RewriteObjectRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.RewriteObjectRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Immutable. The name of the destination object.
+   * See the
+   * [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+   * Example: `test.txt`
+   * The `name` field by itself does not uniquely identify a Cloud Storage
+   * object. A Cloud Storage object is uniquely identified by the tuple of
+   * (bucket, object, generation).
+   * 
+ * + * + * string destination_name = 24 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The destinationName. + */ + java.lang.String getDestinationName(); + + /** + * + * + *
+   * Required. Immutable. The name of the destination object.
+   * See the
+   * [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+   * Example: `test.txt`
+   * The `name` field by itself does not uniquely identify a Cloud Storage
+   * object. A Cloud Storage object is uniquely identified by the tuple of
+   * (bucket, object, generation).
+   * 
+ * + * + * string destination_name = 24 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The bytes for destinationName. + */ + com.google.protobuf.ByteString getDestinationNameBytes(); + + /** + * + * + *
+   * Required. Immutable. The name of the bucket containing the destination
+   * object.
+   * 
+ * + * + * string destination_bucket = 25 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The destinationBucket. + */ + java.lang.String getDestinationBucket(); + + /** + * + * + *
+   * Required. Immutable. The name of the bucket containing the destination
+   * object.
+   * 
+ * + * + * string destination_bucket = 25 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for destinationBucket. + */ + com.google.protobuf.ByteString getDestinationBucketBytes(); + + /** + * + * + *
+   * Optional. The name of the Cloud KMS key that is used to encrypt the
+   * destination object. The Cloud KMS key must be located in the same location
+   * the object. If the parameter is not specified, the request uses the
+   * destination bucket's default encryption key, if any, or else the
+   * Google-managed encryption key.
+   * 
+ * + * + * string destination_kms_key = 27 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The destinationKmsKey. + */ + java.lang.String getDestinationKmsKey(); + + /** + * + * + *
+   * Optional. The name of the Cloud KMS key that is used to encrypt the
+   * destination object. The Cloud KMS key must be located in the same location
+   * the object. If the parameter is not specified, the request uses the
+   * destination bucket's default encryption key, if any, or else the
+   * Google-managed encryption key.
+   * 
+ * + * + * string destination_kms_key = 27 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for destinationKmsKey. + */ + com.google.protobuf.ByteString getDestinationKmsKeyBytes(); + + /** + * + * + *
+   * Optional. Properties of the destination, post-rewrite object.
+   * The `name`, `bucket` and `kms_key` fields must not be populated (these
+   * values are specified in the `destination_name`, `destination_bucket`, and
+   * `destination_kms_key` fields).
+   * If `destination` is present it is used to construct the destination
+   * object's metadata; otherwise the destination object's metadata is
+   * copied from the source object.
+   * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the destination field is set. + */ + boolean hasDestination(); + + /** + * + * + *
+   * Optional. Properties of the destination, post-rewrite object.
+   * The `name`, `bucket` and `kms_key` fields must not be populated (these
+   * values are specified in the `destination_name`, `destination_bucket`, and
+   * `destination_kms_key` fields).
+   * If `destination` is present it is used to construct the destination
+   * object's metadata; otherwise the destination object's metadata is
+   * copied from the source object.
+   * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The destination. + */ + com.google.storage.v2.Object getDestination(); + + /** + * + * + *
+   * Optional. Properties of the destination, post-rewrite object.
+   * The `name`, `bucket` and `kms_key` fields must not be populated (these
+   * values are specified in the `destination_name`, `destination_bucket`, and
+   * `destination_kms_key` fields).
+   * If `destination` is present it is used to construct the destination
+   * object's metadata; otherwise the destination object's metadata is
+   * copied from the source object.
+   * 
+ * + * .google.storage.v2.Object destination = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ObjectOrBuilder getDestinationOrBuilder(); + + /** + * + * + *
+   * Required. Name of the bucket in which to find the source object.
+   * 
+ * + * + * string source_bucket = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The sourceBucket. + */ + java.lang.String getSourceBucket(); + + /** + * + * + *
+   * Required. Name of the bucket in which to find the source object.
+   * 
+ * + * + * string source_bucket = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for sourceBucket. + */ + com.google.protobuf.ByteString getSourceBucketBytes(); + + /** + * + * + *
+   * Required. Name of the source object.
+   * 
+ * + * string source_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sourceObject. + */ + java.lang.String getSourceObject(); + + /** + * + * + *
+   * Required. Name of the source object.
+   * 
+ * + * string source_object = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sourceObject. + */ + com.google.protobuf.ByteString getSourceObjectBytes(); + + /** + * + * + *
+   * Optional. If present, selects a specific revision of the source object (as
+   * opposed to the latest version, the default).
+   * 
+ * + * int64 source_generation = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The sourceGeneration. + */ + long getSourceGeneration(); + + /** + * + * + *
+   * Optional. Include this field (from the previous rewrite response) on each
+   * rewrite request after the first one, until the rewrite response 'done' flag
+   * is true. Calls that provide a rewriteToken can omit all other request
+   * fields, but if included those fields must match the values provided in the
+   * first rewrite request.
+   * 
+ * + * string rewrite_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The rewriteToken. + */ + java.lang.String getRewriteToken(); + + /** + * + * + *
+   * Optional. Include this field (from the previous rewrite response) on each
+   * rewrite request after the first one, until the rewrite response 'done' flag
+   * is true. Calls that provide a rewriteToken can omit all other request
+   * fields, but if included those fields must match the values provided in the
+   * first rewrite request.
+   * 
+ * + * string rewrite_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for rewriteToken. + */ + com.google.protobuf.ByteString getRewriteTokenBytes(); + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to the destination
+   * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string destination_predefined_acl = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The destinationPredefinedAcl. + */ + java.lang.String getDestinationPredefinedAcl(); + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to the destination
+   * object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string destination_predefined_acl = 28 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for destinationPredefinedAcl. + */ + com.google.protobuf.ByteString getDestinationPredefinedAclBytes(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 7; + * + * @return Whether the ifGenerationMatch field is set. + */ + boolean hasIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 7; + * + * @return The ifGenerationMatch. + */ + long getIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 8; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + boolean hasIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 8; + * + * @return The ifGenerationNotMatch. + */ + long getIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the destination object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 9; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the destination object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 9; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the destination object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 10; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the destination object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 10; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's live
+   * generation matches the given value.
+   * 
+ * + * optional int64 if_source_generation_match = 11; + * + * @return Whether the ifSourceGenerationMatch field is set. + */ + boolean hasIfSourceGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's live
+   * generation matches the given value.
+   * 
+ * + * optional int64 if_source_generation_match = 11; + * + * @return The ifSourceGenerationMatch. + */ + long getIfSourceGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's live
+   * generation does not match the given value.
+   * 
+ * + * optional int64 if_source_generation_not_match = 12; + * + * @return Whether the ifSourceGenerationNotMatch field is set. + */ + boolean hasIfSourceGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's live
+   * generation does not match the given value.
+   * 
+ * + * optional int64 if_source_generation_not_match = 12; + * + * @return The ifSourceGenerationNotMatch. + */ + long getIfSourceGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_source_metageneration_match = 13; + * + * @return Whether the ifSourceMetagenerationMatch field is set. + */ + boolean hasIfSourceMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_source_metageneration_match = 13; + * + * @return The ifSourceMetagenerationMatch. + */ + long getIfSourceMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_source_metageneration_not_match = 14; + * + * @return Whether the ifSourceMetagenerationNotMatch field is set. + */ + boolean hasIfSourceMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the source object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_source_metageneration_not_match = 14; + * + * @return The ifSourceMetagenerationNotMatch. + */ + long getIfSourceMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. The maximum number of bytes that are rewritten per rewrite
+   * request. Most callers shouldn't need to specify this parameter - it is
+   * primarily in place to support testing. If specified, the value must be an
+   * integral multiple of 1 MiB (1048576). Also, this only applies to requests
+   * where the source and destination span locations and/or storage classes.
+   * Finally, this value must not change across rewrite calls; otherwise you'll
+   * get an error indicating that the `rewriteToken` is invalid.
+   * 
+ * + * int64 max_bytes_rewritten_per_call = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The maxBytesRewrittenPerCall. + */ + long getMaxBytesRewrittenPerCall(); + + /** + * + * + *
+   * Optional. The algorithm used to encrypt the source object, if any. Used if
+   * the source object was encrypted with a Customer-Supplied Encryption Key.
+   * 
+ * + * string copy_source_encryption_algorithm = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The copySourceEncryptionAlgorithm. + */ + java.lang.String getCopySourceEncryptionAlgorithm(); + + /** + * + * + *
+   * Optional. The algorithm used to encrypt the source object, if any. Used if
+   * the source object was encrypted with a Customer-Supplied Encryption Key.
+   * 
+ * + * string copy_source_encryption_algorithm = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The bytes for copySourceEncryptionAlgorithm. + */ + com.google.protobuf.ByteString getCopySourceEncryptionAlgorithmBytes(); + + /** + * + * + *
+   * Optional. The raw bytes (not base64-encoded) AES-256 encryption key used to
+   * encrypt the source object, if it was encrypted with a Customer-Supplied
+   * Encryption Key.
+   * 
+ * + * bytes copy_source_encryption_key_bytes = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The copySourceEncryptionKeyBytes. + */ + com.google.protobuf.ByteString getCopySourceEncryptionKeyBytes(); + + /** + * + * + *
+   * Optional. The raw bytes (not base64-encoded) SHA256 hash of the encryption
+   * key used to encrypt the source object, if it was encrypted with a
+   * Customer-Supplied Encryption Key.
+   * 
+ * + * + * bytes copy_source_encryption_key_sha256_bytes = 22 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The copySourceEncryptionKeySha256Bytes. + */ + com.google.protobuf.ByteString getCopySourceEncryptionKeySha256Bytes(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + boolean hasCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 19 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.CommonObjectRequestParamsOrBuilder getCommonObjectRequestParamsOrBuilder(); + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is used to validate
+   * the destination object after rewriting.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + boolean hasObjectChecksums(); + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is used to validate
+   * the destination object after rewriting.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + com.google.storage.v2.ObjectChecksums getObjectChecksums(); + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is used to validate
+   * the destination object after rewriting.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 29 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RewriteResponse.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RewriteResponse.java new file mode 100644 index 000000000000..6cd1d99efc45 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RewriteResponse.java @@ -0,0 +1,1199 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * A rewrite response.
+ * 
+ * + * Protobuf type {@code google.storage.v2.RewriteResponse} + */ +@com.google.protobuf.Generated +public final class RewriteResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.RewriteResponse) + RewriteResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RewriteResponse"); + } + + // Use RewriteResponse.newBuilder() to construct. + private RewriteResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RewriteResponse() { + rewriteToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RewriteResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RewriteResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.RewriteResponse.class, + com.google.storage.v2.RewriteResponse.Builder.class); + } + + private int bitField0_; + public static final int TOTAL_BYTES_REWRITTEN_FIELD_NUMBER = 1; + private long totalBytesRewritten_ = 0L; + + /** + * + * + *
+   * The total bytes written so far, which can be used to provide a waiting user
+   * with a progress indicator. This property is always present in the response.
+   * 
+ * + * int64 total_bytes_rewritten = 1; + * + * @return The totalBytesRewritten. + */ + @java.lang.Override + public long getTotalBytesRewritten() { + return totalBytesRewritten_; + } + + public static final int OBJECT_SIZE_FIELD_NUMBER = 2; + private long objectSize_ = 0L; + + /** + * + * + *
+   * The total size of the object being copied in bytes. This property is always
+   * present in the response.
+   * 
+ * + * int64 object_size = 2; + * + * @return The objectSize. + */ + @java.lang.Override + public long getObjectSize() { + return objectSize_; + } + + public static final int DONE_FIELD_NUMBER = 3; + private boolean done_ = false; + + /** + * + * + *
+   * `true` if the copy is finished; otherwise, `false` if
+   * the copy is in progress. This property is always present in the response.
+   * 
+ * + * bool done = 3; + * + * @return The done. + */ + @java.lang.Override + public boolean getDone() { + return done_; + } + + public static final int REWRITE_TOKEN_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object rewriteToken_ = ""; + + /** + * + * + *
+   * A token to use in subsequent requests to continue copying data. This token
+   * is present in the response only when there is more data to copy.
+   * 
+ * + * string rewrite_token = 4; + * + * @return The rewriteToken. + */ + @java.lang.Override + public java.lang.String getRewriteToken() { + java.lang.Object ref = rewriteToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rewriteToken_ = s; + return s; + } + } + + /** + * + * + *
+   * A token to use in subsequent requests to continue copying data. This token
+   * is present in the response only when there is more data to copy.
+   * 
+ * + * string rewrite_token = 4; + * + * @return The bytes for rewriteToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRewriteTokenBytes() { + java.lang.Object ref = rewriteToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rewriteToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int RESOURCE_FIELD_NUMBER = 5; + private com.google.storage.v2.Object resource_; + + /** + * + * + *
+   * A resource containing the metadata for the copied-to object. This property
+   * is present in the response only when copying completes.
+   * 
+ * + * .google.storage.v2.Object resource = 5; + * + * @return Whether the resource field is set. + */ + @java.lang.Override + public boolean hasResource() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * A resource containing the metadata for the copied-to object. This property
+   * is present in the response only when copying completes.
+   * 
+ * + * .google.storage.v2.Object resource = 5; + * + * @return The resource. + */ + @java.lang.Override + public com.google.storage.v2.Object getResource() { + return resource_ == null ? com.google.storage.v2.Object.getDefaultInstance() : resource_; + } + + /** + * + * + *
+   * A resource containing the metadata for the copied-to object. This property
+   * is present in the response only when copying completes.
+   * 
+ * + * .google.storage.v2.Object resource = 5; + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder() { + return resource_ == null ? com.google.storage.v2.Object.getDefaultInstance() : resource_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (totalBytesRewritten_ != 0L) { + output.writeInt64(1, totalBytesRewritten_); + } + if (objectSize_ != 0L) { + output.writeInt64(2, objectSize_); + } + if (done_ != false) { + output.writeBool(3, done_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(rewriteToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, rewriteToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(5, getResource()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (totalBytesRewritten_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, totalBytesRewritten_); + } + if (objectSize_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, objectSize_); + } + if (done_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, done_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(rewriteToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, rewriteToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getResource()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + 
return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.RewriteResponse)) { + return super.equals(obj); + } + com.google.storage.v2.RewriteResponse other = (com.google.storage.v2.RewriteResponse) obj; + + if (getTotalBytesRewritten() != other.getTotalBytesRewritten()) return false; + if (getObjectSize() != other.getObjectSize()) return false; + if (getDone() != other.getDone()) return false; + if (!getRewriteToken().equals(other.getRewriteToken())) return false; + if (hasResource() != other.hasResource()) return false; + if (hasResource()) { + if (!getResource().equals(other.getResource())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TOTAL_BYTES_REWRITTEN_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getTotalBytesRewritten()); + hash = (37 * hash) + OBJECT_SIZE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getObjectSize()); + hash = (37 * hash) + DONE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getDone()); + hash = (37 * hash) + REWRITE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getRewriteToken().hashCode(); + if (hasResource()) { + hash = (37 * hash) + RESOURCE_FIELD_NUMBER; + hash = (53 * hash) + getResource().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.RewriteResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.RewriteResponse parseFrom( + 
java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.RewriteResponse parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.RewriteResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.RewriteResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.RewriteResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.RewriteResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.RewriteResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.RewriteResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.RewriteResponse parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.RewriteResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.RewriteResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.RewriteResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * A rewrite response.
+   * 
+ * + * Protobuf type {@code google.storage.v2.RewriteResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.RewriteResponse) + com.google.storage.v2.RewriteResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RewriteResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RewriteResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.RewriteResponse.class, + com.google.storage.v2.RewriteResponse.Builder.class); + } + + // Construct using com.google.storage.v2.RewriteResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetResourceFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + totalBytesRewritten_ = 0L; + objectSize_ = 0L; + done_ = false; + rewriteToken_ = ""; + resource_ = null; + if (resourceBuilder_ != null) { + resourceBuilder_.dispose(); + resourceBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_RewriteResponse_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.RewriteResponse getDefaultInstanceForType() { + return 
com.google.storage.v2.RewriteResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.RewriteResponse build() { + com.google.storage.v2.RewriteResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.RewriteResponse buildPartial() { + com.google.storage.v2.RewriteResponse result = + new com.google.storage.v2.RewriteResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.RewriteResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.totalBytesRewritten_ = totalBytesRewritten_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.objectSize_ = objectSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.done_ = done_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.rewriteToken_ = rewriteToken_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.resource_ = resourceBuilder_ == null ? 
resource_ : resourceBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.RewriteResponse) { + return mergeFrom((com.google.storage.v2.RewriteResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.RewriteResponse other) { + if (other == com.google.storage.v2.RewriteResponse.getDefaultInstance()) return this; + if (other.getTotalBytesRewritten() != 0L) { + setTotalBytesRewritten(other.getTotalBytesRewritten()); + } + if (other.getObjectSize() != 0L) { + setObjectSize(other.getObjectSize()); + } + if (other.getDone() != false) { + setDone(other.getDone()); + } + if (!other.getRewriteToken().isEmpty()) { + rewriteToken_ = other.rewriteToken_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (other.hasResource()) { + mergeResource(other.getResource()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + totalBytesRewritten_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: + { + objectSize_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + done_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + rewriteToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; 
+ } // case 34 + case 42: + { + input.readMessage( + internalGetResourceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long totalBytesRewritten_; + + /** + * + * + *
+     * The total bytes written so far, which can be used to provide a waiting user
+     * with a progress indicator. This property is always present in the response.
+     * 
+ * + * int64 total_bytes_rewritten = 1; + * + * @return The totalBytesRewritten. + */ + @java.lang.Override + public long getTotalBytesRewritten() { + return totalBytesRewritten_; + } + + /** + * + * + *
+     * The total bytes written so far, which can be used to provide a waiting user
+     * with a progress indicator. This property is always present in the response.
+     * 
+ * + * int64 total_bytes_rewritten = 1; + * + * @param value The totalBytesRewritten to set. + * @return This builder for chaining. + */ + public Builder setTotalBytesRewritten(long value) { + + totalBytesRewritten_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The total bytes written so far, which can be used to provide a waiting user
+     * with a progress indicator. This property is always present in the response.
+     * 
+ * + * int64 total_bytes_rewritten = 1; + * + * @return This builder for chaining. + */ + public Builder clearTotalBytesRewritten() { + bitField0_ = (bitField0_ & ~0x00000001); + totalBytesRewritten_ = 0L; + onChanged(); + return this; + } + + private long objectSize_; + + /** + * + * + *
+     * The total size of the object being copied in bytes. This property is always
+     * present in the response.
+     * 
+ * + * int64 object_size = 2; + * + * @return The objectSize. + */ + @java.lang.Override + public long getObjectSize() { + return objectSize_; + } + + /** + * + * + *
+     * The total size of the object being copied in bytes. This property is always
+     * present in the response.
+     * 
+ * + * int64 object_size = 2; + * + * @param value The objectSize to set. + * @return This builder for chaining. + */ + public Builder setObjectSize(long value) { + + objectSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The total size of the object being copied in bytes. This property is always
+     * present in the response.
+     * 
+ * + * int64 object_size = 2; + * + * @return This builder for chaining. + */ + public Builder clearObjectSize() { + bitField0_ = (bitField0_ & ~0x00000002); + objectSize_ = 0L; + onChanged(); + return this; + } + + private boolean done_; + + /** + * + * + *
+     * `true` if the copy is finished; otherwise, `false` if
+     * the copy is in progress. This property is always present in the response.
+     * 
+ * + * bool done = 3; + * + * @return The done. + */ + @java.lang.Override + public boolean getDone() { + return done_; + } + + /** + * + * + *
+     * `true` if the copy is finished; otherwise, `false` if
+     * the copy is in progress. This property is always present in the response.
+     * 
+ * + * bool done = 3; + * + * @param value The done to set. + * @return This builder for chaining. + */ + public Builder setDone(boolean value) { + + done_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * `true` if the copy is finished; otherwise, `false` if
+     * the copy is in progress. This property is always present in the response.
+     * 
+ * + * bool done = 3; + * + * @return This builder for chaining. + */ + public Builder clearDone() { + bitField0_ = (bitField0_ & ~0x00000004); + done_ = false; + onChanged(); + return this; + } + + private java.lang.Object rewriteToken_ = ""; + + /** + * + * + *
+     * A token to use in subsequent requests to continue copying data. This token
+     * is present in the response only when there is more data to copy.
+     * 
+ * + * string rewrite_token = 4; + * + * @return The rewriteToken. + */ + public java.lang.String getRewriteToken() { + java.lang.Object ref = rewriteToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rewriteToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * A token to use in subsequent requests to continue copying data. This token
+     * is present in the response only when there is more data to copy.
+     * 
+ * + * string rewrite_token = 4; + * + * @return The bytes for rewriteToken. + */ + public com.google.protobuf.ByteString getRewriteTokenBytes() { + java.lang.Object ref = rewriteToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rewriteToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * A token to use in subsequent requests to continue copying data. This token
+     * is present in the response only when there is more data to copy.
+     * 
+ * + * string rewrite_token = 4; + * + * @param value The rewriteToken to set. + * @return This builder for chaining. + */ + public Builder setRewriteToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + rewriteToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * A token to use in subsequent requests to continue copying data. This token
+     * is present in the response only when there is more data to copy.
+     * 
+ * + * string rewrite_token = 4; + * + * @return This builder for chaining. + */ + public Builder clearRewriteToken() { + rewriteToken_ = getDefaultInstance().getRewriteToken(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * A token to use in subsequent requests to continue copying data. This token
+     * is present in the response only when there is more data to copy.
+     * 
+ * + * string rewrite_token = 4; + * + * @param value The bytes for rewriteToken to set. + * @return This builder for chaining. + */ + public Builder setRewriteTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + rewriteToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private com.google.storage.v2.Object resource_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + resourceBuilder_; + + /** + * + * + *
+     * A resource containing the metadata for the copied-to object. This property
+     * is present in the response only when copying completes.
+     * 
+ * + * .google.storage.v2.Object resource = 5; + * + * @return Whether the resource field is set. + */ + public boolean hasResource() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * A resource containing the metadata for the copied-to object. This property
+     * is present in the response only when copying completes.
+     * 
+ * + * .google.storage.v2.Object resource = 5; + * + * @return The resource. + */ + public com.google.storage.v2.Object getResource() { + if (resourceBuilder_ == null) { + return resource_ == null ? com.google.storage.v2.Object.getDefaultInstance() : resource_; + } else { + return resourceBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * A resource containing the metadata for the copied-to object. This property
+     * is present in the response only when copying completes.
+     * 
+ * + * .google.storage.v2.Object resource = 5; + */ + public Builder setResource(com.google.storage.v2.Object value) { + if (resourceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + resource_ = value; + } else { + resourceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the copied-to object. This property
+     * is present in the response only when copying completes.
+     * 
+ * + * .google.storage.v2.Object resource = 5; + */ + public Builder setResource(com.google.storage.v2.Object.Builder builderForValue) { + if (resourceBuilder_ == null) { + resource_ = builderForValue.build(); + } else { + resourceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the copied-to object. This property
+     * is present in the response only when copying completes.
+     * 
+ * + * .google.storage.v2.Object resource = 5; + */ + public Builder mergeResource(com.google.storage.v2.Object value) { + if (resourceBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && resource_ != null + && resource_ != com.google.storage.v2.Object.getDefaultInstance()) { + getResourceBuilder().mergeFrom(value); + } else { + resource_ = value; + } + } else { + resourceBuilder_.mergeFrom(value); + } + if (resource_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the copied-to object. This property
+     * is present in the response only when copying completes.
+     * 
+ * + * .google.storage.v2.Object resource = 5; + */ + public Builder clearResource() { + bitField0_ = (bitField0_ & ~0x00000010); + resource_ = null; + if (resourceBuilder_ != null) { + resourceBuilder_.dispose(); + resourceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the copied-to object. This property
+     * is present in the response only when copying completes.
+     * 
+ * + * .google.storage.v2.Object resource = 5; + */ + public com.google.storage.v2.Object.Builder getResourceBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetResourceFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * A resource containing the metadata for the copied-to object. This property
+     * is present in the response only when copying completes.
+     * 
+ * + * .google.storage.v2.Object resource = 5; + */ + public com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder() { + if (resourceBuilder_ != null) { + return resourceBuilder_.getMessageOrBuilder(); + } else { + return resource_ == null ? com.google.storage.v2.Object.getDefaultInstance() : resource_; + } + } + + /** + * + * + *
+     * A resource containing the metadata for the copied-to object. This property
+     * is present in the response only when copying completes.
+     * 
+ * + * .google.storage.v2.Object resource = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + internalGetResourceFieldBuilder() { + if (resourceBuilder_ == null) { + resourceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder>( + getResource(), getParentForChildren(), isClean()); + resource_ = null; + } + return resourceBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.RewriteResponse) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.RewriteResponse) + private static final com.google.storage.v2.RewriteResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.RewriteResponse(); + } + + public static com.google.storage.v2.RewriteResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RewriteResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + 
+ @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.RewriteResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RewriteResponseOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RewriteResponseOrBuilder.java new file mode 100644 index 000000000000..f4ec2c33098a --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/RewriteResponseOrBuilder.java @@ -0,0 +1,138 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface RewriteResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.RewriteResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The total bytes written so far, which can be used to provide a waiting user
+   * with a progress indicator. This property is always present in the response.
+   * 
+ * + * int64 total_bytes_rewritten = 1; + * + * @return The totalBytesRewritten. + */ + long getTotalBytesRewritten(); + + /** + * + * + *
+   * The total size of the object being copied in bytes. This property is always
+   * present in the response.
+   * 
+ * + * int64 object_size = 2; + * + * @return The objectSize. + */ + long getObjectSize(); + + /** + * + * + *
+   * `true` if the copy is finished; otherwise, `false` if
+   * the copy is in progress. This property is always present in the response.
+   * 
+ * + * bool done = 3; + * + * @return The done. + */ + boolean getDone(); + + /** + * + * + *
+   * A token to use in subsequent requests to continue copying data. This token
+   * is present in the response only when there is more data to copy.
+   * 
+ * + * string rewrite_token = 4; + * + * @return The rewriteToken. + */ + java.lang.String getRewriteToken(); + + /** + * + * + *
+   * A token to use in subsequent requests to continue copying data. This token
+   * is present in the response only when there is more data to copy.
+   * 
+ * + * string rewrite_token = 4; + * + * @return The bytes for rewriteToken. + */ + com.google.protobuf.ByteString getRewriteTokenBytes(); + + /** + * + * + *
+   * A resource containing the metadata for the copied-to object. This property
+   * is present in the response only when copying completes.
+   * 
+ * + * .google.storage.v2.Object resource = 5; + * + * @return Whether the resource field is set. + */ + boolean hasResource(); + + /** + * + * + *
+   * A resource containing the metadata for the copied-to object. This property
+   * is present in the response only when copying completes.
+   * 
+ * + * .google.storage.v2.Object resource = 5; + * + * @return The resource. + */ + com.google.storage.v2.Object getResource(); + + /** + * + * + *
+   * A resource containing the metadata for the copied-to object. This property
+   * is present in the response only when copying completes.
+   * 
+ * + * .google.storage.v2.Object resource = 5; + */ + com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ServiceConstants.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ServiceConstants.java new file mode 100644 index 000000000000..22163fa17760 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ServiceConstants.java @@ -0,0 +1,973 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Shared constants.
+ * 
+ * + * Protobuf type {@code google.storage.v2.ServiceConstants} + */ +@com.google.protobuf.Generated +public final class ServiceConstants extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.ServiceConstants) + ServiceConstantsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ServiceConstants"); + } + + // Use ServiceConstants.newBuilder() to construct. + private ServiceConstants(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ServiceConstants() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ServiceConstants_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ServiceConstants_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ServiceConstants.class, + com.google.storage.v2.ServiceConstants.Builder.class); + } + + /** + * + * + *
+   * A collection of constant values meaningful to the Storage API.
+   * 
+ * + * Protobuf enum {@code google.storage.v2.ServiceConstants.Values} + */ + public enum Values implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Unused. Proto3 requires first enum to be 0.
+     * 
+ * + * VALUES_UNSPECIFIED = 0; + */ + VALUES_UNSPECIFIED(0, 0), + /** + * + * + *
+     * The maximum size chunk that can be returned in a single
+     * `ReadRequest`.
+     * 2 MiB.
+     * 
+ * + * MAX_READ_CHUNK_BYTES = 2097152; + */ + MAX_READ_CHUNK_BYTES(1, 2097152), + /** + * + * + *
+     * The maximum size of an object in MB - whether written in a single stream
+     * or composed from multiple other objects.
+     * 5 TiB.
+     * 
+ * + * MAX_OBJECT_SIZE_MB = 5242880; + */ + MAX_OBJECT_SIZE_MB(3, 5242880), + /** + * + * + *
+     * The maximum length field name that can be sent in a single
+     * custom metadata field.
+     * 1 KiB.
+     * 
+ * + * MAX_CUSTOM_METADATA_FIELD_NAME_BYTES = 1024; + */ + MAX_CUSTOM_METADATA_FIELD_NAME_BYTES(4, 1024), + /** + * + * + *
+     * The maximum length field value that can be sent in a single
+     * custom_metadata field.
+     * 4 KiB.
+     * 
+ * + * MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES = 4096; + */ + MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES(5, 4096), + /** + * + * + *
+     * The maximum total bytes that can be populated into all field names and
+     * values of the custom_metadata for one object.
+     * 8 KiB.
+     * 
+ * + * MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES = 8192; + */ + MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES(6, 8192), + /** + * + * + *
+     * The maximum total bytes that can be populated into all bucket metadata
+     * fields.
+     * 20 KiB.
+     * 
+ * + * MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES = 20480; + */ + MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES(7, 20480), + /** + * + * + *
+     * The maximum number of NotificationConfigs that can be registered
+     * for a given bucket.
+     * 
+ * + * MAX_NOTIFICATION_CONFIGS_PER_BUCKET = 100; + */ + MAX_NOTIFICATION_CONFIGS_PER_BUCKET(8, 100), + /** + * + * + *
+     * The maximum number of custom attributes per NotificationConfigs.
+     * 
+ * + * MAX_NOTIFICATION_CUSTOM_ATTRIBUTES = 5; + */ + MAX_NOTIFICATION_CUSTOM_ATTRIBUTES(10, 5), + /** + * + * + *
+     * The maximum length of a custom attribute key included in
+     * NotificationConfig.
+     * 
+ * + * MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH = 256; + */ + MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH(11, 256), + /** + * + * + *
+     * The maximum number of key/value entries per bucket label.
+     * 
+ * + * MAX_LABELS_ENTRIES_COUNT = 64; + */ + MAX_LABELS_ENTRIES_COUNT(13, 64), + /** + * + * + *
+     * The maximum character length of the key or value in a bucket
+     * label map.
+     * 
+ * + * MAX_LABELS_KEY_VALUE_LENGTH = 63; + */ + MAX_LABELS_KEY_VALUE_LENGTH(14, 63), + /** + * + * + *
+     * The maximum byte size of the key or value in a bucket label
+     * map.
+     * 
+ * + * MAX_LABELS_KEY_VALUE_BYTES = 128; + */ + MAX_LABELS_KEY_VALUE_BYTES(15, 128), + /** + * + * + *
+     * The maximum number of object IDs that can be included in a
+     * DeleteObjectsRequest.
+     * 
+ * + * MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST = 1000; + */ + MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST(16, 1000), + /** + * + * + *
+     * The maximum number of days for which a token returned by the
+     * GetListObjectsSplitPoints RPC is valid.
+     * 
+ * + * SPLIT_TOKEN_MAX_VALID_DAYS = 14; + */ + SPLIT_TOKEN_MAX_VALID_DAYS(17, 14), + UNRECOGNIZED(-1, -1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Values"); + } + + /** + * + * + *
+     * The maximum size chunk that can be sent in a single WriteObjectRequest.
+     * 2 MiB.
+     * 
+ * + * MAX_WRITE_CHUNK_BYTES = 2097152; + */ + public static final Values MAX_WRITE_CHUNK_BYTES = MAX_READ_CHUNK_BYTES; + + /** + * + * + *
+     * The maximum number of LifecycleRules that can be registered for a given
+     * bucket.
+     * 
+ * + * MAX_LIFECYCLE_RULES_PER_BUCKET = 100; + */ + public static final Values MAX_LIFECYCLE_RULES_PER_BUCKET = MAX_NOTIFICATION_CONFIGS_PER_BUCKET; + + /** + * + * + *
+     * The maximum length of a custom attribute value included in a
+     * NotificationConfig.
+     * 
+ * + * MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH = 1024; + */ + public static final Values MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH = + MAX_CUSTOM_METADATA_FIELD_NAME_BYTES; + + /** + * + * + *
+     * Unused. Proto3 requires first enum to be 0.
+     * 
+ * + * VALUES_UNSPECIFIED = 0; + */ + public static final int VALUES_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * The maximum size chunk that can be returned in a single
+     * `ReadRequest`.
+     * 2 MiB.
+     * 
+ * + * MAX_READ_CHUNK_BYTES = 2097152; + */ + public static final int MAX_READ_CHUNK_BYTES_VALUE = 2097152; + + /** + * + * + *
+     * The maximum size chunk that can be sent in a single WriteObjectRequest.
+     * 2 MiB.
+     * 
+ * + * MAX_WRITE_CHUNK_BYTES = 2097152; + */ + public static final int MAX_WRITE_CHUNK_BYTES_VALUE = 2097152; + + /** + * + * + *
+     * The maximum size of an object in MB - whether written in a single stream
+     * or composed from multiple other objects.
+     * 5 TiB.
+     * 
+ * + * MAX_OBJECT_SIZE_MB = 5242880; + */ + public static final int MAX_OBJECT_SIZE_MB_VALUE = 5242880; + + /** + * + * + *
+     * The maximum length field name that can be sent in a single
+     * custom metadata field.
+     * 1 KiB.
+     * 
+ * + * MAX_CUSTOM_METADATA_FIELD_NAME_BYTES = 1024; + */ + public static final int MAX_CUSTOM_METADATA_FIELD_NAME_BYTES_VALUE = 1024; + + /** + * + * + *
+     * The maximum length field value that can be sent in a single
+     * custom_metadata field.
+     * 4 KiB.
+     * 
+ * + * MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES = 4096; + */ + public static final int MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES_VALUE = 4096; + + /** + * + * + *
+     * The maximum total bytes that can be populated into all field names and
+     * values of the custom_metadata for one object.
+     * 8 KiB.
+     * 
+ * + * MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES = 8192; + */ + public static final int MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES_VALUE = 8192; + + /** + * + * + *
+     * The maximum total bytes that can be populated into all bucket metadata
+     * fields.
+     * 20 KiB.
+     * 
+ * + * MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES = 20480; + */ + public static final int MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES_VALUE = 20480; + + /** + * + * + *
+     * The maximum number of NotificationConfigs that can be registered
+     * for a given bucket.
+     * 
+ * + * MAX_NOTIFICATION_CONFIGS_PER_BUCKET = 100; + */ + public static final int MAX_NOTIFICATION_CONFIGS_PER_BUCKET_VALUE = 100; + + /** + * + * + *
+     * The maximum number of LifecycleRules that can be registered for a given
+     * bucket.
+     * 
+ * + * MAX_LIFECYCLE_RULES_PER_BUCKET = 100; + */ + public static final int MAX_LIFECYCLE_RULES_PER_BUCKET_VALUE = 100; + + /** + * + * + *
+     * The maximum number of custom attributes per NotificationConfigs.
+     * 
+ * + * MAX_NOTIFICATION_CUSTOM_ATTRIBUTES = 5; + */ + public static final int MAX_NOTIFICATION_CUSTOM_ATTRIBUTES_VALUE = 5; + + /** + * + * + *
+     * The maximum length of a custom attribute key included in
+     * NotificationConfig.
+     * 
+ * + * MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH = 256; + */ + public static final int MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH_VALUE = 256; + + /** + * + * + *
+     * The maximum length of a custom attribute value included in a
+     * NotificationConfig.
+     * 
+ * + * MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH = 1024; + */ + public static final int MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH_VALUE = 1024; + + /** + * + * + *
+     * The maximum number of key/value entries per bucket label.
+     * 
+ * + * MAX_LABELS_ENTRIES_COUNT = 64; + */ + public static final int MAX_LABELS_ENTRIES_COUNT_VALUE = 64; + + /** + * + * + *
+     * The maximum character length of the key or value in a bucket
+     * label map.
+     * 
+ * + * MAX_LABELS_KEY_VALUE_LENGTH = 63; + */ + public static final int MAX_LABELS_KEY_VALUE_LENGTH_VALUE = 63; + + /** + * + * + *
+     * The maximum byte size of the key or value in a bucket label
+     * map.
+     * 
+ * + * MAX_LABELS_KEY_VALUE_BYTES = 128; + */ + public static final int MAX_LABELS_KEY_VALUE_BYTES_VALUE = 128; + + /** + * + * + *
+     * The maximum number of object IDs that can be included in a
+     * DeleteObjectsRequest.
+     * 
+ * + * MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST = 1000; + */ + public static final int MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST_VALUE = 1000; + + /** + * + * + *
+     * The maximum number of days for which a token returned by the
+     * GetListObjectsSplitPoints RPC is valid.
+     * 
+ * + * SPLIT_TOKEN_MAX_VALID_DAYS = 14; + */ + public static final int SPLIT_TOKEN_MAX_VALID_DAYS_VALUE = 14; + + public final int getNumber() { + if (index == -1) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Values valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Values forNumber(int value) { + switch (value) { + case 0: + return VALUES_UNSPECIFIED; + case 2097152: + return MAX_READ_CHUNK_BYTES; + case 5242880: + return MAX_OBJECT_SIZE_MB; + case 1024: + return MAX_CUSTOM_METADATA_FIELD_NAME_BYTES; + case 4096: + return MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES; + case 8192: + return MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES; + case 20480: + return MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES; + case 100: + return MAX_NOTIFICATION_CONFIGS_PER_BUCKET; + case 5: + return MAX_NOTIFICATION_CUSTOM_ATTRIBUTES; + case 256: + return MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH; + case 64: + return MAX_LABELS_ENTRIES_COUNT; + case 63: + return MAX_LABELS_KEY_VALUE_LENGTH; + case 128: + return MAX_LABELS_KEY_VALUE_BYTES; + case 1000: + return MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST; + case 14: + return SPLIT_TOKEN_MAX_VALID_DAYS; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Values findValueByNumber(int number) { + return Values.forNumber(number); + } + 
}; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (index == -1) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(index); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.storage.v2.ServiceConstants.getDescriptor().getEnumTypes().get(0); + } + + private static final Values[] VALUES = getStaticValuesArray(); + + private static Values[] getStaticValuesArray() { + return new Values[] { + VALUES_UNSPECIFIED, + MAX_READ_CHUNK_BYTES, + MAX_WRITE_CHUNK_BYTES, + MAX_OBJECT_SIZE_MB, + MAX_CUSTOM_METADATA_FIELD_NAME_BYTES, + MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES, + MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES, + MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES, + MAX_NOTIFICATION_CONFIGS_PER_BUCKET, + MAX_LIFECYCLE_RULES_PER_BUCKET, + MAX_NOTIFICATION_CUSTOM_ATTRIBUTES, + MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH, + MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH, + MAX_LABELS_ENTRIES_COUNT, + MAX_LABELS_KEY_VALUE_LENGTH, + MAX_LABELS_KEY_VALUE_BYTES, + MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST, + SPLIT_TOKEN_MAX_VALID_DAYS, + }; + } + + public static Values valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private Values(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.storage.v2.ServiceConstants.Values) + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public 
final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.ServiceConstants)) { + return super.equals(obj); + } + com.google.storage.v2.ServiceConstants other = (com.google.storage.v2.ServiceConstants) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.ServiceConstants parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ServiceConstants parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ServiceConstants parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.storage.v2.ServiceConstants parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ServiceConstants parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.ServiceConstants parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.ServiceConstants parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ServiceConstants parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ServiceConstants parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.ServiceConstants parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.ServiceConstants parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + 
+ public static com.google.storage.v2.ServiceConstants parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.ServiceConstants prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Shared constants.
+   * 
+ * + * Protobuf type {@code google.storage.v2.ServiceConstants} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.ServiceConstants) + com.google.storage.v2.ServiceConstantsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ServiceConstants_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ServiceConstants_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.ServiceConstants.class, + com.google.storage.v2.ServiceConstants.Builder.class); + } + + // Construct using com.google.storage.v2.ServiceConstants.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_ServiceConstants_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.ServiceConstants getDefaultInstanceForType() { + return com.google.storage.v2.ServiceConstants.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.ServiceConstants build() { + com.google.storage.v2.ServiceConstants result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.ServiceConstants buildPartial() { + com.google.storage.v2.ServiceConstants result = + new 
com.google.storage.v2.ServiceConstants(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.ServiceConstants) { + return mergeFrom((com.google.storage.v2.ServiceConstants) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.ServiceConstants other) { + if (other == com.google.storage.v2.ServiceConstants.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.ServiceConstants) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.ServiceConstants) + private static final com.google.storage.v2.ServiceConstants DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.ServiceConstants(); + } + + public static com.google.storage.v2.ServiceConstants getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() 
{ + @java.lang.Override + public ServiceConstants parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.ServiceConstants getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ServiceConstantsOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ServiceConstantsOrBuilder.java new file mode 100644 index 000000000000..19d81ed9d99b --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/ServiceConstantsOrBuilder.java @@ -0,0 +1,27 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface ServiceConstantsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.ServiceConstants) + com.google.protobuf.MessageOrBuilder {} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StartResumableWriteRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StartResumableWriteRequest.java new file mode 100644 index 000000000000..58b1ff06d07e --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StartResumableWriteRequest.java @@ -0,0 +1,1412 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for
+ * [StartResumableWrite][google.storage.v2.Storage.StartResumableWrite].
+ * 
+ * + * Protobuf type {@code google.storage.v2.StartResumableWriteRequest} + */ +@com.google.protobuf.Generated +public final class StartResumableWriteRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.StartResumableWriteRequest) + StartResumableWriteRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "StartResumableWriteRequest"); + } + + // Use StartResumableWriteRequest.newBuilder() to construct. + private StartResumableWriteRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private StartResumableWriteRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_StartResumableWriteRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_StartResumableWriteRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.StartResumableWriteRequest.class, + com.google.storage.v2.StartResumableWriteRequest.Builder.class); + } + + private int bitField0_; + public static final int WRITE_OBJECT_SPEC_FIELD_NUMBER = 1; + private com.google.storage.v2.WriteObjectSpec writeObjectSpec_; + + /** + * + * + *
+   * Required. Contains the information necessary to start a resumable write.
+   * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeObjectSpec field is set. + */ + @java.lang.Override + public boolean hasWriteObjectSpec() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. Contains the information necessary to start a resumable write.
+   * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeObjectSpec. + */ + @java.lang.Override + public com.google.storage.v2.WriteObjectSpec getWriteObjectSpec() { + return writeObjectSpec_ == null + ? com.google.storage.v2.WriteObjectSpec.getDefaultInstance() + : writeObjectSpec_; + } + + /** + * + * + *
+   * Required. Contains the information necessary to start a resumable write.
+   * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.storage.v2.WriteObjectSpecOrBuilder getWriteObjectSpecOrBuilder() { + return writeObjectSpec_ == null + ? com.google.storage.v2.WriteObjectSpec.getDefaultInstance() + : writeObjectSpec_; + } + + public static final int COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER = 3; + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests related to an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + @java.lang.Override + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests related to an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests related to an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + public static final int OBJECT_CHECKSUMS_FIELD_NUMBER = 5; + private com.google.storage.v2.ObjectChecksums objectChecksums_; + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is used to validate
+   * the uploaded object. For each upload, `object_checksums` can be provided
+   * when initiating a resumable upload with `StartResumableWriteRequest` or when
+   * completing a write with `WriteObjectRequest` with
+   * `finish_write` set to `true`.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + @java.lang.Override + public boolean hasObjectChecksums() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is used to validate
+   * the uploaded object. For each upload, `object_checksums` can be provided
+   * when initiating a resumable upload with `StartResumableWriteRequest` or when
+   * completing a write with `WriteObjectRequest` with
+   * `finish_write` set to `true`.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksums getObjectChecksums() { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is used to validate
+   * the uploaded object. For each upload, `object_checksums` can be provided
+   * when initiating a resumable upload with `StartResumableWriteRequest` or when
+   * completing a write with `WriteObjectRequest` with
+   * `finish_write` set to `true`.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder() { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getWriteObjectSpec()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getCommonObjectRequestParams()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(5, getObjectChecksums()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getWriteObjectSpec()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, getCommonObjectRequestParams()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getObjectChecksums()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.StartResumableWriteRequest)) { + return super.equals(obj); + } + 
com.google.storage.v2.StartResumableWriteRequest other = + (com.google.storage.v2.StartResumableWriteRequest) obj; + + if (hasWriteObjectSpec() != other.hasWriteObjectSpec()) return false; + if (hasWriteObjectSpec()) { + if (!getWriteObjectSpec().equals(other.getWriteObjectSpec())) return false; + } + if (hasCommonObjectRequestParams() != other.hasCommonObjectRequestParams()) return false; + if (hasCommonObjectRequestParams()) { + if (!getCommonObjectRequestParams().equals(other.getCommonObjectRequestParams())) + return false; + } + if (hasObjectChecksums() != other.hasObjectChecksums()) return false; + if (hasObjectChecksums()) { + if (!getObjectChecksums().equals(other.getObjectChecksums())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasWriteObjectSpec()) { + hash = (37 * hash) + WRITE_OBJECT_SPEC_FIELD_NUMBER; + hash = (53 * hash) + getWriteObjectSpec().hashCode(); + } + if (hasCommonObjectRequestParams()) { + hash = (37 * hash) + COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getCommonObjectRequestParams().hashCode(); + } + if (hasObjectChecksums()) { + hash = (37 * hash) + OBJECT_CHECKSUMS_FIELD_NUMBER; + hash = (53 * hash) + getObjectChecksums().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.StartResumableWriteRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.StartResumableWriteRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.StartResumableWriteRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.StartResumableWriteRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.StartResumableWriteRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.StartResumableWriteRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.StartResumableWriteRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.StartResumableWriteRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.StartResumableWriteRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.StartResumableWriteRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException 
{ + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.StartResumableWriteRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.StartResumableWriteRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.StartResumableWriteRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for
+   * [StartResumableWrite][google.storage.v2.Storage.StartResumableWrite].
+   * 
+ * + * Protobuf type {@code google.storage.v2.StartResumableWriteRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.StartResumableWriteRequest) + com.google.storage.v2.StartResumableWriteRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_StartResumableWriteRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_StartResumableWriteRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.StartResumableWriteRequest.class, + com.google.storage.v2.StartResumableWriteRequest.Builder.class); + } + + // Construct using com.google.storage.v2.StartResumableWriteRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetWriteObjectSpecFieldBuilder(); + internalGetCommonObjectRequestParamsFieldBuilder(); + internalGetObjectChecksumsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + writeObjectSpec_ = null; + if (writeObjectSpecBuilder_ != null) { + writeObjectSpecBuilder_.dispose(); + writeObjectSpecBuilder_ = null; + } + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + objectChecksums_ = null; + if (objectChecksumsBuilder_ != 
null) { + objectChecksumsBuilder_.dispose(); + objectChecksumsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_StartResumableWriteRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.StartResumableWriteRequest getDefaultInstanceForType() { + return com.google.storage.v2.StartResumableWriteRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.StartResumableWriteRequest build() { + com.google.storage.v2.StartResumableWriteRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.StartResumableWriteRequest buildPartial() { + com.google.storage.v2.StartResumableWriteRequest result = + new com.google.storage.v2.StartResumableWriteRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.StartResumableWriteRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.writeObjectSpec_ = + writeObjectSpecBuilder_ == null ? writeObjectSpec_ : writeObjectSpecBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.commonObjectRequestParams_ = + commonObjectRequestParamsBuilder_ == null + ? commonObjectRequestParams_ + : commonObjectRequestParamsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.objectChecksums_ = + objectChecksumsBuilder_ == null ? 
objectChecksums_ : objectChecksumsBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.StartResumableWriteRequest) { + return mergeFrom((com.google.storage.v2.StartResumableWriteRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.StartResumableWriteRequest other) { + if (other == com.google.storage.v2.StartResumableWriteRequest.getDefaultInstance()) + return this; + if (other.hasWriteObjectSpec()) { + mergeWriteObjectSpec(other.getWriteObjectSpec()); + } + if (other.hasCommonObjectRequestParams()) { + mergeCommonObjectRequestParams(other.getCommonObjectRequestParams()); + } + if (other.hasObjectChecksums()) { + mergeObjectChecksums(other.getObjectChecksums()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetWriteObjectSpecFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 26: + { + input.readMessage( + internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 26 + case 42: + { + input.readMessage( + internalGetObjectChecksumsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } 
// case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.v2.WriteObjectSpec writeObjectSpec_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.WriteObjectSpec, + com.google.storage.v2.WriteObjectSpec.Builder, + com.google.storage.v2.WriteObjectSpecOrBuilder> + writeObjectSpecBuilder_; + + /** + * + * + *
+     * Required. Contains the information necessary to start a resumable write.
+     * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeObjectSpec field is set. + */ + public boolean hasWriteObjectSpec() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. Contains the information necessary to start a resumable write.
+     * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeObjectSpec. + */ + public com.google.storage.v2.WriteObjectSpec getWriteObjectSpec() { + if (writeObjectSpecBuilder_ == null) { + return writeObjectSpec_ == null + ? com.google.storage.v2.WriteObjectSpec.getDefaultInstance() + : writeObjectSpec_; + } else { + return writeObjectSpecBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. Contains the information necessary to start a resumable write.
+     * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setWriteObjectSpec(com.google.storage.v2.WriteObjectSpec value) { + if (writeObjectSpecBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writeObjectSpec_ = value; + } else { + writeObjectSpecBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Contains the information necessary to start a resumable write.
+     * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setWriteObjectSpec( + com.google.storage.v2.WriteObjectSpec.Builder builderForValue) { + if (writeObjectSpecBuilder_ == null) { + writeObjectSpec_ = builderForValue.build(); + } else { + writeObjectSpecBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Contains the information necessary to start a resumable write.
+     * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeWriteObjectSpec(com.google.storage.v2.WriteObjectSpec value) { + if (writeObjectSpecBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && writeObjectSpec_ != null + && writeObjectSpec_ != com.google.storage.v2.WriteObjectSpec.getDefaultInstance()) { + getWriteObjectSpecBuilder().mergeFrom(value); + } else { + writeObjectSpec_ = value; + } + } else { + writeObjectSpecBuilder_.mergeFrom(value); + } + if (writeObjectSpec_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. Contains the information necessary to start a resumable write.
+     * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearWriteObjectSpec() { + bitField0_ = (bitField0_ & ~0x00000001); + writeObjectSpec_ = null; + if (writeObjectSpecBuilder_ != null) { + writeObjectSpecBuilder_.dispose(); + writeObjectSpecBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Contains the information necessary to start a resumable write.
+     * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.v2.WriteObjectSpec.Builder getWriteObjectSpecBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetWriteObjectSpecFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. Contains the information necessary to start a resumable write.
+     * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.v2.WriteObjectSpecOrBuilder getWriteObjectSpecOrBuilder() { + if (writeObjectSpecBuilder_ != null) { + return writeObjectSpecBuilder_.getMessageOrBuilder(); + } else { + return writeObjectSpec_ == null + ? com.google.storage.v2.WriteObjectSpec.getDefaultInstance() + : writeObjectSpec_; + } + } + + /** + * + * + *
+     * Required. Contains the information necessary to start a resumable write.
+     * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.WriteObjectSpec, + com.google.storage.v2.WriteObjectSpec.Builder, + com.google.storage.v2.WriteObjectSpecOrBuilder> + internalGetWriteObjectSpecFieldBuilder() { + if (writeObjectSpecBuilder_ == null) { + writeObjectSpecBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.WriteObjectSpec, + com.google.storage.v2.WriteObjectSpec.Builder, + com.google.storage.v2.WriteObjectSpecOrBuilder>( + getWriteObjectSpec(), getParentForChildren(), isClean()); + writeObjectSpec_ = null; + } + return writeObjectSpecBuilder_; + } + + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + commonObjectRequestParamsBuilder_; + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests related to an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests related to an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + if (commonObjectRequestParamsBuilder_ == null) { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } else { + return commonObjectRequestParamsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests related to an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonObjectRequestParams_ = value; + } else { + commonObjectRequestParamsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests related to an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams.Builder builderForValue) { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParams_ = builderForValue.build(); + } else { + commonObjectRequestParamsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests related to an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && commonObjectRequestParams_ != null + && commonObjectRequestParams_ + != com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance()) { + getCommonObjectRequestParamsBuilder().mergeFrom(value); + } else { + commonObjectRequestParams_ = value; + } + } else { + commonObjectRequestParamsBuilder_.mergeFrom(value); + } + if (commonObjectRequestParams_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests related to an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCommonObjectRequestParams() { + bitField0_ = (bitField0_ & ~0x00000002); + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests related to an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParams.Builder + getCommonObjectRequestParamsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests related to an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + if (commonObjectRequestParamsBuilder_ != null) { + return commonObjectRequestParamsBuilder_.getMessageOrBuilder(); + } else { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests related to an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + internalGetCommonObjectRequestParamsFieldBuilder() { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParamsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder>( + getCommonObjectRequestParams(), getParentForChildren(), isClean()); + commonObjectRequestParams_ = null; + } + return commonObjectRequestParamsBuilder_; + } + + private com.google.storage.v2.ObjectChecksums objectChecksums_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + objectChecksumsBuilder_; + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the uploaded object. For each upload, `object_checksums` can be provided
+     * when initiating a resumable upload with `StartResumableWriteRequest` or when
+     * completing a write with `WriteObjectRequest` with
+     * `finish_write` set to `true`.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + public boolean hasObjectChecksums() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the uploaded object. For each upload, `object_checksums` can be provided
+     * when initiating a resumable upload with `StartResumableWriteRequest` or when
+     * completing a write with `WriteObjectRequest` with
+     * `finish_write` set to `true`.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + public com.google.storage.v2.ObjectChecksums getObjectChecksums() { + if (objectChecksumsBuilder_ == null) { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } else { + return objectChecksumsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the uploaded object. For each upload, `object_checksums` can be provided
+     * when initiating a resumable upload with `StartResumableWriteRequest` or when
+     * completing a write with `WriteObjectRequest` with
+     * `finish_write` set to `true`.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectChecksums(com.google.storage.v2.ObjectChecksums value) { + if (objectChecksumsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + objectChecksums_ = value; + } else { + objectChecksumsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the uploaded object. For each upload, `object_checksums` can be provided
+     * when initiating a resumable upload with `StartResumableWriteRequest` or when
+     * completing a write with `WriteObjectRequest` with
+     * `finish_write` set to `true`.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectChecksums( + com.google.storage.v2.ObjectChecksums.Builder builderForValue) { + if (objectChecksumsBuilder_ == null) { + objectChecksums_ = builderForValue.build(); + } else { + objectChecksumsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the uploaded object. For each upload, `object_checksums` can be provided
+     * when initiating a resumable upload with `StartResumableWriteRequest` or when
+     * completing a write with `WriteObjectRequest` with
+     * `finish_write` set to `true`.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeObjectChecksums(com.google.storage.v2.ObjectChecksums value) { + if (objectChecksumsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && objectChecksums_ != null + && objectChecksums_ != com.google.storage.v2.ObjectChecksums.getDefaultInstance()) { + getObjectChecksumsBuilder().mergeFrom(value); + } else { + objectChecksums_ = value; + } + } else { + objectChecksumsBuilder_.mergeFrom(value); + } + if (objectChecksums_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the uploaded object. For each upload, `object_checksums` can be provided
+     * when initiating a resumable upload with `StartResumableWriteRequest` or when
+     * completing a write with `WriteObjectRequest` with
+     * `finish_write` set to `true`.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearObjectChecksums() { + bitField0_ = (bitField0_ & ~0x00000004); + objectChecksums_ = null; + if (objectChecksumsBuilder_ != null) { + objectChecksumsBuilder_.dispose(); + objectChecksumsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the uploaded object. For each upload, `object_checksums` can be provided
+     * when initiating a resumable upload with `StartResumableWriteRequest` or when
+     * completing a write with `WriteObjectRequest` with
+     * `finish_write` set to `true`.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectChecksums.Builder getObjectChecksumsBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetObjectChecksumsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the uploaded object. For each upload, `object_checksums` can be provided
+     * when initiating a resumable upload with `StartResumableWriteRequest` or when
+     * completing a write with `WriteObjectRequest` with
+     * `finish_write` set to `true`.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder() { + if (objectChecksumsBuilder_ != null) { + return objectChecksumsBuilder_.getMessageOrBuilder(); + } else { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + } + + /** + * + * + *
+     * Optional. The checksums of the complete object. This is used to validate
+     * the uploaded object. For each upload, `object_checksums` can be provided
+     * when initiating a resumable upload with `StartResumableWriteRequest` or when
+     * completing a write with `WriteObjectRequest` with
+     * `finish_write` set to `true`.
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + internalGetObjectChecksumsFieldBuilder() { + if (objectChecksumsBuilder_ == null) { + objectChecksumsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder>( + getObjectChecksums(), getParentForChildren(), isClean()); + objectChecksums_ = null; + } + return objectChecksumsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.StartResumableWriteRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.StartResumableWriteRequest) + private static final com.google.storage.v2.StartResumableWriteRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.StartResumableWriteRequest(); + } + + public static com.google.storage.v2.StartResumableWriteRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StartResumableWriteRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw 
new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.StartResumableWriteRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StartResumableWriteRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StartResumableWriteRequestOrBuilder.java new file mode 100644 index 000000000000..300dec75c447 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StartResumableWriteRequestOrBuilder.java @@ -0,0 +1,172 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface StartResumableWriteRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.StartResumableWriteRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Contains the information necessary to start a resumable write.
+   * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeObjectSpec field is set. + */ + boolean hasWriteObjectSpec(); + + /** + * + * + *
+   * Required. Contains the information necessary to start a resumable write.
+   * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeObjectSpec. + */ + com.google.storage.v2.WriteObjectSpec getWriteObjectSpec(); + + /** + * + * + *
+   * Required. Contains the information necessary to start a resumable write.
+   * 
+ * + * + * .google.storage.v2.WriteObjectSpec write_object_spec = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.storage.v2.WriteObjectSpecOrBuilder getWriteObjectSpecOrBuilder(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests related to an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + boolean hasCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests related to an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests related to an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.CommonObjectRequestParamsOrBuilder getCommonObjectRequestParamsOrBuilder(); + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is used to validate
+   * the uploaded object. For each upload, `object_checksums` can be provided
+   * when initiating a resumable upload with`StartResumableWriteRequest` or when
+   * completing a write with `WriteObjectRequest` with
+   * `finish_write` set to `true`.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + boolean hasObjectChecksums(); + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is used to validate
+   * the uploaded object. For each upload, `object_checksums` can be provided
+   * when initiating a resumable upload with`StartResumableWriteRequest` or when
+   * completing a write with `WriteObjectRequest` with
+   * `finish_write` set to `true`.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + com.google.storage.v2.ObjectChecksums getObjectChecksums(); + + /** + * + * + *
+   * Optional. The checksums of the complete object. This is used to validate
+   * the uploaded object. For each upload, `object_checksums` can be provided
+   * when initiating a resumable upload with`StartResumableWriteRequest` or when
+   * completing a write with `WriteObjectRequest` with
+   * `finish_write` set to `true`.
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StartResumableWriteResponse.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StartResumableWriteResponse.java new file mode 100644 index 000000000000..0a5c09d53e8b --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StartResumableWriteResponse.java @@ -0,0 +1,626 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Response object for
+ * [StartResumableWrite][google.storage.v2.Storage.StartResumableWrite].
+ * 
+ * + * Protobuf type {@code google.storage.v2.StartResumableWriteResponse} + */ +@com.google.protobuf.Generated +public final class StartResumableWriteResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.StartResumableWriteResponse) + StartResumableWriteResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "StartResumableWriteResponse"); + } + + // Use StartResumableWriteResponse.newBuilder() to construct. + private StartResumableWriteResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private StartResumableWriteResponse() { + uploadId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_StartResumableWriteResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_StartResumableWriteResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.StartResumableWriteResponse.class, + com.google.storage.v2.StartResumableWriteResponse.Builder.class); + } + + public static final int UPLOAD_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object uploadId_ = ""; + + /** + * + * + *
+   * A unique identifier for the initiated resumable write operation.
+   * As the ID grants write access, you should keep it confidential during
+   * the upload to prevent unauthorized access and data tampering during your
+   * upload. This ID should be included in subsequent `WriteObject` requests to
+   * upload the object data.
+   * 
+ * + * string upload_id = 1; + * + * @return The uploadId. + */ + @java.lang.Override + public java.lang.String getUploadId() { + java.lang.Object ref = uploadId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + uploadId_ = s; + return s; + } + } + + /** + * + * + *
+   * A unique identifier for the initiated resumable write operation.
+   * As the ID grants write access, you should keep it confidential during
+   * the upload to prevent unauthorized access and data tampering during your
+   * upload. This ID should be included in subsequent `WriteObject` requests to
+   * upload the object data.
+   * 
+ * + * string upload_id = 1; + * + * @return The bytes for uploadId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getUploadIdBytes() { + java.lang.Object ref = uploadId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + uploadId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(uploadId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, uploadId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(uploadId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, uploadId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.StartResumableWriteResponse)) { + return super.equals(obj); + } + com.google.storage.v2.StartResumableWriteResponse other = + (com.google.storage.v2.StartResumableWriteResponse) obj; + + if (!getUploadId().equals(other.getUploadId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) 
{ + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + UPLOAD_ID_FIELD_NUMBER; + hash = (53 * hash) + getUploadId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.StartResumableWriteResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.StartResumableWriteResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.StartResumableWriteResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.StartResumableWriteResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.StartResumableWriteResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.StartResumableWriteResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.StartResumableWriteResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + 
public static com.google.storage.v2.StartResumableWriteResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.StartResumableWriteResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.StartResumableWriteResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.StartResumableWriteResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.StartResumableWriteResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.StartResumableWriteResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response object for
+   * [StartResumableWrite][google.storage.v2.Storage.StartResumableWrite].
+   * 
+ * + * Protobuf type {@code google.storage.v2.StartResumableWriteResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.StartResumableWriteResponse) + com.google.storage.v2.StartResumableWriteResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_StartResumableWriteResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_StartResumableWriteResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.StartResumableWriteResponse.class, + com.google.storage.v2.StartResumableWriteResponse.Builder.class); + } + + // Construct using com.google.storage.v2.StartResumableWriteResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + uploadId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_StartResumableWriteResponse_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.StartResumableWriteResponse getDefaultInstanceForType() { + return com.google.storage.v2.StartResumableWriteResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.StartResumableWriteResponse build() { + com.google.storage.v2.StartResumableWriteResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return 
result; + } + + @java.lang.Override + public com.google.storage.v2.StartResumableWriteResponse buildPartial() { + com.google.storage.v2.StartResumableWriteResponse result = + new com.google.storage.v2.StartResumableWriteResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.StartResumableWriteResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.uploadId_ = uploadId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.StartResumableWriteResponse) { + return mergeFrom((com.google.storage.v2.StartResumableWriteResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.StartResumableWriteResponse other) { + if (other == com.google.storage.v2.StartResumableWriteResponse.getDefaultInstance()) + return this; + if (!other.getUploadId().isEmpty()) { + uploadId_ = other.uploadId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + uploadId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch 
(tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object uploadId_ = ""; + + /** + * + * + *
+     * A unique identifier for the initiated resumable write operation.
+     * As the ID grants write access, you should keep it confidential during
+     * the upload to prevent unauthorized access and data tampering during your
+     * upload. This ID should be included in subsequent `WriteObject` requests to
+     * upload the object data.
+     * 
+ * + * string upload_id = 1; + * + * @return The uploadId. + */ + public java.lang.String getUploadId() { + java.lang.Object ref = uploadId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + uploadId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * A unique identifier for the initiated resumable write operation.
+     * As the ID grants write access, you should keep it confidential during
+     * the upload to prevent unauthorized access and data tampering during your
+     * upload. This ID should be included in subsequent `WriteObject` requests to
+     * upload the object data.
+     * 
+ * + * string upload_id = 1; + * + * @return The bytes for uploadId. + */ + public com.google.protobuf.ByteString getUploadIdBytes() { + java.lang.Object ref = uploadId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + uploadId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * A unique identifier for the initiated resumable write operation.
+     * As the ID grants write access, you should keep it confidential during
+     * the upload to prevent unauthorized access and data tampering during your
+     * upload. This ID should be included in subsequent `WriteObject` requests to
+     * upload the object data.
+     * 
+ * + * string upload_id = 1; + * + * @param value The uploadId to set. + * @return This builder for chaining. + */ + public Builder setUploadId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + uploadId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * A unique identifier for the initiated resumable write operation.
+     * As the ID grants write access, you should keep it confidential during
+     * the upload to prevent unauthorized access and data tampering during your
+     * upload. This ID should be included in subsequent `WriteObject` requests to
+     * upload the object data.
+     * 
+ * + * string upload_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearUploadId() { + uploadId_ = getDefaultInstance().getUploadId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * A unique identifier for the initiated resumable write operation.
+     * As the ID grants write access, you should keep it confidential during
+     * the upload to prevent unauthorized access and data tampering during your
+     * upload. This ID should be included in subsequent `WriteObject` requests to
+     * upload the object data.
+     * 
+ * + * string upload_id = 1; + * + * @param value The bytes for uploadId to set. + * @return This builder for chaining. + */ + public Builder setUploadIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + uploadId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.StartResumableWriteResponse) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.StartResumableWriteResponse) + private static final com.google.storage.v2.StartResumableWriteResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.StartResumableWriteResponse(); + } + + public static com.google.storage.v2.StartResumableWriteResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StartResumableWriteResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + 
public com.google.storage.v2.StartResumableWriteResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StartResumableWriteResponseOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StartResumableWriteResponseOrBuilder.java new file mode 100644 index 000000000000..8c5863ed53cf --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StartResumableWriteResponseOrBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface StartResumableWriteResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.StartResumableWriteResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * A unique identifier for the initiated resumable write operation.
+   * As the ID grants write access, you should keep it confidential during
+   * the upload to prevent unauthorized access and data tampering during your
+   * upload. This ID should be included in subsequent `WriteObject` requests to
+   * upload the object data.
+   * 
+ * + * string upload_id = 1; + * + * @return The uploadId. + */ + java.lang.String getUploadId(); + + /** + * + * + *
+   * A unique identifier for the initiated resumable write operation.
+   * As the ID grants write access, you should keep it confidential during
+   * the upload to prevent unauthorized access and data tampering during your
+   * upload. This ID should be included in subsequent `WriteObject` requests to
+   * upload the object data.
+   * 
+ * + * string upload_id = 1; + * + * @return The bytes for uploadId. + */ + com.google.protobuf.ByteString getUploadIdBytes(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StorageProto.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StorageProto.java new file mode 100644 index 000000000000..161be5a97e40 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/StorageProto.java @@ -0,0 +1,2215 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public final class StorageProto extends com.google.protobuf.GeneratedFile { + private StorageProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "StorageProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_DeleteBucketRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_DeleteBucketRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_GetBucketRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_GetBucketRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_CreateBucketRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_CreateBucketRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ListBucketsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ListBucketsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_storage_v2_ListBucketsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ListBucketsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_LockBucketRetentionPolicyRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_LockBucketRetentionPolicyRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_UpdateBucketRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_UpdateBucketRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ComposeObjectRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ComposeObjectRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_ObjectPreconditions_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_ObjectPreconditions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_DeleteObjectRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_DeleteObjectRequest_fieldAccessorTable; + static final 
com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_RestoreObjectRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_RestoreObjectRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_CancelResumableWriteRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_CancelResumableWriteRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_CancelResumableWriteResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_CancelResumableWriteResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ReadObjectRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ReadObjectRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_GetObjectRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_GetObjectRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ReadObjectResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ReadObjectResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_BidiReadObjectSpec_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_BidiReadObjectSpec_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_storage_v2_BidiReadObjectRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_BidiReadObjectRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_BidiReadObjectResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_BidiReadObjectResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_BidiReadObjectRedirectedError_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_BidiReadObjectRedirectedError_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_BidiWriteObjectRedirectedError_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_BidiWriteObjectRedirectedError_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_BidiReadObjectError_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_BidiReadObjectError_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ReadRangeError_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ReadRangeError_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ReadRange_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ReadRange_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ObjectRangeData_descriptor; + 
static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ObjectRangeData_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_BidiReadHandle_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_BidiReadHandle_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_BidiWriteHandle_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_BidiWriteHandle_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_WriteObjectSpec_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_WriteObjectSpec_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_WriteObjectRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_WriteObjectRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_WriteObjectResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_WriteObjectResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_AppendObjectSpec_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_AppendObjectSpec_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_BidiWriteObjectRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_storage_v2_BidiWriteObjectRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_BidiWriteObjectResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_BidiWriteObjectResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ListObjectsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ListObjectsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_QueryWriteStatusRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_QueryWriteStatusRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_QueryWriteStatusResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_QueryWriteStatusResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_RewriteObjectRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_RewriteObjectRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_RewriteResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_RewriteResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_MoveObjectRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_storage_v2_MoveObjectRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_StartResumableWriteRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_StartResumableWriteRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_StartResumableWriteResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_StartResumableWriteResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_UpdateObjectRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_UpdateObjectRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_CommonObjectRequestParams_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_CommonObjectRequestParams_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ServiceConstants_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ServiceConstants_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Billing_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Billing_fieldAccessorTable; + static final 
com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Cors_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Cors_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Encryption_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Encryption_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Encryption_GoogleManagedEncryptionEnforcementConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Encryption_GoogleManagedEncryptionEnforcementConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Encryption_CustomerManagedEncryptionEnforcementConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Encryption_CustomerManagedEncryptionEnforcementConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Encryption_CustomerSuppliedEncryptionEnforcementConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Encryption_CustomerSuppliedEncryptionEnforcementConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_IamConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_IamConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_IamConfig_UniformBucketLevelAccess_descriptor; + static 
final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_IamConfig_UniformBucketLevelAccess_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Lifecycle_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Lifecycle_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Action_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Action_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Condition_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Condition_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Logging_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Logging_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_ObjectRetention_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_ObjectRetention_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_RetentionPolicy_descriptor; + static final 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_RetentionPolicy_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_SoftDeletePolicy_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_SoftDeletePolicy_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Versioning_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Versioning_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Website_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Website_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_CustomPlacementConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_CustomPlacementConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_Autoclass_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_Autoclass_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_IpFilter_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_IpFilter_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_IpFilter_PublicNetworkSource_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_storage_v2_Bucket_IpFilter_PublicNetworkSource_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_IpFilter_VpcNetworkSource_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_IpFilter_VpcNetworkSource_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_HierarchicalNamespace_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_HierarchicalNamespace_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Bucket_LabelsEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Bucket_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_BucketAccessControl_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_BucketAccessControl_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ChecksummedData_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ChecksummedData_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ObjectChecksums_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ObjectChecksums_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ObjectCustomContextPayload_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_storage_v2_ObjectCustomContextPayload_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ObjectContexts_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ObjectContexts_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ObjectContexts_CustomEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ObjectContexts_CustomEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_CustomerEncryption_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_CustomerEncryption_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Object_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Object_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Object_Retention_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Object_Retention_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Object_MetadataEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Object_MetadataEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ObjectAccessControl_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ObjectAccessControl_fieldAccessorTable; + static final 
com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ListObjectsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ListObjectsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ProjectTeam_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ProjectTeam_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_Owner_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_Owner_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_storage_v2_ContentRange_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_storage_v2_ContentRange_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "\037google/storage/v2/storage.proto\022\021googl" + + "e.storage.v2\032\027google/api/client.proto\032\037g" + + "oogle/api/field_behavior.proto\032\031google/a" + + "pi/resource.proto\032\030google/api/routing.pr" + + "oto\032\036google/iam/v1/iam_policy.proto\032\032goo" + + "gle/iam/v1/policy.proto\032\036google/protobuf" + + "/duration.proto\032\033google/protobuf/empty.proto\032" + + " google/protobuf/field_mask.proto\032\037" + + "google/protobuf/timestamp.proto\032\027google/" + + "rpc/status.proto\032\026google/type/date.proto\"\326\001\n" + + "\023DeleteBucketRequest\0223\n" + + "\004name\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Bucket\022$\n" + + 
"\027if_metageneration_match\030\002 \001(\003H\000\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\003 \001(\003H\001\210\001\001B\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_match\"\225\002\n" + + "\020GetBucketRequest\0223\n" + + "\004name\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Bucket\022$\n" + + "\027if_metageneration_match\030\002 \001(\003H\000\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\003 \001(\003H\001\210\001\001\0222\n" + + "\tread_mask\030\005 \001(\0132\032.google.protobuf.FieldMaskH\002\210\001\001B\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_matchB\014\n\n" + + "_read_mask\"\203\002\n" + + "\023CreateBucketRequest\0225\n" + + "\006parent\030\001 \001(" + + "\tB%\340A\002\372A\037\022\035storage.googleapis.com/Bucket\022.\n" + + "\006bucket\030\002 \001(\0132\031.google.storage.v2.BucketB\003\340A\001\022\026\n" + + "\tbucket_id\030\003 \001(\tB\003\340A\002\022\033\n" + + "\016predefined_acl\030\006 \001(\tB\003\340A\001\022*\n" + + "\035predefined_default_object_acl\030\007 \001(\tB\003\340A\001\022$\n" + + "\027enable_object_retention\030\t \001(\010B\003\340A\001\"\370\001\n" + + "\022ListBucketsRequest\0225\n" + + "\006parent\030\001 \001(" + + "\tB%\340A\002\372A\037\022\035storage.googleapis.com/Bucket\022\026\n" + + "\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\n" + + "page_token\030\003 \001(\tB\003\340A\001\022\023\n" + + "\006prefix\030\004 \001(\tB\003\340A\001\0222\n" + + "\tread_mask\030\005 \001(\0132\032.google.protobuf.FieldMaskH\000\210\001\001\022#\n" + + "\026return_partial_success\030\t \001(\010B\003\340A\001B\014\n\n" + + "_read_mask\"o\n" + + "\023ListBucketsResponse\022*\n" + + "\007buckets\030\001 \003(\0132\031.google.storage.v2.Bucket\022\027\n" + + "\017next_page_token\030\002 \001(\t\022\023\n" + + "\013unreachable\030\003 \003(\t\"\177\n" + + " 
LockBucketRetentionPolicyRequest\0225\n" + + "\006bucket\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Bucket\022$\n" + + "\027if_metageneration_match\030\002 \001(\003B\003\340A\002\"\320\002\n" + + "\023UpdateBucketRequest\022.\n" + + "\006bucket\030\001 \001(\0132\031.google.storage.v2.BucketB\003\340A\002\022$\n" + + "\027if_metageneration_match\030\002 \001(\003H\000\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\003 \001(\003H\001\210\001\001\022\033\n" + + "\016predefined_acl\030\010 \001(\tB\003\340A\001\022*\n" + + "\035predefined_default_object_acl\030\t \001(\tB\003\340A\001\0224\n" + + "\013update_mask\030\006 \001(\0132\032.google.protobuf.FieldMaskB\003\340A\002B\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_match\"\324\006\n" + + "\024ComposeObjectRequest\0223\n" + + "\013destination\030\001 \001(\0132\031.google.storage.v2.ObjectB\003\340A\002\022Q\n" + + "\016source_objects\030\002" + + " \003(\01324.google.storage.v2.ComposeObjectRequest.SourceObjectB\003\340A\001\022\'\n" + + "\032destination_predefined_acl\030\t \001(\tB\003\340A\001\022 \n" + + "\023if_generation_match\030\004 \001(\003H\000\210\001\001\022$\n" + + "\027if_metageneration_match\030\005 \001(\003H\001\210\001\001\022:\n" + + "\007kms_key\030\006 \001(\tB)\340A\001\372A#\n" + + "!cloudkms.googleapis.com/CryptoKey\022W\n" + + "\034common_object_request_params\030\007 " + + "\001(\0132,.google.storage.v2.CommonObjectRequestParamsB\003\340A\001\022A\n" + + "\020object_checksums\030\n" + + " \001(\0132\".google.storage.v2.ObjectChecksumsB\003\340A\001\022\"\n" + + "\025delete_source_objects\030\013 \001(\010H\002\210\001\001\032\370\001\n" + + "\014SourceObject\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\002\022\027\n\n" + + "generation\030\002 \001(\003B\003\340A\001\022k\n" + + "\024object_preconditions\030\003 \001(\0132H.google.storage.v2.ComposeObje" + + 
"ctRequest.SourceObject.ObjectPreconditionsB\003\340A\001\032O\n" + + "\023ObjectPreconditions\022 \n" + + "\023if_generation_match\030\001 \001(\003H\000\210\001\001B\026\n" + + "\024_if_generation_matchB\026\n" + + "\024_if_generation_matchB\032\n" + + "\030_if_metageneration_matchB\030\n" + + "\026_delete_source_objects\"\333\003\n" + + "\023DeleteObjectRequest\0225\n" + + "\006bucket\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Bucket\022\023\n" + + "\006object\030\002 \001(\tB\003\340A\002\022\027\n\n" + + "generation\030\004 \001(\003B\003\340A\001\022 \n" + + "\023if_generation_match\030\005 \001(\003H\000\210\001\001\022$\n" + + "\027if_generation_not_match\030\006 \001(\003H\001\210\001\001\022$\n" + + "\027if_metageneration_match\030\007 \001(\003H\002\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\010 \001(\003H\003\210\001\001\022W\n" + + "\034common_object_request_params\030\n" + + " \001(\0132,.google.storage.v2.CommonObjectRequestParamsB\003\340A\001B\026\n" + + "\024_if_generation_matchB\032\n" + + "\030_if_generation_not_matchB\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_match\"\252\004\n" + + "\024RestoreObjectRequest\0225\n" + + "\006bucket\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Bucket\022\023\n" + + "\006object\030\002 \001(\tB\003\340A\002\022\027\n\n" + + "generation\030\003 \001(\003B\003\340A\002\022\032\n\r" + + "restore_token\030\013 \001(\tB\003\340A\001\022 \n" + + "\023if_generation_match\030\004 \001(\003H\000\210\001\001\022$\n" + + "\027if_generation_not_match\030\005 \001(\003H\001\210\001\001\022$\n" + + "\027if_metageneration_match\030\006 \001(\003H\002\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\007 \001(\003H\003\210\001\001\022\034\n" + + "\017copy_source_acl\030\t \001(\010H\004\210\001\001\022W\n" + + "\034common_object_request_params\030\010" + + " 
\001(\0132,.google.storage.v2.CommonObjectRequestParamsB\003\340A\001B\026\n" + + "\024_if_generation_matchB\032\n" + + "\030_if_generation_not_matchB\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_matchB\022\n" + + "\020_copy_source_acl\"5\n" + + "\033CancelResumableWriteRequest\022\026\n" + + "\tupload_id\030\001 \001(\tB\003\340A\002\"\036\n" + + "\034CancelResumableWriteResponse\"\316\004\n" + + "\021ReadObjectRequest\0225\n" + + "\006bucket\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Bucket\022\023\n" + + "\006object\030\002 \001(\tB\003\340A\002\022\027\n\n" + + "generation\030\003 \001(\003B\003\340A\001\022\030\n" + + "\013read_offset\030\004 \001(\003B\003\340A\001\022\027\n" + + "\n" + + "read_limit\030\005 \001(\003B\003\340A\001\022 \n" + + "\023if_generation_match\030\006 \001(\003H\000\210\001\001\022$\n" + + "\027if_generation_not_match\030\007 \001(\003H\001\210\001\001\022$\n" + + "\027if_metageneration_match\030\010 \001(\003H\002\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\t \001(\003H\003\210\001\001\022W\n" + + "\034common_object_request_params\030\n" + + " \001(\0132,.google.storage.v2.CommonObjectRequestParamsB\003\340A\001\0222\n" + + "\tread_mask\030\014 \001(\0132\032.google.protobuf.FieldMaskH\004\210\001\001B\026\n" + + "\024_if_generation_matchB\032\n" + + "\030_if_generation_not_matchB\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_matchB\014\n\n" + + "_read_mask\"\342\004\n" + + "\020GetObjectRequest\0225\n" + + "\006bucket\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Bucket\022\023\n" + + "\006object\030\002 \001(\tB\003\340A\002\022\027\n\n" + + "generation\030\003 \001(\003B\003\340A\001\022\031\n" + + "\014soft_deleted\030\013 \001(\010H\000\210\001\001\022 \n" + + "\023if_generation_match\030\004 \001(\003H\001\210\001\001\022$\n" + + "\027if_generation_not_match\030\005 
\001(\003H\002\210\001\001\022$\n" + + "\027if_metageneration_match\030\006 \001(\003H\003\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\007 \001(\003H\004\210\001\001\022W\n" + + "\034common_object_request_params\030\010" + + " \001(\0132,.google.storage.v2.CommonObjectRequestParamsB\003\340A\001\0222\n" + + "\tread_mask\030\n" + + " \001(\0132\032.google.protobuf.FieldMaskH\005\210\001\001\022\032\n\r" + + "restore_token\030\014 \001(\tB\003\340A\001B\017\n\r" + + "_soft_deletedB\026\n" + + "\024_if_generation_matchB\032\n" + + "\030_if_generation_not_matchB\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_matchB\014\n\n" + + "_read_mask\"\365\001\n" + + "\022ReadObjectResponse\022<\n" + + "\020checksummed_data\030\001 \001(\0132\".google.storage.v2.ChecksummedData\022<\n" + + "\020object_checksums\030\002 \001(\0132\".google.storage.v2.ObjectChecksums\0226\n\r" + + "content_range\030\003 \001(\0132\037.google.storage.v2.ContentRange\022+\n" + + "\010metadata\030\004 \001(\0132\031.google.storage.v2.Object\"\233\005\n" + + "\022BidiReadObjectSpec\0225\n" + + "\006bucket\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Bucket\022\023\n" + + "\006object\030\002 \001(\tB\003\340A\002\022\027\n\n" + + "generation\030\003 \001(\003B\003\340A\001\022 \n" + + "\023if_generation_match\030\004 \001(\003H\000\210\001\001\022$\n" + + "\027if_generation_not_match\030\005 \001(\003H\001\210\001\001\022$\n" + + "\027if_metageneration_match\030\006 \001(\003H\002\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\007 \001(\003H\003\210\001\001\022W\n" + + "\034common_object_request_params\030\010" + + " \001(\0132,.google.storage.v2.CommonObjectRequestParamsB\003\340A\001\0226\n" + + "\tread_mask\030\014" + + " \001(\0132\032.google.protobuf.FieldMaskB\002\030\001H\004\210\001\001\022;\n" + + "\013read_handle\030\r" + + " \001(\0132!.google.storage.v2.BidiReadHandleH\005\210\001\001\022\032\n\r" + + 
"routing_token\030\016 \001(\tH\006\210\001\001B\026\n" + + "\024_if_generation_matchB\032\n" + + "\030_if_generation_not_matchB\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_matchB\014\n\n" + + "_read_maskB\016\n" + + "\014_read_handleB\020\n" + + "\016_routing_token\"\225\001\n" + + "\025BidiReadObjectRequest\022D\n" + + "\020read_object_spec\030\001" + + " \001(\0132%.google.storage.v2.BidiReadObjectSpecB\003\340A\001\0226\n" + + "\013read_ranges\030\010" + + " \003(\0132\034.google.storage.v2.ReadRangeB\003\340A\001\"\275\001\n" + + "\026BidiReadObjectResponse\022>\n" + + "\022object_data_ranges\030\006 \003(\0132\".google.storage.v2.ObjectRangeData\022+\n" + + "\010metadata\030\004 \001(\0132\031.google.storage.v2.Object\0226\n" + + "\013read_handle\030\007 \001(\0132!.google.storage.v2.BidiReadHandle\"\205\001\n" + + "\035BidiReadObjectRedirectedError\0226\n" + + "\013read_handle\030\001 \001(\0132!.google.storage.v2.BidiReadHandle\022\032\n\r" + + "routing_token\030\002 \001(\tH\000\210\001\001B\020\n" + + "\016_routing_token\"\306\001\n" + + "\036BidiWriteObjectRedirectedError\022\032\n\r" + + "routing_token\030\001 \001(\tH\000\210\001\001\022=\n" + + "\014write_handle\030\002" + + " \001(\0132\".google.storage.v2.BidiWriteHandleH\001\210\001\001\022\027\n\n" + + "generation\030\003 \001(\003H\002\210\001\001B\020\n" + + "\016_routing_tokenB\017\n\r" + + "_write_handleB\r" + + "\n" + + "\013_generation\"S\n" + + "\023BidiReadObjectError\022<\n" + + "\021read_range_errors\030\001 \003(\0132!.google.storage.v2.ReadRangeError\"E\n" + + "\016ReadRangeError\022\017\n" + + "\007read_id\030\001 \001(\003\022\"\n" + + "\006status\030\002 \001(\0132\022.google.rpc.Status\"U\n" + + "\tReadRange\022\030\n" + + "\013read_offset\030\001 \001(\003B\003\340A\002\022\030\n" + + "\013read_length\030\002 \001(\003B\003\340A\001\022\024\n" + + "\007read_id\030\003 \001(\003B\003\340A\002\"\224\001\n" + + "\017ObjectRangeData\022<\n" + + 
"\020checksummed_data\030\001 \001(\0132\".google.storage.v2.ChecksummedData\0220\n\n" + + "read_range\030\002 \001(\0132\034.google.storage.v2.ReadRange\022\021\n" + + "\trange_end\030\003 \001(\010\"%\n" + + "\016BidiReadHandle\022\023\n" + + "\006handle\030\001 \001(\014B\003\340A\002\"&\n" + + "\017BidiWriteHandle\022\023\n" + + "\006handle\030\001 \001(\014B\003\340A\002\"\272\003\n" + + "\017WriteObjectSpec\0220\n" + + "\010resource\030\001 \001(\0132\031.google.storage.v2.ObjectB\003\340A\002\022\033\n" + + "\016predefined_acl\030\007 \001(\tB\003\340A\001\022 \n" + + "\023if_generation_match\030\003 \001(\003H\000\210\001\001\022$\n" + + "\027if_generation_not_match\030\004 \001(\003H\001\210\001\001\022$\n" + + "\027if_metageneration_match\030\005 \001(\003H\002\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\006 \001(\003H\003\210\001\001\022\030\n" + + "\013object_size\030\010 \001(\003H\004\210\001\001\022\027\n\n" + + "appendable\030\t \001(\010H\005\210\001\001B\026\n" + + "\024_if_generation_matchB\032\n" + + "\030_if_generation_not_matchB\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_matchB\016\n" + + "\014_object_sizeB\r\n" + + "\013_appendable\"\225\003\n" + + "\022WriteObjectRequest\022\023\n" + + "\tupload_id\030\001 \001(\tH\000\022?\n" + + "\021write_object_spec\030\002 \001" + + "(\0132\".google.storage.v2.WriteObjectSpecH\000\022\031\n" + + "\014write_offset\030\003 \001(\003B\003\340A\002\022>\n" + + "\020checksummed_data\030\004" + + " \001(\0132\".google.storage.v2.ChecksummedDataH\001\022A\n" + + "\020object_checksums\030\006 \001(\0132\"." 
+ + "google.storage.v2.ObjectChecksumsB\003\340A\001\022\031\n" + + "\014finish_write\030\007 \001(\010B\003\340A\001\022W\n" + + "\034common_object_request_params\030\010" + + " \001(\0132,.google.storage.v2.CommonObjectRequestParamsB\003\340A\001B\017\n\r" + + "first_messageB\006\n" + + "\004data\"n\n" + + "\023WriteObjectResponse\022\030\n" + + "\016persisted_size\030\001 \001(\003H\000\022-\n" + + "\010resource\030\002 \001(\0132\031.google.storage.v2.ObjectH\000B\016\n" + + "\014write_status\"\201\003\n" + + "\020AppendObjectSpec\0225\n" + + "\006bucket\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Bucket\022\023\n" + + "\006object\030\002 \001(\tB\003\340A\002\022\027\n\n" + + "generation\030\003 \001(\003B\003\340A\002\022$\n" + + "\027if_metageneration_match\030\004 \001(\003H\000\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\005 \001(\003H\001\210\001\001\022\032\n\r" + + "routing_token\030\006 \001(\tH\002\210\001\001\022=\n" + + "\014write_handle\030\007" + + " \001(\0132\".google.storage.v2.BidiWriteHandleH\003\210\001\001B\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_matchB\020\n" + + "\016_routing_tokenB\017\n\r" + + "_write_handle\"\213\004\n" + + "\026BidiWriteObjectRequest\022\023\n" + + "\tupload_id\030\001 \001(\tH\000\022?\n" + + "\021write_object_spec\030\002" + + " \001(\0132\".google.storage.v2.WriteObjectSpecH\000\022A\n" + + "\022append_object_spec\030\013" + + " \001(\0132#.google.storage.v2.AppendObjectSpecH\000\022\031\n" + + "\014write_offset\030\003 \001(\003B\003\340A\002\022>\n" + + "\020checksummed_data\030\004" + + " \001(\0132\".google.storage.v2.ChecksummedDataH\001\022A\n" + + "\020object_checksums\030\006" + + " \001(\0132\".google.storage.v2.ObjectChecksumsB\003\340A\001\022\031\n" + + "\014state_lookup\030\007 \001(\010B\003\340A\001\022\022\n" + + "\005flush\030\010 \001(\010B\003\340A\001\022\031\n" + + "\014finish_write\030\t 
\001(\010B\003\340A\001\022W\n" + + "\034common_object_request_params\030\n" + + " \001(\0132,.google.storage.v2.CommonObjectRequestParamsB\003\340A\001B\017\n\r" + + "first_messageB\006\n" + + "\004data\"\302\001\n" + + "\027BidiWriteObjectResponse\022\030\n" + + "\016persisted_size\030\001 \001(\003H\000\022-\n" + + "\010resource\030\002 \001(\0132\031.google.storage.v2.ObjectH\000\022=\n" + + "\014write_handle\030\003" + + " \001(\0132\".google.storage.v2.BidiWriteHandleH\001\210\001\001B\016\n" + + "\014write_statusB\017\n\r" + + "_write_handle\"\340\003\n" + + "\022ListObjectsRequest\0225\n" + + "\006parent\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Bucket\022\026\n" + + "\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\n" + + "page_token\030\003 \001(\tB\003\340A\001\022\026\n" + + "\tdelimiter\030\004 \001(\tB\003\340A\001\022\'\n" + + "\032include_trailing_delimiter\030\005 \001(\010B\003\340A\001\022\023\n" + + "\006prefix\030\006 \001(\tB\003\340A\001\022\025\n" + + "\010versions\030\007 \001(\010B\003\340A\001\0222\n" + + "\tread_mask\030\010 \001(\0132\032.google.protobuf.FieldMaskH\000\210\001\001\022 \n" + + "\023lexicographic_start\030\n" + + " \001(\tB\003\340A\001\022\036\n" + + "\021lexicographic_end\030\013 \001(\tB\003\340A\001\022\031\n" + + "\014soft_deleted\030\014 \001(\010B\003\340A\001\022(\n" + + "\033include_folders_as_prefixes\030\r" + + " \001(\010B\003\340A\001\022\027\n\n" + + "match_glob\030\016 \001(\tB\003\340A\001\022\023\n" + + "\006filter\030\017 \001(\tB\003\340A\001B\014\n\n" + + "_read_mask\"\212\001\n" + + "\027QueryWriteStatusRequest\022\026\n" + + "\tupload_id\030\001 \001(\tB\003\340A\002\022W\n" + + "\034common_object_request_params\030\002" + + " \001(\0132,.google.storage.v2.CommonObjectRequestParamsB\003\340A\001\"s\n" + + "\030QueryWriteStatusResponse\022\030\n" + + "\016persisted_size\030\001 \001(\003H\000\022-\n" + + "\010resource\030\002 
\001(\0132\031.google.storage.v2.ObjectH\000B\016\n" + + "\014write_status\"\335\n\n" + + "\024RewriteObjectRequest\022 \n" + + "\020destination_name\030\030 \001(\tB\006\340A\002\340A\005\022D\n" + + "\022destination_bucket\030\031 \001(\tB(\340A\002\340A\005\372A\037\n" + + "\035storage.googleapis.com/Bucket\022F\n" + + "\023destination_kms_key\030\033 \001(\tB)\340A\001\372A#\n" + + "!cloudkms.googleapis.com/CryptoKey\0223\n" + + "\013destination\030\001 \001(\0132\031.google.storage.v2.ObjectB\003\340A\001\022<\n" + + "\r" + + "source_bucket\030\002 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Bucket\022\032\n\r" + + "source_object\030\003 \001(\tB\003\340A\002\022\036\n" + + "\021source_generation\030\004 \001(\003B\003\340A\001\022\032\n\r" + + "rewrite_token\030\005 \001(\tB\003\340A\001\022\'\n" + + "\032destination_predefined_acl\030\034 \001(\tB\003\340A\001\022 \n" + + "\023if_generation_match\030\007 \001(\003H\000\210\001\001\022$\n" + + "\027if_generation_not_match\030\010 \001(\003H\001\210\001\001\022$\n" + + "\027if_metageneration_match\030\t \001(\003H\002\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\n" + + " \001(\003H\003\210\001\001\022\'\n" + + "\032if_source_generation_match\030\013 \001(\003H\004\210\001\001\022+\n" + + "\036if_source_generation_not_match\030\014 \001(\003H\005\210\001\001\022+\n" + + "\036if_source_metageneration_match\030\r" + + " \001(\003H\006\210\001\001\022/\n" + + "\"if_source_metageneration_not_match\030\016 \001(\003H\007\210\001\001\022)\n" + + "\034max_bytes_rewritten_per_call\030\017 \001(\003B\003\340A\001\022-\n" + + " copy_source_encryption_algorithm\030\020 \001(\tB\003\340A\001\022-\n" + + " copy_source_encryption_key_bytes\030\025 \001(\014B\003\340A\001\0224\n" + + "\'copy_source_encryption_key_sha256_bytes\030\026 \001(\014B\003\340A\001\022W\n" + + "\034common_object_request_params\030\023 \001(\0132,.google.s" + + 
"torage.v2.CommonObjectRequestParamsB\003\340A\001\022A\n" + + "\020object_checksums\030\035" + + " \001(\0132\".google.storage.v2.ObjectChecksumsB\003\340A\001B\026\n" + + "\024_if_generation_matchB\032\n" + + "\030_if_generation_not_matchB\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_matchB\035\n" + + "\033_if_source_generation_matchB!\n" + + "\037_if_source_generation_not_matchB!\n" + + "\037_if_source_metageneration_matchB%\n" + + "#_if_source_metageneration_not_match\"\227\001\n" + + "\017RewriteResponse\022\035\n" + + "\025total_bytes_rewritten\030\001 \001(\003\022\023\n" + + "\013object_size\030\002 \001(\003\022\014\n" + + "\004done\030\003 \001(\010\022\025\n\r" + + "rewrite_token\030\004 \001(\t\022+\n" + + "\010resource\030\005 \001(\0132\031.google.storage.v2.Object\"\367\005\n" + + "\021MoveObjectRequest\0225\n" + + "\006bucket\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035storage.googleapis.com/Bucket\022\032\n\r" + + "source_object\030\002 \001(\tB\003\340A\002\022\037\n" + + "\022destination_object\030\003 \001(\tB\003\340A\002\022,\n" + + "\032if_source_generation_match\030\004 \001(\003B\003\340A\001H\000\210\001\001\0220\n" + + "\036if_source_generation_not_match\030\005" + + " \001(\003B\003\340A\001H\001\210\001\001\0220\n" + + "\036if_source_metageneration_match\030\006" + + " \001(\003B\003\340A\001H\002\210\001\001\0224\n" + + "\"if_source_metageneration_not_match\030\007" + + " \001(\003B\003\340A\001H\003\210\001\001\022%\n" + + "\023if_generation_match\030\010 \001(\003B\003\340A\001H\004\210\001\001\022)\n" + + "\027if_generation_not_match\030\t \001(\003B\003\340A\001H\005\210\001\001\022)\n" + + "\027if_metageneration_match\030\n" + + " \001(\003B\003\340A\001H\006\210\001\001\022-\n" + + "\033if_metageneration_not_match\030\013" + + " \001(\003B\003\340A\001H\007\210\001\001B\035\n" + + "\033_if_source_generation_matchB!\n" + + "\037_if_source_generation_not_matchB!\n" + + 
"\037_if_source_metageneration_matchB%\n" + + "#_if_source_metageneration_not_matchB\026\n" + + "\024_if_generation_matchB\032\n" + + "\030_if_generation_not_matchB\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_match\"\374\001\n" + + "\032StartResumableWriteRequest\022B\n" + + "\021write_object_spec\030\001" + + " \001(\0132\".google.storage.v2.WriteObjectSpecB\003\340A\002\022W\n" + + "\034common_object_request_params\030\003" + + " \001(\0132,.google.storage.v2.CommonObjectRequestParamsB\003\340A\001\022A\n" + + "\020object_checksums\030\005" + + " \001(\0132\".google.storage.v2.ObjectChecksumsB\003\340A\001\"0\n" + + "\033StartResumableWriteResponse\022\021\n" + + "\tupload_id\030\001 \001(\t\"\243\004\n" + + "\023UpdateObjectRequest\022.\n" + + "\006object\030\001 \001(\0132\031.google.storage.v2.ObjectB\003\340A\002\022 \n" + + "\023if_generation_match\030\002 \001(\003H\000\210\001\001\022$\n" + + "\027if_generation_not_match\030\003 \001(\003H\001\210\001\001\022$\n" + + "\027if_metageneration_match\030\004 \001(\003H\002\210\001\001\022(\n" + + "\033if_metageneration_not_match\030\005 \001(\003H\003\210\001\001\022\033\n" + + "\016predefined_acl\030\n" + + " \001(\tB\003\340A\001\0224\n" + + "\013update_mask\030\007 \001(\0132\032.google.protobuf.FieldMaskB\003\340A\002\022W\n" + + "\034common_object_request_params\030\010" + + " \001(\0132,.google.storage.v2.CommonObjectRequestParamsB\003\340A\001\022(\n" + + "\033override_unlocked_retention\030\013 \001(\010B\003\340A\001B\026\n" + + "\024_if_generation_matchB\032\n" + + "\030_if_generation_not_matchB\032\n" + + "\030_if_metageneration_matchB\036\n" + + "\034_if_metageneration_not_match\"\213\001\n" + + "\031CommonObjectRequestParams\022!\n" + + "\024encryption_algorithm\030\001 \001(\tB\003\340A\001\022!\n" + + "\024encryption_key_bytes\030\004 \001(\014B\003\340A\001\022(\n" + + "\033encryption_key_sha256_bytes\030\005 \001(\014B\003\340A\001\"\312\005\n" + + 
"\020ServiceConstants\"\265\005\n" + + "\006Values\022\026\n" + + "\022VALUES_UNSPECIFIED\020\000\022\033\n" + + "\024MAX_READ_CHUNK_BYTES\020\200\200\200\001\022\034\n" + + "\025MAX_WRITE_CHUNK_BYTES\020\200\200\200\001\022\031\n" + + "\022MAX_OBJECT_SIZE_MB\020\200\200\300\002\022)\n" + + "$MAX_CUSTOM_METADATA_FIELD_NAME_BYTES\020\200\010\022*\n" + + "%MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES\020\200 \022)\n" + + "$MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES\020\200@\022*\n" + + "$MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES\020\200\240\001\022\'\n" + + "#MAX_NOTIFICATION_CONFIGS_PER_BUCKET\020d\022\"\n" + + "\036MAX_LIFECYCLE_RULES_PER_BUCKET\020d\022&\n" + + "\"MAX_NOTIFICATION_CUSTOM_ATTRIBUTES\020\005\0221\n" + + ",MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH\020\200\002\0223\n" + + ".MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH\020\200\010\022\034\n" + + "\030MAX_LABELS_ENTRIES_COUNT\020@\022\037\n" + + "\033MAX_LABELS_KEY_VALUE_LENGTH\020?\022\037\n" + + "\032MAX_LABELS_KEY_VALUE_BYTES\020\200\001\022.\n" + + ")MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST\020\350\007\022\036\n" + + "\032SPLIT_TOKEN_MAX_VALID_DAYS\020\016\032\002\020\001\"\272,\n" + + "\006Bucket\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\010\022\026\n" + + "\tbucket_id\030\002 \001(\tB\003\340A\003\022\014\n" + + "\004etag\030\035 \001(\t\022D\n" + + "\007project\030\003 \001(\tB3\340A\005\372A-\n" + + "+cloudresourcemanager.googleapis.com/Project\022\033\n" + + "\016metageneration\030\004 \001(\003B\003\340A\003\022\025\n" + + "\010location\030\005 \001(\tB\003\340A\005\022\032\n\r" + + "location_type\030\006 \001(\tB\003\340A\003\022\032\n\r" + + "storage_class\030\007 \001(\tB\003\340A\001\022\020\n" + + "\003rpo\030\033 \001(\tB\003\340A\001\0228\n" + + "\003acl\030\010 \003(\0132&.google.storage.v2.BucketAccessControlB\003\340A\001\022G\n" + + "\022default_object_acl\030\t" + + " \003(\0132&.google.storage.v2.ObjectAccessControlB\003\340A\001\022;\n" + + "\tlifecycle\030\n" + + " 
\001(\0132#.google.storage.v2.Bucket.LifecycleB\003\340A\001\0224\n" + + "\013create_time\030\013 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0221\n" + + "\004cors\030\014 \003(\0132\036.google.storage.v2.Bucket.CorsB\003\340A\001\0224\n" + + "\013update_time\030\r" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022%\n" + + "\030default_event_based_hold\030\016 \001(\010B\003\340A\001\022:\n" + + "\006labels\030\017" + + " \003(\0132%.google.storage.v2.Bucket.LabelsEntryB\003\340A\001\0227\n" + + "\007website\030\020 \001(\013" + + "2!.google.storage.v2.Bucket.WebsiteB\003\340A\001\022=\n\n" + + "versioning\030\021" + + " \001(\0132$.google.storage.v2.Bucket.VersioningB\003\340A\001\0227\n" + + "\007logging\030\022 \001(\0132!.google.storage.v2.Bucket.LoggingB\003\340A\001\022,\n" + + "\005owner\030\023 \001(\0132\030.google.storage.v2.OwnerB\003\340A\003\022=\n\n" + + "encryption\030\024" + + " \001(\0132$.google.storage.v2.Bucket.EncryptionB\003\340A\001\0227\n" + + "\007billing\030\025 \001(\0132!.google.storage.v2.Bucket.BillingB\003\340A\001\022H\n" + + "\020retention_policy\030\026 \001(\0132).googl" + + "e.storage.v2.Bucket.RetentionPolicyB\003\340A\001\022<\n\n" + + "iam_config\030\027" + + " \001(\0132#.google.storage.v2.Bucket.IamConfigB\003\340A\001\022\032\n\r" + + "satisfies_pzs\030\031 \001(\010B\003\340A\001\022U\n" + + "\027custom_placement_config\030\032 " + + "\001(\0132/.google.storage.v2.Bucket.CustomPlacementConfigB\003\340A\001\022;\n" + + "\tautoclass\030\034 \001(\0132#.g" + + "oogle.storage.v2.Bucket.AutoclassB\003\340A\001\022T\n" + + "\026hierarchical_namespace\030 \001(\0132/.google." + + "storage.v2.Bucket.HierarchicalNamespaceB\003\340A\001\022K\n" + + "\022soft_delete_policy\030\037 \001(\0132*.googl" + + "e.storage.v2.Bucket.SoftDeletePolicyB\003\340A\001\022H\n" + + "\020object_retention\030!" 
+ + " \001(\0132).google.storage.v2.Bucket.ObjectRetentionB\003\340A\001\022?\n" + + "\tip_filter\030&" + + " \001(\0132\".google.storage.v2.Bucket.IpFilterB\003\340A\001H\000\210\001\001\032&\n" + + "\007Billing\022\033\n" + + "\016requester_pays\030\001 \001(\010B\003\340A\001\032l\n" + + "\004Cors\022\023\n" + + "\006origin\030\001 \003(\tB\003\340A\001\022\023\n" + + "\006method\030\002 \003(\tB\003\340A\001\022\034\n" + + "\017response_header\030\003 \003(\tB\003\340A\001\022\034\n" + + "\017max_age_seconds\030\004 \001(\005B\003\340A\001\032\256\t\n\n" + + "Encryption\022B\n" + + "\017default_kms_key\030\001 \001(\tB)\340A\001\372A#\n" + + "!cloudkms.googleapis.com/CryptoKey\022\215\001\n" + + ",google_managed_encrypt", + "ion_enforcement_config\030\002 \001(\0132M.google.storage.v2.Bucket.Encryption.GoogleManaged" + + "EncryptionEnforcementConfigB\003\340A\001H\000\210\001\001\022\221\001\n" + + ".customer_managed_encryption_enforcement_config\030\003" + + " \001(\0132O.google.storage.v2.Bucke" + + "t.Encryption.CustomerManagedEncryptionEnforcementConfigB\003\340A\001H\001\210\001\001\022\223\001\n" + + "/customer_supplied_encryption_enforcement_config\030\004 " + + "\001(\0132P.google.storage.v2.Bucket.Encryptio" + + "n.CustomerSuppliedEncryptionEnforcementConfigB\003\340A\001H\002\210\001\001\032\252\001\n" + + "(GoogleManagedEncryptionEnforcementConfig\022\035\n" + + "\020restriction_mode\030\003 \001(\tH\000\210\001\001\0227\n" + + "\016effective_time\030\002" + + " \001(\0132\032.google.protobuf.TimestampH\001\210\001\001B\023\n" + + "\021_restriction_modeB\021\n" + + "\017_effective_time\032\254\001\n" + + "*CustomerManagedEncryptionEnforcementConfig\022\035\n" + + "\020restriction_mode\030\003 \001(\tH\000\210\001\001\0227\n" + + "\016effective_time\030\002" + + " \001(\0132\032.google.protobuf.TimestampH\001\210\001\001B\023\n" + + "\021_restriction_modeB\021\n" + + "\017_effective_time\032\255\001\n" + + 
"+CustomerSuppliedEncryptionEnforcementConfig\022\035\n" + + "\020restriction_mode\030\003 \001(\tH\000\210\001\001\0227\n" + + "\016effective_time\030\002" + + " \001(\0132\032.google.protobuf.TimestampH\001\210\001\001B\023\n" + + "\021_restriction_modeB\021\n" + + "\017_effective_timeB/\n" + + "-_google_managed_encryption_enforcement_configB1\n" + + "/_customer_managed_encryption_enforcement_configB2\n" + + "0_customer_supplied_encryption_enforcement_config\032\200\002\n" + + "\tIamConfig\022f\n" + + "\033uniform_bucket_level_access\030\001 \001(\0132<.google.storage." + + "v2.Bucket.IamConfig.UniformBucketLevelAccessB\003\340A\001\022%\n" + + "\030public_access_prevention\030\003 \001(\tB\003\340A\001\032d\n" + + "\030UniformBucketLevelAccess\022\024\n" + + "\007enabled\030\001 \001(\010B\003\340A\001\0222\n" + + "\tlock_time\030\002" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\001\032\252\006\n" + + "\tLifecycle\022;\n" + + "\004rule\030\001" + + " \003(\0132(.google.storage.v2.Bucket.Lifecycle.RuleB\003\340A\001\032\337\005\n" + + "\004Rule\022D\n" + + "\006action\030\001" + + " \001(\0132/.google.storage.v2.Bucket.Lifecycle.Rule.ActionB\003\340A\001\022J\n" + + "\tcondition\030\002 " + + "\001(\01322.google.storage.v2.Bucket.Lifecycle.Rule.ConditionB\003\340A\001\0327\n" + + "\006Action\022\021\n" + + "\004type\030\001 \001(\tB\003\340A\001\022\032\n\r" + + "storage_class\030\002 \001(\tB\003\340A\001\032\213\004\n" + + "\tCondition\022\025\n" + + "\010age_days\030\001 \001(\005H\000\210\001\001\022.\n" + + "\016created_before\030\002 \001(\0132\021.google.type.DateB\003\340A\001\022\024\n" + + "\007is_live\030\003 \001(\010H\001\210\001\001\022\037\n" + + "\022num_newer_versions\030\004 \001(\005H\002\210\001\001\022\"\n" + + "\025matches_storage_class\030\005 \003(\tB\003\340A\001\022#\n" + + "\026days_since_custom_time\030\007 \001(\005H\003\210\001\001\0222\n" + + "\022custom_time_before\030\010 
\001(\0132\021.google.type.DateB\003\340A\001\022\'\n" + + "\032days_since_noncurrent_time\030\t \001(\005H\004\210\001\001\0226\n" + + "\026noncurrent_time_before\030\n" + + " \001(\0132\021.google.type.DateB\003\340A\001\022\033\n" + + "\016matches_prefix\030\013 \003(\tB\003\340A\001\022\033\n" + + "\016matches_suffix\030\014 \003(\tB\003\340A\001B\013\n" + + "\t_age_daysB\n\n" + + "\010_is_liveB\025\n" + + "\023_num_newer_versionsB\031\n" + + "\027_days_since_custom_timeB\035\n" + + "\033_days_since_noncurrent_time\032B\n" + + "\007Logging\022\027\n\n" + + "log_bucket\030\001 \001(\tB\003\340A\001\022\036\n" + + "\021log_object_prefix\030\002 \001(\tB\003\340A\001\032*\n" + + "\017ObjectRetention\022\027\n" + + "\007enabled\030\001 \001(\010B\006\340A\001\340A\003\032\236\001\n" + + "\017RetentionPolicy\0227\n" + + "\016effective_time\030\001" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\001\022\026\n" + + "\tis_locked\030\002 \001(\010B\003\340A\001\022:\n" + + "\022retention_duration\030\004" + + " \001(\0132\031.google.protobuf.DurationB\003\340A\001\032\261\001\n" + + "\020SoftDeletePolicy\022:\n" + + "\022retention_duration\030\001" + + " \001(\0132\031.google.protobuf.DurationH\000\210\001\001\0227\n" + + "\016effective_time\030\002" + + " \001(\0132\032.google.protobuf.TimestampH\001\210\001\001B\025\n" + + "\023_retention_durationB\021\n" + + "\017_effective_time\032\"\n\n" + + "Versioning\022\024\n" + + "\007enabled\030\001 \001(\010B\003\340A\001\032E\n" + + "\007Website\022\035\n" + + "\020main_page_suffix\030\001 \001(\tB\003\340A\001\022\033\n" + + "\016not_found_page\030\002 \001(\tB\003\340A\001\0324\n" + + "\025CustomPlacementConfig\022\033\n" + + "\016data_locations\030\001 \003(\tB\003\340A\001\032\220\002\n" + + "\tAutoclass\022\024\n" + + "\007enabled\030\001 \001(\010B\003\340A\001\0224\n" + + "\013toggle_time\030\002 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022#\n" + + "\026terminal_storage_class\030\003 
\001(\tH\000\210\001\001\022P\n" + + "\"terminal_storage_class_update_time\030\004" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003H\001\210\001\001B\031\n" + + "\027_terminal_storage_classB%\n" + + "#_terminal_storage_class_update_time\032\375\003\n" + + "\010IpFilter\022\021\n" + + "\004mode\030\001 \001(\tH\000\210\001\001\022Z\n" + + "\025public_network_source\030\002 \001(\01326.go" + + "ogle.storage.v2.Bucket.IpFilter.PublicNetworkSourceH\001\210\001\001\022U\n" + + "\023vpc_network_sources\030\003" + + " \003(\01323.google.storage.v2.Bucket.IpFilter.VpcNetworkSourceB\003\340A\001\022!\n" + + "\024allow_cross_org_vpcs\030\004 \001(\010B\003\340A\001\022+\n" + + "\036allow_all_service_agent_access\030\005 \001(\010H\002\210\001\001\032:\n" + + "\023PublicNetworkSource\022#\n" + + "\026allowed_ip_cidr_ranges\030\001 \003(\tB\003\340A\001\032Y\n" + + "\020VpcNetworkSource\022\024\n" + + "\007network\030\001 \001(\tH\000\210\001\001\022#\n" + + "\026allowed_ip_cidr_ranges\030\002 \003(\tB\003\340A\001B\n\n" + + "\010_networkB\007\n" + + "\005_modeB\030\n" + + "\026_public_network_sourceB!\n" + + "\037_allow_all_service_agent_access\032-\n" + + "\025HierarchicalNamespace\022\024\n" + + "\007enabled\030\001 \001(\010B\003\340A\001\032-\n" + + "\013LabelsEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001:X\352AU\n" + + "\035storage.googlea" + + "pis.com/Bucket\022#projects/{project}/buckets/{bucket}*\007buckets2\006bucketB\014\n\n" + + "_ip_filter\"\366\001\n" + + "\023BucketAccessControl\022\021\n" + + "\004role\030\001 \001(\tB\003\340A\001\022\017\n" + + "\002id\030\002 \001(\tB\003\340A\001\022\023\n" + + "\006entity\030\003 \001(\tB\003\340A\001\022\027\n\n" + + "entity_alt\030\t \001(\tB\003\340A\003\022\026\n" + + "\tentity_id\030\004 \001(\tB\003\340A\001\022\021\n" + + "\004etag\030\010 \001(\tB\003\340A\001\022\022\n" + + "\005email\030\005 \001(\tB\003\340A\001\022\023\n" + + "\006domain\030\006 
\001(\tB\003\340A\001\0229\n" + + "\014project_team\030\007" + + " \001(\0132\036.google.storage.v2.ProjectTeamB\003\340A\001\"I\n" + + "\017ChecksummedData\022\026\n" + + "\007content\030\001 \001(\014B\005\010\001\340A\001\022\023\n" + + "\006crc32c\030\002 \001(\007H\000\210\001\001B\t\n" + + "\007_crc32c\"H\n" + + "\017ObjectChecksums\022\023\n" + + "\006crc32c\030\001 \001(\007H\000\210\001\001\022\025\n" + + "\010md5_hash\030\002 \001(\014B\003\340A\001B\t\n" + + "\007_crc32c\"\234\001\n" + + "\032ObjectCustomContextPayload\022\022\n" + + "\005value\030\001 \001(\tB\003\340A\002\0224\n" + + "\013create_time\030\002 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0224\n" + + "\013update_time\030\003" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003\"\262\001\n" + + "\016ObjectContexts\022B\n" + + "\006custom\030\001 \003(\0132-.goo" + + "gle.storage.v2.ObjectContexts.CustomEntryB\003\340A\001\032\\\n" + + "\013CustomEntry\022\013\n" + + "\003key\030\001 \001(\t\022<\n" + + "\005value\030\002" + + " \001(\0132-.google.storage.v2.ObjectCustomContextPayload:\0028\001\"V\n" + + "\022CustomerEncryption\022!\n" + + "\024encryption_algorithm\030\001 \001(\tB\003\340A\001\022\035\n" + + "\020key_sha256_bytes\030\003 \001(\014B\003\340A\001\"\221\016\n" + + "\006Object\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\005\0225\n" + + "\006bucket\030\002 \001(\tB%\340A\005\372A\037\n" + + "\035storage.googleapis.com/Bucket\022\021\n" + + "\004etag\030\033 \001(\tB\003\340A\001\022\027\n\n" + + "generation\030\003 \001(\003B\003\340A\005\022\037\n" + + "\r" + + "restore_token\030# \001(\tB\003\340A\003H\000\210\001\001\022\033\n" + + "\016metageneration\030\004 \001(\003B\003\340A\003\022\032\n\r" + + "storage_class\030\005 \001(\tB\003\340A\001\022\021\n" + + "\004size\030\006 \001(\003B\003\340A\003\022\035\n" + + "\020content_encoding\030\007 \001(\tB\003\340A\001\022 \n" + + "\023content_disposition\030\010 \001(\tB\003\340A\001\022\032\n\r" + + 
"cache_control\030\t \001(\tB\003\340A\001\0228\n" + + "\003acl\030\n" + + " \003(\0132&.google.storage.v2.ObjectAccessControlB\003\340A\001\022\035\n" + + "\020content_language\030\013 \001(\tB\003\340A\001\0224\n" + + "\013delete_time\030\014" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0226\n\r" + + "finalize_time\030$ \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022\031\n" + + "\014content_type\030\r" + + " \001(\tB\003\340A\001\0224\n" + + "\013create_time\030\016" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022\034\n" + + "\017component_count\030\017 \001(\005B\003\340A\003\022:\n" + + "\tchecksums\030\020" + + " \001(\0132\".google.storage.v2.ObjectChecksumsB\003\340A\003\0224\n" + + "\013update_time\030\021 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022:\n" + + "\007kms_key\030\022 \001(\tB)\340A\001\372A#\n" + + "!cloudkms.googleapis.com/CryptoKey\022B\n" + + "\031update_storage_class_time\030\023" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022\033\n" + + "\016temporary_hold\030\024 \001(\010B\003\340A\001\022>\n" + + "\025retention_expire_time\030\025" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\001\022>\n" + + "\010metadata\030\026" + + " \003(\0132\'.google.storage.v2.Object.MetadataEntryB\003\340A\001\0228\n" + + "\010contexts\030& \001(\0132!.google.storage.v2.ObjectContextsB\003\340A\001\022\035\n" + + "\020event_based_hold\030\027 \001(\010H\001\210\001\001\022,\n" + + "\005owner\030\030 \001(\0132\030.google.storage.v2.OwnerB\003\340A\003\022G\n" + + "\023customer_encryption\030\031" + + " \001(\0132%.google.storage.v2.CustomerEncryptionB\003\340A\001\0224\n" + + "\013custom_time\030\032 \001(\0132\032.google.protobuf.TimestampB\003\340A\001\022>\n" + + "\020soft_delete_time\030\034" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003H\002\210\001\001\022>\n" + + "\020hard_delete_time\030\035" + + " 
\001(\0132\032.google.protobuf.TimestampB\003\340A\003H\003\210\001\001\022;\n" + + "\tretention\030\036 \001(\0132#." + + "google.storage.v2.Object.RetentionB\003\340A\001\032\274\001\n" + + "\tRetention\022;\n" + + "\004mode\030\001" + + " \001(\0162(.google.storage.v2.Object.Retention.ModeB\003\340A\001\022:\n" + + "\021retain_until_time\030\002" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\001\"6\n" + + "\004Mode\022\024\n" + + "\020MODE_UNSPECIFIED\020\000\022\014\n" + + "\010UNLOCKED\020\001\022\n\n" + + "\006LOCKED\020\002\032/\n\r" + + "MetadataEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001B\020\n" + + "\016_restore_tokenB\023\n" + + "\021_event_based_holdB\023\n" + + "\021_soft_delete_timeB\023\n" + + "\021_hard_delete_time\"\366\001\n" + + "\023ObjectAccessControl\022\021\n" + + "\004role\030\001 \001(\tB\003\340A\001\022\017\n" + + "\002id\030\002 \001(\tB\003\340A\001\022\023\n" + + "\006entity\030\003 \001(\tB\003\340A\001\022\027\n\n" + + "entity_alt\030\t \001(\tB\003\340A\003\022\026\n" + + "\tentity_id\030\004 \001(\tB\003\340A\001\022\021\n" + + "\004etag\030\010 \001(\tB\003\340A\001\022\022\n" + + "\005email\030\005 \001(\tB\003\340A\001\022\023\n" + + "\006domain\030\006 \001(\tB\003\340A\001\0229\n" + + "\014project_team\030\007" + + " \001(\0132\036.google.storage.v2.ProjectTeamB\003\340A\001\"l\n" + + "\023ListObjectsResponse\022*\n" + + "\007objects\030\001 \003(\0132\031.google.storage.v2.Object\022\020\n" + + "\010prefixes\030\002 \003(\t\022\027\n" + + "\017next_page_token\030\003 \001(\t\"=\n" + + "\013ProjectTeam\022\033\n" + + "\016project_number\030\001 \001(\tB\003\340A\001\022\021\n" + + "\004team\030\002 \001(\tB\003\340A\001\"4\n" + + "\005Owner\022\023\n" + + "\006entity\030\001 \001(\tB\003\340A\001\022\026\n" + + "\tentity_id\030\002 \001(\tB\003\340A\001\"C\n" + + "\014ContentRange\022\r\n" + + "\005start\030\001 \001(\003\022\013\n" + + "\003end\030\002 
\001(\003\022\027\n" + + "\017complete_length\030\003 \001(\0032\237\037\n" + + "\007Storage\022r\n" + + "\014DeleteBucket\022&.google.storage.v2.Delete" + + "BucketRequest\032\026.google.protobuf.Empty\"\"\332A\004name\212\323\344\223\002\025\022\023\n" + + "\004name\022\013{bucket=**}\022o\n" + + "\tGetBucket\022#.google.storage.v2.GetBucketRequ" + + "est\032\031.google.storage.v2.Bucket\"\"\332A\004name\212\323\344\223\002\025\022\023\n" + + "\004name\022\013{bucket=**}\022\253\001\n" + + "\014CreateBucket\022&.google.storage.v2.CreateBucketRequ" + + "est\032\031.google.storage.v2.Bucket\"X\332A\027parent,bucket,bucket_id\212\323\344\223\0028\022\026\n" + + "\006parent\022\014{project=**}\022\036\n" + + "\016bucket.project\022\014{project=**}\022\205\001\n" + + "\013ListBuckets\022%.google.storage.v2.Lis" + + "tBucketsRequest\032&.google.storage.v2.ListBucketsResponse\"\'\332A\006parent\212\323\344\223\002\030\022\026\n" + + "\006parent\022\014{project=**}\022\223\001\n" + + "\031LockBucketRetentionPolicy\0223.google.storage.v2.LockBucketRet" + + "entionPolicyRequest\032\031.google.storage.v2.Bucket\"&\332A\006bucket\212\323\344\223\002\027\022\025\n" + + "\006bucket\022\013{bucket=**}\022\243\001\n" + + "\014GetIamPolicy\022\".google.iam.v1." 
+ + "GetIamPolicyRequest\032\025.google.iam.v1.Policy\"X\332A\010resource\212\323\344\223\002G\022\027\n" + + "\010resource\022\013{bucket=**}\022,\n" + + "\010resource\022 {bucket=projects/*/buckets/*}/**\022\252\001\n" + + "\014SetIamPolicy\022\".google.i" + + "am.v1.SetIamPolicyRequest\032\025.google.iam.v1.Policy\"_\332A\017resource,policy\212\323\344\223\002G\022\027\n" + + "\010resource\022\013{bucket=**}\022,\n" + + "\010resource\022 {bucket=projects/*/buckets/*}/**\022\226\002\n" + + "\022TestIamPermissions\022(.google.iam.v1.TestIamPermissi" + + "onsRequest\032).google.iam.v1.TestIamPermis" + + "sionsResponse\"\252\001\332A\024resource,permissions\212\323\344\223\002\214\001\022\027\n" + + "\010resource\022\013{bucket=**}\0224\n" + + "\010resource\022({bucket=projects/*/buckets/*}/objects/**\022;\n" + + "\010resource\022/{bucket=projects/*/buckets/*}/managedFolders/**\022\212\001\n" + + "\014UpdateBucket\022&.google.storage.v2.UpdateBucketRequ" + + "est\032\031.google.storage.v2.Bucket\"7\332A\022bucket,update_mask\212\323\344\223\002\034\022\032\n" + + "\013bucket.name\022\013{bucket=**}\022~\n\r" + + "ComposeObject\022\'.google.storag" + + "e.v2.ComposeObjectRequest\032\031.google.storage.v2.Object\")\212\323\344\223\002#\022!\n" + + "\022destination.bucket\022\013{bucket=**}\022\230\001\n" + + "\014DeleteObject\022&.googl" + + "e.storage.v2.DeleteObjectRequest\032\026.google.protobuf.Empty\"H\332A\r" + + "bucket,object\332A\030bucket,object,generation\212\323\344\223\002\027\022\025\n" + + "\006bucket\022\013{bucket=**}\022\215\001\n\r" + + "RestoreObject\022\'.google.storage.v2.RestoreObjectRequest\032\031.google.s" + + "torage.v2.Object\"8\332A\030bucket,object,generation\212\323\344\223\002\027\022\025\n" + + "\006bucket\022\013{bucket=**}\022\272\001\n" + + "\024CancelResumableWrite\022..google.storage.v2." 
+ + "CancelResumableWriteRequest\032/.google.sto" + + "rage.v2.CancelResumableWriteResponse\"A\332A\tupload_id\212\323\344\223\002/\022-\n" + + "\tupload_id\022 {bucket=projects/*/buckets/*}/**\022\225\001\n" + + "\tGetObject\022#." + + "google.storage.v2.GetObjectRequest\032\031.google.storage.v2.Object\"H\332A\r" + + "bucket,object\332A\030bucket,object,generation\212\323\344\223\002\027\022\025\n" + + "\006bucket\022\013{bucket=**}\022\245\001\n\n" + + "ReadObject\022$.google." + + "storage.v2.ReadObjectRequest\032%.google.storage.v2.ReadObjectResponse\"H\332A\r" + + "bucket,object\332A\030bucket,object,generation\212\323\344\223\002\027\022\025\n" + + "\006bucket\022\013{bucket=**}0\001\022\231\001\n" + + "\016BidiReadObject\022(.google.storage.v2.BidiReadObjectReq" + + "uest\032).google.storage.v2.BidiReadObjectResponse\".\212\323\344\223\002(\022&\n" + + "\027read_object_spec.bucket\022\013{bucket=**}(\0010\001\022\214\001\n" + + "\014UpdateObject\022&.google.storage.v2.UpdateObjectRequest\032\031.g" + + "oogle.storage.v2.Object\"9\332A\022object,update_mask\212\323\344\223\002\036\022\034\n\r" + + "object.bucket\022\013{bucket=**}\022`\n" + + "\013WriteObject\022%.google.storage.v2.Wr" + + "iteObjectRequest\032&.google.storage.v2.WriteObjectResponse\"\000(\001\022n\n" + + "\017BidiWriteObject\022).google.storage.v2.BidiWriteObjectReque" + + "st\032*.google.storage.v2.BidiWriteObjectResponse\"\000(\0010\001\022\204\001\n" + + "\013ListObjects\022%.google.storage.v2.ListObjectsRequest\032&.google.sto" + + "rage.v2.ListObjectsResponse\"&\332A\006parent\212\323\344\223\002\027\022\025\n" + + "\006parent\022\013{bucket=**}\022\230\001\n\r" + + "RewriteObject\022\'.google.storage.v2.RewriteObjectR" + + "equest\032\".google.storage.v2.RewriteResponse\":\212\323\344\223\0024\022\017\n\r" + + "source_bucket\022!\n" + + "\022destination_bucket\022\013{bucket=**}\022\256\001\n" + + "\023StartResumableWrite\022-.google.storage.v2.StartResumabl" + + 
"eWriteRequest\032..google.storage.v2.StartResumableWriteResponse\"8\212\323\344\223\0022\0220\n" + + "!write_object_spec.resource.bucket\022\013{bucket=**}\022\256\001\n" + + "\020QueryWriteStatus\022*.google.storage.v2" + + ".QueryWriteStatusRequest\032+.google.storage.v2.QueryWriteStatusResponse\"A\332A" + + "\tupload_id\212\323\344\223\002/\022-\n" + + "\tupload_id\022 {bucket=projects/*/buckets/*}/**\022\226\001\n\n" + + "MoveObject\022$.google.storage.v2.MoveObjectRequest\032\031.google.s" + + "torage.v2.Object\"G\332A\'bucket,source_object,destination_object\212\323\344\223\002\027\022\025\n" + + "\006bucket\022\013{bucket=**}\032\247\002\312A\026storage.googleapis.com\322A\212" + + "\002https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/" + + "cloud-platform.read-only,https://www.googleapis.com/auth/devstorage.full_control" + + ",https://www.googleapis.com/auth/devstor" + + "age.read_only,https://www.googleapis.com/auth/devstorage.read_writeB\342\001\n" + + "\025com.google.storage.v2B\014StorageProtoP\001Z>cloud.goo" + + "gle.com/go/storage/internal/apiv2/storagepb;storagepb\352Ax\n" + + "!cloudkms.googleapis.com/CryptoKey\022Sprojects/{project}/location" + + "s/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.api.RoutingProto.getDescriptor(), + com.google.iam.v1.IamPolicyProto.getDescriptor(), + com.google.iam.v1.PolicyProto.getDescriptor(), + com.google.protobuf.DurationProto.getDescriptor(), + com.google.protobuf.EmptyProto.getDescriptor(), + com.google.protobuf.FieldMaskProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + 
com.google.rpc.StatusProto.getDescriptor(), + com.google.type.DateProto.getDescriptor(), + }); + internal_static_google_storage_v2_DeleteBucketRequest_descriptor = + getDescriptor().getMessageType(0); + internal_static_google_storage_v2_DeleteBucketRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_DeleteBucketRequest_descriptor, + new java.lang.String[] { + "Name", "IfMetagenerationMatch", "IfMetagenerationNotMatch", + }); + internal_static_google_storage_v2_GetBucketRequest_descriptor = + getDescriptor().getMessageType(1); + internal_static_google_storage_v2_GetBucketRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_GetBucketRequest_descriptor, + new java.lang.String[] { + "Name", "IfMetagenerationMatch", "IfMetagenerationNotMatch", "ReadMask", + }); + internal_static_google_storage_v2_CreateBucketRequest_descriptor = + getDescriptor().getMessageType(2); + internal_static_google_storage_v2_CreateBucketRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_CreateBucketRequest_descriptor, + new java.lang.String[] { + "Parent", + "Bucket", + "BucketId", + "PredefinedAcl", + "PredefinedDefaultObjectAcl", + "EnableObjectRetention", + }); + internal_static_google_storage_v2_ListBucketsRequest_descriptor = + getDescriptor().getMessageType(3); + internal_static_google_storage_v2_ListBucketsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ListBucketsRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", "Prefix", "ReadMask", "ReturnPartialSuccess", + }); + internal_static_google_storage_v2_ListBucketsResponse_descriptor = + getDescriptor().getMessageType(4); + internal_static_google_storage_v2_ListBucketsResponse_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ListBucketsResponse_descriptor, + new java.lang.String[] { + "Buckets", "NextPageToken", "Unreachable", + }); + internal_static_google_storage_v2_LockBucketRetentionPolicyRequest_descriptor = + getDescriptor().getMessageType(5); + internal_static_google_storage_v2_LockBucketRetentionPolicyRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_LockBucketRetentionPolicyRequest_descriptor, + new java.lang.String[] { + "Bucket", "IfMetagenerationMatch", + }); + internal_static_google_storage_v2_UpdateBucketRequest_descriptor = + getDescriptor().getMessageType(6); + internal_static_google_storage_v2_UpdateBucketRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_UpdateBucketRequest_descriptor, + new java.lang.String[] { + "Bucket", + "IfMetagenerationMatch", + "IfMetagenerationNotMatch", + "PredefinedAcl", + "PredefinedDefaultObjectAcl", + "UpdateMask", + }); + internal_static_google_storage_v2_ComposeObjectRequest_descriptor = + getDescriptor().getMessageType(7); + internal_static_google_storage_v2_ComposeObjectRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ComposeObjectRequest_descriptor, + new java.lang.String[] { + "Destination", + "SourceObjects", + "DestinationPredefinedAcl", + "IfGenerationMatch", + "IfMetagenerationMatch", + "KmsKey", + "CommonObjectRequestParams", + "ObjectChecksums", + "DeleteSourceObjects", + }); + internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_descriptor = + internal_static_google_storage_v2_ComposeObjectRequest_descriptor.getNestedType(0); + internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_descriptor, + new java.lang.String[] { + "Name", "Generation", "ObjectPreconditions", + }); + internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_ObjectPreconditions_descriptor = + internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_descriptor + .getNestedType(0); + internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_ObjectPreconditions_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ComposeObjectRequest_SourceObject_ObjectPreconditions_descriptor, + new java.lang.String[] { + "IfGenerationMatch", + }); + internal_static_google_storage_v2_DeleteObjectRequest_descriptor = + getDescriptor().getMessageType(8); + internal_static_google_storage_v2_DeleteObjectRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_DeleteObjectRequest_descriptor, + new java.lang.String[] { + "Bucket", + "Object", + "Generation", + "IfGenerationMatch", + "IfGenerationNotMatch", + "IfMetagenerationMatch", + "IfMetagenerationNotMatch", + "CommonObjectRequestParams", + }); + internal_static_google_storage_v2_RestoreObjectRequest_descriptor = + getDescriptor().getMessageType(9); + internal_static_google_storage_v2_RestoreObjectRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_RestoreObjectRequest_descriptor, + new java.lang.String[] { + "Bucket", + "Object", + "Generation", + "RestoreToken", + "IfGenerationMatch", + "IfGenerationNotMatch", + "IfMetagenerationMatch", + "IfMetagenerationNotMatch", + "CopySourceAcl", + "CommonObjectRequestParams", + }); + internal_static_google_storage_v2_CancelResumableWriteRequest_descriptor = + getDescriptor().getMessageType(10); + internal_static_google_storage_v2_CancelResumableWriteRequest_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_CancelResumableWriteRequest_descriptor, + new java.lang.String[] { + "UploadId", + }); + internal_static_google_storage_v2_CancelResumableWriteResponse_descriptor = + getDescriptor().getMessageType(11); + internal_static_google_storage_v2_CancelResumableWriteResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_CancelResumableWriteResponse_descriptor, + new java.lang.String[] {}); + internal_static_google_storage_v2_ReadObjectRequest_descriptor = + getDescriptor().getMessageType(12); + internal_static_google_storage_v2_ReadObjectRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ReadObjectRequest_descriptor, + new java.lang.String[] { + "Bucket", + "Object", + "Generation", + "ReadOffset", + "ReadLimit", + "IfGenerationMatch", + "IfGenerationNotMatch", + "IfMetagenerationMatch", + "IfMetagenerationNotMatch", + "CommonObjectRequestParams", + "ReadMask", + }); + internal_static_google_storage_v2_GetObjectRequest_descriptor = + getDescriptor().getMessageType(13); + internal_static_google_storage_v2_GetObjectRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_GetObjectRequest_descriptor, + new java.lang.String[] { + "Bucket", + "Object", + "Generation", + "SoftDeleted", + "IfGenerationMatch", + "IfGenerationNotMatch", + "IfMetagenerationMatch", + "IfMetagenerationNotMatch", + "CommonObjectRequestParams", + "ReadMask", + "RestoreToken", + }); + internal_static_google_storage_v2_ReadObjectResponse_descriptor = + getDescriptor().getMessageType(14); + internal_static_google_storage_v2_ReadObjectResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ReadObjectResponse_descriptor, + new 
java.lang.String[] { + "ChecksummedData", "ObjectChecksums", "ContentRange", "Metadata", + }); + internal_static_google_storage_v2_BidiReadObjectSpec_descriptor = + getDescriptor().getMessageType(15); + internal_static_google_storage_v2_BidiReadObjectSpec_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_BidiReadObjectSpec_descriptor, + new java.lang.String[] { + "Bucket", + "Object", + "Generation", + "IfGenerationMatch", + "IfGenerationNotMatch", + "IfMetagenerationMatch", + "IfMetagenerationNotMatch", + "CommonObjectRequestParams", + "ReadMask", + "ReadHandle", + "RoutingToken", + }); + internal_static_google_storage_v2_BidiReadObjectRequest_descriptor = + getDescriptor().getMessageType(16); + internal_static_google_storage_v2_BidiReadObjectRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_BidiReadObjectRequest_descriptor, + new java.lang.String[] { + "ReadObjectSpec", "ReadRanges", + }); + internal_static_google_storage_v2_BidiReadObjectResponse_descriptor = + getDescriptor().getMessageType(17); + internal_static_google_storage_v2_BidiReadObjectResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_BidiReadObjectResponse_descriptor, + new java.lang.String[] { + "ObjectDataRanges", "Metadata", "ReadHandle", + }); + internal_static_google_storage_v2_BidiReadObjectRedirectedError_descriptor = + getDescriptor().getMessageType(18); + internal_static_google_storage_v2_BidiReadObjectRedirectedError_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_BidiReadObjectRedirectedError_descriptor, + new java.lang.String[] { + "ReadHandle", "RoutingToken", + }); + internal_static_google_storage_v2_BidiWriteObjectRedirectedError_descriptor = + getDescriptor().getMessageType(19); + 
internal_static_google_storage_v2_BidiWriteObjectRedirectedError_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_BidiWriteObjectRedirectedError_descriptor, + new java.lang.String[] { + "RoutingToken", "WriteHandle", "Generation", + }); + internal_static_google_storage_v2_BidiReadObjectError_descriptor = + getDescriptor().getMessageType(20); + internal_static_google_storage_v2_BidiReadObjectError_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_BidiReadObjectError_descriptor, + new java.lang.String[] { + "ReadRangeErrors", + }); + internal_static_google_storage_v2_ReadRangeError_descriptor = + getDescriptor().getMessageType(21); + internal_static_google_storage_v2_ReadRangeError_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ReadRangeError_descriptor, + new java.lang.String[] { + "ReadId", "Status", + }); + internal_static_google_storage_v2_ReadRange_descriptor = getDescriptor().getMessageType(22); + internal_static_google_storage_v2_ReadRange_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ReadRange_descriptor, + new java.lang.String[] { + "ReadOffset", "ReadLength", "ReadId", + }); + internal_static_google_storage_v2_ObjectRangeData_descriptor = + getDescriptor().getMessageType(23); + internal_static_google_storage_v2_ObjectRangeData_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ObjectRangeData_descriptor, + new java.lang.String[] { + "ChecksummedData", "ReadRange", "RangeEnd", + }); + internal_static_google_storage_v2_BidiReadHandle_descriptor = + getDescriptor().getMessageType(24); + internal_static_google_storage_v2_BidiReadHandle_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_BidiReadHandle_descriptor, + new java.lang.String[] { + "Handle", + }); + internal_static_google_storage_v2_BidiWriteHandle_descriptor = + getDescriptor().getMessageType(25); + internal_static_google_storage_v2_BidiWriteHandle_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_BidiWriteHandle_descriptor, + new java.lang.String[] { + "Handle", + }); + internal_static_google_storage_v2_WriteObjectSpec_descriptor = + getDescriptor().getMessageType(26); + internal_static_google_storage_v2_WriteObjectSpec_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_WriteObjectSpec_descriptor, + new java.lang.String[] { + "Resource", + "PredefinedAcl", + "IfGenerationMatch", + "IfGenerationNotMatch", + "IfMetagenerationMatch", + "IfMetagenerationNotMatch", + "ObjectSize", + "Appendable", + }); + internal_static_google_storage_v2_WriteObjectRequest_descriptor = + getDescriptor().getMessageType(27); + internal_static_google_storage_v2_WriteObjectRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_WriteObjectRequest_descriptor, + new java.lang.String[] { + "UploadId", + "WriteObjectSpec", + "WriteOffset", + "ChecksummedData", + "ObjectChecksums", + "FinishWrite", + "CommonObjectRequestParams", + "FirstMessage", + "Data", + }); + internal_static_google_storage_v2_WriteObjectResponse_descriptor = + getDescriptor().getMessageType(28); + internal_static_google_storage_v2_WriteObjectResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_WriteObjectResponse_descriptor, + new java.lang.String[] { + "PersistedSize", "Resource", "WriteStatus", + }); + internal_static_google_storage_v2_AppendObjectSpec_descriptor = + 
getDescriptor().getMessageType(29); + internal_static_google_storage_v2_AppendObjectSpec_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_AppendObjectSpec_descriptor, + new java.lang.String[] { + "Bucket", + "Object", + "Generation", + "IfMetagenerationMatch", + "IfMetagenerationNotMatch", + "RoutingToken", + "WriteHandle", + }); + internal_static_google_storage_v2_BidiWriteObjectRequest_descriptor = + getDescriptor().getMessageType(30); + internal_static_google_storage_v2_BidiWriteObjectRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_BidiWriteObjectRequest_descriptor, + new java.lang.String[] { + "UploadId", + "WriteObjectSpec", + "AppendObjectSpec", + "WriteOffset", + "ChecksummedData", + "ObjectChecksums", + "StateLookup", + "Flush", + "FinishWrite", + "CommonObjectRequestParams", + "FirstMessage", + "Data", + }); + internal_static_google_storage_v2_BidiWriteObjectResponse_descriptor = + getDescriptor().getMessageType(31); + internal_static_google_storage_v2_BidiWriteObjectResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_BidiWriteObjectResponse_descriptor, + new java.lang.String[] { + "PersistedSize", "Resource", "WriteHandle", "WriteStatus", + }); + internal_static_google_storage_v2_ListObjectsRequest_descriptor = + getDescriptor().getMessageType(32); + internal_static_google_storage_v2_ListObjectsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ListObjectsRequest_descriptor, + new java.lang.String[] { + "Parent", + "PageSize", + "PageToken", + "Delimiter", + "IncludeTrailingDelimiter", + "Prefix", + "Versions", + "ReadMask", + "LexicographicStart", + "LexicographicEnd", + "SoftDeleted", + "IncludeFoldersAsPrefixes", + "MatchGlob", + "Filter", + }); + 
internal_static_google_storage_v2_QueryWriteStatusRequest_descriptor = + getDescriptor().getMessageType(33); + internal_static_google_storage_v2_QueryWriteStatusRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_QueryWriteStatusRequest_descriptor, + new java.lang.String[] { + "UploadId", "CommonObjectRequestParams", + }); + internal_static_google_storage_v2_QueryWriteStatusResponse_descriptor = + getDescriptor().getMessageType(34); + internal_static_google_storage_v2_QueryWriteStatusResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_QueryWriteStatusResponse_descriptor, + new java.lang.String[] { + "PersistedSize", "Resource", "WriteStatus", + }); + internal_static_google_storage_v2_RewriteObjectRequest_descriptor = + getDescriptor().getMessageType(35); + internal_static_google_storage_v2_RewriteObjectRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_RewriteObjectRequest_descriptor, + new java.lang.String[] { + "DestinationName", + "DestinationBucket", + "DestinationKmsKey", + "Destination", + "SourceBucket", + "SourceObject", + "SourceGeneration", + "RewriteToken", + "DestinationPredefinedAcl", + "IfGenerationMatch", + "IfGenerationNotMatch", + "IfMetagenerationMatch", + "IfMetagenerationNotMatch", + "IfSourceGenerationMatch", + "IfSourceGenerationNotMatch", + "IfSourceMetagenerationMatch", + "IfSourceMetagenerationNotMatch", + "MaxBytesRewrittenPerCall", + "CopySourceEncryptionAlgorithm", + "CopySourceEncryptionKeyBytes", + "CopySourceEncryptionKeySha256Bytes", + "CommonObjectRequestParams", + "ObjectChecksums", + }); + internal_static_google_storage_v2_RewriteResponse_descriptor = + getDescriptor().getMessageType(36); + internal_static_google_storage_v2_RewriteResponse_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_RewriteResponse_descriptor, + new java.lang.String[] { + "TotalBytesRewritten", "ObjectSize", "Done", "RewriteToken", "Resource", + }); + internal_static_google_storage_v2_MoveObjectRequest_descriptor = + getDescriptor().getMessageType(37); + internal_static_google_storage_v2_MoveObjectRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_MoveObjectRequest_descriptor, + new java.lang.String[] { + "Bucket", + "SourceObject", + "DestinationObject", + "IfSourceGenerationMatch", + "IfSourceGenerationNotMatch", + "IfSourceMetagenerationMatch", + "IfSourceMetagenerationNotMatch", + "IfGenerationMatch", + "IfGenerationNotMatch", + "IfMetagenerationMatch", + "IfMetagenerationNotMatch", + }); + internal_static_google_storage_v2_StartResumableWriteRequest_descriptor = + getDescriptor().getMessageType(38); + internal_static_google_storage_v2_StartResumableWriteRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_StartResumableWriteRequest_descriptor, + new java.lang.String[] { + "WriteObjectSpec", "CommonObjectRequestParams", "ObjectChecksums", + }); + internal_static_google_storage_v2_StartResumableWriteResponse_descriptor = + getDescriptor().getMessageType(39); + internal_static_google_storage_v2_StartResumableWriteResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_StartResumableWriteResponse_descriptor, + new java.lang.String[] { + "UploadId", + }); + internal_static_google_storage_v2_UpdateObjectRequest_descriptor = + getDescriptor().getMessageType(40); + internal_static_google_storage_v2_UpdateObjectRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_UpdateObjectRequest_descriptor, 
+ new java.lang.String[] { + "Object", + "IfGenerationMatch", + "IfGenerationNotMatch", + "IfMetagenerationMatch", + "IfMetagenerationNotMatch", + "PredefinedAcl", + "UpdateMask", + "CommonObjectRequestParams", + "OverrideUnlockedRetention", + }); + internal_static_google_storage_v2_CommonObjectRequestParams_descriptor = + getDescriptor().getMessageType(41); + internal_static_google_storage_v2_CommonObjectRequestParams_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_CommonObjectRequestParams_descriptor, + new java.lang.String[] { + "EncryptionAlgorithm", "EncryptionKeyBytes", "EncryptionKeySha256Bytes", + }); + internal_static_google_storage_v2_ServiceConstants_descriptor = + getDescriptor().getMessageType(42); + internal_static_google_storage_v2_ServiceConstants_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ServiceConstants_descriptor, + new java.lang.String[] {}); + internal_static_google_storage_v2_Bucket_descriptor = getDescriptor().getMessageType(43); + internal_static_google_storage_v2_Bucket_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_descriptor, + new java.lang.String[] { + "Name", + "BucketId", + "Etag", + "Project", + "Metageneration", + "Location", + "LocationType", + "StorageClass", + "Rpo", + "Acl", + "DefaultObjectAcl", + "Lifecycle", + "CreateTime", + "Cors", + "UpdateTime", + "DefaultEventBasedHold", + "Labels", + "Website", + "Versioning", + "Logging", + "Owner", + "Encryption", + "Billing", + "RetentionPolicy", + "IamConfig", + "SatisfiesPzs", + "CustomPlacementConfig", + "Autoclass", + "HierarchicalNamespace", + "SoftDeletePolicy", + "ObjectRetention", + "IpFilter", + }); + internal_static_google_storage_v2_Bucket_Billing_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(0); + 
internal_static_google_storage_v2_Bucket_Billing_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Billing_descriptor, + new java.lang.String[] { + "RequesterPays", + }); + internal_static_google_storage_v2_Bucket_Cors_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(1); + internal_static_google_storage_v2_Bucket_Cors_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Cors_descriptor, + new java.lang.String[] { + "Origin", "Method", "ResponseHeader", "MaxAgeSeconds", + }); + internal_static_google_storage_v2_Bucket_Encryption_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(2); + internal_static_google_storage_v2_Bucket_Encryption_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Encryption_descriptor, + new java.lang.String[] { + "DefaultKmsKey", + "GoogleManagedEncryptionEnforcementConfig", + "CustomerManagedEncryptionEnforcementConfig", + "CustomerSuppliedEncryptionEnforcementConfig", + }); + internal_static_google_storage_v2_Bucket_Encryption_GoogleManagedEncryptionEnforcementConfig_descriptor = + internal_static_google_storage_v2_Bucket_Encryption_descriptor.getNestedType(0); + internal_static_google_storage_v2_Bucket_Encryption_GoogleManagedEncryptionEnforcementConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Encryption_GoogleManagedEncryptionEnforcementConfig_descriptor, + new java.lang.String[] { + "RestrictionMode", "EffectiveTime", + }); + internal_static_google_storage_v2_Bucket_Encryption_CustomerManagedEncryptionEnforcementConfig_descriptor = + internal_static_google_storage_v2_Bucket_Encryption_descriptor.getNestedType(1); + 
internal_static_google_storage_v2_Bucket_Encryption_CustomerManagedEncryptionEnforcementConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Encryption_CustomerManagedEncryptionEnforcementConfig_descriptor, + new java.lang.String[] { + "RestrictionMode", "EffectiveTime", + }); + internal_static_google_storage_v2_Bucket_Encryption_CustomerSuppliedEncryptionEnforcementConfig_descriptor = + internal_static_google_storage_v2_Bucket_Encryption_descriptor.getNestedType(2); + internal_static_google_storage_v2_Bucket_Encryption_CustomerSuppliedEncryptionEnforcementConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Encryption_CustomerSuppliedEncryptionEnforcementConfig_descriptor, + new java.lang.String[] { + "RestrictionMode", "EffectiveTime", + }); + internal_static_google_storage_v2_Bucket_IamConfig_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(3); + internal_static_google_storage_v2_Bucket_IamConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_IamConfig_descriptor, + new java.lang.String[] { + "UniformBucketLevelAccess", "PublicAccessPrevention", + }); + internal_static_google_storage_v2_Bucket_IamConfig_UniformBucketLevelAccess_descriptor = + internal_static_google_storage_v2_Bucket_IamConfig_descriptor.getNestedType(0); + internal_static_google_storage_v2_Bucket_IamConfig_UniformBucketLevelAccess_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_IamConfig_UniformBucketLevelAccess_descriptor, + new java.lang.String[] { + "Enabled", "LockTime", + }); + internal_static_google_storage_v2_Bucket_Lifecycle_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(4); + 
internal_static_google_storage_v2_Bucket_Lifecycle_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Lifecycle_descriptor, + new java.lang.String[] { + "Rule", + }); + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_descriptor = + internal_static_google_storage_v2_Bucket_Lifecycle_descriptor.getNestedType(0); + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_descriptor, + new java.lang.String[] { + "Action", "Condition", + }); + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Action_descriptor = + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_descriptor.getNestedType(0); + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Action_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Action_descriptor, + new java.lang.String[] { + "Type", "StorageClass", + }); + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Condition_descriptor = + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_descriptor.getNestedType(1); + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Condition_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Lifecycle_Rule_Condition_descriptor, + new java.lang.String[] { + "AgeDays", + "CreatedBefore", + "IsLive", + "NumNewerVersions", + "MatchesStorageClass", + "DaysSinceCustomTime", + "CustomTimeBefore", + "DaysSinceNoncurrentTime", + "NoncurrentTimeBefore", + "MatchesPrefix", + "MatchesSuffix", + }); + internal_static_google_storage_v2_Bucket_Logging_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(5); + internal_static_google_storage_v2_Bucket_Logging_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Logging_descriptor, + new java.lang.String[] { + "LogBucket", "LogObjectPrefix", + }); + internal_static_google_storage_v2_Bucket_ObjectRetention_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(6); + internal_static_google_storage_v2_Bucket_ObjectRetention_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_ObjectRetention_descriptor, + new java.lang.String[] { + "Enabled", + }); + internal_static_google_storage_v2_Bucket_RetentionPolicy_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(7); + internal_static_google_storage_v2_Bucket_RetentionPolicy_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_RetentionPolicy_descriptor, + new java.lang.String[] { + "EffectiveTime", "IsLocked", "RetentionDuration", + }); + internal_static_google_storage_v2_Bucket_SoftDeletePolicy_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(8); + internal_static_google_storage_v2_Bucket_SoftDeletePolicy_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_SoftDeletePolicy_descriptor, + new java.lang.String[] { + "RetentionDuration", "EffectiveTime", + }); + internal_static_google_storage_v2_Bucket_Versioning_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(9); + internal_static_google_storage_v2_Bucket_Versioning_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Versioning_descriptor, + new java.lang.String[] { + "Enabled", + }); + internal_static_google_storage_v2_Bucket_Website_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(10); + 
internal_static_google_storage_v2_Bucket_Website_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Website_descriptor, + new java.lang.String[] { + "MainPageSuffix", "NotFoundPage", + }); + internal_static_google_storage_v2_Bucket_CustomPlacementConfig_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(11); + internal_static_google_storage_v2_Bucket_CustomPlacementConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_CustomPlacementConfig_descriptor, + new java.lang.String[] { + "DataLocations", + }); + internal_static_google_storage_v2_Bucket_Autoclass_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(12); + internal_static_google_storage_v2_Bucket_Autoclass_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_Autoclass_descriptor, + new java.lang.String[] { + "Enabled", "ToggleTime", "TerminalStorageClass", "TerminalStorageClassUpdateTime", + }); + internal_static_google_storage_v2_Bucket_IpFilter_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(13); + internal_static_google_storage_v2_Bucket_IpFilter_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_IpFilter_descriptor, + new java.lang.String[] { + "Mode", + "PublicNetworkSource", + "VpcNetworkSources", + "AllowCrossOrgVpcs", + "AllowAllServiceAgentAccess", + }); + internal_static_google_storage_v2_Bucket_IpFilter_PublicNetworkSource_descriptor = + internal_static_google_storage_v2_Bucket_IpFilter_descriptor.getNestedType(0); + internal_static_google_storage_v2_Bucket_IpFilter_PublicNetworkSource_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_storage_v2_Bucket_IpFilter_PublicNetworkSource_descriptor, + new java.lang.String[] { + "AllowedIpCidrRanges", + }); + internal_static_google_storage_v2_Bucket_IpFilter_VpcNetworkSource_descriptor = + internal_static_google_storage_v2_Bucket_IpFilter_descriptor.getNestedType(1); + internal_static_google_storage_v2_Bucket_IpFilter_VpcNetworkSource_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_IpFilter_VpcNetworkSource_descriptor, + new java.lang.String[] { + "Network", "AllowedIpCidrRanges", + }); + internal_static_google_storage_v2_Bucket_HierarchicalNamespace_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(14); + internal_static_google_storage_v2_Bucket_HierarchicalNamespace_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_HierarchicalNamespace_descriptor, + new java.lang.String[] { + "Enabled", + }); + internal_static_google_storage_v2_Bucket_LabelsEntry_descriptor = + internal_static_google_storage_v2_Bucket_descriptor.getNestedType(15); + internal_static_google_storage_v2_Bucket_LabelsEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Bucket_LabelsEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_storage_v2_BucketAccessControl_descriptor = + getDescriptor().getMessageType(44); + internal_static_google_storage_v2_BucketAccessControl_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_BucketAccessControl_descriptor, + new java.lang.String[] { + "Role", + "Id", + "Entity", + "EntityAlt", + "EntityId", + "Etag", + "Email", + "Domain", + "ProjectTeam", + }); + internal_static_google_storage_v2_ChecksummedData_descriptor = + getDescriptor().getMessageType(45); + 
internal_static_google_storage_v2_ChecksummedData_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ChecksummedData_descriptor, + new java.lang.String[] { + "Content", "Crc32C", + }); + internal_static_google_storage_v2_ObjectChecksums_descriptor = + getDescriptor().getMessageType(46); + internal_static_google_storage_v2_ObjectChecksums_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ObjectChecksums_descriptor, + new java.lang.String[] { + "Crc32C", "Md5Hash", + }); + internal_static_google_storage_v2_ObjectCustomContextPayload_descriptor = + getDescriptor().getMessageType(47); + internal_static_google_storage_v2_ObjectCustomContextPayload_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ObjectCustomContextPayload_descriptor, + new java.lang.String[] { + "Value", "CreateTime", "UpdateTime", + }); + internal_static_google_storage_v2_ObjectContexts_descriptor = + getDescriptor().getMessageType(48); + internal_static_google_storage_v2_ObjectContexts_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ObjectContexts_descriptor, + new java.lang.String[] { + "Custom", + }); + internal_static_google_storage_v2_ObjectContexts_CustomEntry_descriptor = + internal_static_google_storage_v2_ObjectContexts_descriptor.getNestedType(0); + internal_static_google_storage_v2_ObjectContexts_CustomEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ObjectContexts_CustomEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_storage_v2_CustomerEncryption_descriptor = + getDescriptor().getMessageType(49); + internal_static_google_storage_v2_CustomerEncryption_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_CustomerEncryption_descriptor, + new java.lang.String[] { + "EncryptionAlgorithm", "KeySha256Bytes", + }); + internal_static_google_storage_v2_Object_descriptor = getDescriptor().getMessageType(50); + internal_static_google_storage_v2_Object_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Object_descriptor, + new java.lang.String[] { + "Name", + "Bucket", + "Etag", + "Generation", + "RestoreToken", + "Metageneration", + "StorageClass", + "Size", + "ContentEncoding", + "ContentDisposition", + "CacheControl", + "Acl", + "ContentLanguage", + "DeleteTime", + "FinalizeTime", + "ContentType", + "CreateTime", + "ComponentCount", + "Checksums", + "UpdateTime", + "KmsKey", + "UpdateStorageClassTime", + "TemporaryHold", + "RetentionExpireTime", + "Metadata", + "Contexts", + "EventBasedHold", + "Owner", + "CustomerEncryption", + "CustomTime", + "SoftDeleteTime", + "HardDeleteTime", + "Retention", + }); + internal_static_google_storage_v2_Object_Retention_descriptor = + internal_static_google_storage_v2_Object_descriptor.getNestedType(0); + internal_static_google_storage_v2_Object_Retention_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Object_Retention_descriptor, + new java.lang.String[] { + "Mode", "RetainUntilTime", + }); + internal_static_google_storage_v2_Object_MetadataEntry_descriptor = + internal_static_google_storage_v2_Object_descriptor.getNestedType(1); + internal_static_google_storage_v2_Object_MetadataEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Object_MetadataEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_storage_v2_ObjectAccessControl_descriptor = + getDescriptor().getMessageType(51); + 
internal_static_google_storage_v2_ObjectAccessControl_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ObjectAccessControl_descriptor, + new java.lang.String[] { + "Role", + "Id", + "Entity", + "EntityAlt", + "EntityId", + "Etag", + "Email", + "Domain", + "ProjectTeam", + }); + internal_static_google_storage_v2_ListObjectsResponse_descriptor = + getDescriptor().getMessageType(52); + internal_static_google_storage_v2_ListObjectsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ListObjectsResponse_descriptor, + new java.lang.String[] { + "Objects", "Prefixes", "NextPageToken", + }); + internal_static_google_storage_v2_ProjectTeam_descriptor = getDescriptor().getMessageType(53); + internal_static_google_storage_v2_ProjectTeam_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ProjectTeam_descriptor, + new java.lang.String[] { + "ProjectNumber", "Team", + }); + internal_static_google_storage_v2_Owner_descriptor = getDescriptor().getMessageType(54); + internal_static_google_storage_v2_Owner_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_Owner_descriptor, + new java.lang.String[] { + "Entity", "EntityId", + }); + internal_static_google_storage_v2_ContentRange_descriptor = getDescriptor().getMessageType(55); + internal_static_google_storage_v2_ContentRange_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_storage_v2_ContentRange_descriptor, + new java.lang.String[] { + "Start", "End", "CompleteLength", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + 
com.google.api.RoutingProto.getDescriptor(); + com.google.iam.v1.IamPolicyProto.getDescriptor(); + com.google.iam.v1.PolicyProto.getDescriptor(); + com.google.protobuf.DurationProto.getDescriptor(); + com.google.protobuf.EmptyProto.getDescriptor(); + com.google.protobuf.FieldMaskProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.rpc.StatusProto.getDescriptor(); + com.google.type.DateProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.ClientProto.methodSignature); + registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resource); + registry.add(com.google.api.ResourceProto.resourceDefinition); + registry.add(com.google.api.ResourceProto.resourceReference); + registry.add(com.google.api.RoutingProto.routing); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/UpdateBucketRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/UpdateBucketRequest.java new file mode 100644 index 000000000000..ba9a64f7fe8f --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/UpdateBucketRequest.java @@ -0,0 +1,1771 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request for [UpdateBucket][google.storage.v2.Storage.UpdateBucket] method.
+ * 
+ * + * Protobuf type {@code google.storage.v2.UpdateBucketRequest} + */ +@com.google.protobuf.Generated +public final class UpdateBucketRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.UpdateBucketRequest) + UpdateBucketRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateBucketRequest"); + } + + // Use UpdateBucketRequest.newBuilder() to construct. + private UpdateBucketRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateBucketRequest() { + predefinedAcl_ = ""; + predefinedDefaultObjectAcl_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_UpdateBucketRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_UpdateBucketRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.UpdateBucketRequest.class, + com.google.storage.v2.UpdateBucketRequest.Builder.class); + } + + private int bitField0_; + public static final int BUCKET_FIELD_NUMBER = 1; + private com.google.storage.v2.Bucket bucket_; + + /** + * + * + *
+   * Required. The bucket to update.
+   * The bucket's `name` field is used to identify the bucket.
+   * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the bucket field is set. + */ + @java.lang.Override + public boolean hasBucket() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The bucket to update.
+   * The bucket's `name` field is used to identify the bucket.
+   * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bucket. + */ + @java.lang.Override + public com.google.storage.v2.Bucket getBucket() { + return bucket_ == null ? com.google.storage.v2.Bucket.getDefaultInstance() : bucket_; + } + + /** + * + * + *
+   * Required. The bucket to update.
+   * The bucket's `name` field is used to identify the bucket.
+   * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + @java.lang.Override + public com.google.storage.v2.BucketOrBuilder getBucketOrBuilder() { + return bucket_ == null ? com.google.storage.v2.Bucket.getDefaultInstance() : bucket_; + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 2; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * If set, the request modifies the bucket if its metageneration matches this
+   * value.
+   * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * If set, the request modifies the bucket if its metageneration matches this
+   * value.
+   * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 3; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * If set, the request modifies the bucket if its metageneration doesn't
+   * match this value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * If set, the request modifies the bucket if its metageneration doesn't
+   * match this value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int PREDEFINED_ACL_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private volatile java.lang.Object predefinedAcl_ = ""; + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this bucket.
+   * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+   * `publicRead`, or `publicReadWrite`.
+   * 
+ * + * string predefined_acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The predefinedAcl. + */ + @java.lang.Override + public java.lang.String getPredefinedAcl() { + java.lang.Object ref = predefinedAcl_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + predefinedAcl_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this bucket.
+   * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+   * `publicRead`, or `publicReadWrite`.
+   * 
+ * + * string predefined_acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for predefinedAcl. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPredefinedAclBytes() { + java.lang.Object ref = predefinedAcl_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + predefinedAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PREDEFINED_DEFAULT_OBJECT_ACL_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private volatile java.lang.Object predefinedDefaultObjectAcl_ = ""; + + /** + * + * + *
+   * Optional. Apply a predefined set of default object access controls to this
+   * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string predefined_default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The predefinedDefaultObjectAcl. + */ + @java.lang.Override + public java.lang.String getPredefinedDefaultObjectAcl() { + java.lang.Object ref = predefinedDefaultObjectAcl_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + predefinedDefaultObjectAcl_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Apply a predefined set of default object access controls to this
+   * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string predefined_default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The bytes for predefinedDefaultObjectAcl. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPredefinedDefaultObjectAclBytes() { + java.lang.Object ref = predefinedDefaultObjectAcl_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + predefinedDefaultObjectAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 6; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
+   * Required. List of fields to be updated.
+   *
+   * To specify ALL fields, equivalent to the JSON API's "update" function,
+   * specify a single field with the value `*`. Note: not recommended. If a new
+   * field is introduced at a later time, an older client updating with the `*`
+   * might accidentally reset the new field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Required. List of fields to be updated.
+   *
+   * To specify ALL fields, equivalent to the JSON API's "update" function,
+   * specify a single field with the value `*`. Note: not recommended. If a new
+   * field is introduced at a later time, an older client updating with the `*`
+   * might accidentally reset the new field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
+   * Required. List of fields to be updated.
+   *
+   * To specify ALL fields, equivalent to the JSON API's "update" function,
+   * specify a single field with the value `*`. Note: not recommended. If a new
+   * field is introduced at a later time, an older client updating with the `*`
+   * might accidentally reset the new field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getBucket()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(2, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt64(3, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(6, getUpdateMask()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(predefinedAcl_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 8, predefinedAcl_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(predefinedDefaultObjectAcl_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 9, predefinedDefaultObjectAcl_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getBucket()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, ifMetagenerationNotMatch_); + 
} + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getUpdateMask()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(predefinedAcl_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(8, predefinedAcl_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(predefinedDefaultObjectAcl_)) { + size += + com.google.protobuf.GeneratedMessage.computeStringSize(9, predefinedDefaultObjectAcl_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.UpdateBucketRequest)) { + return super.equals(obj); + } + com.google.storage.v2.UpdateBucketRequest other = + (com.google.storage.v2.UpdateBucketRequest) obj; + + if (hasBucket() != other.hasBucket()) return false; + if (hasBucket()) { + if (!getBucket().equals(other.getBucket())) return false; + } + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (!getPredefinedAcl().equals(other.getPredefinedAcl())) return false; + if (!getPredefinedDefaultObjectAcl().equals(other.getPredefinedDefaultObjectAcl())) + return false; + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return 
memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasBucket()) { + hash = (37 * hash) + BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getBucket().hashCode(); + } + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + hash = (37 * hash) + PREDEFINED_ACL_FIELD_NUMBER; + hash = (53 * hash) + getPredefinedAcl().hashCode(); + hash = (37 * hash) + PREDEFINED_DEFAULT_OBJECT_ACL_FIELD_NUMBER; + hash = (53 * hash) + getPredefinedDefaultObjectAcl().hashCode(); + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.UpdateBucketRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.UpdateBucketRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.UpdateBucketRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.UpdateBucketRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.UpdateBucketRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.UpdateBucketRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.UpdateBucketRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.UpdateBucketRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.UpdateBucketRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.UpdateBucketRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.UpdateBucketRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.UpdateBucketRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.UpdateBucketRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request for [UpdateBucket][google.storage.v2.Storage.UpdateBucket] method.
+   * 
+ * + * Protobuf type {@code google.storage.v2.UpdateBucketRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.UpdateBucketRequest) + com.google.storage.v2.UpdateBucketRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_UpdateBucketRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_UpdateBucketRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.UpdateBucketRequest.class, + com.google.storage.v2.UpdateBucketRequest.Builder.class); + } + + // Construct using com.google.storage.v2.UpdateBucketRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetBucketFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + bucket_ = null; + if (bucketBuilder_ != null) { + bucketBuilder_.dispose(); + bucketBuilder_ = null; + } + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + predefinedAcl_ = ""; + predefinedDefaultObjectAcl_ = ""; + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_UpdateBucketRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.UpdateBucketRequest getDefaultInstanceForType() { + return com.google.storage.v2.UpdateBucketRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.UpdateBucketRequest build() { + com.google.storage.v2.UpdateBucketRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.UpdateBucketRequest buildPartial() { + com.google.storage.v2.UpdateBucketRequest result = + new com.google.storage.v2.UpdateBucketRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.UpdateBucketRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.bucket_ = bucketBuilder_ == null ? bucket_ : bucketBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.predefinedAcl_ = predefinedAcl_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.predefinedDefaultObjectAcl_ = predefinedDefaultObjectAcl_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.UpdateBucketRequest) { + return mergeFrom((com.google.storage.v2.UpdateBucketRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.UpdateBucketRequest other) { + if (other == com.google.storage.v2.UpdateBucketRequest.getDefaultInstance()) return this; + if (other.hasBucket()) { + mergeBucket(other.getBucket()); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (!other.getPredefinedAcl().isEmpty()) { + predefinedAcl_ = other.predefinedAcl_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (!other.getPredefinedDefaultObjectAcl().isEmpty()) { + predefinedDefaultObjectAcl_ = other.predefinedDefaultObjectAcl_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(internalGetBucketFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + 
ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 50: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 66: + { + predefinedAcl_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 66 + case 74: + { + predefinedDefaultObjectAcl_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 74 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.v2.Bucket bucket_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket, + com.google.storage.v2.Bucket.Builder, + com.google.storage.v2.BucketOrBuilder> + bucketBuilder_; + + /** + * + * + *
+     * Required. The bucket to update.
+     * The bucket's `name` field is used to identify the bucket.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the bucket field is set. + */ + public boolean hasBucket() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. The bucket to update.
+     * The bucket's `name` field is used to identify the bucket.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bucket. + */ + public com.google.storage.v2.Bucket getBucket() { + if (bucketBuilder_ == null) { + return bucket_ == null ? com.google.storage.v2.Bucket.getDefaultInstance() : bucket_; + } else { + return bucketBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The bucket to update.
+     * The bucket's `name` field is used to identify the bucket.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder setBucket(com.google.storage.v2.Bucket value) { + if (bucketBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + bucket_ = value; + } else { + bucketBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The bucket to update.
+     * The bucket's `name` field is used to identify the bucket.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder setBucket(com.google.storage.v2.Bucket.Builder builderForValue) { + if (bucketBuilder_ == null) { + bucket_ = builderForValue.build(); + } else { + bucketBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The bucket to update.
+     * The bucket's `name` field is used to identify the bucket.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder mergeBucket(com.google.storage.v2.Bucket value) { + if (bucketBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && bucket_ != null + && bucket_ != com.google.storage.v2.Bucket.getDefaultInstance()) { + getBucketBuilder().mergeFrom(value); + } else { + bucket_ = value; + } + } else { + bucketBuilder_.mergeFrom(value); + } + if (bucket_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The bucket to update.
+     * The bucket's `name` field is used to identify the bucket.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder clearBucket() { + bitField0_ = (bitField0_ & ~0x00000001); + bucket_ = null; + if (bucketBuilder_ != null) { + bucketBuilder_.dispose(); + bucketBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The bucket to update.
+     * The bucket's `name` field is used to identify the bucket.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + public com.google.storage.v2.Bucket.Builder getBucketBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetBucketFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The bucket to update.
+     * The bucket's `name` field is used to identify the bucket.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + public com.google.storage.v2.BucketOrBuilder getBucketOrBuilder() { + if (bucketBuilder_ != null) { + return bucketBuilder_.getMessageOrBuilder(); + } else { + return bucket_ == null ? com.google.storage.v2.Bucket.getDefaultInstance() : bucket_; + } + } + + /** + * + * + *
+     * Required. The bucket to update.
+     * The bucket's `name` field is used to identify the bucket.
+     * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket, + com.google.storage.v2.Bucket.Builder, + com.google.storage.v2.BucketOrBuilder> + internalGetBucketFieldBuilder() { + if (bucketBuilder_ == null) { + bucketBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Bucket, + com.google.storage.v2.Bucket.Builder, + com.google.storage.v2.BucketOrBuilder>( + getBucket(), getParentForChildren(), isClean()); + bucket_ = null; + } + return bucketBuilder_; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * If set, the request modifies the bucket if its metageneration matches this
+     * value.
+     * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * If set, the request modifies the bucket if its metageneration matches this
+     * value.
+     * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * If set, the request modifies the bucket if its metageneration matches this
+     * value.
+     * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * If set, the request modifies the bucket if its metageneration matches this
+     * value.
+     * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000002); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * If set, the request modifies the bucket if its metageneration doesn't
+     * match this value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * If set, the request modifies the bucket if its metageneration doesn't
+     * match this value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * If set, the request modifies the bucket if its metageneration doesn't
+     * match this value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * If set, the request modifies the bucket if its metageneration doesn't
+     * match this value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000004); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object predefinedAcl_ = ""; + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this bucket.
+     * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+     * `publicRead`, or `publicReadWrite`.
+     * 
+ * + * string predefined_acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The predefinedAcl. + */ + public java.lang.String getPredefinedAcl() { + java.lang.Object ref = predefinedAcl_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + predefinedAcl_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this bucket.
+     * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+     * `publicRead`, or `publicReadWrite`.
+     * 
+ * + * string predefined_acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for predefinedAcl. + */ + public com.google.protobuf.ByteString getPredefinedAclBytes() { + java.lang.Object ref = predefinedAcl_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + predefinedAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this bucket.
+     * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+     * `publicRead`, or `publicReadWrite`.
+     * 
+ * + * string predefined_acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The predefinedAcl to set. + * @return This builder for chaining. + */ + public Builder setPredefinedAcl(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + predefinedAcl_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this bucket.
+     * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+     * `publicRead`, or `publicReadWrite`.
+     * 
+ * + * string predefined_acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPredefinedAcl() { + predefinedAcl_ = getDefaultInstance().getPredefinedAcl(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this bucket.
+     * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+     * `publicRead`, or `publicReadWrite`.
+     * 
+ * + * string predefined_acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for predefinedAcl to set. + * @return This builder for chaining. + */ + public Builder setPredefinedAclBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + predefinedAcl_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private java.lang.Object predefinedDefaultObjectAcl_ = ""; + + /** + * + * + *
+     * Optional. Apply a predefined set of default object access controls to this
+     * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The predefinedDefaultObjectAcl. + */ + public java.lang.String getPredefinedDefaultObjectAcl() { + java.lang.Object ref = predefinedDefaultObjectAcl_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + predefinedDefaultObjectAcl_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of default object access controls to this
+     * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The bytes for predefinedDefaultObjectAcl. + */ + public com.google.protobuf.ByteString getPredefinedDefaultObjectAclBytes() { + java.lang.Object ref = predefinedDefaultObjectAcl_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + predefinedDefaultObjectAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of default object access controls to this
+     * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The predefinedDefaultObjectAcl to set. + * @return This builder for chaining. + */ + public Builder setPredefinedDefaultObjectAcl(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + predefinedDefaultObjectAcl_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of default object access controls to this
+     * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearPredefinedDefaultObjectAcl() { + predefinedDefaultObjectAcl_ = getDefaultInstance().getPredefinedDefaultObjectAcl(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of default object access controls to this
+     * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The bytes for predefinedDefaultObjectAcl to set. + * @return This builder for chaining. + */ + public Builder setPredefinedDefaultObjectAclBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + predefinedDefaultObjectAcl_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000020); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.UpdateBucketRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.UpdateBucketRequest) + private static final com.google.storage.v2.UpdateBucketRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.UpdateBucketRequest(); + } + + public static com.google.storage.v2.UpdateBucketRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateBucketRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.UpdateBucketRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/UpdateBucketRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/UpdateBucketRequestOrBuilder.java new file mode 100644 index 000000000000..8d2b8a667c7b --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/UpdateBucketRequestOrBuilder.java @@ -0,0 +1,247 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface UpdateBucketRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.UpdateBucketRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The bucket to update.
+   * The bucket's `name` field is used to identify the bucket.
+   * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the bucket field is set. + */ + boolean hasBucket(); + + /** + * + * + *
+   * Required. The bucket to update.
+   * The bucket's `name` field is used to identify the bucket.
+   * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bucket. + */ + com.google.storage.v2.Bucket getBucket(); + + /** + * + * + *
+   * Required. The bucket to update.
+   * The bucket's `name` field is used to identify the bucket.
+   * 
+ * + * .google.storage.v2.Bucket bucket = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + com.google.storage.v2.BucketOrBuilder getBucketOrBuilder(); + + /** + * + * + *
+   * If set, the request modifies the bucket if its metageneration matches this
+   * value.
+   * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * If set, the request modifies the bucket if its metageneration matches this
+   * value.
+   * 
+ * + * optional int64 if_metageneration_match = 2; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * If set, the request modifies the bucket if its metageneration doesn't
+   * match this value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * If set, the request modifies the bucket if its metageneration doesn't
+   * match this value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 3; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this bucket.
+   * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+   * `publicRead`, or `publicReadWrite`.
+   * 
+ * + * string predefined_acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The predefinedAcl. + */ + java.lang.String getPredefinedAcl(); + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this bucket.
+   * Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+   * `publicRead`, or `publicReadWrite`.
+   * 
+ * + * string predefined_acl = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for predefinedAcl. + */ + com.google.protobuf.ByteString getPredefinedAclBytes(); + + /** + * + * + *
+   * Optional. Apply a predefined set of default object access controls to this
+   * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string predefined_default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The predefinedDefaultObjectAcl. + */ + java.lang.String getPredefinedDefaultObjectAcl(); + + /** + * + * + *
+   * Optional. Apply a predefined set of default object access controls to this
+   * bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string predefined_default_object_acl = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The bytes for predefinedDefaultObjectAcl. + */ + com.google.protobuf.ByteString getPredefinedDefaultObjectAclBytes(); + + /** + * + * + *
+   * Required. List of fields to be updated.
+   *
+   * To specify ALL fields, equivalent to the JSON API's "update" function,
+   * specify a single field with the value `*`. Note: not recommended. If a new
+   * field is introduced at a later time, an older client updating with the `*`
+   * might accidentally reset the new field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
+   * Required. List of fields to be updated.
+   *
+   * To specify ALL fields, equivalent to the JSON API's "update" function,
+   * specify a single field with the value `*`. Note: not recommended. If a new
+   * field is introduced at a later time, an older client updating with the `*`
+   * might accidentally reset the new field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
+   * Required. List of fields to be updated.
+   *
+   * To specify ALL fields, equivalent to the JSON API's "update" function,
+   * specify a single field with the value `*`. Note: not recommended. If a new
+   * field is introduced at a later time, an older client updating with the `*`
+   * might accidentally reset the new field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/UpdateObjectRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/UpdateObjectRequest.java new file mode 100644 index 000000000000..f7ef0e9c99cd --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/UpdateObjectRequest.java @@ -0,0 +1,2323 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for [UpdateObject][google.storage.v2.Storage.UpdateObject].
+ * 
+ * + * Protobuf type {@code google.storage.v2.UpdateObjectRequest} + */ +@com.google.protobuf.Generated +public final class UpdateObjectRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.UpdateObjectRequest) + UpdateObjectRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateObjectRequest"); + } + + // Use UpdateObjectRequest.newBuilder() to construct. + private UpdateObjectRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateObjectRequest() { + predefinedAcl_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_UpdateObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_UpdateObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.UpdateObjectRequest.class, + com.google.storage.v2.UpdateObjectRequest.Builder.class); + } + + private int bitField0_; + public static final int OBJECT_FIELD_NUMBER = 1; + private com.google.storage.v2.Object object_; + + /** + * + * + *
+   * Required. The object to update.
+   * The object's bucket and name fields are used to identify the object to
+   * update. If present, the object's generation field selects a specific
+   * revision of this object whose metadata should be updated. Otherwise,
+   * assumes the live version of the object.
+   * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the object field is set. + */ + @java.lang.Override + public boolean hasObject() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The object to update.
+   * The object's bucket and name fields are used to identify the object to
+   * update. If present, the object's generation field selects a specific
+   * revision of this object whose metadata should be updated. Otherwise,
+   * assumes the live version of the object.
+   * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + @java.lang.Override + public com.google.storage.v2.Object getObject() { + return object_ == null ? com.google.storage.v2.Object.getDefaultInstance() : object_; + } + + /** + * + * + *
+   * Required. The object to update.
+   * The object's bucket and name fields are used to identify the object to
+   * update. If present, the object's generation field selects a specific
+   * revision of this object whose metadata should be updated. Otherwise,
+   * assumes the live version of the object.
+   * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getObjectOrBuilder() { + return object_ == null ? com.google.storage.v2.Object.getDefaultInstance() : object_; + } + + public static final int IF_GENERATION_MATCH_FIELD_NUMBER = 2; + private long ifGenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 2; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 2; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + public static final int IF_GENERATION_NOT_MATCH_FIELD_NUMBER = 3; + private long ifGenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 3; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 3; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 4; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 5; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int PREDEFINED_ACL_FIELD_NUMBER = 10; + + @SuppressWarnings("serial") + private volatile java.lang.Object predefinedAcl_ = ""; + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this object.
+   * Valid values are "authenticatedRead", "bucketOwnerFullControl",
+   * "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+   * 
+ * + * string predefined_acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The predefinedAcl. + */ + @java.lang.Override + public java.lang.String getPredefinedAcl() { + java.lang.Object ref = predefinedAcl_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + predefinedAcl_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this object.
+   * Valid values are "authenticatedRead", "bucketOwnerFullControl",
+   * "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+   * 
+ * + * string predefined_acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for predefinedAcl. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPredefinedAclBytes() { + java.lang.Object ref = predefinedAcl_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + predefinedAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 7; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
+   * Required. List of fields to be updated.
+   *
+   * To specify ALL fields, equivalent to the JSON API's "update" function,
+   * specify a single field with the value `*`. Note: not recommended. If a new
+   * field is introduced at a later time, an older client updating with the `*`
+   * might accidentally reset the new field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+   * Required. List of fields to be updated.
+   *
+   * To specify ALL fields, equivalent to the JSON API's "update" function,
+   * specify a single field with the value `*`. Note: not recommended. If a new
+   * field is introduced at a later time, an older client updating with the `*`
+   * might accidentally reset the new field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
+   * Required. List of fields to be updated.
+   *
+   * To specify ALL fields, equivalent to the JSON API's "update" function,
+   * specify a single field with the value `*`. Note: not recommended. If a new
+   * field is introduced at a later time, an older client updating with the `*`
+   * might accidentally reset the new field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + public static final int COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER = 8; + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + @java.lang.Override + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + public static final int OVERRIDE_UNLOCKED_RETENTION_FIELD_NUMBER = 11; + private boolean overrideUnlockedRetention_ = false; + + /** + * + * + *
+   * Optional. Overrides the unlocked retention config on the object.
+   * 
+ * + * bool override_unlocked_retention = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The overrideUnlockedRetention. + */ + @java.lang.Override + public boolean getOverrideUnlockedRetention() { + return overrideUnlockedRetention_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getObject()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(2, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt64(3, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeInt64(4, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeInt64(5, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeMessage(7, getUpdateMask()); + } + if (((bitField0_ & 0x00000040) != 0)) { + output.writeMessage(8, getCommonObjectRequestParams()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(predefinedAcl_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 10, predefinedAcl_); + } + if (overrideUnlockedRetention_ != false) { + output.writeBool(11, overrideUnlockedRetention_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getObject()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, 
ifGenerationMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(5, ifMetagenerationNotMatch_); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getUpdateMask()); + } + if (((bitField0_ & 0x00000040) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 8, getCommonObjectRequestParams()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(predefinedAcl_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(10, predefinedAcl_); + } + if (overrideUnlockedRetention_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(11, overrideUnlockedRetention_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.UpdateObjectRequest)) { + return super.equals(obj); + } + com.google.storage.v2.UpdateObjectRequest other = + (com.google.storage.v2.UpdateObjectRequest) obj; + + if (hasObject() != other.hasObject()) return false; + if (hasObject()) { + if (!getObject().equals(other.getObject())) return false; + } + if (hasIfGenerationMatch() != other.hasIfGenerationMatch()) return false; + if (hasIfGenerationMatch()) { + if (getIfGenerationMatch() != other.getIfGenerationMatch()) return false; + } + if (hasIfGenerationNotMatch() != other.hasIfGenerationNotMatch()) return false; + if (hasIfGenerationNotMatch()) { + if (getIfGenerationNotMatch() != other.getIfGenerationNotMatch()) return false; + } + if 
(hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if (hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (!getPredefinedAcl().equals(other.getPredefinedAcl())) return false; + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (hasCommonObjectRequestParams() != other.hasCommonObjectRequestParams()) return false; + if (hasCommonObjectRequestParams()) { + if (!getCommonObjectRequestParams().equals(other.getCommonObjectRequestParams())) + return false; + } + if (getOverrideUnlockedRetention() != other.getOverrideUnlockedRetention()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasObject()) { + hash = (37 * hash) + OBJECT_FIELD_NUMBER; + hash = (53 * hash) + getObject().hashCode(); + } + if (hasIfGenerationMatch()) { + hash = (37 * hash) + IF_GENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationMatch()); + } + if (hasIfGenerationNotMatch()) { + hash = (37 * hash) + IF_GENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationNotMatch()); + } + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + 
IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + hash = (37 * hash) + PREDEFINED_ACL_FIELD_NUMBER; + hash = (53 * hash) + getPredefinedAcl().hashCode(); + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + if (hasCommonObjectRequestParams()) { + hash = (37 * hash) + COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getCommonObjectRequestParams().hashCode(); + } + hash = (37 * hash) + OVERRIDE_UNLOCKED_RETENTION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getOverrideUnlockedRetention()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.UpdateObjectRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.UpdateObjectRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.UpdateObjectRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.UpdateObjectRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.UpdateObjectRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.storage.v2.UpdateObjectRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.UpdateObjectRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.UpdateObjectRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.UpdateObjectRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.UpdateObjectRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.UpdateObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.UpdateObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { 
+ return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.UpdateObjectRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for [UpdateObject][google.storage.v2.Storage.UpdateObject].
+   * 
+ * + * Protobuf type {@code google.storage.v2.UpdateObjectRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.UpdateObjectRequest) + com.google.storage.v2.UpdateObjectRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_UpdateObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_UpdateObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.UpdateObjectRequest.class, + com.google.storage.v2.UpdateObjectRequest.Builder.class); + } + + // Construct using com.google.storage.v2.UpdateObjectRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetObjectFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + internalGetCommonObjectRequestParamsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + object_ = null; + if (objectBuilder_ != null) { + objectBuilder_.dispose(); + objectBuilder_ = null; + } + ifGenerationMatch_ = 0L; + ifGenerationNotMatch_ = 0L; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + predefinedAcl_ = ""; + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != 
null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + overrideUnlockedRetention_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_UpdateObjectRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.UpdateObjectRequest getDefaultInstanceForType() { + return com.google.storage.v2.UpdateObjectRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.UpdateObjectRequest build() { + com.google.storage.v2.UpdateObjectRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.UpdateObjectRequest buildPartial() { + com.google.storage.v2.UpdateObjectRequest result = + new com.google.storage.v2.UpdateObjectRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.UpdateObjectRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.object_ = objectBuilder_ == null ? 
object_ : objectBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.ifGenerationMatch_ = ifGenerationMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.ifGenerationNotMatch_ = ifGenerationNotMatch_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000010; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.predefinedAcl_ = predefinedAcl_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000020; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.commonObjectRequestParams_ = + commonObjectRequestParamsBuilder_ == null + ? 
commonObjectRequestParams_ + : commonObjectRequestParamsBuilder_.build(); + to_bitField0_ |= 0x00000040; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.overrideUnlockedRetention_ = overrideUnlockedRetention_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.UpdateObjectRequest) { + return mergeFrom((com.google.storage.v2.UpdateObjectRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.UpdateObjectRequest other) { + if (other == com.google.storage.v2.UpdateObjectRequest.getDefaultInstance()) return this; + if (other.hasObject()) { + mergeObject(other.getObject()); + } + if (other.hasIfGenerationMatch()) { + setIfGenerationMatch(other.getIfGenerationMatch()); + } + if (other.hasIfGenerationNotMatch()) { + setIfGenerationNotMatch(other.getIfGenerationNotMatch()); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (!other.getPredefinedAcl().isEmpty()) { + predefinedAcl_ = other.predefinedAcl_; + bitField0_ |= 0x00000020; + onChanged(); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + if (other.hasCommonObjectRequestParams()) { + mergeCommonObjectRequestParams(other.getCommonObjectRequestParams()); + } + if (other.getOverrideUnlockedRetention() != false) { + setOverrideUnlockedRetention(other.getOverrideUnlockedRetention()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(internalGetObjectFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + ifGenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + ifGenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 40: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 58: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 58 + case 66: + { + input.readMessage( + internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000080; + break; + } // case 66 + case 82: + { + predefinedAcl_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 82 + case 88: + { + overrideUnlockedRetention_ = input.readBool(); + bitField0_ |= 0x00000100; + break; + } // case 88 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.v2.Object object_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + 
com.google.storage.v2.ObjectOrBuilder> + objectBuilder_; + + /** + * + * + *
+     * Required. The object to update.
+     * The object's bucket and name fields are used to identify the object to
+     * update. If present, the object's generation field selects a specific
+     * revision of this object whose metadata should be updated. Otherwise,
+     * assumes the live version of the object.
+     * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the object field is set. + */ + public boolean hasObject() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. The object to update.
+     * The object's bucket and name fields are used to identify the object to
+     * update. If present, the object's generation field selects a specific
+     * revision of this object whose metadata should be updated. Otherwise,
+     * assumes the live version of the object.
+     * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + public com.google.storage.v2.Object getObject() { + if (objectBuilder_ == null) { + return object_ == null ? com.google.storage.v2.Object.getDefaultInstance() : object_; + } else { + return objectBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The object to update.
+     * The object's bucket and name fields are used to identify the object to
+     * update. If present, the object's generation field selects a specific
+     * revision of this object whose metadata should be updated. Otherwise,
+     * assumes the live version of the object.
+     * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder setObject(com.google.storage.v2.Object value) { + if (objectBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + object_ = value; + } else { + objectBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The object to update.
+     * The object's bucket and name fields are used to identify the object to
+     * update. If present, the object's generation field selects a specific
+     * revision of this object whose metadata should be updated. Otherwise,
+     * assumes the live version of the object.
+     * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder setObject(com.google.storage.v2.Object.Builder builderForValue) { + if (objectBuilder_ == null) { + object_ = builderForValue.build(); + } else { + objectBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The object to update.
+     * The object's bucket and name fields are used to identify the object to
+     * update. If present, the object's generation field selects a specific
+     * revision of this object whose metadata should be updated. Otherwise,
+     * assumes the live version of the object.
+     * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder mergeObject(com.google.storage.v2.Object value) { + if (objectBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && object_ != null + && object_ != com.google.storage.v2.Object.getDefaultInstance()) { + getObjectBuilder().mergeFrom(value); + } else { + object_ = value; + } + } else { + objectBuilder_.mergeFrom(value); + } + if (object_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The object to update.
+     * The object's bucket and name fields are used to identify the object to
+     * update. If present, the object's generation field selects a specific
+     * revision of this object whose metadata should be updated. Otherwise,
+     * assumes the live version of the object.
+     * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder clearObject() { + bitField0_ = (bitField0_ & ~0x00000001); + object_ = null; + if (objectBuilder_ != null) { + objectBuilder_.dispose(); + objectBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The object to update.
+     * The object's bucket and name fields are used to identify the object to
+     * update. If present, the object's generation field selects a specific
+     * revision of this object whose metadata should be updated. Otherwise,
+     * assumes the live version of the object.
+     * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + public com.google.storage.v2.Object.Builder getObjectBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetObjectFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The object to update.
+     * The object's bucket and name fields are used to identify the object to
+     * update. If present, the object's generation field selects a specific
+     * revision of this object whose metadata should be updated. Otherwise,
+     * assumes the live version of the object.
+     * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + public com.google.storage.v2.ObjectOrBuilder getObjectOrBuilder() { + if (objectBuilder_ != null) { + return objectBuilder_.getMessageOrBuilder(); + } else { + return object_ == null ? com.google.storage.v2.Object.getDefaultInstance() : object_; + } + } + + /** + * + * + *
+     * Required. The object to update.
+     * The object's bucket and name fields are used to identify the object to
+     * update. If present, the object's generation field selects a specific
+     * revision of this object whose metadata should be updated. Otherwise,
+     * assumes the live version of the object.
+     * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + internalGetObjectFieldBuilder() { + if (objectBuilder_ == null) { + objectBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder>( + getObject(), getParentForChildren(), isClean()); + object_ = null; + } + return objectBuilder_; + } + + private long ifGenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 2; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 2; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 2; + * + * @param value The ifGenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationMatch(long value) { + + ifGenerationMatch_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current generation
+     * matches the given value. Setting to 0 makes the operation succeed only if
+     * there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 2; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000002); + ifGenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifGenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 3; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 3; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 3; + * + * @param value The ifGenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationNotMatch(long value) { + + ifGenerationNotMatch_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live generation
+     * does not match the given value. If no live object exists, the precondition
+     * fails. Setting to 0 makes the operation succeed only if there is a live
+     * version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 3; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000004); + ifGenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000008); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000010); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object predefinedAcl_ = ""; + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this object.
+     * Valid values are "authenticatedRead", "bucketOwnerFullControl",
+     * "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+     * 
+ * + * string predefined_acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The predefinedAcl. + */ + public java.lang.String getPredefinedAcl() { + java.lang.Object ref = predefinedAcl_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + predefinedAcl_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this object.
+     * Valid values are "authenticatedRead", "bucketOwnerFullControl",
+     * "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+     * 
+ * + * string predefined_acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for predefinedAcl. + */ + public com.google.protobuf.ByteString getPredefinedAclBytes() { + java.lang.Object ref = predefinedAcl_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + predefinedAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this object.
+     * Valid values are "authenticatedRead", "bucketOwnerFullControl",
+     * "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+     * 
+ * + * string predefined_acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The predefinedAcl to set. + * @return This builder for chaining. + */ + public Builder setPredefinedAcl(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + predefinedAcl_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this object.
+     * Valid values are "authenticatedRead", "bucketOwnerFullControl",
+     * "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+     * 
+ * + * string predefined_acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPredefinedAcl() { + predefinedAcl_ = getDefaultInstance().getPredefinedAcl(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this object.
+     * Valid values are "authenticatedRead", "bucketOwnerFullControl",
+     * "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+     * 
+ * + * string predefined_acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for predefinedAcl to set. + * @return This builder for chaining. + */ + public Builder setPredefinedAclBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + predefinedAcl_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000040); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
+     * Required. List of fields to be updated.
+     *
+     * To specify ALL fields, equivalent to the JSON API's "update" function,
+     * specify a single field with the value `*`. Note: not recommended. If a new
+     * field is introduced at a later time, an older client updating with the `*`
+     * might accidentally reset the new field's value.
+     *
+     * Not specifying any fields is an error.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + commonObjectRequestParamsBuilder_; + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + if (commonObjectRequestParamsBuilder_ == null) { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } else { + return commonObjectRequestParamsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonObjectRequestParams_ = value; + } else { + commonObjectRequestParamsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams.Builder builderForValue) { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParams_ = builderForValue.build(); + } else { + commonObjectRequestParamsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0) + && commonObjectRequestParams_ != null + && commonObjectRequestParams_ + != com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance()) { + getCommonObjectRequestParamsBuilder().mergeFrom(value); + } else { + commonObjectRequestParams_ = value; + } + } else { + commonObjectRequestParamsBuilder_.mergeFrom(value); + } + if (commonObjectRequestParams_ != null) { + bitField0_ |= 0x00000080; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCommonObjectRequestParams() { + bitField0_ = (bitField0_ & ~0x00000080); + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParams.Builder + getCommonObjectRequestParamsBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + if (commonObjectRequestParamsBuilder_ != null) { + return commonObjectRequestParamsBuilder_.getMessageOrBuilder(); + } else { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Storage API requests concerning an
+     * object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + internalGetCommonObjectRequestParamsFieldBuilder() { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParamsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder>( + getCommonObjectRequestParams(), getParentForChildren(), isClean()); + commonObjectRequestParams_ = null; + } + return commonObjectRequestParamsBuilder_; + } + + private boolean overrideUnlockedRetention_; + + /** + * + * + *
+     * Optional. Overrides the unlocked retention config on the object.
+     * 
+ * + * bool override_unlocked_retention = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The overrideUnlockedRetention. + */ + @java.lang.Override + public boolean getOverrideUnlockedRetention() { + return overrideUnlockedRetention_; + } + + /** + * + * + *
+     * Optional. Overrides the unlocked retention config on the object.
+     * 
+ * + * bool override_unlocked_retention = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The overrideUnlockedRetention to set. + * @return This builder for chaining. + */ + public Builder setOverrideUnlockedRetention(boolean value) { + + overrideUnlockedRetention_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Overrides the unlocked retention config on the object.
+     * 
+ * + * bool override_unlocked_retention = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearOverrideUnlockedRetention() { + bitField0_ = (bitField0_ & ~0x00000100); + overrideUnlockedRetention_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.UpdateObjectRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.UpdateObjectRequest) + private static final com.google.storage.v2.UpdateObjectRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.UpdateObjectRequest(); + } + + public static com.google.storage.v2.UpdateObjectRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateObjectRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.UpdateObjectRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff 
--git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/UpdateObjectRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/UpdateObjectRequestOrBuilder.java new file mode 100644 index 000000000000..27a6114fe594 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/UpdateObjectRequestOrBuilder.java @@ -0,0 +1,345 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface UpdateObjectRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.UpdateObjectRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The object to update.
+   * The object's bucket and name fields are used to identify the object to
+   * update. If present, the object's generation field selects a specific
+   * revision of this object whose metadata should be updated. Otherwise,
+   * assumes the live version of the object.
+   * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the object field is set. + */ + boolean hasObject(); + + /** + * + * + *
+   * Required. The object to update.
+   * The object's bucket and name fields are used to identify the object to
+   * update. If present, the object's generation field selects a specific
+   * revision of this object whose metadata should be updated. Otherwise,
+   * assumes the live version of the object.
+   * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The object. + */ + com.google.storage.v2.Object getObject(); + + /** + * + * + *
+   * Required. The object to update.
+   * The object's bucket and name fields are used to identify the object to
+   * update. If present, the object's generation field selects a specific
+   * revision of this object whose metadata should be updated. Otherwise,
+   * assumes the live version of the object.
+   * 
+ * + * .google.storage.v2.Object object = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + com.google.storage.v2.ObjectOrBuilder getObjectOrBuilder(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 2; + * + * @return Whether the ifGenerationMatch field is set. + */ + boolean hasIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current generation
+   * matches the given value. Setting to 0 makes the operation succeed only if
+   * there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 2; + * + * @return The ifGenerationMatch. + */ + long getIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 3; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + boolean hasIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live generation
+   * does not match the given value. If no live object exists, the precondition
+   * fails. Setting to 0 makes the operation succeed only if there is a live
+   * version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 3; + * + * @return The ifGenerationNotMatch. + */ + long getIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 4; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 5; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this object.
+   * Valid values are "authenticatedRead", "bucketOwnerFullControl",
+   * "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+   * 
+ * + * string predefined_acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The predefinedAcl. + */ + java.lang.String getPredefinedAcl(); + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this object.
+   * Valid values are "authenticatedRead", "bucketOwnerFullControl",
+   * "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+   * 
+ * + * string predefined_acl = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for predefinedAcl. + */ + com.google.protobuf.ByteString getPredefinedAclBytes(); + + /** + * + * + *
+   * Required. List of fields to be updated.
+   *
+   * To specify ALL fields, equivalent to the JSON API's "update" function,
+   * specify a single field with the value `*`. Note: not recommended. If a new
+   * field is introduced at a later time, an older client updating with the `*`
+   * might accidentally reset the new field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
+   * Required. List of fields to be updated.
+   *
+   * To specify ALL fields, equivalent to the JSON API's "update" function,
+   * specify a single field with the value `*`. Note: not recommended. If a new
+   * field is introduced at a later time, an older client updating with the `*`
+   * might accidentally reset the new field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
+   * Required. List of fields to be updated.
+   *
+   * To specify ALL fields, equivalent to the JSON API's "update" function,
+   * specify a single field with the value `*`. Note: not recommended. If a new
+   * field is introduced at a later time, an older client updating with the `*`
+   * might accidentally reset the new field's value.
+   *
+   * Not specifying any fields is an error.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + boolean hasCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Storage API requests concerning an
+   * object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.CommonObjectRequestParamsOrBuilder getCommonObjectRequestParamsOrBuilder(); + + /** + * + * + *
+   * Optional. Overrides the unlocked retention config on the object.
+   * 
+ * + * bool override_unlocked_retention = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The overrideUnlockedRetention. + */ + boolean getOverrideUnlockedRetention(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectRequest.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectRequest.java new file mode 100644 index 000000000000..96b67197c569 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectRequest.java @@ -0,0 +1,2397 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Request message for [WriteObject][google.storage.v2.Storage.WriteObject].
+ * 
+ * + * Protobuf type {@code google.storage.v2.WriteObjectRequest} + */ +@com.google.protobuf.Generated +public final class WriteObjectRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.WriteObjectRequest) + WriteObjectRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "WriteObjectRequest"); + } + + // Use WriteObjectRequest.newBuilder() to construct. + private WriteObjectRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private WriteObjectRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.WriteObjectRequest.class, + com.google.storage.v2.WriteObjectRequest.Builder.class); + } + + private int bitField0_; + private int firstMessageCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object firstMessage_; + + public enum FirstMessageCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + UPLOAD_ID(1), + WRITE_OBJECT_SPEC(2), + FIRSTMESSAGE_NOT_SET(0); + private final int value; + + private FirstMessageCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. 
+ * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static FirstMessageCase valueOf(int value) { + return forNumber(value); + } + + public static FirstMessageCase forNumber(int value) { + switch (value) { + case 1: + return UPLOAD_ID; + case 2: + return WRITE_OBJECT_SPEC; + case 0: + return FIRSTMESSAGE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public FirstMessageCase getFirstMessageCase() { + return FirstMessageCase.forNumber(firstMessageCase_); + } + + private int dataCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object data_; + + public enum DataCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + CHECKSUMMED_DATA(4), + DATA_NOT_SET(0); + private final int value; + + private DataCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static DataCase valueOf(int value) { + return forNumber(value); + } + + public static DataCase forNumber(int value) { + switch (value) { + case 4: + return CHECKSUMMED_DATA; + case 0: + return DATA_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public DataCase getDataCase() { + return DataCase.forNumber(dataCase_); + } + + public static final int UPLOAD_ID_FIELD_NUMBER = 1; + + /** + * + * + *
+   * For resumable uploads. This should be the `upload_id` returned from a
+   * call to `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1; + * + * @return Whether the uploadId field is set. + */ + public boolean hasUploadId() { + return firstMessageCase_ == 1; + } + + /** + * + * + *
+   * For resumable uploads. This should be the `upload_id` returned from a
+   * call to `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1; + * + * @return The uploadId. + */ + public java.lang.String getUploadId() { + java.lang.Object ref = ""; + if (firstMessageCase_ == 1) { + ref = firstMessage_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (firstMessageCase_ == 1) { + firstMessage_ = s; + } + return s; + } + } + + /** + * + * + *
+   * For resumable uploads. This should be the `upload_id` returned from a
+   * call to `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1; + * + * @return The bytes for uploadId. + */ + public com.google.protobuf.ByteString getUploadIdBytes() { + java.lang.Object ref = ""; + if (firstMessageCase_ == 1) { + ref = firstMessage_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (firstMessageCase_ == 1) { + firstMessage_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int WRITE_OBJECT_SPEC_FIELD_NUMBER = 2; + + /** + * + * + *
+   * For non-resumable uploads. Describes the overall upload, including the
+   * destination bucket and object name, preconditions, etc.
+   * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + * + * @return Whether the writeObjectSpec field is set. + */ + @java.lang.Override + public boolean hasWriteObjectSpec() { + return firstMessageCase_ == 2; + } + + /** + * + * + *
+   * For non-resumable uploads. Describes the overall upload, including the
+   * destination bucket and object name, preconditions, etc.
+   * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + * + * @return The writeObjectSpec. + */ + @java.lang.Override + public com.google.storage.v2.WriteObjectSpec getWriteObjectSpec() { + if (firstMessageCase_ == 2) { + return (com.google.storage.v2.WriteObjectSpec) firstMessage_; + } + return com.google.storage.v2.WriteObjectSpec.getDefaultInstance(); + } + + /** + * + * + *
+   * For non-resumable uploads. Describes the overall upload, including the
+   * destination bucket and object name, preconditions, etc.
+   * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + @java.lang.Override + public com.google.storage.v2.WriteObjectSpecOrBuilder getWriteObjectSpecOrBuilder() { + if (firstMessageCase_ == 2) { + return (com.google.storage.v2.WriteObjectSpec) firstMessage_; + } + return com.google.storage.v2.WriteObjectSpec.getDefaultInstance(); + } + + public static final int WRITE_OFFSET_FIELD_NUMBER = 3; + private long writeOffset_ = 0L; + + /** + * + * + *
+   * Required. The offset from the beginning of the object at which the data
+   * should be written.
+   *
+   * In the first `WriteObjectRequest` of a `WriteObject()` action, it
+   * indicates the initial offset for the `Write()` call. The value **must** be
+   * equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+   * return (0 if this is the first write to the object).
+   *
+   * On subsequent calls, this value **must** be no larger than the sum of the
+   * first `write_offset` and the sizes of all `data` chunks sent previously on
+   * this stream.
+   *
+   * An incorrect value causes an error.
+   * 
+ * + * int64 write_offset = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The writeOffset. + */ + @java.lang.Override + public long getWriteOffset() { + return writeOffset_; + } + + public static final int CHECKSUMMED_DATA_FIELD_NUMBER = 4; + + /** + * + * + *
+   * The data to insert. If a crc32c checksum is provided that doesn't match
+   * the checksum computed by the service, the request fails.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + * + * @return Whether the checksummedData field is set. + */ + @java.lang.Override + public boolean hasChecksummedData() { + return dataCase_ == 4; + } + + /** + * + * + *
+   * The data to insert. If a crc32c checksum is provided that doesn't match
+   * the checksum computed by the service, the request fails.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + * + * @return The checksummedData. + */ + @java.lang.Override + public com.google.storage.v2.ChecksummedData getChecksummedData() { + if (dataCase_ == 4) { + return (com.google.storage.v2.ChecksummedData) data_; + } + return com.google.storage.v2.ChecksummedData.getDefaultInstance(); + } + + /** + * + * + *
+   * The data to insert. If a crc32c checksum is provided that doesn't match
+   * the checksum computed by the service, the request fails.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + @java.lang.Override + public com.google.storage.v2.ChecksummedDataOrBuilder getChecksummedDataOrBuilder() { + if (dataCase_ == 4) { + return (com.google.storage.v2.ChecksummedData) data_; + } + return com.google.storage.v2.ChecksummedData.getDefaultInstance(); + } + + public static final int OBJECT_CHECKSUMS_FIELD_NUMBER = 6; + private com.google.storage.v2.ObjectChecksums objectChecksums_; + + /** + * + * + *
+   * Optional. Checksums for the complete object. If the checksums computed by
+   * the service don't match the specified checksums the call fails. This field
+   * might only be provided in the first or last request (either with
+   * `first_message`, or `finish_write` set).
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + @java.lang.Override + public boolean hasObjectChecksums() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. Checksums for the complete object. If the checksums computed by
+   * the service don't match the specified checksums the call fails. This field
+   * might only be provided in the first or last request (either with
+   * `first_message`, or `finish_write` set).
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksums getObjectChecksums() { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + + /** + * + * + *
+   * Optional. Checksums for the complete object. If the checksums computed by
+   * the service don't match the specified checksums the call fails. This field
+   * might only be provided in the first or last request (either with
+   * `first_message`, or `finish_write` set).
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder() { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + + public static final int FINISH_WRITE_FIELD_NUMBER = 7; + private boolean finishWrite_ = false; + + /** + * + * + *
+   * Optional. If `true`, this indicates that the write is complete. Sending any
+   * `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+   * causes an error.
+   * For a non-resumable write (where the `upload_id` was not set in the first
+   * message), it is an error not to set this field in the final message of the
+   * stream.
+   * 
+ * + * bool finish_write = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The finishWrite. + */ + @java.lang.Override + public boolean getFinishWrite() { + return finishWrite_; + } + + public static final int COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER = 8; + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + + /** + * + * + *
+   * Optional. A set of parameters common to Cloud Storage API requests
+   * concerning an object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + @java.lang.Override + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. A set of parameters common to Cloud Storage API requests
+   * concerning an object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + /** + * + * + *
+   * Optional. A set of parameters common to Cloud Storage API requests
+   * concerning an object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (firstMessageCase_ == 1) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, firstMessage_); + } + if (firstMessageCase_ == 2) { + output.writeMessage(2, (com.google.storage.v2.WriteObjectSpec) firstMessage_); + } + if (writeOffset_ != 0L) { + output.writeInt64(3, writeOffset_); + } + if (dataCase_ == 4) { + output.writeMessage(4, (com.google.storage.v2.ChecksummedData) data_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(6, getObjectChecksums()); + } + if (finishWrite_ != false) { + output.writeBool(7, finishWrite_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(8, getCommonObjectRequestParams()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (firstMessageCase_ == 1) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, firstMessage_); + } + if (firstMessageCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.storage.v2.WriteObjectSpec) firstMessage_); + } + if (writeOffset_ != 0L) { + size 
+= com.google.protobuf.CodedOutputStream.computeInt64Size(3, writeOffset_); + } + if (dataCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.storage.v2.ChecksummedData) data_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getObjectChecksums()); + } + if (finishWrite_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(7, finishWrite_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 8, getCommonObjectRequestParams()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.WriteObjectRequest)) { + return super.equals(obj); + } + com.google.storage.v2.WriteObjectRequest other = (com.google.storage.v2.WriteObjectRequest) obj; + + if (getWriteOffset() != other.getWriteOffset()) return false; + if (hasObjectChecksums() != other.hasObjectChecksums()) return false; + if (hasObjectChecksums()) { + if (!getObjectChecksums().equals(other.getObjectChecksums())) return false; + } + if (getFinishWrite() != other.getFinishWrite()) return false; + if (hasCommonObjectRequestParams() != other.hasCommonObjectRequestParams()) return false; + if (hasCommonObjectRequestParams()) { + if (!getCommonObjectRequestParams().equals(other.getCommonObjectRequestParams())) + return false; + } + if (!getFirstMessageCase().equals(other.getFirstMessageCase())) return false; + switch (firstMessageCase_) { + case 1: + if (!getUploadId().equals(other.getUploadId())) return false; + break; + case 2: + if (!getWriteObjectSpec().equals(other.getWriteObjectSpec())) return false; + break; + case 0: + default: + } + if (!getDataCase().equals(other.getDataCase())) return false; + switch 
(dataCase_) { + case 4: + if (!getChecksummedData().equals(other.getChecksummedData())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + WRITE_OFFSET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getWriteOffset()); + if (hasObjectChecksums()) { + hash = (37 * hash) + OBJECT_CHECKSUMS_FIELD_NUMBER; + hash = (53 * hash) + getObjectChecksums().hashCode(); + } + hash = (37 * hash) + FINISH_WRITE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getFinishWrite()); + if (hasCommonObjectRequestParams()) { + hash = (37 * hash) + COMMON_OBJECT_REQUEST_PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getCommonObjectRequestParams().hashCode(); + } + switch (firstMessageCase_) { + case 1: + hash = (37 * hash) + UPLOAD_ID_FIELD_NUMBER; + hash = (53 * hash) + getUploadId().hashCode(); + break; + case 2: + hash = (37 * hash) + WRITE_OBJECT_SPEC_FIELD_NUMBER; + hash = (53 * hash) + getWriteObjectSpec().hashCode(); + break; + case 0: + default: + } + switch (dataCase_) { + case 4: + hash = (37 * hash) + CHECKSUMMED_DATA_FIELD_NUMBER; + hash = (53 * hash) + getChecksummedData().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.WriteObjectRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.WriteObjectRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.WriteObjectRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.WriteObjectRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.WriteObjectRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.WriteObjectRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.WriteObjectRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.WriteObjectRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for [WriteObject][google.storage.v2.Storage.WriteObject].
+   * 
+ * + * Protobuf type {@code google.storage.v2.WriteObjectRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.WriteObjectRequest) + com.google.storage.v2.WriteObjectRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.WriteObjectRequest.class, + com.google.storage.v2.WriteObjectRequest.Builder.class); + } + + // Construct using com.google.storage.v2.WriteObjectRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetObjectChecksumsFieldBuilder(); + internalGetCommonObjectRequestParamsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (writeObjectSpecBuilder_ != null) { + writeObjectSpecBuilder_.clear(); + } + writeOffset_ = 0L; + if (checksummedDataBuilder_ != null) { + checksummedDataBuilder_.clear(); + } + objectChecksums_ = null; + if (objectChecksumsBuilder_ != null) { + objectChecksumsBuilder_.dispose(); + objectChecksumsBuilder_ = null; + } + finishWrite_ = false; + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + 
commonObjectRequestParamsBuilder_ = null; + } + firstMessageCase_ = 0; + firstMessage_ = null; + dataCase_ = 0; + data_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectRequest_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.WriteObjectRequest getDefaultInstanceForType() { + return com.google.storage.v2.WriteObjectRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.WriteObjectRequest build() { + com.google.storage.v2.WriteObjectRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.WriteObjectRequest buildPartial() { + com.google.storage.v2.WriteObjectRequest result = + new com.google.storage.v2.WriteObjectRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.WriteObjectRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.writeOffset_ = writeOffset_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.objectChecksums_ = + objectChecksumsBuilder_ == null ? objectChecksums_ : objectChecksumsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.finishWrite_ = finishWrite_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.commonObjectRequestParams_ = + commonObjectRequestParamsBuilder_ == null + ? 
commonObjectRequestParams_ + : commonObjectRequestParamsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.storage.v2.WriteObjectRequest result) { + result.firstMessageCase_ = firstMessageCase_; + result.firstMessage_ = this.firstMessage_; + if (firstMessageCase_ == 2 && writeObjectSpecBuilder_ != null) { + result.firstMessage_ = writeObjectSpecBuilder_.build(); + } + result.dataCase_ = dataCase_; + result.data_ = this.data_; + if (dataCase_ == 4 && checksummedDataBuilder_ != null) { + result.data_ = checksummedDataBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.WriteObjectRequest) { + return mergeFrom((com.google.storage.v2.WriteObjectRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.WriteObjectRequest other) { + if (other == com.google.storage.v2.WriteObjectRequest.getDefaultInstance()) return this; + if (other.getWriteOffset() != 0L) { + setWriteOffset(other.getWriteOffset()); + } + if (other.hasObjectChecksums()) { + mergeObjectChecksums(other.getObjectChecksums()); + } + if (other.getFinishWrite() != false) { + setFinishWrite(other.getFinishWrite()); + } + if (other.hasCommonObjectRequestParams()) { + mergeCommonObjectRequestParams(other.getCommonObjectRequestParams()); + } + switch (other.getFirstMessageCase()) { + case UPLOAD_ID: + { + firstMessageCase_ = 1; + firstMessage_ = other.firstMessage_; + onChanged(); + break; + } + case WRITE_OBJECT_SPEC: + { + mergeWriteObjectSpec(other.getWriteObjectSpec()); + break; + } + case FIRSTMESSAGE_NOT_SET: + { + break; + } + } + switch (other.getDataCase()) { + case CHECKSUMMED_DATA: + { + mergeChecksummedData(other.getChecksummedData()); + break; + } + case DATA_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); 
+ onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + firstMessageCase_ = 1; + firstMessage_ = s; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetWriteObjectSpecFieldBuilder().getBuilder(), extensionRegistry); + firstMessageCase_ = 2; + break; + } // case 18 + case 24: + { + writeOffset_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + input.readMessage( + internalGetChecksummedDataFieldBuilder().getBuilder(), extensionRegistry); + dataCase_ = 4; + break; + } // case 34 + case 50: + { + input.readMessage( + internalGetObjectChecksumsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 50 + case 56: + { + finishWrite_ = input.readBool(); + bitField0_ |= 0x00000020; + break; + } // case 56 + case 66: + { + input.readMessage( + internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 66 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int firstMessageCase_ = 0; + private java.lang.Object firstMessage_; + + public FirstMessageCase 
getFirstMessageCase() { + return FirstMessageCase.forNumber(firstMessageCase_); + } + + public Builder clearFirstMessage() { + firstMessageCase_ = 0; + firstMessage_ = null; + onChanged(); + return this; + } + + private int dataCase_ = 0; + private java.lang.Object data_; + + public DataCase getDataCase() { + return DataCase.forNumber(dataCase_); + } + + public Builder clearData() { + dataCase_ = 0; + data_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + * + * + *
+     * For resumable uploads. This should be the `upload_id` returned from a
+     * call to `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1; + * + * @return Whether the uploadId field is set. + */ + @java.lang.Override + public boolean hasUploadId() { + return firstMessageCase_ == 1; + } + + /** + * + * + *
+     * For resumable uploads. This should be the `upload_id` returned from a
+     * call to `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1; + * + * @return The uploadId. + */ + @java.lang.Override + public java.lang.String getUploadId() { + java.lang.Object ref = ""; + if (firstMessageCase_ == 1) { + ref = firstMessage_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (firstMessageCase_ == 1) { + firstMessage_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * For resumable uploads. This should be the `upload_id` returned from a
+     * call to `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1; + * + * @return The bytes for uploadId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getUploadIdBytes() { + java.lang.Object ref = ""; + if (firstMessageCase_ == 1) { + ref = firstMessage_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (firstMessageCase_ == 1) { + firstMessage_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * For resumable uploads. This should be the `upload_id` returned from a
+     * call to `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1; + * + * @param value The uploadId to set. + * @return This builder for chaining. + */ + public Builder setUploadId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + firstMessageCase_ = 1; + firstMessage_ = value; + onChanged(); + return this; + } + + /** + * + * + *
+     * For resumable uploads. This should be the `upload_id` returned from a
+     * call to `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearUploadId() { + if (firstMessageCase_ == 1) { + firstMessageCase_ = 0; + firstMessage_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * For resumable uploads. This should be the `upload_id` returned from a
+     * call to `StartResumableWriteResponse`.
+     * 
+ * + * string upload_id = 1; + * + * @param value The bytes for uploadId to set. + * @return This builder for chaining. + */ + public Builder setUploadIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + firstMessageCase_ = 1; + firstMessage_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.WriteObjectSpec, + com.google.storage.v2.WriteObjectSpec.Builder, + com.google.storage.v2.WriteObjectSpecOrBuilder> + writeObjectSpecBuilder_; + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + * + * @return Whether the writeObjectSpec field is set. + */ + @java.lang.Override + public boolean hasWriteObjectSpec() { + return firstMessageCase_ == 2; + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + * + * @return The writeObjectSpec. + */ + @java.lang.Override + public com.google.storage.v2.WriteObjectSpec getWriteObjectSpec() { + if (writeObjectSpecBuilder_ == null) { + if (firstMessageCase_ == 2) { + return (com.google.storage.v2.WriteObjectSpec) firstMessage_; + } + return com.google.storage.v2.WriteObjectSpec.getDefaultInstance(); + } else { + if (firstMessageCase_ == 2) { + return writeObjectSpecBuilder_.getMessage(); + } + return com.google.storage.v2.WriteObjectSpec.getDefaultInstance(); + } + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + public Builder setWriteObjectSpec(com.google.storage.v2.WriteObjectSpec value) { + if (writeObjectSpecBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + firstMessage_ = value; + onChanged(); + } else { + writeObjectSpecBuilder_.setMessage(value); + } + firstMessageCase_ = 2; + return this; + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + public Builder setWriteObjectSpec( + com.google.storage.v2.WriteObjectSpec.Builder builderForValue) { + if (writeObjectSpecBuilder_ == null) { + firstMessage_ = builderForValue.build(); + onChanged(); + } else { + writeObjectSpecBuilder_.setMessage(builderForValue.build()); + } + firstMessageCase_ = 2; + return this; + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + public Builder mergeWriteObjectSpec(com.google.storage.v2.WriteObjectSpec value) { + if (writeObjectSpecBuilder_ == null) { + if (firstMessageCase_ == 2 + && firstMessage_ != com.google.storage.v2.WriteObjectSpec.getDefaultInstance()) { + firstMessage_ = + com.google.storage.v2.WriteObjectSpec.newBuilder( + (com.google.storage.v2.WriteObjectSpec) firstMessage_) + .mergeFrom(value) + .buildPartial(); + } else { + firstMessage_ = value; + } + onChanged(); + } else { + if (firstMessageCase_ == 2) { + writeObjectSpecBuilder_.mergeFrom(value); + } else { + writeObjectSpecBuilder_.setMessage(value); + } + } + firstMessageCase_ = 2; + return this; + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + public Builder clearWriteObjectSpec() { + if (writeObjectSpecBuilder_ == null) { + if (firstMessageCase_ == 2) { + firstMessageCase_ = 0; + firstMessage_ = null; + onChanged(); + } + } else { + if (firstMessageCase_ == 2) { + firstMessageCase_ = 0; + firstMessage_ = null; + } + writeObjectSpecBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + public com.google.storage.v2.WriteObjectSpec.Builder getWriteObjectSpecBuilder() { + return internalGetWriteObjectSpecFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + @java.lang.Override + public com.google.storage.v2.WriteObjectSpecOrBuilder getWriteObjectSpecOrBuilder() { + if ((firstMessageCase_ == 2) && (writeObjectSpecBuilder_ != null)) { + return writeObjectSpecBuilder_.getMessageOrBuilder(); + } else { + if (firstMessageCase_ == 2) { + return (com.google.storage.v2.WriteObjectSpec) firstMessage_; + } + return com.google.storage.v2.WriteObjectSpec.getDefaultInstance(); + } + } + + /** + * + * + *
+     * For non-resumable uploads. Describes the overall upload, including the
+     * destination bucket and object name, preconditions, etc.
+     * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.WriteObjectSpec, + com.google.storage.v2.WriteObjectSpec.Builder, + com.google.storage.v2.WriteObjectSpecOrBuilder> + internalGetWriteObjectSpecFieldBuilder() { + if (writeObjectSpecBuilder_ == null) { + if (!(firstMessageCase_ == 2)) { + firstMessage_ = com.google.storage.v2.WriteObjectSpec.getDefaultInstance(); + } + writeObjectSpecBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.WriteObjectSpec, + com.google.storage.v2.WriteObjectSpec.Builder, + com.google.storage.v2.WriteObjectSpecOrBuilder>( + (com.google.storage.v2.WriteObjectSpec) firstMessage_, + getParentForChildren(), + isClean()); + firstMessage_ = null; + } + firstMessageCase_ = 2; + onChanged(); + return writeObjectSpecBuilder_; + } + + private long writeOffset_; + + /** + * + * + *
+     * Required. The offset from the beginning of the object at which the data
+     * should be written.
+     *
+     * In the first `WriteObjectRequest` of a `WriteObject()` action, it
+     * indicates the initial offset for the `Write()` call. The value **must** be
+     * equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+     * return (0 if this is the first write to the object).
+     *
+     * On subsequent calls, this value **must** be no larger than the sum of the
+     * first `write_offset` and the sizes of all `data` chunks sent previously on
+     * this stream.
+     *
+     * An incorrect value causes an error.
+     * 
+ * + * int64 write_offset = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The writeOffset. + */ + @java.lang.Override + public long getWriteOffset() { + return writeOffset_; + } + + /** + * + * + *
+     * Required. The offset from the beginning of the object at which the data
+     * should be written.
+     *
+     * In the first `WriteObjectRequest` of a `WriteObject()` action, it
+     * indicates the initial offset for the `Write()` call. The value **must** be
+     * equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+     * return (0 if this is the first write to the object).
+     *
+     * On subsequent calls, this value **must** be no larger than the sum of the
+     * first `write_offset` and the sizes of all `data` chunks sent previously on
+     * this stream.
+     *
+     * An incorrect value causes an error.
+     * 
+ * + * int64 write_offset = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The writeOffset to set. + * @return This builder for chaining. + */ + public Builder setWriteOffset(long value) { + + writeOffset_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The offset from the beginning of the object at which the data
+     * should be written.
+     *
+     * In the first `WriteObjectRequest` of a `WriteObject()` action, it
+     * indicates the initial offset for the `Write()` call. The value **must** be
+     * equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+     * return (0 if this is the first write to the object).
+     *
+     * On subsequent calls, this value **must** be no larger than the sum of the
+     * first `write_offset` and the sizes of all `data` chunks sent previously on
+     * this stream.
+     *
+     * An incorrect value causes an error.
+     * 
+ * + * int64 write_offset = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearWriteOffset() { + bitField0_ = (bitField0_ & ~0x00000004); + writeOffset_ = 0L; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ChecksummedData, + com.google.storage.v2.ChecksummedData.Builder, + com.google.storage.v2.ChecksummedDataOrBuilder> + checksummedDataBuilder_; + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + * + * @return Whether the checksummedData field is set. + */ + @java.lang.Override + public boolean hasChecksummedData() { + return dataCase_ == 4; + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + * + * @return The checksummedData. + */ + @java.lang.Override + public com.google.storage.v2.ChecksummedData getChecksummedData() { + if (checksummedDataBuilder_ == null) { + if (dataCase_ == 4) { + return (com.google.storage.v2.ChecksummedData) data_; + } + return com.google.storage.v2.ChecksummedData.getDefaultInstance(); + } else { + if (dataCase_ == 4) { + return checksummedDataBuilder_.getMessage(); + } + return com.google.storage.v2.ChecksummedData.getDefaultInstance(); + } + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + public Builder setChecksummedData(com.google.storage.v2.ChecksummedData value) { + if (checksummedDataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + data_ = value; + onChanged(); + } else { + checksummedDataBuilder_.setMessage(value); + } + dataCase_ = 4; + return this; + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + public Builder setChecksummedData( + com.google.storage.v2.ChecksummedData.Builder builderForValue) { + if (checksummedDataBuilder_ == null) { + data_ = builderForValue.build(); + onChanged(); + } else { + checksummedDataBuilder_.setMessage(builderForValue.build()); + } + dataCase_ = 4; + return this; + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + public Builder mergeChecksummedData(com.google.storage.v2.ChecksummedData value) { + if (checksummedDataBuilder_ == null) { + if (dataCase_ == 4 && data_ != com.google.storage.v2.ChecksummedData.getDefaultInstance()) { + data_ = + com.google.storage.v2.ChecksummedData.newBuilder( + (com.google.storage.v2.ChecksummedData) data_) + .mergeFrom(value) + .buildPartial(); + } else { + data_ = value; + } + onChanged(); + } else { + if (dataCase_ == 4) { + checksummedDataBuilder_.mergeFrom(value); + } else { + checksummedDataBuilder_.setMessage(value); + } + } + dataCase_ = 4; + return this; + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + public Builder clearChecksummedData() { + if (checksummedDataBuilder_ == null) { + if (dataCase_ == 4) { + dataCase_ = 0; + data_ = null; + onChanged(); + } + } else { + if (dataCase_ == 4) { + dataCase_ = 0; + data_ = null; + } + checksummedDataBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + public com.google.storage.v2.ChecksummedData.Builder getChecksummedDataBuilder() { + return internalGetChecksummedDataFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + @java.lang.Override + public com.google.storage.v2.ChecksummedDataOrBuilder getChecksummedDataOrBuilder() { + if ((dataCase_ == 4) && (checksummedDataBuilder_ != null)) { + return checksummedDataBuilder_.getMessageOrBuilder(); + } else { + if (dataCase_ == 4) { + return (com.google.storage.v2.ChecksummedData) data_; + } + return com.google.storage.v2.ChecksummedData.getDefaultInstance(); + } + } + + /** + * + * + *
+     * The data to insert. If a crc32c checksum is provided that doesn't match
+     * the checksum computed by the service, the request fails.
+     * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ChecksummedData, + com.google.storage.v2.ChecksummedData.Builder, + com.google.storage.v2.ChecksummedDataOrBuilder> + internalGetChecksummedDataFieldBuilder() { + if (checksummedDataBuilder_ == null) { + if (!(dataCase_ == 4)) { + data_ = com.google.storage.v2.ChecksummedData.getDefaultInstance(); + } + checksummedDataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ChecksummedData, + com.google.storage.v2.ChecksummedData.Builder, + com.google.storage.v2.ChecksummedDataOrBuilder>( + (com.google.storage.v2.ChecksummedData) data_, getParentForChildren(), isClean()); + data_ = null; + } + dataCase_ = 4; + onChanged(); + return checksummedDataBuilder_; + } + + private com.google.storage.v2.ObjectChecksums objectChecksums_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + objectChecksumsBuilder_; + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. This field
+     * might only be provided in the first or last request (either with
+     * `first_message`, or `finish_write` set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + public boolean hasObjectChecksums() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. This field
+     * might only be provided in the first or last request (either with
+     * `first_message`, or `finish_write` set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + public com.google.storage.v2.ObjectChecksums getObjectChecksums() { + if (objectChecksumsBuilder_ == null) { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } else { + return objectChecksumsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. This field
+     * might only be provided in the first or last request (either with
+     * `first_message`, or `finish_write` set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectChecksums(com.google.storage.v2.ObjectChecksums value) { + if (objectChecksumsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + objectChecksums_ = value; + } else { + objectChecksumsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. This field
+     * might only be provided in the first or last request (either with
+     * `first_message`, or `finish_write` set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setObjectChecksums( + com.google.storage.v2.ObjectChecksums.Builder builderForValue) { + if (objectChecksumsBuilder_ == null) { + objectChecksums_ = builderForValue.build(); + } else { + objectChecksumsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. This field
+     * might only be provided in the first or last request (either with
+     * `first_message`, or `finish_write` set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeObjectChecksums(com.google.storage.v2.ObjectChecksums value) { + if (objectChecksumsBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && objectChecksums_ != null + && objectChecksums_ != com.google.storage.v2.ObjectChecksums.getDefaultInstance()) { + getObjectChecksumsBuilder().mergeFrom(value); + } else { + objectChecksums_ = value; + } + } else { + objectChecksumsBuilder_.mergeFrom(value); + } + if (objectChecksums_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. This field
+     * might only be provided in the first or last request (either with
+     * `first_message`, or `finish_write` set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearObjectChecksums() { + bitField0_ = (bitField0_ & ~0x00000010); + objectChecksums_ = null; + if (objectChecksumsBuilder_ != null) { + objectChecksumsBuilder_.dispose(); + objectChecksumsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. This field
+     * might only be provided in the first or last request (either with
+     * `first_message`, or `finish_write` set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectChecksums.Builder getObjectChecksumsBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetObjectChecksumsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. This field
+     * might only be provided in the first or last request (either with
+     * `first_message`, or `finish_write` set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder() { + if (objectChecksumsBuilder_ != null) { + return objectChecksumsBuilder_.getMessageOrBuilder(); + } else { + return objectChecksums_ == null + ? com.google.storage.v2.ObjectChecksums.getDefaultInstance() + : objectChecksums_; + } + } + + /** + * + * + *
+     * Optional. Checksums for the complete object. If the checksums computed by
+     * the service don't match the specified checksums the call fails. This field
+     * might only be provided in the first or last request (either with
+     * `first_message`, or `finish_write` set).
+     * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder> + internalGetObjectChecksumsFieldBuilder() { + if (objectChecksumsBuilder_ == null) { + objectChecksumsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.ObjectChecksums, + com.google.storage.v2.ObjectChecksums.Builder, + com.google.storage.v2.ObjectChecksumsOrBuilder>( + getObjectChecksums(), getParentForChildren(), isClean()); + objectChecksums_ = null; + } + return objectChecksumsBuilder_; + } + + private boolean finishWrite_; + + /** + * + * + *
+     * Optional. If `true`, this indicates that the write is complete. Sending any
+     * `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+     * causes an error.
+     * For a non-resumable write (where the `upload_id` was not set in the first
+     * message), it is an error not to set this field in the final message of the
+     * stream.
+     * 
+ * + * bool finish_write = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The finishWrite. + */ + @java.lang.Override + public boolean getFinishWrite() { + return finishWrite_; + } + + /** + * + * + *
+     * Optional. If `true`, this indicates that the write is complete. Sending any
+     * `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+     * causes an error.
+     * For a non-resumable write (where the `upload_id` was not set in the first
+     * message), it is an error not to set this field in the final message of the
+     * stream.
+     * 
+ * + * bool finish_write = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The finishWrite to set. + * @return This builder for chaining. + */ + public Builder setFinishWrite(boolean value) { + + finishWrite_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If `true`, this indicates that the write is complete. Sending any
+     * `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+     * causes an error.
+     * For a non-resumable write (where the `upload_id` was not set in the first
+     * message), it is an error not to set this field in the final message of the
+     * stream.
+     * 
+ * + * bool finish_write = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearFinishWrite() { + bitField0_ = (bitField0_ & ~0x00000020); + finishWrite_ = false; + onChanged(); + return this; + } + + private com.google.storage.v2.CommonObjectRequestParams commonObjectRequestParams_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + commonObjectRequestParamsBuilder_; + + /** + * + * + *
+     * Optional. A set of parameters common to Cloud Storage API requests
+     * concerning an object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + public boolean hasCommonObjectRequestParams() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Cloud Storage API requests
+     * concerning an object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + public com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams() { + if (commonObjectRequestParamsBuilder_ == null) { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } else { + return commonObjectRequestParamsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Cloud Storage API requests
+     * concerning an object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commonObjectRequestParams_ = value; + } else { + commonObjectRequestParamsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Cloud Storage API requests
+     * concerning an object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams.Builder builderForValue) { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParams_ = builderForValue.build(); + } else { + commonObjectRequestParamsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Cloud Storage API requests
+     * concerning an object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCommonObjectRequestParams( + com.google.storage.v2.CommonObjectRequestParams value) { + if (commonObjectRequestParamsBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && commonObjectRequestParams_ != null + && commonObjectRequestParams_ + != com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance()) { + getCommonObjectRequestParamsBuilder().mergeFrom(value); + } else { + commonObjectRequestParams_ = value; + } + } else { + commonObjectRequestParamsBuilder_.mergeFrom(value); + } + if (commonObjectRequestParams_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Cloud Storage API requests
+     * concerning an object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCommonObjectRequestParams() { + bitField0_ = (bitField0_ & ~0x00000040); + commonObjectRequestParams_ = null; + if (commonObjectRequestParamsBuilder_ != null) { + commonObjectRequestParamsBuilder_.dispose(); + commonObjectRequestParamsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A set of parameters common to Cloud Storage API requests
+     * concerning an object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParams.Builder + getCommonObjectRequestParamsBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return internalGetCommonObjectRequestParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. A set of parameters common to Cloud Storage API requests
+     * concerning an object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.storage.v2.CommonObjectRequestParamsOrBuilder + getCommonObjectRequestParamsOrBuilder() { + if (commonObjectRequestParamsBuilder_ != null) { + return commonObjectRequestParamsBuilder_.getMessageOrBuilder(); + } else { + return commonObjectRequestParams_ == null + ? com.google.storage.v2.CommonObjectRequestParams.getDefaultInstance() + : commonObjectRequestParams_; + } + } + + /** + * + * + *
+     * Optional. A set of parameters common to Cloud Storage API requests
+     * concerning an object.
+     * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder> + internalGetCommonObjectRequestParamsFieldBuilder() { + if (commonObjectRequestParamsBuilder_ == null) { + commonObjectRequestParamsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.CommonObjectRequestParams, + com.google.storage.v2.CommonObjectRequestParams.Builder, + com.google.storage.v2.CommonObjectRequestParamsOrBuilder>( + getCommonObjectRequestParams(), getParentForChildren(), isClean()); + commonObjectRequestParams_ = null; + } + return commonObjectRequestParamsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.WriteObjectRequest) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.WriteObjectRequest) + private static final com.google.storage.v2.WriteObjectRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.WriteObjectRequest(); + } + + public static com.google.storage.v2.WriteObjectRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WriteObjectRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.WriteObjectRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectRequestOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectRequestOrBuilder.java new file mode 100644 index 000000000000..14fc0f76b23a --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectRequestOrBuilder.java @@ -0,0 +1,295 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface WriteObjectRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.WriteObjectRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * For resumable uploads. This should be the `upload_id` returned from a
+   * call to `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1; + * + * @return Whether the uploadId field is set. + */ + boolean hasUploadId(); + + /** + * + * + *
+   * For resumable uploads. This should be the `upload_id` returned from a
+   * call to `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1; + * + * @return The uploadId. + */ + java.lang.String getUploadId(); + + /** + * + * + *
+   * For resumable uploads. This should be the `upload_id` returned from a
+   * call to `StartResumableWriteResponse`.
+   * 
+ * + * string upload_id = 1; + * + * @return The bytes for uploadId. + */ + com.google.protobuf.ByteString getUploadIdBytes(); + + /** + * + * + *
+   * For non-resumable uploads. Describes the overall upload, including the
+   * destination bucket and object name, preconditions, etc.
+   * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + * + * @return Whether the writeObjectSpec field is set. + */ + boolean hasWriteObjectSpec(); + + /** + * + * + *
+   * For non-resumable uploads. Describes the overall upload, including the
+   * destination bucket and object name, preconditions, etc.
+   * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + * + * @return The writeObjectSpec. + */ + com.google.storage.v2.WriteObjectSpec getWriteObjectSpec(); + + /** + * + * + *
+   * For non-resumable uploads. Describes the overall upload, including the
+   * destination bucket and object name, preconditions, etc.
+   * 
+ * + * .google.storage.v2.WriteObjectSpec write_object_spec = 2; + */ + com.google.storage.v2.WriteObjectSpecOrBuilder getWriteObjectSpecOrBuilder(); + + /** + * + * + *
+   * Required. The offset from the beginning of the object at which the data
+   * should be written.
+   *
+   * In the first `WriteObjectRequest` of a `WriteObject()` action, it
+   * indicates the initial offset for the `Write()` call. The value **must** be
+   * equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+   * return (0 if this is the first write to the object).
+   *
+   * On subsequent calls, this value **must** be no larger than the sum of the
+   * first `write_offset` and the sizes of all `data` chunks sent previously on
+   * this stream.
+   *
+   * An incorrect value causes an error.
+   * 
+ * + * int64 write_offset = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The writeOffset. + */ + long getWriteOffset(); + + /** + * + * + *
+   * The data to insert. If a crc32c checksum is provided that doesn't match
+   * the checksum computed by the service, the request fails.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + * + * @return Whether the checksummedData field is set. + */ + boolean hasChecksummedData(); + + /** + * + * + *
+   * The data to insert. If a crc32c checksum is provided that doesn't match
+   * the checksum computed by the service, the request fails.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + * + * @return The checksummedData. + */ + com.google.storage.v2.ChecksummedData getChecksummedData(); + + /** + * + * + *
+   * The data to insert. If a crc32c checksum is provided that doesn't match
+   * the checksum computed by the service, the request fails.
+   * 
+ * + * .google.storage.v2.ChecksummedData checksummed_data = 4; + */ + com.google.storage.v2.ChecksummedDataOrBuilder getChecksummedDataOrBuilder(); + + /** + * + * + *
+   * Optional. Checksums for the complete object. If the checksums computed by
+   * the service don't match the specified checksums the call fails. This field
+   * might only be provided in the first or last request (either with
+   * `first_message`, or `finish_write` set).
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the objectChecksums field is set. + */ + boolean hasObjectChecksums(); + + /** + * + * + *
+   * Optional. Checksums for the complete object. If the checksums computed by
+   * the service don't match the specified checksums the call fails. This field
+   * might only be provided in the first or last request (either with
+   * `first_message`, or `finish_write` set).
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The objectChecksums. + */ + com.google.storage.v2.ObjectChecksums getObjectChecksums(); + + /** + * + * + *
+   * Optional. Checksums for the complete object. If the checksums computed by
+   * the service don't match the specified checksums the call fails. This field
+   * might only be provided in the first or last request (either with
+   * `first_message`, or `finish_write` set).
+   * 
+ * + * + * .google.storage.v2.ObjectChecksums object_checksums = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.ObjectChecksumsOrBuilder getObjectChecksumsOrBuilder(); + + /** + * + * + *
+   * Optional. If `true`, this indicates that the write is complete. Sending any
+   * `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+   * causes an error.
+   * For a non-resumable write (where the `upload_id` was not set in the first
+   * message), it is an error not to set this field in the final message of the
+   * stream.
+   * 
+ * + * bool finish_write = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The finishWrite. + */ + boolean getFinishWrite(); + + /** + * + * + *
+   * Optional. A set of parameters common to Cloud Storage API requests
+   * concerning an object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the commonObjectRequestParams field is set. + */ + boolean hasCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Cloud Storage API requests
+   * concerning an object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The commonObjectRequestParams. + */ + com.google.storage.v2.CommonObjectRequestParams getCommonObjectRequestParams(); + + /** + * + * + *
+   * Optional. A set of parameters common to Cloud Storage API requests
+   * concerning an object.
+   * 
+ * + * + * .google.storage.v2.CommonObjectRequestParams common_object_request_params = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.storage.v2.CommonObjectRequestParamsOrBuilder getCommonObjectRequestParamsOrBuilder(); + + com.google.storage.v2.WriteObjectRequest.FirstMessageCase getFirstMessageCase(); + + com.google.storage.v2.WriteObjectRequest.DataCase getDataCase(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectResponse.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectResponse.java new file mode 100644 index 000000000000..6e3e5946a013 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectResponse.java @@ -0,0 +1,949 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Response message for
+ * [WriteObject][google.storage.v2.Storage.WriteObject].
+ * 
+ * + * Protobuf type {@code google.storage.v2.WriteObjectResponse} + */ +@com.google.protobuf.Generated +public final class WriteObjectResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.WriteObjectResponse) + WriteObjectResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "WriteObjectResponse"); + } + + // Use WriteObjectResponse.newBuilder() to construct. + private WriteObjectResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private WriteObjectResponse() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.WriteObjectResponse.class, + com.google.storage.v2.WriteObjectResponse.Builder.class); + } + + private int writeStatusCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object writeStatus_; + + public enum WriteStatusCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + PERSISTED_SIZE(1), + RESOURCE(2), + WRITESTATUS_NOT_SET(0); + private final int value; + + private WriteStatusCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. 
+ * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static WriteStatusCase valueOf(int value) { + return forNumber(value); + } + + public static WriteStatusCase forNumber(int value) { + switch (value) { + case 1: + return PERSISTED_SIZE; + case 2: + return RESOURCE; + case 0: + return WRITESTATUS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public WriteStatusCase getWriteStatusCase() { + return WriteStatusCase.forNumber(writeStatusCase_); + } + + public static final int PERSISTED_SIZE_FIELD_NUMBER = 1; + + /** + * + * + *
+   * The total number of bytes that have been processed for the given object
+   * from all `WriteObject` calls. Only set if the upload has not finalized.
+   * 
+ * + * int64 persisted_size = 1; + * + * @return Whether the persistedSize field is set. + */ + @java.lang.Override + public boolean hasPersistedSize() { + return writeStatusCase_ == 1; + } + + /** + * + * + *
+   * The total number of bytes that have been processed for the given object
+   * from all `WriteObject` calls. Only set if the upload has not finalized.
+   * 
+ * + * int64 persisted_size = 1; + * + * @return The persistedSize. + */ + @java.lang.Override + public long getPersistedSize() { + if (writeStatusCase_ == 1) { + return (java.lang.Long) writeStatus_; + } + return 0L; + } + + public static final int RESOURCE_FIELD_NUMBER = 2; + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return Whether the resource field is set. + */ + @java.lang.Override + public boolean hasResource() { + return writeStatusCase_ == 2; + } + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return The resource. + */ + @java.lang.Override + public com.google.storage.v2.Object getResource() { + if (writeStatusCase_ == 2) { + return (com.google.storage.v2.Object) writeStatus_; + } + return com.google.storage.v2.Object.getDefaultInstance(); + } + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder() { + if (writeStatusCase_ == 2) { + return (com.google.storage.v2.Object) writeStatus_; + } + return com.google.storage.v2.Object.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (writeStatusCase_ == 1) { + output.writeInt64(1, (long) ((java.lang.Long) writeStatus_)); + } + if (writeStatusCase_ == 2) { + output.writeMessage(2, (com.google.storage.v2.Object) writeStatus_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (writeStatusCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 1, (long) ((java.lang.Long) writeStatus_)); + } + if (writeStatusCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.storage.v2.Object) writeStatus_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.WriteObjectResponse)) { + return super.equals(obj); + } + com.google.storage.v2.WriteObjectResponse other = + (com.google.storage.v2.WriteObjectResponse) obj; + + if (!getWriteStatusCase().equals(other.getWriteStatusCase())) return false; + switch (writeStatusCase_) { + case 1: + if (getPersistedSize() != other.getPersistedSize()) return false; + break; 
+ case 2: + if (!getResource().equals(other.getResource())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (writeStatusCase_) { + case 1: + hash = (37 * hash) + PERSISTED_SIZE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getPersistedSize()); + break; + case 2: + hash = (37 * hash) + RESOURCE_FIELD_NUMBER; + hash = (53 * hash) + getResource().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.WriteObjectResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.WriteObjectResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.WriteObjectResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.storage.v2.WriteObjectResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.WriteObjectResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.WriteObjectResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.WriteObjectResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { 
+ return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.WriteObjectResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for
+   * [WriteObject][google.storage.v2.Storage.WriteObject].
+   * 
+ * + * Protobuf type {@code google.storage.v2.WriteObjectResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.WriteObjectResponse) + com.google.storage.v2.WriteObjectResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.WriteObjectResponse.class, + com.google.storage.v2.WriteObjectResponse.Builder.class); + } + + // Construct using com.google.storage.v2.WriteObjectResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (resourceBuilder_ != null) { + resourceBuilder_.clear(); + } + writeStatusCase_ = 0; + writeStatus_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectResponse_descriptor; + } + + @java.lang.Override + public com.google.storage.v2.WriteObjectResponse getDefaultInstanceForType() { + return com.google.storage.v2.WriteObjectResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.WriteObjectResponse build() { + com.google.storage.v2.WriteObjectResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } 
+ + @java.lang.Override + public com.google.storage.v2.WriteObjectResponse buildPartial() { + com.google.storage.v2.WriteObjectResponse result = + new com.google.storage.v2.WriteObjectResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.WriteObjectResponse result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs(com.google.storage.v2.WriteObjectResponse result) { + result.writeStatusCase_ = writeStatusCase_; + result.writeStatus_ = this.writeStatus_; + if (writeStatusCase_ == 2 && resourceBuilder_ != null) { + result.writeStatus_ = resourceBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.storage.v2.WriteObjectResponse) { + return mergeFrom((com.google.storage.v2.WriteObjectResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.WriteObjectResponse other) { + if (other == com.google.storage.v2.WriteObjectResponse.getDefaultInstance()) return this; + switch (other.getWriteStatusCase()) { + case PERSISTED_SIZE: + { + setPersistedSize(other.getPersistedSize()); + break; + } + case RESOURCE: + { + mergeResource(other.getResource()); + break; + } + case WRITESTATUS_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + 
done = true; + break; + case 8: + { + writeStatus_ = input.readInt64(); + writeStatusCase_ = 1; + break; + } // case 8 + case 18: + { + input.readMessage( + internalGetResourceFieldBuilder().getBuilder(), extensionRegistry); + writeStatusCase_ = 2; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int writeStatusCase_ = 0; + private java.lang.Object writeStatus_; + + public WriteStatusCase getWriteStatusCase() { + return WriteStatusCase.forNumber(writeStatusCase_); + } + + public Builder clearWriteStatus() { + writeStatusCase_ = 0; + writeStatus_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + * + * + *
+     * The total number of bytes that have been processed for the given object
+     * from all `WriteObject` calls. Only set if the upload has not finalized.
+     * 
+ * + * int64 persisted_size = 1; + * + * @return Whether the persistedSize field is set. + */ + public boolean hasPersistedSize() { + return writeStatusCase_ == 1; + } + + /** + * + * + *
+     * The total number of bytes that have been processed for the given object
+     * from all `WriteObject` calls. Only set if the upload has not finalized.
+     * 
+ * + * int64 persisted_size = 1; + * + * @return The persistedSize. + */ + public long getPersistedSize() { + if (writeStatusCase_ == 1) { + return (java.lang.Long) writeStatus_; + } + return 0L; + } + + /** + * + * + *
+     * The total number of bytes that have been processed for the given object
+     * from all `WriteObject` calls. Only set if the upload has not finalized.
+     * 
+ * + * int64 persisted_size = 1; + * + * @param value The persistedSize to set. + * @return This builder for chaining. + */ + public Builder setPersistedSize(long value) { + + writeStatusCase_ = 1; + writeStatus_ = value; + onChanged(); + return this; + } + + /** + * + * + *
+     * The total number of bytes that have been processed for the given object
+     * from all `WriteObject` calls. Only set if the upload has not finalized.
+     * 
+ * + * int64 persisted_size = 1; + * + * @return This builder for chaining. + */ + public Builder clearPersistedSize() { + if (writeStatusCase_ == 1) { + writeStatusCase_ = 0; + writeStatus_ = null; + onChanged(); + } + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + resourceBuilder_; + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return Whether the resource field is set. + */ + @java.lang.Override + public boolean hasResource() { + return writeStatusCase_ == 2; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return The resource. + */ + @java.lang.Override + public com.google.storage.v2.Object getResource() { + if (resourceBuilder_ == null) { + if (writeStatusCase_ == 2) { + return (com.google.storage.v2.Object) writeStatus_; + } + return com.google.storage.v2.Object.getDefaultInstance(); + } else { + if (writeStatusCase_ == 2) { + return resourceBuilder_.getMessage(); + } + return com.google.storage.v2.Object.getDefaultInstance(); + } + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public Builder setResource(com.google.storage.v2.Object value) { + if (resourceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writeStatus_ = value; + onChanged(); + } else { + resourceBuilder_.setMessage(value); + } + writeStatusCase_ = 2; + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public Builder setResource(com.google.storage.v2.Object.Builder builderForValue) { + if (resourceBuilder_ == null) { + writeStatus_ = builderForValue.build(); + onChanged(); + } else { + resourceBuilder_.setMessage(builderForValue.build()); + } + writeStatusCase_ = 2; + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public Builder mergeResource(com.google.storage.v2.Object value) { + if (resourceBuilder_ == null) { + if (writeStatusCase_ == 2 + && writeStatus_ != com.google.storage.v2.Object.getDefaultInstance()) { + writeStatus_ = + com.google.storage.v2.Object.newBuilder((com.google.storage.v2.Object) writeStatus_) + .mergeFrom(value) + .buildPartial(); + } else { + writeStatus_ = value; + } + onChanged(); + } else { + if (writeStatusCase_ == 2) { + resourceBuilder_.mergeFrom(value); + } else { + resourceBuilder_.setMessage(value); + } + } + writeStatusCase_ = 2; + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public Builder clearResource() { + if (resourceBuilder_ == null) { + if (writeStatusCase_ == 2) { + writeStatusCase_ = 0; + writeStatus_ = null; + onChanged(); + } + } else { + if (writeStatusCase_ == 2) { + writeStatusCase_ = 0; + writeStatus_ = null; + } + resourceBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + public com.google.storage.v2.Object.Builder getResourceBuilder() { + return internalGetResourceFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder() { + if ((writeStatusCase_ == 2) && (resourceBuilder_ != null)) { + return resourceBuilder_.getMessageOrBuilder(); + } else { + if (writeStatusCase_ == 2) { + return (com.google.storage.v2.Object) writeStatus_; + } + return com.google.storage.v2.Object.getDefaultInstance(); + } + } + + /** + * + * + *
+     * A resource containing the metadata for the uploaded object. Only set if
+     * the upload has finalized.
+     * 
+ * + * .google.storage.v2.Object resource = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + internalGetResourceFieldBuilder() { + if (resourceBuilder_ == null) { + if (!(writeStatusCase_ == 2)) { + writeStatus_ = com.google.storage.v2.Object.getDefaultInstance(); + } + resourceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder>( + (com.google.storage.v2.Object) writeStatus_, getParentForChildren(), isClean()); + writeStatus_ = null; + } + writeStatusCase_ = 2; + onChanged(); + return resourceBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.WriteObjectResponse) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.WriteObjectResponse) + private static final com.google.storage.v2.WriteObjectResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.WriteObjectResponse(); + } + + public static com.google.storage.v2.WriteObjectResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WriteObjectResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.WriteObjectResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectResponseOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectResponseOrBuilder.java new file mode 100644 index 000000000000..97841575ba77 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectResponseOrBuilder.java @@ -0,0 +1,98 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface WriteObjectResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.WriteObjectResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The total number of bytes that have been processed for the given object
+   * from all `WriteObject` calls. Only set if the upload has not finalized.
+   * 
+ * + * int64 persisted_size = 1; + * + * @return Whether the persistedSize field is set. + */ + boolean hasPersistedSize(); + + /** + * + * + *
+   * The total number of bytes that have been processed for the given object
+   * from all `WriteObject` calls. Only set if the upload has not finalized.
+   * 
+ * + * int64 persisted_size = 1; + * + * @return The persistedSize. + */ + long getPersistedSize(); + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return Whether the resource field is set. + */ + boolean hasResource(); + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + * + * @return The resource. + */ + com.google.storage.v2.Object getResource(); + + /** + * + * + *
+   * A resource containing the metadata for the uploaded object. Only set if
+   * the upload has finalized.
+   * 
+ * + * .google.storage.v2.Object resource = 2; + */ + com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder(); + + com.google.storage.v2.WriteObjectResponse.WriteStatusCase getWriteStatusCase(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectSpec.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectSpec.java new file mode 100644 index 000000000000..94e998931ced --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectSpec.java @@ -0,0 +1,1794 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +/** + * + * + *
+ * Describes an attempt to insert an object, possibly over multiple requests.
+ * 
+ * + * Protobuf type {@code google.storage.v2.WriteObjectSpec} + */ +@com.google.protobuf.Generated +public final class WriteObjectSpec extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.storage.v2.WriteObjectSpec) + WriteObjectSpecOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "WriteObjectSpec"); + } + + // Use WriteObjectSpec.newBuilder() to construct. + private WriteObjectSpec(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private WriteObjectSpec() { + predefinedAcl_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.WriteObjectSpec.class, + com.google.storage.v2.WriteObjectSpec.Builder.class); + } + + private int bitField0_; + public static final int RESOURCE_FIELD_NUMBER = 1; + private com.google.storage.v2.Object resource_; + + /** + * + * + *
+   * Required. Destination object, including its name and its metadata.
+   * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the resource field is set. + */ + @java.lang.Override + public boolean hasResource() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. Destination object, including its name and its metadata.
+   * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The resource. + */ + @java.lang.Override + public com.google.storage.v2.Object getResource() { + return resource_ == null ? com.google.storage.v2.Object.getDefaultInstance() : resource_; + } + + /** + * + * + *
+   * Required. Destination object, including its name and its metadata.
+   * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + @java.lang.Override + public com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder() { + return resource_ == null ? com.google.storage.v2.Object.getDefaultInstance() : resource_; + } + + public static final int PREDEFINED_ACL_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private volatile java.lang.Object predefinedAcl_ = ""; + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this object.
+   * Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string predefined_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The predefinedAcl. + */ + @java.lang.Override + public java.lang.String getPredefinedAcl() { + java.lang.Object ref = predefinedAcl_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + predefinedAcl_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this object.
+   * Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string predefined_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for predefinedAcl. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPredefinedAclBytes() { + java.lang.Object ref = predefinedAcl_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + predefinedAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IF_GENERATION_MATCH_FIELD_NUMBER = 3; + private long ifGenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * generation matches the given value. Setting to `0` makes the operation
+   * succeed only if there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 3; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * generation matches the given value. Setting to `0` makes the operation
+   * succeed only if there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 3; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + public static final int IF_GENERATION_NOT_MATCH_FIELD_NUMBER = 4; + private long ifGenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live
+   * generation does not match the given value. If no live object exists, the
+   * precondition fails. Setting to `0` makes the operation succeed only if
+   * there is a live version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 4; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live
+   * generation does not match the given value. If no live object exists, the
+   * precondition fails. Setting to `0` makes the operation succeed only if
+   * there is a live version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 4; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + public static final int IF_METAGENERATION_MATCH_FIELD_NUMBER = 5; + private long ifMetagenerationMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + public static final int IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER = 6; + private long ifMetagenerationNotMatch_ = 0L; + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 6; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 6; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + public static final int OBJECT_SIZE_FIELD_NUMBER = 8; + private long objectSize_ = 0L; + + /** + * + * + *
+   * The expected final object size being uploaded.
+   * If this value is set, closing the stream after writing fewer or more than
+   * `object_size` bytes results in an `OUT_OF_RANGE` error.
+   *
+   * This situation is considered a client error, and if such an error occurs
+   * you must start the upload over from scratch, this time sending the correct
+   * number of bytes.
+   * 
+ * + * optional int64 object_size = 8; + * + * @return Whether the objectSize field is set. + */ + @java.lang.Override + public boolean hasObjectSize() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+   * The expected final object size being uploaded.
+   * If this value is set, closing the stream after writing fewer or more than
+   * `object_size` bytes results in an `OUT_OF_RANGE` error.
+   *
+   * This situation is considered a client error, and if such an error occurs
+   * you must start the upload over from scratch, this time sending the correct
+   * number of bytes.
+   * 
+ * + * optional int64 object_size = 8; + * + * @return The objectSize. + */ + @java.lang.Override + public long getObjectSize() { + return objectSize_; + } + + public static final int APPENDABLE_FIELD_NUMBER = 9; + private boolean appendable_ = false; + + /** + * + * + *
+   * If `true`, the object is created in appendable mode.
+   * This field might only be set when using `BidiWriteObject`.
+   * 
+ * + * optional bool appendable = 9; + * + * @return Whether the appendable field is set. + */ + @java.lang.Override + public boolean hasAppendable() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+   * If `true`, the object is created in appendable mode.
+   * This field might only be set when using `BidiWriteObject`.
+   * 
+ * + * optional bool appendable = 9; + * + * @return The appendable. + */ + @java.lang.Override + public boolean getAppendable() { + return appendable_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getResource()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(3, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt64(4, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeInt64(5, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeInt64(6, ifMetagenerationNotMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(predefinedAcl_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 7, predefinedAcl_); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeInt64(8, objectSize_); + } + if (((bitField0_ & 0x00000040) != 0)) { + output.writeBool(9, appendable_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getResource()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, ifGenerationMatch_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ifGenerationNotMatch_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += 
com.google.protobuf.CodedOutputStream.computeInt64Size(5, ifMetagenerationMatch_); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(6, ifMetagenerationNotMatch_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(predefinedAcl_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(7, predefinedAcl_); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(8, objectSize_); + } + if (((bitField0_ & 0x00000040) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(9, appendable_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.storage.v2.WriteObjectSpec)) { + return super.equals(obj); + } + com.google.storage.v2.WriteObjectSpec other = (com.google.storage.v2.WriteObjectSpec) obj; + + if (hasResource() != other.hasResource()) return false; + if (hasResource()) { + if (!getResource().equals(other.getResource())) return false; + } + if (!getPredefinedAcl().equals(other.getPredefinedAcl())) return false; + if (hasIfGenerationMatch() != other.hasIfGenerationMatch()) return false; + if (hasIfGenerationMatch()) { + if (getIfGenerationMatch() != other.getIfGenerationMatch()) return false; + } + if (hasIfGenerationNotMatch() != other.hasIfGenerationNotMatch()) return false; + if (hasIfGenerationNotMatch()) { + if (getIfGenerationNotMatch() != other.getIfGenerationNotMatch()) return false; + } + if (hasIfMetagenerationMatch() != other.hasIfMetagenerationMatch()) return false; + if (hasIfMetagenerationMatch()) { + if (getIfMetagenerationMatch() != other.getIfMetagenerationMatch()) return false; + } + if (hasIfMetagenerationNotMatch() != other.hasIfMetagenerationNotMatch()) return false; + if 
(hasIfMetagenerationNotMatch()) { + if (getIfMetagenerationNotMatch() != other.getIfMetagenerationNotMatch()) return false; + } + if (hasObjectSize() != other.hasObjectSize()) return false; + if (hasObjectSize()) { + if (getObjectSize() != other.getObjectSize()) return false; + } + if (hasAppendable() != other.hasAppendable()) return false; + if (hasAppendable()) { + if (getAppendable() != other.getAppendable()) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasResource()) { + hash = (37 * hash) + RESOURCE_FIELD_NUMBER; + hash = (53 * hash) + getResource().hashCode(); + } + hash = (37 * hash) + PREDEFINED_ACL_FIELD_NUMBER; + hash = (53 * hash) + getPredefinedAcl().hashCode(); + if (hasIfGenerationMatch()) { + hash = (37 * hash) + IF_GENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationMatch()); + } + if (hasIfGenerationNotMatch()) { + hash = (37 * hash) + IF_GENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfGenerationNotMatch()); + } + if (hasIfMetagenerationMatch()) { + hash = (37 * hash) + IF_METAGENERATION_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationMatch()); + } + if (hasIfMetagenerationNotMatch()) { + hash = (37 * hash) + IF_METAGENERATION_NOT_MATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIfMetagenerationNotMatch()); + } + if (hasObjectSize()) { + hash = (37 * hash) + OBJECT_SIZE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getObjectSize()); + } + if (hasAppendable()) { + hash = (37 * hash) + APPENDABLE_FIELD_NUMBER; + hash = (53 * hash) + 
com.google.protobuf.Internal.hashBoolean(getAppendable()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.storage.v2.WriteObjectSpec parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.WriteObjectSpec parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectSpec parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.WriteObjectSpec parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectSpec parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.storage.v2.WriteObjectSpec parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectSpec parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.WriteObjectSpec parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectSpec parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.storage.v2.WriteObjectSpec parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.storage.v2.WriteObjectSpec parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.storage.v2.WriteObjectSpec parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.storage.v2.WriteObjectSpec prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Describes an attempt to insert an object, possibly over multiple requests.
+   * 
+ * + * Protobuf type {@code google.storage.v2.WriteObjectSpec} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.storage.v2.WriteObjectSpec) + com.google.storage.v2.WriteObjectSpecOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.storage.v2.WriteObjectSpec.class, + com.google.storage.v2.WriteObjectSpec.Builder.class); + } + + // Construct using com.google.storage.v2.WriteObjectSpec.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetResourceFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + resource_ = null; + if (resourceBuilder_ != null) { + resourceBuilder_.dispose(); + resourceBuilder_ = null; + } + predefinedAcl_ = ""; + ifGenerationMatch_ = 0L; + ifGenerationNotMatch_ = 0L; + ifMetagenerationMatch_ = 0L; + ifMetagenerationNotMatch_ = 0L; + objectSize_ = 0L; + appendable_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.storage.v2.StorageProto + .internal_static_google_storage_v2_WriteObjectSpec_descriptor; + } + + @java.lang.Override + public 
com.google.storage.v2.WriteObjectSpec getDefaultInstanceForType() { + return com.google.storage.v2.WriteObjectSpec.getDefaultInstance(); + } + + @java.lang.Override + public com.google.storage.v2.WriteObjectSpec build() { + com.google.storage.v2.WriteObjectSpec result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.storage.v2.WriteObjectSpec buildPartial() { + com.google.storage.v2.WriteObjectSpec result = + new com.google.storage.v2.WriteObjectSpec(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.storage.v2.WriteObjectSpec result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.resource_ = resourceBuilder_ == null ? resource_ : resourceBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.predefinedAcl_ = predefinedAcl_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.ifGenerationMatch_ = ifGenerationMatch_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.ifGenerationNotMatch_ = ifGenerationNotMatch_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.ifMetagenerationMatch_ = ifMetagenerationMatch_; + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.ifMetagenerationNotMatch_ = ifMetagenerationNotMatch_; + to_bitField0_ |= 0x00000010; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.objectSize_ = objectSize_; + to_bitField0_ |= 0x00000020; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.appendable_ = appendable_; + to_bitField0_ |= 0x00000040; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if 
(other instanceof com.google.storage.v2.WriteObjectSpec) { + return mergeFrom((com.google.storage.v2.WriteObjectSpec) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.storage.v2.WriteObjectSpec other) { + if (other == com.google.storage.v2.WriteObjectSpec.getDefaultInstance()) return this; + if (other.hasResource()) { + mergeResource(other.getResource()); + } + if (!other.getPredefinedAcl().isEmpty()) { + predefinedAcl_ = other.predefinedAcl_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasIfGenerationMatch()) { + setIfGenerationMatch(other.getIfGenerationMatch()); + } + if (other.hasIfGenerationNotMatch()) { + setIfGenerationNotMatch(other.getIfGenerationNotMatch()); + } + if (other.hasIfMetagenerationMatch()) { + setIfMetagenerationMatch(other.getIfMetagenerationMatch()); + } + if (other.hasIfMetagenerationNotMatch()) { + setIfMetagenerationNotMatch(other.getIfMetagenerationNotMatch()); + } + if (other.hasObjectSize()) { + setObjectSize(other.getObjectSize()); + } + if (other.hasAppendable()) { + setAppendable(other.getAppendable()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetResourceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 24: + { + ifGenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: + { + 
ifGenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 40: + { + ifMetagenerationMatch_ = input.readInt64(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 48: + { + ifMetagenerationNotMatch_ = input.readInt64(); + bitField0_ |= 0x00000020; + break; + } // case 48 + case 58: + { + predefinedAcl_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 58 + case 64: + { + objectSize_ = input.readInt64(); + bitField0_ |= 0x00000040; + break; + } // case 64 + case 72: + { + appendable_ = input.readBool(); + bitField0_ |= 0x00000080; + break; + } // case 72 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.storage.v2.Object resource_; + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + resourceBuilder_; + + /** + * + * + *
+     * Required. Destination object, including its name and its metadata.
+     * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the resource field is set. + */ + public boolean hasResource() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. Destination object, including its name and its metadata.
+     * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The resource. + */ + public com.google.storage.v2.Object getResource() { + if (resourceBuilder_ == null) { + return resource_ == null ? com.google.storage.v2.Object.getDefaultInstance() : resource_; + } else { + return resourceBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. Destination object, including its name and its metadata.
+     * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setResource(com.google.storage.v2.Object value) { + if (resourceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + resource_ = value; + } else { + resourceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Destination object, including its name and its metadata.
+     * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setResource(com.google.storage.v2.Object.Builder builderForValue) { + if (resourceBuilder_ == null) { + resource_ = builderForValue.build(); + } else { + resourceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Destination object, including its name and its metadata.
+     * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeResource(com.google.storage.v2.Object value) { + if (resourceBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && resource_ != null + && resource_ != com.google.storage.v2.Object.getDefaultInstance()) { + getResourceBuilder().mergeFrom(value); + } else { + resource_ = value; + } + } else { + resourceBuilder_.mergeFrom(value); + } + if (resource_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. Destination object, including its name and its metadata.
+     * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearResource() { + bitField0_ = (bitField0_ & ~0x00000001); + resource_ = null; + if (resourceBuilder_ != null) { + resourceBuilder_.dispose(); + resourceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Destination object, including its name and its metadata.
+     * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.v2.Object.Builder getResourceBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetResourceFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. Destination object, including its name and its metadata.
+     * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder() { + if (resourceBuilder_ != null) { + return resourceBuilder_.getMessageOrBuilder(); + } else { + return resource_ == null ? com.google.storage.v2.Object.getDefaultInstance() : resource_; + } + } + + /** + * + * + *
+     * Required. Destination object, including its name and its metadata.
+     * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder> + internalGetResourceFieldBuilder() { + if (resourceBuilder_ == null) { + resourceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.storage.v2.Object, + com.google.storage.v2.Object.Builder, + com.google.storage.v2.ObjectOrBuilder>( + getResource(), getParentForChildren(), isClean()); + resource_ = null; + } + return resourceBuilder_; + } + + private java.lang.Object predefinedAcl_ = ""; + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this object.
+     * Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The predefinedAcl. + */ + public java.lang.String getPredefinedAcl() { + java.lang.Object ref = predefinedAcl_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + predefinedAcl_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this object.
+     * Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for predefinedAcl. + */ + public com.google.protobuf.ByteString getPredefinedAclBytes() { + java.lang.Object ref = predefinedAcl_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + predefinedAcl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this object.
+     * Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The predefinedAcl to set. + * @return This builder for chaining. + */ + public Builder setPredefinedAcl(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + predefinedAcl_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this object.
+     * Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPredefinedAcl() { + predefinedAcl_ = getDefaultInstance().getPredefinedAcl(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Apply a predefined set of access controls to this object.
+     * Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+     * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+     * 
+ * + * string predefined_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for predefinedAcl to set. + * @return This builder for chaining. + */ + public Builder setPredefinedAclBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + predefinedAcl_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private long ifGenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * generation matches the given value. Setting to `0` makes the operation
+     * succeed only if there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 3; + * + * @return Whether the ifGenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationMatch() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * generation matches the given value. Setting to `0` makes the operation
+     * succeed only if there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 3; + * + * @return The ifGenerationMatch. + */ + @java.lang.Override + public long getIfGenerationMatch() { + return ifGenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * generation matches the given value. Setting to `0` makes the operation
+     * succeed only if there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 3; + * + * @param value The ifGenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationMatch(long value) { + + ifGenerationMatch_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * generation matches the given value. Setting to `0` makes the operation
+     * succeed only if there are no live versions of the object.
+     * 
+ * + * optional int64 if_generation_match = 3; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000004); + ifGenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifGenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live
+     * generation does not match the given value. If no live object exists, the
+     * precondition fails. Setting to `0` makes the operation succeed only if
+     * there is a live version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 4; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfGenerationNotMatch() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live
+     * generation does not match the given value. If no live object exists, the
+     * precondition fails. Setting to `0` makes the operation succeed only if
+     * there is a live version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 4; + * + * @return The ifGenerationNotMatch. + */ + @java.lang.Override + public long getIfGenerationNotMatch() { + return ifGenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live
+     * generation does not match the given value. If no live object exists, the
+     * precondition fails. Setting to `0` makes the operation succeed only if
+     * there is a live version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 4; + * + * @param value The ifGenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfGenerationNotMatch(long value) { + + ifGenerationNotMatch_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's live
+     * generation does not match the given value. If no live object exists, the
+     * precondition fails. Setting to `0` makes the operation succeed only if
+     * there is a live version of the object.
+     * 
+ * + * optional int64 if_generation_not_match = 4; + * + * @return This builder for chaining. + */ + public Builder clearIfGenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000008); + ifGenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationMatch() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return The ifMetagenerationMatch. + */ + @java.lang.Override + public long getIfMetagenerationMatch() { + return ifMetagenerationMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @param value The ifMetagenerationMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationMatch(long value) { + + ifMetagenerationMatch_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration matches the given value.
+     * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationMatch() { + bitField0_ = (bitField0_ & ~0x00000010); + ifMetagenerationMatch_ = 0L; + onChanged(); + return this; + } + + private long ifMetagenerationNotMatch_; + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 6; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + @java.lang.Override + public boolean hasIfMetagenerationNotMatch() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 6; + * + * @return The ifMetagenerationNotMatch. + */ + @java.lang.Override + public long getIfMetagenerationNotMatch() { + return ifMetagenerationNotMatch_; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 6; + * + * @param value The ifMetagenerationNotMatch to set. + * @return This builder for chaining. + */ + public Builder setIfMetagenerationNotMatch(long value) { + + ifMetagenerationNotMatch_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Makes the operation conditional on whether the object's current
+     * metageneration does not match the given value.
+     * 
+ * + * optional int64 if_metageneration_not_match = 6; + * + * @return This builder for chaining. + */ + public Builder clearIfMetagenerationNotMatch() { + bitField0_ = (bitField0_ & ~0x00000020); + ifMetagenerationNotMatch_ = 0L; + onChanged(); + return this; + } + + private long objectSize_; + + /** + * + * + *
+     * The expected final object size being uploaded.
+     * If this value is set, closing the stream after writing fewer or more than
+     * `object_size` bytes results in an `OUT_OF_RANGE` error.
+     *
+     * This situation is considered a client error, and if such an error occurs
+     * you must start the upload over from scratch, this time sending the correct
+     * number of bytes.
+     * 
+ * + * optional int64 object_size = 8; + * + * @return Whether the objectSize field is set. + */ + @java.lang.Override + public boolean hasObjectSize() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * The expected final object size being uploaded.
+     * If this value is set, closing the stream after writing fewer or more than
+     * `object_size` bytes results in an `OUT_OF_RANGE` error.
+     *
+     * This situation is considered a client error, and if such an error occurs
+     * you must start the upload over from scratch, this time sending the correct
+     * number of bytes.
+     * 
+ * + * optional int64 object_size = 8; + * + * @return The objectSize. + */ + @java.lang.Override + public long getObjectSize() { + return objectSize_; + } + + /** + * + * + *
+     * The expected final object size being uploaded.
+     * If this value is set, closing the stream after writing fewer or more than
+     * `object_size` bytes results in an `OUT_OF_RANGE` error.
+     *
+     * This situation is considered a client error, and if such an error occurs
+     * you must start the upload over from scratch, this time sending the correct
+     * number of bytes.
+     * 
+ * + * optional int64 object_size = 8; + * + * @param value The objectSize to set. + * @return This builder for chaining. + */ + public Builder setObjectSize(long value) { + + objectSize_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * The expected final object size being uploaded.
+     * If this value is set, closing the stream after writing fewer or more than
+     * `object_size` bytes results in an `OUT_OF_RANGE` error.
+     *
+     * This situation is considered a client error, and if such an error occurs
+     * you must start the upload over from scratch, this time sending the correct
+     * number of bytes.
+     * 
+ * + * optional int64 object_size = 8; + * + * @return This builder for chaining. + */ + public Builder clearObjectSize() { + bitField0_ = (bitField0_ & ~0x00000040); + objectSize_ = 0L; + onChanged(); + return this; + } + + private boolean appendable_; + + /** + * + * + *
+     * If `true`, the object is created in appendable mode.
+     * This field might only be set when using `BidiWriteObject`.
+     * 
+ * + * optional bool appendable = 9; + * + * @return Whether the appendable field is set. + */ + @java.lang.Override + public boolean hasAppendable() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+     * If `true`, the object is created in appendable mode.
+     * This field might only be set when using `BidiWriteObject`.
+     * 
+ * + * optional bool appendable = 9; + * + * @return The appendable. + */ + @java.lang.Override + public boolean getAppendable() { + return appendable_; + } + + /** + * + * + *
+     * If `true`, the object is created in appendable mode.
+     * This field might only be set when using `BidiWriteObject`.
+     * 
+ * + * optional bool appendable = 9; + * + * @param value The appendable to set. + * @return This builder for chaining. + */ + public Builder setAppendable(boolean value) { + + appendable_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * If `true`, the object is created in appendable mode.
+     * This field might only be set when using `BidiWriteObject`.
+     * 
+ * + * optional bool appendable = 9; + * + * @return This builder for chaining. + */ + public Builder clearAppendable() { + bitField0_ = (bitField0_ & ~0x00000080); + appendable_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.storage.v2.WriteObjectSpec) + } + + // @@protoc_insertion_point(class_scope:google.storage.v2.WriteObjectSpec) + private static final com.google.storage.v2.WriteObjectSpec DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.storage.v2.WriteObjectSpec(); + } + + public static com.google.storage.v2.WriteObjectSpec getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WriteObjectSpec parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.storage.v2.WriteObjectSpec getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectSpecOrBuilder.java b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectSpecOrBuilder.java new file mode 100644 index 000000000000..b8bac524c5c5 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/java/com/google/storage/v2/WriteObjectSpecOrBuilder.java @@ -0,0 +1,279 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/storage/v2/storage.proto +// Protobuf Java Version: 4.33.2 + +package com.google.storage.v2; + +@com.google.protobuf.Generated +public interface WriteObjectSpecOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.storage.v2.WriteObjectSpec) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Destination object, including its name and its metadata.
+   * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the resource field is set. + */ + boolean hasResource(); + + /** + * + * + *
+   * Required. Destination object, including its name and its metadata.
+   * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The resource. + */ + com.google.storage.v2.Object getResource(); + + /** + * + * + *
+   * Required. Destination object, including its name and its metadata.
+   * 
+ * + * .google.storage.v2.Object resource = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + com.google.storage.v2.ObjectOrBuilder getResourceOrBuilder(); + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this object.
+   * Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string predefined_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The predefinedAcl. + */ + java.lang.String getPredefinedAcl(); + + /** + * + * + *
+   * Optional. Apply a predefined set of access controls to this object.
+   * Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+   * `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+   * 
+ * + * string predefined_acl = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for predefinedAcl. + */ + com.google.protobuf.ByteString getPredefinedAclBytes(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * generation matches the given value. Setting to `0` makes the operation
+   * succeed only if there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 3; + * + * @return Whether the ifGenerationMatch field is set. + */ + boolean hasIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * generation matches the given value. Setting to `0` makes the operation
+   * succeed only if there are no live versions of the object.
+   * 
+ * + * optional int64 if_generation_match = 3; + * + * @return The ifGenerationMatch. + */ + long getIfGenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live
+   * generation does not match the given value. If no live object exists, the
+   * precondition fails. Setting to `0` makes the operation succeed only if
+   * there is a live version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 4; + * + * @return Whether the ifGenerationNotMatch field is set. + */ + boolean hasIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's live
+   * generation does not match the given value. If no live object exists, the
+   * precondition fails. Setting to `0` makes the operation succeed only if
+   * there is a live version of the object.
+   * 
+ * + * optional int64 if_generation_not_match = 4; + * + * @return The ifGenerationNotMatch. + */ + long getIfGenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return Whether the ifMetagenerationMatch field is set. + */ + boolean hasIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration matches the given value.
+   * 
+ * + * optional int64 if_metageneration_match = 5; + * + * @return The ifMetagenerationMatch. + */ + long getIfMetagenerationMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 6; + * + * @return Whether the ifMetagenerationNotMatch field is set. + */ + boolean hasIfMetagenerationNotMatch(); + + /** + * + * + *
+   * Makes the operation conditional on whether the object's current
+   * metageneration does not match the given value.
+   * 
+ * + * optional int64 if_metageneration_not_match = 6; + * + * @return The ifMetagenerationNotMatch. + */ + long getIfMetagenerationNotMatch(); + + /** + * + * + *
+   * The expected final object size being uploaded.
+   * If this value is set, closing the stream after writing fewer or more than
+   * `object_size` bytes results in an `OUT_OF_RANGE` error.
+   *
+   * This situation is considered a client error, and if such an error occurs
+   * you must start the upload over from scratch, this time sending the correct
+   * number of bytes.
+   * 
+ * + * optional int64 object_size = 8; + * + * @return Whether the objectSize field is set. + */ + boolean hasObjectSize(); + + /** + * + * + *
+   * The expected final object size being uploaded.
+   * If this value is set, closing the stream after writing fewer or more than
+   * `object_size` bytes results in an `OUT_OF_RANGE` error.
+   *
+   * This situation is considered a client error, and if such an error occurs
+   * you must start the upload over from scratch, this time sending the correct
+   * number of bytes.
+   * 
+ * + * optional int64 object_size = 8; + * + * @return The objectSize. + */ + long getObjectSize(); + + /** + * + * + *
+   * If `true`, the object is created in appendable mode.
+   * This field might only be set when using `BidiWriteObject`.
+   * 
+ * + * optional bool appendable = 9; + * + * @return Whether the appendable field is set. + */ + boolean hasAppendable(); + + /** + * + * + *
+   * If `true`, the object is created in appendable mode.
+   * This field might only be set when using `BidiWriteObject`.
+   * 
+ * + * optional bool appendable = 9; + * + * @return The appendable. + */ + boolean getAppendable(); +} diff --git a/java-storage/proto-google-cloud-storage-v2/src/main/proto/google/storage/v2/storage.proto b/java-storage/proto-google-cloud-storage-v2/src/main/proto/google/storage/v2/storage.proto new file mode 100644 index 000000000000..d16c0f91d868 --- /dev/null +++ b/java-storage/proto-google-cloud-storage-v2/src/main/proto/google/storage/v2/storage.proto @@ -0,0 +1,3166 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.storage.v2; + +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/api/routing.proto"; +import "google/iam/v1/iam_policy.proto"; +import "google/iam/v1/policy.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; +import "google/type/date.proto"; + +option go_package = "cloud.google.com/go/storage/internal/apiv2/storagepb;storagepb"; +option java_multiple_files = true; +option java_outer_classname = "StorageProto"; +option java_package = "com.google.storage.v2"; +option (google.api.resource_definition) = { + type: "cloudkms.googleapis.com/CryptoKey" + pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}" +}; + +// ## API Overview and Naming Syntax +// +// The Cloud Storage gRPC API allows applications to read and write data through +// the abstractions of buckets and objects. For a description of these +// abstractions please see [Cloud Storage +// documentation](https://cloud.google.com/storage/docs). +// +// Resources are named as follows: +// +// - Projects are referred to as they are defined by the Resource Manager API, +// using strings like `projects/123456` or `projects/my-string-id`. +// - Buckets are named using string names of the form: +// `projects/{project}/buckets/{bucket}`. +// For globally unique buckets, `_` might be substituted for the project. +// - Objects are uniquely identified by their name along with the name of the +// bucket they belong to, as separate strings in this API. 
For example: +// +// ``` +// ReadObjectRequest { +// bucket: 'projects/_/buckets/my-bucket' +// object: 'my-object' +// } +// ``` +// +// Note that object names can contain `/` characters, which are treated as +// any other character (no special directory semantics). +service Storage { + option (google.api.default_host) = "storage.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only," + "https://www.googleapis.com/auth/devstorage.full_control," + "https://www.googleapis.com/auth/devstorage.read_only," + "https://www.googleapis.com/auth/devstorage.read_write"; + + // Permanently deletes an empty bucket. + // The request fails if there are any live or + // noncurrent objects in the bucket, but the request succeeds if the + // bucket only contains soft-deleted objects or incomplete uploads, such + // as ongoing XML API multipart uploads. Does not permanently delete + // soft-deleted objects. + // + // When this API is used to delete a bucket containing an object that has a + // soft delete policy + // enabled, the object becomes soft deleted, and the + // `softDeleteTime` and `hardDeleteTime` properties are set on the + // object. + // + // Objects and multipart uploads that were in the bucket at the time of + // deletion are also retained for the specified retention duration. When + // a soft-deleted bucket reaches the end of its retention duration, it + // is permanently deleted. The `hardDeleteTime` of the bucket always + // equals + // or exceeds the expiration time of the last soft-deleted object in the + // bucket. + // + // **IAM Permissions**: + // + // Requires `storage.buckets.delete` IAM permission on the bucket. 
+ rpc DeleteBucket(DeleteBucketRequest) returns (google.protobuf.Empty) { + option (google.api.routing) = { + routing_parameters { field: "name" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "name"; + } + + // Returns metadata for the specified bucket. + // + // **IAM Permissions**: + // + // Requires `storage.buckets.get` + // IAM permission on + // the bucket. Additionally, to return specific bucket metadata, the + // authenticated user must have the following permissions: + // + // - To return the IAM policies: `storage.buckets.getIamPolicy` + // - To return the bucket IP filtering rules: `storage.buckets.getIpFilter` + rpc GetBucket(GetBucketRequest) returns (Bucket) { + option (google.api.routing) = { + routing_parameters { field: "name" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "name"; + } + + // Creates a new bucket. + // + // **IAM Permissions**: + // + // Requires `storage.buckets.create` IAM permission on the bucket. + // Additionally, to enable specific bucket features, the authenticated user + // must have the following permissions: + // + // - To enable object retention using the `enableObjectRetention` query + // parameter: `storage.buckets.enableObjectRetention` + // - To set the bucket IP filtering rules: `storage.buckets.setIpFilter` + rpc CreateBucket(CreateBucketRequest) returns (Bucket) { + option (google.api.routing) = { + routing_parameters { field: "parent" path_template: "{project=**}" } + routing_parameters { + field: "bucket.project" + path_template: "{project=**}" + } + }; + option (google.api.method_signature) = "parent,bucket,bucket_id"; + } + + // Retrieves a list of buckets for a given project, ordered + // lexicographically by name. + // + // **IAM Permissions**: + // + // Requires `storage.buckets.list` IAM permission on the bucket. 
+ // Additionally, to enable specific bucket features, the authenticated + // user must have the following permissions: + // + // - To list the IAM policies: `storage.buckets.getIamPolicy` + // - To list the bucket IP filtering rules: `storage.buckets.getIpFilter` + rpc ListBuckets(ListBucketsRequest) returns (ListBucketsResponse) { + option (google.api.routing) = { + routing_parameters { field: "parent" path_template: "{project=**}" } + }; + option (google.api.method_signature) = "parent"; + } + + // Permanently locks the retention + // policy that is + // currently applied to the specified bucket. + // + // Caution: Locking a bucket is an + // irreversible action. Once you lock a bucket: + // + // - You cannot remove the retention policy from the bucket. + // - You cannot decrease the retention period for the policy. + // + // Once locked, you must delete the entire bucket in order to remove the + // bucket's retention policy. However, before you can delete the bucket, you + // must delete all the objects in the bucket, which is only + // possible if all the objects have reached the retention period set by the + // retention policy. + // + // **IAM Permissions**: + // + // Requires `storage.buckets.update` IAM permission on the bucket. + rpc LockBucketRetentionPolicy(LockBucketRetentionPolicyRequest) + returns (Bucket) { + option (google.api.routing) = { + routing_parameters { field: "bucket" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "bucket"; + } + + // Gets the IAM policy for a specified bucket or managed folder. + // The `resource` field in the request should be + // `projects/_/buckets/{bucket}` for a bucket, or + // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` + // for a managed folder. + // + // **IAM Permissions**: + // + // Requires `storage.buckets.getIamPolicy` on the bucket or + // `storage.managedFolders.getIamPolicy` IAM permission on the + // managed folder. 
+ rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) + returns (google.iam.v1.Policy) { + option (google.api.routing) = { + routing_parameters { field: "resource" path_template: "{bucket=**}" } + routing_parameters { + field: "resource" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "resource"; + } + + // Updates an IAM policy for the specified bucket or managed folder. + // The `resource` field in the request should be + // `projects/_/buckets/{bucket}` for a bucket, or + // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` + // for a managed folder. + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) + returns (google.iam.v1.Policy) { + option (google.api.routing) = { + routing_parameters { field: "resource" path_template: "{bucket=**}" } + routing_parameters { + field: "resource" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "resource,policy"; + } + + // Tests a set of permissions on the given bucket, object, or managed folder + // to see which, if any, are held by the caller. The `resource` field in the + // request should be `projects/_/buckets/{bucket}` for a bucket, + // `projects/_/buckets/{bucket}/objects/{object}` for an object, or + // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` + // for a managed folder. + rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) + returns (google.iam.v1.TestIamPermissionsResponse) { + option (google.api.routing) = { + routing_parameters { field: "resource" path_template: "{bucket=**}" } + routing_parameters { + field: "resource" + path_template: "{bucket=projects/*/buckets/*}/objects/**" + } + routing_parameters { + field: "resource" + path_template: "{bucket=projects/*/buckets/*}/managedFolders/**" + } + }; + option (google.api.method_signature) = "resource,permissions"; + } + + // Updates a bucket. 
Changes to the bucket are readable immediately after + // writing, but configuration changes might take time to propagate. This + // method supports `patch` semantics. + // + // **IAM Permissions**: + // + // Requires `storage.buckets.update` IAM permission on the bucket. + // Additionally, to enable specific bucket features, the authenticated user + // must have the following permissions: + // + // - To set bucket IP filtering rules: `storage.buckets.setIpFilter` + // - To update public access prevention policies or access control lists + // (ACLs): `storage.buckets.setIamPolicy` + rpc UpdateBucket(UpdateBucketRequest) returns (Bucket) { + option (google.api.routing) = { + routing_parameters { field: "bucket.name" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "bucket,update_mask"; + } + + // Concatenates a list of existing objects into a new object in the same + // bucket. The existing source objects are unaffected by this operation. + // + // **IAM Permissions**: + // + // Requires the `storage.objects.create` and `storage.objects.get` IAM + // permissions to use this method. If the new composite object + // overwrites an existing object, the authenticated user must also have + // the `storage.objects.delete` permission. If the request body includes + // the retention property, the authenticated user must also have the + // `storage.objects.setRetention` IAM permission. + rpc ComposeObject(ComposeObjectRequest) returns (Object) { + option (google.api.routing) = { + routing_parameters { + field: "destination.bucket" + path_template: "{bucket=**}" + } + }; + } + + // Deletes an object and its metadata. Deletions are permanent if versioning + // is not enabled for the bucket, or if the generation parameter is used, or + // if soft delete is not + // enabled for the bucket. 
+ // When this API is used to delete an object from a bucket that has soft + // delete policy enabled, the object becomes soft deleted, and the + // `softDeleteTime` and `hardDeleteTime` properties are set on the object. + // This API cannot be used to permanently delete soft-deleted objects. + // Soft-deleted objects are permanently deleted according to their + // `hardDeleteTime`. + // + // You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject] + // API to restore soft-deleted objects until the soft delete retention period + // has passed. + // + // **IAM Permissions**: + // + // Requires `storage.objects.delete` IAM permission on the bucket. + rpc DeleteObject(DeleteObjectRequest) returns (google.protobuf.Empty) { + option (google.api.routing) = { + routing_parameters { field: "bucket" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "bucket,object"; + option (google.api.method_signature) = "bucket,object,generation"; + } + + // Restores a + // soft-deleted object. + // When a soft-deleted object is restored, a new copy of that object is + // created in the same bucket and inherits the same metadata as the + // soft-deleted object. The inherited metadata is the metadata that existed + // when the original object became soft deleted, with the following + // exceptions: + // + // - The `createTime` of the new object is set to the time at which the + // soft-deleted object was restored. + // - The `softDeleteTime` and `hardDeleteTime` values are cleared. + // - A new generation is assigned and the metageneration is reset to 1. + // - If the soft-deleted object was in a bucket that had Autoclass enabled, + // the new object is + // restored to Standard storage. + // - The restored object inherits the bucket's default object ACL, unless + // `copySourceAcl` is `true`. 
+ // + // If a live object using the same name already exists in the bucket and + // becomes overwritten, the live object becomes a noncurrent object if Object + // Versioning is enabled on the bucket. If Object Versioning is not enabled, + // the live object becomes soft deleted. + // + // **IAM Permissions**: + // + // Requires the following IAM permissions to use this method: + // + // - `storage.objects.restore` + // - `storage.objects.create` + // - `storage.objects.delete` (only required if overwriting an existing + // object) + // - `storage.objects.getIamPolicy` (only required if `projection` is `full` + // and the relevant bucket + // has uniform bucket-level access disabled) + // - `storage.objects.setIamPolicy` (only required if `copySourceAcl` is + // `true` and the relevant + // bucket has uniform bucket-level access disabled) + rpc RestoreObject(RestoreObjectRequest) returns (Object) { + option (google.api.routing) = { + routing_parameters { field: "bucket" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "bucket,object,generation"; + } + + // Cancels an in-progress resumable upload. + // + // Any attempts to write to the resumable upload after cancelling the upload + // fail. + // + // The behavior for any in-progress write operations is not guaranteed; + // they could either complete before the cancellation or fail if the + // cancellation completes first. + rpc CancelResumableWrite(CancelResumableWriteRequest) + returns (CancelResumableWriteResponse) { + option (google.api.routing) = { + routing_parameters { + field: "upload_id" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "upload_id"; + } + + // Retrieves object metadata. + // + // **IAM Permissions**: + // + // Requires `storage.objects.get` IAM permission on the bucket. + // To return object ACLs, the authenticated user must also have + // the `storage.objects.getIamPolicy` permission. 
+ rpc GetObject(GetObjectRequest) returns (Object) { + option (google.api.routing) = { + routing_parameters { field: "bucket" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "bucket,object"; + option (google.api.method_signature) = "bucket,object,generation"; + } + + // Retrieves object data. + // + // **IAM Permissions**: + // + // Requires `storage.objects.get` IAM permission on the bucket. + rpc ReadObject(ReadObjectRequest) returns (stream ReadObjectResponse) { + option (google.api.routing) = { + routing_parameters { field: "bucket" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "bucket,object"; + option (google.api.method_signature) = "bucket,object,generation"; + } + + // Reads an object's data. + // + // This bi-directional API reads data from an object, allowing you to request + // multiple data ranges within a single stream, even across several messages. + // If an error occurs with any request, the stream closes with a relevant + // error code. Since you can have multiple outstanding requests, the error + // response includes a `BidiReadObjectError` proto in its `details` field, + // reporting the specific error, if any, for each pending `read_id`. + // + // **IAM Permissions**: + // + // Requires `storage.objects.get` IAM permission on the bucket. + // + rpc BidiReadObject(stream BidiReadObjectRequest) + returns (stream BidiReadObjectResponse) { + option (google.api.routing) = { + routing_parameters { + field: "read_object_spec.bucket" + path_template: "{bucket=**}" + } + }; + } + + // Updates an object's metadata. + // Equivalent to JSON API's `storage.objects.patch` method. + // + // **IAM Permissions**: + // + // Requires `storage.objects.update` IAM permission on the bucket. 
+ rpc UpdateObject(UpdateObjectRequest) returns (Object) { + option (google.api.routing) = { + routing_parameters { field: "object.bucket" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "object,update_mask"; + } + + // Stores a new object and metadata. + // + // An object can be written either in a single message stream or in a + // resumable sequence of message streams. To write using a single stream, + // the client should include in the first message of the stream an + // `WriteObjectSpec` describing the destination bucket, object, and any + // preconditions. Additionally, the final message must set 'finish_write' to + // true, or else it is an error. + // + // For a resumable write, the client should instead call + // `StartResumableWrite()`, populating a `WriteObjectSpec` into that request. + // They should then attach the returned `upload_id` to the first message of + // each following call to `WriteObject`. If the stream is closed before + // finishing the upload (either explicitly by the client or due to a network + // error or an error response from the server), the client should do as + // follows: + // + // - Check the result Status of the stream, to determine if writing can be + // resumed on this stream or must be restarted from scratch (by calling + // `StartResumableWrite()`). The resumable errors are `DEADLINE_EXCEEDED`, + // `INTERNAL`, and `UNAVAILABLE`. For each case, the client should use + // binary exponential backoff before retrying. Additionally, writes can + // be resumed after `RESOURCE_EXHAUSTED` errors, but only after taking + // appropriate measures, which might include reducing aggregate send rate + // across clients and/or requesting a quota increase for your project. 
+ // - If the call to `WriteObject` returns `ABORTED`, that indicates + // concurrent attempts to update the resumable write, caused either by + // multiple racing clients or by a single client where the previous + // request was timed out on the client side but nonetheless reached the + // server. In this case the client should take steps to prevent further + // concurrent writes. For example, increase the timeouts and stop using + // more than one process to perform the upload. Follow the steps below for + // resuming the upload. + // - For resumable errors, the client should call `QueryWriteStatus()` and + // then continue writing from the returned `persisted_size`. This might be + // less than the amount of data the client previously sent. Note also that + // it is acceptable to send data starting at an offset earlier than the + // returned `persisted_size`; in this case, the service skips data at + // offsets that were already persisted (without checking that it matches + // the previously written data), and write only the data starting from the + // persisted offset. Even though the data isn't written, it might still + // incur a performance cost over resuming at the correct write offset. + // This behavior can make client-side handling simpler in some cases. + // - Clients must only send data that is a multiple of 256 KiB per message, + // unless the object is being finished with `finish_write` set to `true`. + // + // The service does not view the object as complete until the client has + // sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any + // requests on a stream after sending a request with `finish_write` set to + // `true` causes an error. The client must check the response it + // receives to determine how much data the service is able to commit and + // whether the service views the object as complete. 
+  //
+  // Attempting to resume an already finalized object results in an `OK`
+  // status, with a `WriteObjectResponse` containing the finalized object's
+  // metadata.
+  //
+  // Alternatively, you can use the `BidiWriteObject` operation to write an
+  // object with controls over flushing and the ability to determine the
+  // current persisted size.
+  //
+  // **IAM Permissions**:
+  //
+  // Requires `storage.objects.create`
+  // IAM permission on
+  // the bucket.
+  rpc WriteObject(stream WriteObjectRequest) returns (WriteObjectResponse) {}
+
+  // Stores a new object and metadata.
+  //
+  // This is similar to the `WriteObject` call with the added support for
+  // manual flushing of persisted state, and the ability to determine current
+  // persisted size without closing the stream.
+  //
+  // The client might specify one or both of the `state_lookup` and `flush`
+  // fields in each `BidiWriteObjectRequest`. If `flush` is specified, the data
+  // written so far is persisted to storage. If `state_lookup` is specified, the
+  // service responds with a `BidiWriteObjectResponse` that contains the
+  // persisted size. If both `flush` and `state_lookup` are specified, the flush
+  // always occurs before a `state_lookup`, so that both might be set in the
+  // same request and the returned state is the state of the object
+  // post-flush. When the stream is closed, a `BidiWriteObjectResponse`
+  // is always sent to the client, regardless of the value of `state_lookup`.
+  rpc BidiWriteObject(stream BidiWriteObjectRequest)
+      returns (stream BidiWriteObjectResponse) {}
+
+  // Retrieves a list of objects matching the criteria.
+  //
+  // **IAM Permissions**:
+  //
+  // The authenticated user requires `storage.objects.list`
+  // IAM permission to use this method. To return object ACLs, the
+  // authenticated user must also
+  // have the `storage.objects.getIamPolicy` permission.
+ rpc ListObjects(ListObjectsRequest) returns (ListObjectsResponse) { + option (google.api.routing) = { + routing_parameters { field: "parent" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = "parent"; + } + + // Rewrites a source object to a destination object. Optionally overrides + // metadata. + rpc RewriteObject(RewriteObjectRequest) returns (RewriteResponse) { + option (google.api.routing) = { + routing_parameters { field: "source_bucket" } + routing_parameters { + field: "destination_bucket" + path_template: "{bucket=**}" + } + }; + } + + // Starts a resumable write operation. This + // method is part of the Resumable + // upload feature. + // This allows you to upload large objects in multiple chunks, which is more + // resilient to network interruptions than a single upload. The validity + // duration of the write operation, and the consequences of it becoming + // invalid, are service-dependent. + // + // **IAM Permissions**: + // + // Requires `storage.objects.create` IAM permission on the bucket. + rpc StartResumableWrite(StartResumableWriteRequest) + returns (StartResumableWriteResponse) { + option (google.api.routing) = { + routing_parameters { + field: "write_object_spec.resource.bucket" + path_template: "{bucket=**}" + } + }; + } + + // Determines the `persisted_size` of an object that is being written. This + // method is part of the resumable + // upload feature. + // The returned value is the size of the object that has been persisted so + // far. The value can be used as the `write_offset` for the next `Write()` + // call. + // + // If the object does not exist, meaning if it was deleted, or the + // first `Write()` has not yet reached the service, this method returns the + // error `NOT_FOUND`. + // + // This method is useful for clients that buffer data and need to know which + // data can be safely evicted. 
The client can call `QueryWriteStatus()` at any + // time to determine how much data has been logged for this object. + // For any sequence of `QueryWriteStatus()` calls for a given + // object name, the sequence of returned `persisted_size` values are + // non-decreasing. + rpc QueryWriteStatus(QueryWriteStatusRequest) + returns (QueryWriteStatusResponse) { + option (google.api.routing) = { + routing_parameters { + field: "upload_id" + path_template: "{bucket=projects/*/buckets/*}/**" + } + }; + option (google.api.method_signature) = "upload_id"; + } + + // Moves the source object to the destination object in the same bucket. + // This operation moves a source object to a destination object in the + // same bucket by renaming the object. The move itself is an atomic + // transaction, ensuring all steps either complete successfully or no + // changes are made. + // + // **IAM Permissions**: + // + // Requires the following IAM permissions to use this method: + // + // - `storage.objects.move` + // - `storage.objects.create` + // - `storage.objects.delete` (only required if overwriting an existing + // object) + rpc MoveObject(MoveObjectRequest) returns (Object) { + option (google.api.routing) = { + routing_parameters { field: "bucket" path_template: "{bucket=**}" } + }; + option (google.api.method_signature) = + "bucket,source_object,destination_object"; + } +} + +// Request message for [DeleteBucket][google.storage.v2.Storage.DeleteBucket]. +message DeleteBucketRequest { + // Required. Name of a bucket to delete. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // If set, only deletes the bucket if its metageneration matches this value. + optional int64 if_metageneration_match = 2; + + // If set, only deletes the bucket if its metageneration does not match this + // value. 
+ optional int64 if_metageneration_not_match = 3; +} + +// Request message for [GetBucket][google.storage.v2.Storage.GetBucket]. +message GetBucketRequest { + // Required. Name of a bucket. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // If set, only gets the bucket metadata if its metageneration matches this + // value. + optional int64 if_metageneration_match = 2; + + // If set, and if the bucket's current metageneration matches the specified + // value, the request returns an error. + optional int64 if_metageneration_not_match = 3; + + // Mask specifying which fields to read. + // A `*` field might be used to indicate all fields. + // If no mask is specified, it defaults to all fields. + optional google.protobuf.FieldMask read_mask = 5; +} + +// Request message for [CreateBucket][google.storage.v2.Storage.CreateBucket]. +message CreateBucketRequest { + // Required. The project to which this bucket belongs. This field must either + // be empty or `projects/_`. The project ID that owns this bucket should be + // specified in the `bucket.project` field. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "storage.googleapis.com/Bucket" + } + ]; + + // Optional. Properties of the new bucket being inserted. + // The name of the bucket is specified in the `bucket_id` field. Populating + // `bucket.name` field results in an error. + // The project of the bucket must be specified in the `bucket.project` field. + // This field must be in `projects/{projectIdentifier}` format, + // {projectIdentifier} can be the project ID or project number. The `parent` + // field must be either empty or `projects/_`. + Bucket bucket = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Required. The ID to use for this bucket, which becomes the final component + // of the bucket's resource name. 
For example, the value `foo` might result in
+  // a bucket with the name `projects/123456/buckets/foo`.
+  string bucket_id = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Apply a predefined set of access controls to this bucket.
+  // Valid values are `authenticatedRead`, `private`, `projectPrivate`,
+  // `publicRead`, or `publicReadWrite`.
+  string predefined_acl = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Apply a predefined set of default object access controls to this
+  // bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+  // `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+  string predefined_default_object_acl = 7
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If true, enable object retention on the bucket.
+  bool enable_object_retention = 9 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request message for [ListBuckets][google.storage.v2.Storage.ListBuckets].
+message ListBucketsRequest {
+  // Required. The project whose buckets we are listing.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      child_type: "storage.googleapis.com/Bucket"
+    }
+  ];
+
+  // Optional. Maximum number of buckets to return in a single response. The
+  // service uses this parameter or `1,000` items, whichever is smaller. If
+  // `acl` is present in the `read_mask`, the service uses this parameter or
+  // `200` items, whichever is smaller.
+  int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A previously-returned page token representing part of the larger
+  // set of results to view.
+  string page_token = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Filter results to buckets whose names begin with this prefix.
+  string prefix = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Mask specifying which fields to read from each result.
+  // If no mask is specified, it defaults to all fields except `items.
+ // owner`, `items.acl`, and `items.default_object_acl`. + // `*` might be used to mean "all fields". + optional google.protobuf.FieldMask read_mask = 5; + + // Optional. Allows listing of buckets, even if there are buckets that are + // unreachable. + bool return_partial_success = 9 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response message for [ListBuckets][google.storage.v2.Storage.ListBuckets]. +message ListBucketsResponse { + // The list of items. + repeated Bucket buckets = 1; + + // The continuation token, used to page through large result sets. Provide + // this value in a subsequent request to return the next page of results. + string next_page_token = 2; + + // Unreachable resources. + // This field can only be present if the caller specified + // return_partial_success to be true in the request to receive indications + // of temporarily missing resources. + // unreachable might be: + // unreachable = [ + // "projects/_/buckets/bucket1", + // "projects/_/buckets/bucket2", + // "projects/_/buckets/bucket3", + // ] + repeated string unreachable = 3; +} + +// Request message for +// [LockBucketRetentionPolicy][google.storage.v2.Storage.LockBucketRetentionPolicy]. +message LockBucketRetentionPolicyRequest { + // Required. Name of a bucket. + string bucket = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // Required. Makes the operation conditional on whether bucket's current + // metageneration matches the given value. Must be positive. + int64 if_metageneration_match = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Request for [UpdateBucket][google.storage.v2.Storage.UpdateBucket] method. +message UpdateBucketRequest { + // Required. The bucket to update. + // The bucket's `name` field is used to identify the bucket. 
+ Bucket bucket = 1 [(google.api.field_behavior) = REQUIRED]; + + // If set, the request modifies the bucket if its metageneration matches this + // value. + optional int64 if_metageneration_match = 2; + + // If set, the request modifies the bucket if its metageneration doesn't + // match this value. + optional int64 if_metageneration_not_match = 3; + + // Optional. Apply a predefined set of access controls to this bucket. + // Valid values are `authenticatedRead`, `private`, `projectPrivate`, + // `publicRead`, or `publicReadWrite`. + string predefined_acl = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Apply a predefined set of default object access controls to this + // bucket. Valid values are `authenticatedRead`, `bucketOwnerFullControl`, + // `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`. + string predefined_default_object_acl = 9 + [(google.api.field_behavior) = OPTIONAL]; + + // Required. List of fields to be updated. + // + // To specify ALL fields, equivalent to the JSON API's "update" function, + // specify a single field with the value `*`. Note: not recommended. If a new + // field is introduced at a later time, an older client updating with the `*` + // might accidentally reset the new field's value. + // + // Not specifying any fields is an error. + google.protobuf.FieldMask update_mask = 6 + [(google.api.field_behavior) = REQUIRED]; +} + +// Request message for [ComposeObject][google.storage.v2.Storage.ComposeObject]. +message ComposeObjectRequest { + // Description of a source object for a composition request. + message SourceObject { + // Preconditions for a source object of a composition request. + message ObjectPreconditions { + // Only perform the composition if the generation of the source object + // that would be used matches this value. If this value and a generation + // are both specified, they must be the same value or the call fails. + optional int64 if_generation_match = 1; + } + + // Required. 
The source object's name. All source objects must reside in the + // same bucket. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The generation of this object to use as the source. + int64 generation = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Conditions that must be met for this operation to execute. + ObjectPreconditions object_preconditions = 3 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Required. Properties of the resulting object. + Object destination = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The list of source objects that is concatenated into a single + // object. + repeated SourceObject source_objects = 2 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Apply a predefined set of access controls to the destination + // object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`, + // `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`. + string destination_predefined_acl = 9 + [(google.api.field_behavior) = OPTIONAL]; + + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + optional int64 if_generation_match = 4; + + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + optional int64 if_metageneration_match = 5; + + // Optional. Resource name of the Cloud KMS key, of the form + // `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`, + // that is used to encrypt the object. Overrides the object + // metadata's `kms_key_name` value, if any. + string kms_key = 6 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; + + // Optional. A set of parameters common to Storage API requests concerning an + // object. 
+ CommonObjectRequestParams common_object_request_params = 7 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The checksums of the complete object. This is validated against + // the combined checksums of the component objects. + ObjectChecksums object_checksums = 10 + [(google.api.field_behavior) = OPTIONAL]; + + // Whether the source objects should be deleted in the compose request. + optional bool delete_source_objects = 11; +} + +// Request message for deleting an object. +message DeleteObjectRequest { + // Required. Name of the bucket in which the object resides. + string bucket = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // Required. The name of the finalized object to delete. + // Note: If you want to delete an unfinalized resumable upload please use + // `CancelResumableWrite`. + string object = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. If present, permanently deletes a specific revision of this + // object (as opposed to the latest version, the default). + int64 generation = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + optional int64 if_generation_match = 5; + + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + optional int64 if_generation_not_match = 6; + + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. 
+ optional int64 if_metageneration_match = 7; + + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + optional int64 if_metageneration_not_match = 8; + + // Optional. A set of parameters common to Storage API requests concerning an + // object. + CommonObjectRequestParams common_object_request_params = 10 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Request message for +// [RestoreObject][google.storage.v2.Storage.RestoreObject]. +// `bucket`, `object`, and `generation` **must** be set. +message RestoreObjectRequest { + // Required. Name of the bucket in which the object resides. + string bucket = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // Required. The name of the object to restore. + string object = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The specific revision of the object to restore. + int64 generation = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Restore token used to differentiate soft-deleted objects with the + // same name and generation. Only applicable for hierarchical namespace + // buckets. This parameter is optional, and is only required in the rare case + // when there are multiple soft-deleted objects with the same name and + // generation. + string restore_token = 11 [(google.api.field_behavior) = OPTIONAL]; + + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + optional int64 if_generation_match = 4; + + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. 
+ optional int64 if_generation_not_match = 5; + + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + optional int64 if_metageneration_match = 6; + + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + optional int64 if_metageneration_not_match = 7; + + // If false or unset, the bucket's default object ACL is used. + // If true, copy the source object's access controls. + // Return an error if bucket has UBLA enabled. + optional bool copy_source_acl = 9; + + // Optional. A set of parameters common to Storage API requests concerning an + // object. + CommonObjectRequestParams common_object_request_params = 8 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Request message for +// [CancelResumableWrite][google.storage.v2.Storage.CancelResumableWrite]. +message CancelResumableWriteRequest { + // Required. The upload_id of the resumable upload to cancel. This should be + // copied from the `upload_id` field of `StartResumableWriteResponse`. + string upload_id = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// Empty response message for canceling an in-progress resumable upload, is +// extended as needed. +message CancelResumableWriteResponse {} + +// Request message for [ReadObject][google.storage.v2.Storage.ReadObject]. +message ReadObjectRequest { + // Required. The name of the bucket containing the object to read. + string bucket = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // Required. The name of the object to read. + string object = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. If present, selects a specific revision of this object (as + // opposed to the latest version, the default). + int64 generation = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
The offset for the first byte to return in the read, relative to + // the start of the object. + // + // A negative `read_offset` value is interpreted as the number of bytes + // back from the end of the object to be returned. For example, if an object's + // length is `15` bytes, a `ReadObjectRequest` with `read_offset` = `-5` and + // `read_limit` = `3` would return bytes `10` through `12` of the object. + // Requesting a negative offset with magnitude larger than the size of the + // object returns the entire object. + int64 read_offset = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The maximum number of `data` bytes the server is allowed to + // return in the sum of all `Object` messages. A `read_limit` of zero + // indicates that there is no limit, and a negative `read_limit` causes an + // error. + // + // If the stream returns fewer bytes than allowed by the `read_limit` and no + // error occurred, the stream includes all data from the `read_offset` to the + // end of the resource. + int64 read_limit = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + optional int64 if_generation_match = 6; + + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + optional int64 if_generation_not_match = 7; + + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + optional int64 if_metageneration_match = 8; + + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + optional int64 if_metageneration_not_match = 9; + + // Optional. 
A set of parameters common to Storage API requests concerning an + // object. + CommonObjectRequestParams common_object_request_params = 10 + [(google.api.field_behavior) = OPTIONAL]; + + // Mask specifying which fields to read. + // The `checksummed_data` field and its children are always present. + // If no mask is specified, it defaults to all fields except `metadata. + // owner` and `metadata.acl`. + // `*` might be used to mean "all fields". + optional google.protobuf.FieldMask read_mask = 12; +} + +// Request message for [GetObject][google.storage.v2.Storage.GetObject]. +message GetObjectRequest { + // Required. Name of the bucket in which the object resides. + string bucket = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // Required. Name of the object. + string object = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. If present, selects a specific revision of this object (as + // opposed to the latest version, the default). + int64 generation = 3 [(google.api.field_behavior) = OPTIONAL]; + + // If true, return the soft-deleted version of this object. + optional bool soft_deleted = 11; + + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + optional int64 if_generation_match = 4; + + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + optional int64 if_generation_not_match = 5; + + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. 
+ optional int64 if_metageneration_match = 6; + + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + optional int64 if_metageneration_not_match = 7; + + // Optional. A set of parameters common to Storage API requests concerning an + // object. + CommonObjectRequestParams common_object_request_params = 8 + [(google.api.field_behavior) = OPTIONAL]; + + // Mask specifying which fields to read. + // If no mask is specified, it defaults to all fields except `metadata. + // acl` and `metadata.owner`. + // `*` might be used to mean "all fields". + optional google.protobuf.FieldMask read_mask = 10; + + // Optional. Restore token used to differentiate soft-deleted objects with the + // same name and generation. Only applicable for hierarchical namespace + // buckets and if `soft_deleted` is set to `true`. This parameter is optional, + // and is only required in the rare case when there are multiple soft-deleted + // objects with the same `name` and `generation`. + string restore_token = 12 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response message for [ReadObject][google.storage.v2.Storage.ReadObject]. +message ReadObjectResponse { + // A portion of the data for the object. The service might leave `data` + // empty for any given `ReadResponse`. This enables the service to inform the + // client that the request is still live while it is running an operation to + // generate more data. + ChecksummedData checksummed_data = 1; + + // The checksums of the complete object. If the object is downloaded in full, + // the client should compute one of these checksums over the downloaded object + // and compare it against the value provided here. + ObjectChecksums object_checksums = 2; + + // If `read_offset` and or `read_limit` is specified on the + // `ReadObjectRequest`, `ContentRange` is populated on the first + // `ReadObjectResponse` message of the read stream. 
+ ContentRange content_range = 3; + + // Metadata of the object whose media is being returned. + // Only populated in the first response in the stream. + Object metadata = 4; +} + +// Describes the object to read in a BidiReadObject request. +message BidiReadObjectSpec { + // Required. The name of the bucket containing the object to read. + string bucket = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // Required. The name of the object to read. + string object = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. If present, selects a specific revision of this object (as + // opposed to the latest version, the default). + int64 generation = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + optional int64 if_generation_match = 4; + + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + optional int64 if_generation_not_match = 5; + + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + optional int64 if_metageneration_match = 6; + + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + optional int64 if_metageneration_not_match = 7; + + // Optional. A set of parameters common to Storage API requests concerning an + // object. + CommonObjectRequestParams common_object_request_params = 8 + [(google.api.field_behavior) = OPTIONAL]; + + // Mask specifying which fields to read. 
+  // The `checksummed_data` field and its children are always present.
+  // If no mask is specified, it defaults to all fields except `metadata.
+  // owner` and `metadata.acl`.
+  // `*` might be used to mean "all fields".
+  // As per https://google.aip.dev/161, this field is deprecated.
+  // As an alternative, `grpc metadata` can be used:
+  // https://cloud.google.com/apis/docs/system-parameters#definitions
+  optional google.protobuf.FieldMask read_mask = 12 [deprecated = true];
+
+  // The client can optionally set this field. The read handle is an optimized
+  // way of creating new streams. Read handles are generated and periodically
+  // refreshed from prior reads.
+  optional BidiReadHandle read_handle = 13;
+
+  // The routing token that influences request routing for the stream. Must be
+  // provided if a BidiReadObjectRedirectedError is returned.
+  optional string routing_token = 14;
+}
+
+// Request message for
+// [BidiReadObject][google.storage.v2.Storage.BidiReadObject].
+message BidiReadObjectRequest {
+  // Optional. The first message of each stream should set this field. If this
+  // is not the first message, an error is returned. Describes the object to
+  // read.
+  BidiReadObjectSpec read_object_spec = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Provides a list of 0 or more (up to 100) ranges to read. If a
+  // single range is large enough to require multiple responses, they are
+  // delivered in increasing offset order. There are no ordering guarantees
+  // across ranges. When no ranges are provided, the response message
+  // doesn't include `ObjectRangeData`. For full object downloads, the
+  // offset and size can be set to `0`.
+  repeated ReadRange read_ranges = 8 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Response message for
+// [BidiReadObject][google.storage.v2.Storage.BidiReadObject].
+message BidiReadObjectResponse {
+  // A portion of the object's data. The service might leave data
+  // empty for any given `ReadResponse`.
This enables the service to inform the + // client that the request is still live while it is running an operation to + // generate more data. + // The service might pipeline multiple responses belonging to different read + // requests. Each `ObjectRangeData` entry has a `read_id` that is set + // to the same value as the corresponding source read request. + repeated ObjectRangeData object_data_ranges = 6; + + // Metadata of the object whose media is being returned. + // Only populated in the first response in the stream and not populated when + // the stream is opened with a read handle. + Object metadata = 4; + + // This field is periodically refreshed, however it might not be set in + // every response. It allows the client to more efficiently open subsequent + // bidirectional streams to the same object. + BidiReadHandle read_handle = 7; +} + +// Error proto containing details for a redirected read. This error might be +// attached as details for an ABORTED response to BidiReadObject. +message BidiReadObjectRedirectedError { + // The read handle for the redirected read. If set, the client might use this + // in the BidiReadObjectSpec when retrying the read stream. + BidiReadHandle read_handle = 1; + + // The routing token the client must use when retrying the read stream. + // This value must be provided in the header `x-goog-request-params`, with key + // `routing_token` and this string verbatim as the value. + optional string routing_token = 2; +} + +// Error proto containing details for a redirected write. This error might be +// attached as details for an ABORTED response to BidiWriteObject. +message BidiWriteObjectRedirectedError { + // The routing token the client must use when retrying the write stream. + // This value must be provided in the header `x-goog-request-params`, with key + // `routing_token` and this string verbatim as the value. + optional string routing_token = 1; + + // Opaque value describing a previous write. 
If set, the client must use this + // in an AppendObjectSpec first_message when retrying the write stream. If not + // set, clients might retry the original request. + optional BidiWriteHandle write_handle = 2; + + // The generation of the object that triggered the redirect. This is set + // iff `write_handle` is set. If set, the client must use this in an + // `AppendObjectSpec` first_message when retrying the write stream. + optional int64 generation = 3; +} + +// Error extension proto containing details for all outstanding reads on the +// failed stream +message BidiReadObjectError { + // The error code for each outstanding read_range + repeated ReadRangeError read_range_errors = 1; +} + +// Error extension proto containing details for a single range read +message ReadRangeError { + // The id of the corresponding read_range + int64 read_id = 1; + + // The status which should be an enum value of [google.rpc.Code]. + google.rpc.Status status = 2; +} + +// Describes a range of bytes to read in a `BidiReadObjectRanges` request. +message ReadRange { + // Required. The offset for the first byte to return in the read, relative to + // the start of the object. + // + // A negative read_offset value is interpreted as the number of bytes + // back from the end of the object to be returned. For example, if an object's + // length is 15 bytes, a `ReadObjectRequest` with `read_offset` = -5 and + // `read_length` = 3 would return bytes 10 through 12 of the object. + // Requesting a negative offset with magnitude larger than the size of the + // object is equivalent to `read_offset` = 0. A `read_offset` larger than the + // size of the object results in an `OutOfRange` error. + int64 read_offset = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The maximum number of data bytes the server is allowed to return + // across all response messages with the same `read_id`. 
A `read_length` of + // zero indicates to read until the resource end, and a negative `read_length` + // causes an `OutOfRange` error. If the stream returns fewer bytes than + // allowed by the `read_length` and no error occurred, the stream includes all + // data from the `read_offset` to the resource end. + int64 read_length = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Required. Read identifier provided by the client. When the client issues + // more than one outstanding `ReadRange` on the same stream, responses can be + // mapped back to their corresponding requests using this value. Clients must + // ensure that all outstanding requests have different read_id values. The + // server might close the stream with an error if this condition is not met. + int64 read_id = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Contains data and metadata for a range of an object. +message ObjectRangeData { + // A portion of the data for the object. + ChecksummedData checksummed_data = 1; + + // The `ReadRange` describes the content being returned with `read_id` set to + // the corresponding `ReadObjectRequest` in the stream. Multiple + // `ObjectRangeData` messages might have the same read_id but increasing + // offsets. `ReadObjectResponse` messages with the same `read_id` are + // guaranteed to be delivered in increasing offset order. + ReadRange read_range = 2; + + // If set, indicates there are no more bytes to read for the given ReadRange. + bool range_end = 3; +} + +// `BidiReadHandle` contains a handle from a previous `BiDiReadObject` +// invocation. The client can use this instead of `BidiReadObjectSpec` as an +// optimized way of opening subsequent bidirectional streams to the same object. +message BidiReadHandle { + // Required. Opaque value describing a previous read. + bytes handle = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// `BidiWriteHandle` contains a handle from a previous `BidiWriteObject` +// invocation. 
The client can use this as an optimized
+// way of opening subsequent bidirectional write streams to the same object.
+message BidiWriteHandle {
+  // Required. Opaque value describing a previous write.
+  bytes handle = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Describes an attempt to insert an object, possibly over multiple requests.
+message WriteObjectSpec {
+  // Required. Destination object, including its name and its metadata.
+  Object resource = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Apply a predefined set of access controls to this object.
+  // Valid values are `authenticatedRead`, `bucketOwnerFullControl`,
+  // `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`.
+  string predefined_acl = 7 [(google.api.field_behavior) = OPTIONAL];
+
+  // Makes the operation conditional on whether the object's current
+  // generation matches the given value. Setting to `0` makes the operation
+  // succeed only if there are no live versions of the object.
+  optional int64 if_generation_match = 3;
+
+  // Makes the operation conditional on whether the object's live
+  // generation does not match the given value. If no live object exists, the
+  // precondition fails. Setting to `0` makes the operation succeed only if
+  // there is a live version of the object.
+  optional int64 if_generation_not_match = 4;
+
+  // Makes the operation conditional on whether the object's current
+  // metageneration matches the given value.
+  optional int64 if_metageneration_match = 5;
+
+  // Makes the operation conditional on whether the object's current
+  // metageneration does not match the given value.
+  optional int64 if_metageneration_not_match = 6;
+
+  // The expected final object size being uploaded.
+  // If this value is set, closing the stream after writing fewer or more than
+  // `object_size` bytes results in an `OUT_OF_RANGE` error.
+ // + // This situation is considered a client error, and if such an error occurs + // you must start the upload over from scratch, this time sending the correct + // number of bytes. + optional int64 object_size = 8; + + // If `true`, the object is created in appendable mode. + // This field might only be set when using `BidiWriteObject`. + optional bool appendable = 9; +} + +// Request message for [WriteObject][google.storage.v2.Storage.WriteObject]. +message WriteObjectRequest { + // The first message of each stream should set one of the following. + oneof first_message { + // For resumable uploads. This should be the `upload_id` returned from a + // call to `StartResumableWriteResponse`. + string upload_id = 1; + + // For non-resumable uploads. Describes the overall upload, including the + // destination bucket and object name, preconditions, etc. + WriteObjectSpec write_object_spec = 2; + } + + // Required. The offset from the beginning of the object at which the data + // should be written. + // + // In the first `WriteObjectRequest` of a `WriteObject()` action, it + // indicates the initial offset for the `Write()` call. The value **must** be + // equal to the `persisted_size` that a call to `QueryWriteStatus()` would + // return (0 if this is the first write to the object). + // + // On subsequent calls, this value **must** be no larger than the sum of the + // first `write_offset` and the sizes of all `data` chunks sent previously on + // this stream. + // + // An incorrect value causes an error. + int64 write_offset = 3 [(google.api.field_behavior) = REQUIRED]; + + // A portion of the data for the object. + oneof data { + // The data to insert. If a crc32c checksum is provided that doesn't match + // the checksum computed by the service, the request fails. + ChecksummedData checksummed_data = 4; + } + + // Optional. Checksums for the complete object. If the checksums computed by + // the service don't match the specified checksums the call fails. 
This field + // might only be provided in the first or last request (either with + // `first_message`, or `finish_write` set). + ObjectChecksums object_checksums = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If `true`, this indicates that the write is complete. Sending any + // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` + // causes an error. + // For a non-resumable write (where the `upload_id` was not set in the first + // message), it is an error not to set this field in the final message of the + // stream. + bool finish_write = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A set of parameters common to Cloud Storage API requests + // concerning an object. + CommonObjectRequestParams common_object_request_params = 8 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Response message for +// [WriteObject][google.storage.v2.Storage.WriteObject]. +message WriteObjectResponse { + // The response sets one of the following. + oneof write_status { + // The total number of bytes that have been processed for the given object + // from all `WriteObject` calls. Only set if the upload has not finalized. + int64 persisted_size = 1; + + // A resource containing the metadata for the uploaded object. Only set if + // the upload has finalized. + Object resource = 2; + } +} + +// Describes an attempt to append to an object, possibly over multiple requests. +message AppendObjectSpec { + // Required. The name of the bucket containing the object to write. + string bucket = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // Required. The name of the object to open for writing. + string object = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The generation number of the object to open for writing. 
+ int64 generation = 3 [(google.api.field_behavior) = REQUIRED]; + + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + // + // Note that metageneration preconditions are only checked if `write_handle` + // is empty. + optional int64 if_metageneration_match = 4; + + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + // + // Note that metageneration preconditions are only checked if `write_handle` + // is empty. + optional int64 if_metageneration_not_match = 5; + + // An optional routing token that influences request routing for the stream. + // Must be provided if a `BidiWriteObjectRedirectedError` is returned. + optional string routing_token = 6; + + // An optional write handle returned from a previous BidiWriteObjectResponse + // message or a BidiWriteObjectRedirectedError error. + // + // Note that metageneration preconditions are only checked if `write_handle` + // is empty. + optional BidiWriteHandle write_handle = 7; +} + +// Request message for +// [BidiWriteObject][google.storage.v2.Storage.BidiWriteObject]. +message BidiWriteObjectRequest { + // The first message of each stream should set one of the following. + oneof first_message { + // For resumable uploads. This should be the `upload_id` returned from a + // call to `StartResumableWriteResponse`. + string upload_id = 1; + + // For non-resumable uploads. Describes the overall upload, including the + // destination bucket and object name, preconditions, etc. + WriteObjectSpec write_object_spec = 2; + + // For appendable uploads. Describes the object to append to. + AppendObjectSpec append_object_spec = 11; + } + + // Required. The offset from the beginning of the object at which the data + // should be written. + // + // In the first `WriteObjectRequest` of a `WriteObject()` action, it + // indicates the initial offset for the `Write()` call. 
The value must be + // equal to the `persisted_size` that a call to `QueryWriteStatus()` would + // return (0 if this is the first write to the object). + // + // On subsequent calls, this value must be no larger than the sum of the + // first `write_offset` and the sizes of all `data` chunks sent previously on + // this stream. + // + // An invalid value causes an error. + int64 write_offset = 3 [(google.api.field_behavior) = REQUIRED]; + + // A portion of the data for the object. + oneof data { + // The data to insert. If a crc32c checksum is provided that doesn't match + // the checksum computed by the service, the request fails. + ChecksummedData checksummed_data = 4; + } + + // Optional. Checksums for the complete object. If the checksums computed by + // the service don't match the specified checksums the call fails. Might only + // be provided in the first request or the last request (with finish_write + // set). + ObjectChecksums object_checksums = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. For each `BidiWriteObjectRequest` where `state_lookup` is `true` + // or the client closes the stream, the service sends a + // `BidiWriteObjectResponse` containing the current persisted size. The + // persisted size sent in responses covers all the bytes the server has + // persisted thus far and can be used to decide what data is safe for the + // client to drop. Note that the object's current size reported by the + // `BidiWriteObjectResponse` might lag behind the number of bytes written by + // the client. This field is ignored if `finish_write` is set to true. + bool state_lookup = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Persists data written on the stream, up to and including the + // current message, to permanent storage. This option should be used sparingly + // as it might reduce performance. Ongoing writes are periodically persisted + // on the server even when `flush` is not set. 
This field is ignored if + // `finish_write` is set to true since there's no need to checkpoint or flush + // if this message completes the write. + bool flush = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If `true`, this indicates that the write is complete. Sending any + // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` + // causes an error. + // For a non-resumable write (where the `upload_id` was not set in the first + // message), it is an error not to set this field in the final message of the + // stream. + bool finish_write = 9 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A set of parameters common to Storage API requests concerning an + // object. + CommonObjectRequestParams common_object_request_params = 10 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Response message for BidiWriteObject. +message BidiWriteObjectResponse { + // The response sets one of the following. + oneof write_status { + // The total number of bytes that have been processed for the given object + // from all `WriteObject` calls. Only set if the upload has not finalized. + int64 persisted_size = 1; + + // A resource containing the metadata for the uploaded object. Only set if + // the upload has finalized. + Object resource = 2; + } + + // An optional write handle that is returned periodically in response + // messages. Clients should save it for later use in establishing a new stream + // if a connection is interrupted. + optional BidiWriteHandle write_handle = 3; +} + +// Request message for [ListObjects][google.storage.v2.Storage.ListObjects]. +message ListObjectsRequest { + // Required. Name of the bucket in which to look for objects. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // Optional. Maximum number of `items` plus `prefixes` to return + // in a single page of responses. 
As duplicate `prefixes` are + // omitted, fewer total results might be returned than requested. The service + // uses this parameter or 1,000 items, whichever is smaller. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A previously-returned page token representing part of the larger + // set of results to view. + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set, returns results in a directory-like mode. `items` + // contains only objects whose names, aside from the `prefix`, do not contain + // `delimiter`. Objects whose names, aside from the `prefix`, contain + // `delimiter` has their name, truncated after the `delimiter`, returned in + // `prefixes`. Duplicate `prefixes` are omitted. + string delimiter = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If true, objects that end in exactly one instance of `delimiter` + // has their metadata included in `items` in addition to + // `prefixes`. + bool include_trailing_delimiter = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Filter results to objects whose names begin with this prefix. + string prefix = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If `true`, lists all versions of an object as distinct results. + bool versions = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Mask specifying which fields to read from each result. + // If no mask is specified, defaults to all fields except `items.acl` and + // `items.owner`. + // `*` might be used to mean all fields. + optional google.protobuf.FieldMask read_mask = 8; + + // Optional. Filter results to objects whose names are lexicographically equal + // to or after `lexicographic_start`. If `lexicographic_end` is also set, the + // objects listed have names between `lexicographic_start` (inclusive) and + // `lexicographic_end` (exclusive). + string lexicographic_start = 10 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
Filter results to objects whose names are lexicographically + // before `lexicographic_end`. If `lexicographic_start` is also set, the + // objects listed have names between `lexicographic_start` (inclusive) and + // `lexicographic_end` (exclusive). + string lexicographic_end = 11 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If true, only list all soft-deleted versions of the object. + // Soft delete policy is required to set this option. + bool soft_deleted = 12 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If true, includes folders and managed folders (besides objects) + // in the returned `prefixes`. Requires `delimiter` to be set to '/'. + bool include_folders_as_prefixes = 13 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Filter results to objects and prefixes that match this glob + // pattern. See [List objects using + // glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob) + // for the full syntax. + string match_glob = 14 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. An expression used to filter the returned objects by the + // `context` field. For the full syntax, see [Filter objects by contexts + // syntax](https://cloud.google.com/storage/docs/listing-objects#filter-by-object-contexts-syntax). + // If a `delimiter` is set, the returned `prefixes` are exempt from this + // filter. + string filter = 15 [(google.api.field_behavior) = OPTIONAL]; +} + +// Request object for +// [QueryWriteStatus][google.storage.v2.Storage.QueryWriteStatus]. +message QueryWriteStatusRequest { + // Required. The name of the resume token for the object whose write status is + // being requested. + string upload_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. A set of parameters common to Storage API requests concerning an + // object. 
+ CommonObjectRequestParams common_object_request_params = 2 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Response object for +// [QueryWriteStatus][google.storage.v2.Storage.QueryWriteStatus]. +message QueryWriteStatusResponse { + // The response sets one of the following. + oneof write_status { + // The total number of bytes that have been processed for the given object + // from all `WriteObject` calls. This is the correct value for the + // 'write_offset' field to use when resuming the `WriteObject` operation. + // Only set if the upload has not finalized. + int64 persisted_size = 1; + + // A resource containing the metadata for the uploaded object. Only set if + // the upload has finalized. + Object resource = 2; + } +} + +// Request message for [RewriteObject][google.storage.v2.Storage.RewriteObject]. +// If the source object is encrypted using a Customer-Supplied Encryption Key +// the key information must be provided in the +// `copy_source_encryption_algorithm`, `copy_source_encryption_key_bytes`, and +// `copy_source_encryption_key_sha256_bytes` fields. If the destination object +// should be encrypted the keying information should be provided in the +// `encryption_algorithm`, `encryption_key_bytes`, and +// `encryption_key_sha256_bytes` fields of the +// `common_object_request_params.customer_encryption` field. +message RewriteObjectRequest { + // Required. Immutable. The name of the destination object. + // See the + // [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming). + // Example: `test.txt` + // The `name` field by itself does not uniquely identify a Cloud Storage + // object. A Cloud Storage object is uniquely identified by the tuple of + // (bucket, object, generation). + string destination_name = 24 [ + (google.api.field_behavior) = REQUIRED, + (google.api.field_behavior) = IMMUTABLE + ]; + + // Required. Immutable. The name of the bucket containing the destination + // object. 
+ string destination_bucket = 25 [ + (google.api.field_behavior) = REQUIRED, + (google.api.field_behavior) = IMMUTABLE, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // Optional. The name of the Cloud KMS key that is used to encrypt the + // destination object. The Cloud KMS key must be located in same location as + // the object. If the parameter is not specified, the request uses the + // destination bucket's default encryption key, if any, or else the + // Google-managed encryption key. + string destination_kms_key = 27 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; + + // Optional. Properties of the destination, post-rewrite object. + // The `name`, `bucket` and `kms_key` fields must not be populated (these + // values are specified in the `destination_name`, `destination_bucket`, and + // `destination_kms_key` fields). + // If `destination` is present it is used to construct the destination + // object's metadata; otherwise the destination object's metadata is + // copied from the source object. + Object destination = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Required. Name of the bucket in which to find the source object. + string source_bucket = 2 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // Required. Name of the source object. + string source_object = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. If present, selects a specific revision of the source object (as + // opposed to the latest version, the default). + int64 source_generation = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Include this field (from the previous rewrite response) on each + // rewrite request after the first one, until the rewrite response 'done' flag + // is true. 
Calls that provide a rewriteToken can omit all other request + // fields, but if included those fields must match the values provided in the + // first rewrite request. + string rewrite_token = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Apply a predefined set of access controls to the destination + // object. Valid values are `authenticatedRead`, `bucketOwnerFullControl`, + // `bucketOwnerRead`, `private`, `projectPrivate`, or `publicRead`. + string destination_predefined_acl = 28 + [(google.api.field_behavior) = OPTIONAL]; + + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + optional int64 if_generation_match = 7; + + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + optional int64 if_generation_not_match = 8; + + // Makes the operation conditional on whether the destination object's current + // metageneration matches the given value. + optional int64 if_metageneration_match = 9; + + // Makes the operation conditional on whether the destination object's current + // metageneration does not match the given value. + optional int64 if_metageneration_not_match = 10; + + // Makes the operation conditional on whether the source object's live + // generation matches the given value. + optional int64 if_source_generation_match = 11; + + // Makes the operation conditional on whether the source object's live + // generation does not match the given value. + optional int64 if_source_generation_not_match = 12; + + // Makes the operation conditional on whether the source object's current + // metageneration matches the given value. 
+ optional int64 if_source_metageneration_match = 13; + + // Makes the operation conditional on whether the source object's current + // metageneration does not match the given value. + optional int64 if_source_metageneration_not_match = 14; + + // Optional. The maximum number of bytes that are rewritten per rewrite + // request. Most callers shouldn't need to specify this parameter - it is + // primarily in place to support testing. If specified the value must be an + // integral multiple of 1 MiB (1048576). Also, this only applies to requests + // where the source and destination span locations and/or storage classes. + // Finally, this value must not change across rewrite calls else you'll get an + // error that the `rewriteToken` is invalid. + int64 max_bytes_rewritten_per_call = 15 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The algorithm used to encrypt the source object, if any. Used if + // the source object was encrypted with a Customer-Supplied Encryption Key. + string copy_source_encryption_algorithm = 16 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The raw bytes (not base64-encoded) AES-256 encryption key used to + // encrypt the source object, if it was encrypted with a Customer-Supplied + // Encryption Key. + bytes copy_source_encryption_key_bytes = 21 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The raw bytes (not base64-encoded) SHA256 hash of the encryption + // key used to encrypt the source object, if it was encrypted with a + // Customer-Supplied Encryption Key. + bytes copy_source_encryption_key_sha256_bytes = 22 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A set of parameters common to Storage API requests concerning an + // object. + CommonObjectRequestParams common_object_request_params = 19 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The checksums of the complete object. This is used to validate + // the destination object after rewriting. 
+ ObjectChecksums object_checksums = 29 + [(google.api.field_behavior) = OPTIONAL]; +} + +// A rewrite response. +message RewriteResponse { + // The total bytes written so far, which can be used to provide a waiting user + // with a progress indicator. This property is always present in the response. + int64 total_bytes_rewritten = 1; + + // The total size of the object being copied in bytes. This property is always + // present in the response. + int64 object_size = 2; + + // `true` if the copy is finished; otherwise, `false` if + // the copy is in progress. This property is always present in the response. + bool done = 3; + + // A token to use in subsequent requests to continue copying data. This token + // is present in the response only when there is more data to copy. + string rewrite_token = 4; + + // A resource containing the metadata for the copied-to object. This property + // is present in the response only when copying completes. + Object resource = 5; +} + +// Request message for [MoveObject][google.storage.v2.Storage.MoveObject]. +message MoveObjectRequest { + // Required. Name of the bucket in which the object resides. + string bucket = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // Required. Name of the source object. + string source_object = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Name of the destination object. + string destination_object = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Makes the operation conditional on whether the source object's + // current generation matches the given value. `if_source_generation_match` + // and `if_source_generation_not_match` conditions are mutually exclusive: + // it's an error for both of them to be set in the request. + optional int64 if_source_generation_match = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
Makes the operation conditional on whether the source object's + // current generation does not match the given value. + // `if_source_generation_match` and `if_source_generation_not_match` + // conditions are mutually exclusive: it's an error for both of them to be set + // in the request. + optional int64 if_source_generation_not_match = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Makes the operation conditional on whether the source object's + // current metageneration matches the given value. + // `if_source_metageneration_match` and `if_source_metageneration_not_match` + // conditions are mutually exclusive: it's an error for both of them to be set + // in the request. + optional int64 if_source_metageneration_match = 6 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Makes the operation conditional on whether the source object's + // current metageneration does not match the given value. + // `if_source_metageneration_match` and `if_source_metageneration_not_match` + // conditions are mutually exclusive: it's an error for both of them to be set + // in the request. + optional int64 if_source_metageneration_not_match = 7 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Makes the operation conditional on whether the destination + // object's current generation matches the given value. Setting to 0 makes the + // operation succeed only if there are no live versions of the object. + // `if_generation_match` and `if_generation_not_match` conditions are mutually + // exclusive: it's an error for both of them to be set in the request. + optional int64 if_generation_match = 8 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Makes the operation conditional on whether the destination + // object's current generation does not match the given value. If no live + // object exists, the precondition fails. Setting to 0 makes the operation + // succeed only if there is a live version of the object. 
+  // `if_generation_match` and `if_generation_not_match` conditions are mutually
+  // exclusive: it's an error for both of them to be set in the request.
+  optional int64 if_generation_not_match = 9
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Makes the operation conditional on whether the destination
+  // object's current metageneration matches the given value.
+  // `if_metageneration_match` and `if_metageneration_not_match` conditions are
+  // mutually exclusive: it's an error for both of them to be set in the
+  // request.
+  optional int64 if_metageneration_match = 10
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Makes the operation conditional on whether the destination
+  // object's current metageneration does not match the given value.
+  // `if_metageneration_match` and `if_metageneration_not_match` conditions are
+  // mutually exclusive: it's an error for both of them to be set in the
+  // request.
+  optional int64 if_metageneration_not_match = 11
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request message for
+// [StartResumableWrite][google.storage.v2.Storage.StartResumableWrite].
+message StartResumableWriteRequest {
+  // Required. Contains the information necessary to start a resumable write.
+  WriteObjectSpec write_object_spec = 1
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. A set of parameters common to Storage API requests related to an
+  // object.
+  CommonObjectRequestParams common_object_request_params = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The checksums of the complete object. This is used to validate
+  // the uploaded object. For each upload, `object_checksums` can be provided
+  // when initiating a resumable upload with `StartResumableWriteRequest` or
+  // when completing a write with `WriteObjectRequest` with
+  // `finish_write` set to `true`.
+ ObjectChecksums object_checksums = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response object for +// [StartResumableWrite][google.storage.v2.Storage.StartResumableWrite]. +message StartResumableWriteResponse { + // A unique identifier for the initiated resumable write operation. + // As the ID grants write access, you should keep it confidential during + // the upload to prevent unauthorized access and data tampering during your + // upload. This ID should be included in subsequent `WriteObject` requests to + // upload the object data. + string upload_id = 1; +} + +// Request message for [UpdateObject][google.storage.v2.Storage.UpdateObject]. +message UpdateObjectRequest { + // Required. The object to update. + // The object's bucket and name fields are used to identify the object to + // update. If present, the object's generation field selects a specific + // revision of this object whose metadata should be updated. Otherwise, + // assumes the live version of the object. + Object object = 1 [(google.api.field_behavior) = REQUIRED]; + + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + optional int64 if_generation_match = 2; + + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + optional int64 if_generation_not_match = 3; + + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + optional int64 if_metageneration_match = 4; + + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + optional int64 if_metageneration_not_match = 5; + + // Optional. 
Apply a predefined set of access controls to this object. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + string predefined_acl = 10 [(google.api.field_behavior) = OPTIONAL]; + + // Required. List of fields to be updated. + // + // To specify ALL fields, equivalent to the JSON API's "update" function, + // specify a single field with the value `*`. Note: not recommended. If a new + // field is introduced at a later time, an older client updating with the `*` + // might accidentally reset the new field's value. + // + // Not specifying any fields is an error. + google.protobuf.FieldMask update_mask = 7 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. A set of parameters common to Storage API requests concerning an + // object. + CommonObjectRequestParams common_object_request_params = 8 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Overrides the unlocked retention config on the object. + bool override_unlocked_retention = 11 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Parameters that can be passed to any object request. +message CommonObjectRequestParams { + // Optional. Encryption algorithm used with the Customer-Supplied Encryption + // Keys feature. + string encryption_algorithm = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Encryption key used with the Customer-Supplied Encryption Keys + // feature. In raw bytes format (not base64-encoded). + bytes encryption_key_bytes = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. SHA256 hash of encryption key used with the Customer-supplied + // encryption keys feature. + bytes encryption_key_sha256_bytes = 5 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Shared constants. +message ServiceConstants { + // A collection of constant values meaningful to the Storage API. + enum Values { + option allow_alias = true; + + // Unused. Proto3 requires first enum to be 0. 
+ VALUES_UNSPECIFIED = 0; + + // The maximum size chunk that can be returned in a single + // `ReadRequest`. + // 2 MiB. + MAX_READ_CHUNK_BYTES = 2097152; + + // The maximum size chunk that can be sent in a single WriteObjectRequest. + // 2 MiB. + MAX_WRITE_CHUNK_BYTES = 2097152; + + // The maximum size of an object in MB - whether written in a single stream + // or composed from multiple other objects. + // 5 TiB. + MAX_OBJECT_SIZE_MB = 5242880; + + // The maximum length field name that can be sent in a single + // custom metadata field. + // 1 KiB. + MAX_CUSTOM_METADATA_FIELD_NAME_BYTES = 1024; + + // The maximum length field value that can be sent in a single + // custom_metadata field. + // 4 KiB. + MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES = 4096; + + // The maximum total bytes that can be populated into all field names and + // values of the custom_metadata for one object. + // 8 KiB. + MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES = 8192; + + // The maximum total bytes that can be populated into all bucket metadata + // fields. + // 20 KiB. + MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES = 20480; + + // The maximum number of NotificationConfigs that can be registered + // for a given bucket. + MAX_NOTIFICATION_CONFIGS_PER_BUCKET = 100; + + // The maximum number of LifecycleRules that can be registered for a given + // bucket. + MAX_LIFECYCLE_RULES_PER_BUCKET = 100; + + // The maximum number of custom attributes per NotificationConfigs. + MAX_NOTIFICATION_CUSTOM_ATTRIBUTES = 5; + + // The maximum length of a custom attribute key included in + // NotificationConfig. + MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH = 256; + + // The maximum length of a custom attribute value included in a + // NotificationConfig. + MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH = 1024; + + // The maximum number of key/value entries per bucket label. + MAX_LABELS_ENTRIES_COUNT = 64; + + // The maximum character length of the key or value in a bucket + // label map. 
+ MAX_LABELS_KEY_VALUE_LENGTH = 63; + + // The maximum byte size of the key or value in a bucket label + // map. + MAX_LABELS_KEY_VALUE_BYTES = 128; + + // The maximum number of object IDs that can be included in a + // DeleteObjectsRequest. + MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST = 1000; + + // The maximum number of days for which a token returned by the + // GetListObjectsSplitPoints RPC is valid. + SPLIT_TOKEN_MAX_VALID_DAYS = 14; + } +} + +// A bucket. +message Bucket { + option (google.api.resource) = { + type: "storage.googleapis.com/Bucket" + pattern: "projects/{project}/buckets/{bucket}" + plural: "buckets" + singular: "bucket" + }; + + // Billing properties of a bucket. + message Billing { + // Optional. When set to true, Requester Pays is enabled for this bucket. + bool requester_pays = 1 [(google.api.field_behavior) = OPTIONAL]; + } + + // Cross-Origin Response sharing (CORS) properties for a bucket. + // For more on Cloud Storage and CORS, see + // https://cloud.google.com/storage/docs/cross-origin. + // For more on CORS in general, see https://tools.ietf.org/html/rfc6454. + message Cors { + // Optional. The list of origins eligible to receive CORS response headers. + // For more information about origins, see [RFC + // 6454](https://tools.ietf.org/html/rfc6454). Note: `*` is permitted in the + // list of origins, and means `any origin`. + repeated string origin = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The list of HTTP methods on which to include CORS response + // headers, + // (`GET`, `OPTIONS`, `POST`, etc) Note: `*` is permitted in the list of + // methods, and means "any method". + repeated string method = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The list of HTTP headers other than the [simple response + // headers](https://www.w3.org/TR/cors/#simple-response-headers) to give + // permission for the user-agent to share across domains. 
+ repeated string response_header = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The value, in seconds, to return in the [Access-Control-Max-Age + // header](https://www.w3.org/TR/cors/#access-control-max-age-response-header) + // used in preflight responses. + int32 max_age_seconds = 4 [(google.api.field_behavior) = OPTIONAL]; + } + + // Encryption properties of a bucket. + message Encryption { + // Google Managed Encryption (GMEK) enforcement config of a bucket. + message GoogleManagedEncryptionEnforcementConfig { + // Restriction mode for google-managed encryption for new objects within + // the bucket. Valid values are: `NotRestricted` and `FullyRestricted`. + // If `NotRestricted` or unset, creation of new objects with + // google-managed encryption is allowed. + // If `FullyRestricted`, new objects can't be created using google-managed + // encryption. + optional string restriction_mode = 3; + + // Time from which the config was effective. This is service-provided. + optional google.protobuf.Timestamp effective_time = 2; + } + + // Customer Managed Encryption (CMEK) enforcement config of a bucket. + message CustomerManagedEncryptionEnforcementConfig { + // Restriction mode for customer-managed encryption for new objects within + // the bucket. Valid values are: `NotRestricted` and `FullyRestricted`. + // If `NotRestricted` or unset, creation of new objects with + // customer-managed encryption is allowed. + // If `FullyRestricted`, new objects can't be created using + // customer-managed encryption. + optional string restriction_mode = 3; + + // Time from which the config was effective. This is service-provided. + optional google.protobuf.Timestamp effective_time = 2; + } + + // Customer Supplied Encryption (CSEK) enforcement config of a bucket. + message CustomerSuppliedEncryptionEnforcementConfig { + // Restriction mode for customer-supplied encryption for new objects + // within the bucket. 
Valid values are: `NotRestricted` and + // `FullyRestricted`. + // If `NotRestricted` or unset, creation of new objects with + // customer-supplied encryption is allowed. + // If `FullyRestricted`, new objects can't be created using + // customer-supplied encryption. + optional string restriction_mode = 3; + + // Time from which the config was effective. This is service-provided. + optional google.protobuf.Timestamp effective_time = 2; + } + + // Optional. The name of the Cloud KMS key that is used to encrypt objects + // inserted into this bucket, if no encryption method is specified. + string default_kms_key = 1 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; + + // Optional. If omitted, then new objects with GMEK encryption-type is + // allowed. If set, then new objects created in this bucket must comply with + // enforcement config. Changing this has no effect on existing objects; it + // applies to new objects only. + optional GoogleManagedEncryptionEnforcementConfig + google_managed_encryption_enforcement_config = 2 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If omitted, then new objects with CMEK encryption-type is + // allowed. If set, then new objects created in this bucket must comply with + // enforcement config. Changing this has no effect on existing objects; it + // applies to new objects only. + optional CustomerManagedEncryptionEnforcementConfig + customer_managed_encryption_enforcement_config = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If omitted, then new objects with CSEK encryption-type is + // allowed. If set, then new objects created in this bucket must comply with + // enforcement config. Changing this has no effect on existing objects; it + // applies to new objects only. 
+ optional CustomerSuppliedEncryptionEnforcementConfig + customer_supplied_encryption_enforcement_config = 4 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Bucket restriction options. + message IamConfig { + // Settings for Uniform Bucket level access. + // See https://cloud.google.com/storage/docs/uniform-bucket-level-access. + message UniformBucketLevelAccess { + // Optional. If set, access checks only use bucket-level IAM policies or + // above. + bool enabled = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The deadline time for changing + // `iam_config.uniform_bucket_level_access.enabled` from `true` to + // `false`. Mutable until the specified deadline is reached, but not + // afterward. + google.protobuf.Timestamp lock_time = 2 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Optional. Bucket restriction options currently enforced on the bucket. + UniformBucketLevelAccess uniform_bucket_level_access = 1 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Whether IAM enforces public access prevention. Valid values are + // `enforced` or `inherited`. + string public_access_prevention = 3 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Lifecycle properties of a bucket. + // For more information, see [Object Lifecycle + // Management](https://cloud.google.com/storage/docs/lifecycle). + message Lifecycle { + // A lifecycle Rule, combining an action to take on an object and a + // condition which triggers that action. + message Rule { + // An action to take on an object. + message Action { + // Optional. Type of the action. Currently, only `Delete`, + // `SetStorageClass`, and `AbortIncompleteMultipartUpload` are + // supported. + string type = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Target storage class. Required iff the type of the action + // is SetStorageClass. + string storage_class = 2 [(google.api.field_behavior) = OPTIONAL]; + } + + // A condition of an object which triggers some action. 
+ message Condition { + // Age of an object (in days). This condition is satisfied when an + // object reaches the specified age. + // A value of 0 indicates that all objects immediately match this + // condition. + optional int32 age_days = 1; + + // Optional. This condition is satisfied when an object is created + // before midnight of the specified date in UTC. + google.type.Date created_before = 2 + [(google.api.field_behavior) = OPTIONAL]; + + // Relevant only for versioned objects. If the value is + // `true`, this condition matches live objects; if the value + // is `false`, it matches archived objects. + optional bool is_live = 3; + + // Relevant only for versioned objects. If the value is N, this + // condition is satisfied when there are at least N versions (including + // the live version) newer than this version of the object. + optional int32 num_newer_versions = 4; + + // Optional. Objects having any of the storage classes specified by this + // condition are matched. Values include `MULTI_REGIONAL`, `REGIONAL`, + // `NEARLINE`, `COLDLINE`, `STANDARD`, and + // `DURABLE_REDUCED_AVAILABILITY`. + repeated string matches_storage_class = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Number of days that have elapsed since the custom timestamp set on an + // object. + // The value of the field must be a nonnegative integer. + optional int32 days_since_custom_time = 7; + + // Optional. An object matches this condition if the custom timestamp + // set on the object is before the specified date in UTC. + google.type.Date custom_time_before = 8 + [(google.api.field_behavior) = OPTIONAL]; + + // This condition is relevant only for versioned objects. An object + // version satisfies this condition only if these many days have been + // passed since it became noncurrent. The value of the field must be a + // nonnegative integer. If it's zero, the object version becomes + // eligible for Lifecycle action as soon as it becomes noncurrent. 
+ optional int32 days_since_noncurrent_time = 9; + + // Optional. This condition is relevant only for versioned objects. An + // object version satisfies this condition only if it became noncurrent + // before the specified date in UTC. + google.type.Date noncurrent_time_before = 10 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. List of object name prefixes. If any prefix exactly matches + // the beginning of the object name, the condition evaluates to true. + repeated string matches_prefix = 11 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. List of object name suffixes. If any suffix exactly matches + // the end of the object name, the condition evaluates to true. + repeated string matches_suffix = 12 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Optional. The action to take. + Action action = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The condition under which the action is taken. + Condition condition = 2 [(google.api.field_behavior) = OPTIONAL]; + } + + // Optional. A lifecycle management rule, which is made of an action to take + // and the condition under which the action is taken. + repeated Rule rule = 1 [(google.api.field_behavior) = OPTIONAL]; + } + + // Logging-related properties of a bucket. + message Logging { + // Optional. The destination bucket where the current bucket's logs should + // be placed, using path format (like `projects/123456/buckets/foo`). + string log_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A prefix for log object names. + string log_object_prefix = 2 [(google.api.field_behavior) = OPTIONAL]; + } + + // Object Retention related properties of a bucket. + message ObjectRetention { + // Optional. Output only. If true, object retention is enabled for the + // bucket. + bool enabled = 1 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.field_behavior) = OUTPUT_ONLY + ]; + } + + // Retention policy properties of a bucket. 
+ message RetentionPolicy { + // Optional. Server-determined value that indicates the time from which + // policy was enforced and effective. + google.protobuf.Timestamp effective_time = 1 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Once locked, an object retention policy cannot be modified. + bool is_locked = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The duration that objects need to be retained. Retention + // duration must be greater than zero and less than 100 years. Note that + // enforcement of retention periods less than a day is not guaranteed. Such + // periods should only be used for testing purposes. Any `nanos` value + // specified is rounded down to the nearest second. + google.protobuf.Duration retention_duration = 4 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Soft delete policy properties of a bucket. + message SoftDeletePolicy { + // The period of time that soft-deleted objects in the bucket must be + // retained and cannot be permanently deleted. The duration must be greater + // than or equal to 7 days and less than 1 year. + optional google.protobuf.Duration retention_duration = 1; + + // Time from which the policy was effective. This is service-provided. + optional google.protobuf.Timestamp effective_time = 2; + } + + // Properties of a bucket related to versioning. + // For more information about Cloud Storage versioning, see [Object + // versioning](https://cloud.google.com/storage/docs/object-versioning). + message Versioning { + // Optional. While set to true, versioning is fully enabled for this bucket. + bool enabled = 1 [(google.api.field_behavior) = OPTIONAL]; + } + + // Properties of a bucket related to accessing the contents as a static + // website. For details, see [hosting a static website using Cloud + // Storage](https://cloud.google.com/storage/docs/hosting-static-website). + message Website { + // Optional. 
If the requested object path is missing, the service ensures + // the path has a trailing '/', appends this suffix, and attempts to retrieve + // the resulting object. This allows the creation of `index.html` objects to + // represent directory pages. + string main_page_suffix = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If the requested object path is missing, and any + // `mainPageSuffix` object is missing, if applicable, the service + // returns the named object from this bucket as the content for a + // [404 Not Found](https://tools.ietf.org/html/rfc7231#section-6.5.4) + // result. + string not_found_page = 2 [(google.api.field_behavior) = OPTIONAL]; + } + + // Configuration for [configurable dual- + // regions](https://cloud.google.com/storage/docs/locations#configurable). It + // should specify precisely two eligible regions within the same multi-region. + // For details, see + // [locations](https://cloud.google.com/storage/docs/locations). + message CustomPlacementConfig { + // Optional. List of locations to use for data placement. + repeated string data_locations = 1 [(google.api.field_behavior) = OPTIONAL]; + } + + // Configuration for a bucket's Autoclass feature. + message Autoclass { + // Optional. Enables Autoclass. + bool enabled = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. Latest instant at which the `enabled` field was set to true + // after being disabled/unconfigured or set to false after being enabled. If + // Autoclass is enabled when the bucket is created, the value of the + // `toggle_time` field is set to the bucket `create_time`. + google.protobuf.Timestamp toggle_time = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // An object in an Autoclass bucket eventually cools down to the + // terminal storage class if there is no access to the object. + // The only valid values are NEARLINE and ARCHIVE. + optional string terminal_storage_class = 3; + + // Output only. 
Latest instant at which the autoclass terminal storage class + // was updated. + optional google.protobuf.Timestamp terminal_storage_class_update_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // The [bucket IP + // filtering](https://cloud.google.com/storage/docs/ip-filtering-overview) + // configuration. Specifies the network sources that can access the bucket, as + // well as its underlying objects. + message IpFilter { + // The public network IP address ranges that can access the bucket and its + // data. + message PublicNetworkSource { + // Optional. The list of IPv4 and IPv6 cidr blocks that are allowed to + // operate or access the bucket and its underlying objects. + repeated string allowed_ip_cidr_ranges = 1 + [(google.api.field_behavior) = OPTIONAL]; + } + + // The list of VPC networks that can access the bucket. + message VpcNetworkSource { + // Name of the network. + // + // Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME` + optional string network = 1; + + // Optional. The list of public or private IPv4 and IPv6 CIDR ranges that + // can access the bucket. In the CIDR IP address block, the specified IP + // address must be properly truncated, meaning all the host bits must be + // zero or else the input is considered malformed. For example, + // `192.0.2.0/24` is accepted but `192.0.2.1/24` is not. Similarly, for + // IPv6, `2001:db8::/32` is accepted whereas `2001:db8::1/32` is not. + repeated string allowed_ip_cidr_ranges = 2 + [(google.api.field_behavior) = OPTIONAL]; + } + + // The state of the IP filter configuration. Valid values are `Enabled` and + // `Disabled`. When set to `Enabled`, IP filtering rules are applied to a + // bucket and all incoming requests to the bucket are evaluated against + // these rules. When set to `Disabled`, IP filtering rules are not applied + // to a bucket. + optional string mode = 1; + + // Public IPs allowed to operate or access the bucket. 
+ optional PublicNetworkSource public_network_source = 2; + + // Optional. The list of network sources that are allowed to access + // operations on the bucket or the underlying objects. + repeated VpcNetworkSource vpc_network_sources = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Whether or not to allow VPCs from orgs different than the + // bucket's parent org to access the bucket. When set to true, validations + // on the existence of the VPCs won't be performed. If set to false, each + // VPC network source is checked to belong to the same org as the bucket as + // well as validated for existence. + bool allow_cross_org_vpcs = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Whether or not to allow all P4SA access to the bucket. When set to true, + // IP filter config validation doesn't apply. + optional bool allow_all_service_agent_access = 5; + } + + // Configuration for a bucket's hierarchical namespace feature. + message HierarchicalNamespace { + // Optional. Enables the hierarchical namespace feature. + bool enabled = 1 [(google.api.field_behavior) = OPTIONAL]; + } + + // Identifier. The name of the bucket. + // Format: `projects/{project}/buckets/{bucket}` + string name = 1 [(google.api.field_behavior) = IDENTIFIER]; + + // Output only. The user-chosen part of the bucket name. The `{bucket}` + // portion of the `name` field. For globally unique buckets, this is equal to + // the `bucket name` of other Cloud Storage APIs. Example: `pub`. + string bucket_id = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // The etag of the bucket. + // If included in the metadata of an `UpdateBucketRequest`, the operation is + // only performed if the `etag` matches that of the bucket. + string etag = 29; + + // Immutable. The project which owns this bucket, in the format of + // `projects/{projectIdentifier}`. + // `{projectIdentifier}` can be the project ID or project number. + // Output values are always in the project number format. 
+ string project = 3 [ + (google.api.field_behavior) = IMMUTABLE, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // Output only. The metadata generation of this bucket. + int64 metageneration = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Immutable. The location of the bucket. Object data for objects in the + // bucket resides in physical storage within this region. Defaults to `US`. + // Attempting to update this field after the bucket is created results in an + // error. + string location = 5 [(google.api.field_behavior) = IMMUTABLE]; + + // Output only. The location type of the bucket (region, dual-region, + // multi-region, etc). + string location_type = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The bucket's default storage class, used whenever no storageClass + // is specified for a newly-created object. This defines how objects in the + // bucket are stored and determines the SLA and the cost of storage. + // If this value is not specified when the bucket is created, it defaults + // to `STANDARD`. For more information, see [Storage + // classes](https://developers.google.com/storage/docs/storage-classes). + string storage_class = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The recovery point objective for cross-region replication of the + // bucket. Applicable only for dual- and multi-region buckets. `DEFAULT` uses + // default replication. `ASYNC_TURBO` enables turbo replication, valid for + // dual-region buckets only. If rpo is not specified when the bucket is + // created, it defaults to `DEFAULT`. For more information, see [Turbo + // replication](https://cloud.google.com/storage/docs/availability-durability#turbo-replication). + string rpo = 27 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Access controls on the bucket. 
+ // If `iam_config.uniform_bucket_level_access` is enabled on this bucket, + // requests to set, read, or modify acl is an error. + repeated BucketAccessControl acl = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Default access controls to apply to new objects when no ACL is + // provided. If `iam_config.uniform_bucket_level_access` is enabled on this + // bucket, requests to set, read, or modify acl is an error. + repeated ObjectAccessControl default_object_acl = 9 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The bucket's lifecycle configuration. See [Lifecycle + // Management](https://developers.google.com/storage/docs/lifecycle) for more + // information. + Lifecycle lifecycle = 10 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The creation time of the bucket. + google.protobuf.Timestamp create_time = 11 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The bucket's [CORS](https://www.w3.org/TR/cors/) + // configuration. + repeated Cors cors = 12 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The modification time of the bucket. + google.protobuf.Timestamp update_time = 13 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The default value for event-based hold on newly created objects + // in this bucket. Event-based hold is a way to retain objects indefinitely + // until an event occurs, signified by the hold's release. After being + // released, such objects are subject to bucket-level retention (if any). One + // sample use case of this flag is for banks to hold loan documents for at + // least 3 years after loan is paid in full. Here, bucket-level retention is 3 + // years and the event is loan being paid in full. In this example, these + // objects are held intact for any number of years until the event has + // occurred (event-based hold on the object is released) and then 3 more years + // after that. 
That means retention duration of the objects begins from the + // moment event-based hold transitioned from true to false. Objects under + // event-based hold cannot be deleted, overwritten or archived until the hold + // is removed. + bool default_event_based_hold = 14 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. User-provided labels, in key/value pairs. + map<string, string> labels = 15 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The bucket's website config, controlling how the service behaves + // when accessing bucket contents as a web site. See the [Static website + // examples](https://cloud.google.com/storage/docs/static-website) for more + // information. + Website website = 16 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The bucket's versioning configuration. + Versioning versioning = 17 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The bucket's logging config, which defines the destination bucket + // and name prefix (if any) for the current bucket's logs. + Logging logging = 18 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The owner of the bucket. This is always the project team's + // owner group. + Owner owner = 19 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Encryption config for a bucket. + Encryption encryption = 20 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The bucket's billing configuration. + Billing billing = 21 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The bucket's retention policy. The retention policy enforces a + // minimum retention time for all objects contained in the bucket, based on + // their creation time. Any attempt to overwrite or delete objects younger + // than the retention period results in a `PERMISSION_DENIED` error. An + // unlocked retention policy can be modified or removed from the bucket via a + // storage.buckets.update operation. 
A locked retention policy cannot be + // removed or shortened in duration for the lifetime of the bucket. + // Attempting to remove or decrease period of a locked retention policy + // results in a `PERMISSION_DENIED` error. + RetentionPolicy retention_policy = 22 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The bucket's IAM configuration. + IamConfig iam_config = 23 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Reserved for future use. + bool satisfies_pzs = 25 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Configuration that, if present, specifies the data placement for + // a [configurable + // dual-region](https://cloud.google.com/storage/docs/locations#location-dr). + CustomPlacementConfig custom_placement_config = 26 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The bucket's Autoclass configuration. If there is no + // configuration, the Autoclass feature is disabled and has no effect on the + // bucket. + Autoclass autoclass = 28 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The bucket's hierarchical namespace configuration. If there is no + // configuration, the hierarchical namespace feature is disabled and has + // no effect on the bucket. + HierarchicalNamespace hierarchical_namespace = 32 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The bucket's soft delete policy. The soft delete policy prevents + // soft-deleted objects from being permanently deleted. + SoftDeletePolicy soft_delete_policy = 31 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The bucket's object retention configuration. Must be enabled + // before objects in the bucket might have retention configured. + ObjectRetention object_retention = 33 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The bucket's IP filter configuration. + optional IpFilter ip_filter = 38 [(google.api.field_behavior) = OPTIONAL]; +} + +// An access-control entry. 
+message BucketAccessControl { + // Optional. The access permission for the entity. + string role = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The ID of the access-control entry. + string id = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The entity holding the permission, in one of the following forms: + // * `user-{userid}` + // * `user-{email}` + // * `group-{groupid}` + // * `group-{email}` + // * `domain-{domain}` + // * `project-{team}-{projectnumber}` + // * `project-{team}-{projectid}` + // * `allUsers` + // * `allAuthenticatedUsers` + // Examples: + // * The user `liz@example.com` would be `user-liz@example.com`. + // * The group `example@googlegroups.com` would be + // `group-example@googlegroups.com` + // * All members of the Google Apps for Business domain `example.com` would be + // `domain-example.com` + // For project entities, `project-{team}-{projectnumber}` format is + // returned in the response. + string entity = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The alternative entity format, if exists. For project + // entities, `project-{team}-{projectid}` format is returned in the response. + string entity_alt = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The ID for the entity, if any. + string entity_id = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The `etag` of the `BucketAccessControl`. + // If included in the metadata of an update or delete request message, the + // operation is only performed if the etag matches that of the + // bucket's `BucketAccessControl`. + string etag = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The email address associated with the entity, if any. + string email = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The domain associated with the entity, if any. + string domain = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The project team associated with the entity, if any. 
+ ProjectTeam project_team = 7 [(google.api.field_behavior) = OPTIONAL]; +} + +// Message used to convey content being read or written, along with an optional +// checksum. +message ChecksummedData { + // Optional. The data. + bytes content = 1 [ctype = CORD, (google.api.field_behavior) = OPTIONAL]; + + // If set, the CRC32C digest of the content field. + optional fixed32 crc32c = 2; +} + +// Message used for storing full (not subrange) object checksums. +message ObjectChecksums { + // CRC32C digest of the object data. Computed by the Cloud Storage service for + // all written objects. + // If set in a WriteObjectRequest, service validates that the stored + // object matches this checksum. + optional fixed32 crc32c = 1; + + // Optional. 128 bit MD5 hash of the object data. For more information about + // using the MD5 hash, see [Data validation and change + // detection](https://cloud.google.com/storage/docs/data-validation). Not all + // objects provide an MD5 hash. For example, composite objects provide only + // crc32c hashes. This value is equivalent to running `cat object.txt | + // openssl md5 -binary` + bytes md5_hash = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// The payload of a single user-defined object context. +message ObjectCustomContextPayload { + // Required. The value of the object context. + string value = 1 [(google.api.field_behavior) = REQUIRED]; + + // Output only. The time at which the object context was created. + google.protobuf.Timestamp create_time = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time at which the object context was last updated. + google.protobuf.Timestamp update_time = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// All contexts of an object grouped by type. +message ObjectContexts { + // Optional. User-defined object contexts. + // The maximum key or value size is `256` characters. + // The maximum number of entries is `50`. 
+ // The maximum total serialized size of all entries is `25KiB`. + map custom = 1 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Describes the customer-supplied encryption key mechanism used to store an +// object's data at rest. +message CustomerEncryption { + // Optional. The encryption algorithm. + string encryption_algorithm = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. SHA256 hash value of the encryption key. + // In raw bytes format (not base64-encoded). + bytes key_sha256_bytes = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// An object. +message Object { + // Specifies retention parameters of the object. Objects under retention + // cannot be deleted or overwritten until their retention expires. + message Retention { + // Retention mode values. + enum Mode { + // No specified mode. Object is not under retention. + MODE_UNSPECIFIED = 0; + + // Retention period might be decreased or increased. + // The Retention configuration might be removed. + // The mode might be changed to locked. + UNLOCKED = 1; + + // Retention period might be increased. + // The Retention configuration cannot be removed. + // The mode cannot be changed. + LOCKED = 2; + } + + // Optional. The mode of the Retention. + Mode mode = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The timestamp that the object needs to be retained until. + // Value cannot be set in the past or more than 100 years in the future. + google.protobuf.Timestamp retain_until_time = 2 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Immutable. The name of this object. Nearly any sequence of unicode + // characters is valid. See + // [Guidelines](https://cloud.google.com/storage/docs/objects#naming). + // Example: `test.txt` + // The `name` field by itself does not uniquely identify a Cloud Storage + // object. A Cloud Storage object is uniquely identified by the tuple of + // (bucket, object, generation). 
+ string name = 1 [(google.api.field_behavior) = IMMUTABLE]; + + // Immutable. The name of the bucket containing this object. + string bucket = 2 [ + (google.api.field_behavior) = IMMUTABLE, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; + + // Optional. The `etag` of an object. + // If included in the metadata of an update or delete request message, the + // operation is only performed if the etag matches that of the live + // object. + string etag = 27 [(google.api.field_behavior) = OPTIONAL]; + + // Immutable. The content generation of this object. Used for object + // versioning. + int64 generation = 3 [(google.api.field_behavior) = IMMUTABLE]; + + // Output only. Restore token used to differentiate deleted objects with the + // same name and generation. This field is output only, and only set for + // deleted objects in HNS buckets. + optional string restore_token = 35 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The version of the metadata for this generation of this + // object. Used for preconditions and for detecting changes in metadata. A + // metageneration number is only meaningful in the context of a particular + // generation of a particular object. + int64 metageneration = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Storage class of the object. + string storage_class = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. Content-Length of the object data in bytes, matching + // [RFC 7230 §3.3.2](https://tools.ietf.org/html/rfc7230#section-3.3.2). + int64 size = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Content-Encoding of the object data, matching + // [RFC 7231 §3.1.2.2](https://tools.ietf.org/html/rfc7231#section-3.1.2.2) + string content_encoding = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Content-Disposition of the object data, matching + // [RFC 6266](https://tools.ietf.org/html/rfc6266). 
+ string content_disposition = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Cache-Control directive for the object data, matching + // [RFC 7234 §5.2](https://tools.ietf.org/html/rfc7234#section-5.2). + // If omitted, and the object is accessible to all anonymous users, the + // default is `public, max-age=3600`. + string cache_control = 9 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Access controls on the object. + // If `iam_config.uniform_bucket_level_access` is enabled on the parent + // bucket, requests to set, read, or modify acl is an error. + repeated ObjectAccessControl acl = 10 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Content-Language of the object data, matching + // [RFC 7231 §3.1.3.2](https://tools.ietf.org/html/rfc7231#section-3.1.3.2). + string content_language = 11 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. If this object is noncurrent, this is the time when the object + // became noncurrent. + google.protobuf.Timestamp delete_time = 12 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time when the object was finalized. + google.protobuf.Timestamp finalize_time = 36 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Content-Type of the object data, matching + // [RFC 7231 §3.1.1.5](https://tools.ietf.org/html/rfc7231#section-3.1.1.5). + // If an object is stored without a Content-Type, it is served as + // `application/octet-stream`. + string content_type = 13 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The creation time of the object. + google.protobuf.Timestamp create_time = 14 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of underlying components that make up this object. + // Components are accumulated by compose operations. + int32 component_count = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Hashes for the data part of this object. 
This field is used + // for output only and is silently ignored if provided in requests. The + // checksums of the complete object regardless of data range. If the object is + // downloaded in full, the client should compute one of these checksums over + // the downloaded object and compare it against the value provided here. + ObjectChecksums checksums = 16 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The modification time of the object metadata. + // Set initially to object creation time and then updated whenever any + // metadata of the object changes. This includes changes made by a requester, + // such as modifying custom metadata, as well as changes made by Cloud Storage + // on behalf of a requester, such as changing the storage class based on an + // Object Lifecycle Configuration. + google.protobuf.Timestamp update_time = 17 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Cloud KMS Key used to encrypt this object, if the object is + // encrypted by such a key. + string kms_key = 18 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; + + // Output only. The time at which the object's storage class was last changed. + // When the object is initially created, it is set to `time_created`. + google.protobuf.Timestamp update_storage_class_time = 19 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Whether an object is under temporary hold. While this flag is set + // to true, the object is protected against deletion and overwrites. A common + // use case of this flag is regulatory investigations where objects need to be + // retained while the investigation is ongoing. Note that unlike event-based + // hold, temporary hold does not impact retention expiration time of an + // object. + bool temporary_hold = 20 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
A server-determined value that specifies the earliest time that + // the object's retention period expires. Note 1: This field is not provided + // for objects with an active event-based hold, since retention expiration is + // unknown until the hold is removed. Note 2: This value can be provided even + // when temporary hold is set (so that the user can reason about policy + // without having to first unset the temporary hold). + google.protobuf.Timestamp retention_expire_time = 21 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. User-provided metadata, in key/value pairs. + map metadata = 22 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. User-defined or system-defined object contexts. Each object + // context is a key-payload pair, where the key provides the identification + // and the payload holds the associated value and additional metadata. + ObjectContexts contexts = 38 [(google.api.field_behavior) = OPTIONAL]; + + // Whether an object is under event-based hold. + // An event-based hold is a way to force the retention of an object until + // after some event occurs. Once the hold is released by explicitly setting + // this field to `false`, the object becomes subject to any bucket-level + // retention policy, except that the retention duration is calculated + // from the time the event based hold was lifted, rather than the time the + // object was created. + // + // In a `WriteObject` request, not setting this field implies that the value + // should be taken from the parent bucket's `default_event_based_hold` field. + // In a response, this field is always set to `true` or `false`. + optional bool event_based_hold = 23; + + // Output only. The owner of the object. This is always the uploader of the + // object. + Owner owner = 24 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Metadata of customer-supplied encryption key, if the object is + // encrypted by such a key. 
+ CustomerEncryption customer_encryption = 25 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A user-specified timestamp set on an object. + google.protobuf.Timestamp custom_time = 26 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. This is the time when the object became soft-deleted. + // + // Soft-deleted objects are only accessible if a soft_delete_policy is + // enabled. Also see `hard_delete_time`. + optional google.protobuf.Timestamp soft_delete_time = 28 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time when the object is permanently deleted. + // + // Only set when an object becomes soft-deleted with a `soft_delete_policy`. + // Otherwise, the object is not accessible. + optional google.protobuf.Timestamp hard_delete_time = 29 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Retention configuration of this object. + // Might only be configured if the bucket has object retention enabled. + Retention retention = 30 [(google.api.field_behavior) = OPTIONAL]; +} + +// An access-control entry. +message ObjectAccessControl { + // Optional. The access permission for the entity. One of the following + // values: + // * `READER` + // * `WRITER` + // * `OWNER` + string role = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The ID of the access-control entry. + string id = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The entity holding the permission, in one of the following forms: + // * `user-{userid}` + // * `user-{email}` + // * `group-{groupid}` + // * `group-{email}` + // * `domain-{domain}` + // * `project-{team}-{projectnumber}` + // * `project-{team}-{projectid}` + // * `allUsers` + // * `allAuthenticatedUsers` + // Examples: + // * The user `liz@example.com` would be `user-liz@example.com`. + // * The group `example@googlegroups.com` would be + // `group-example@googlegroups.com`. 
+ // * All members of the Google Apps for Business domain `example.com` would be + // `domain-example.com`. + // For project entities, `project-{team}-{projectnumber}` format is + // returned in the response. + string entity = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The alternative entity format, if exists. For project + // entities, `project-{team}-{projectid}` format is returned in the response. + string entity_alt = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The ID for the entity, if any. + string entity_id = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The etag of the ObjectAccessControl. + // If included in the metadata of an update or delete request message, the + // operation is only performed if the etag matches that of the live + // object's ObjectAccessControl. + string etag = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The email address associated with the entity, if any. + string email = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The domain associated with the entity, if any. + string domain = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The project team associated with the entity, if any. + ProjectTeam project_team = 7 [(google.api.field_behavior) = OPTIONAL]; +} + +// The result of a call to Objects.ListObjects +message ListObjectsResponse { + // The list of items. + repeated Object objects = 1; + + // The list of prefixes of objects matching-but-not-listed up to and including + // the requested delimiter. + repeated string prefixes = 2; + + // The continuation token, used to page through large result sets. Provide + // this value in a subsequent request to return the next page of results. + string next_page_token = 3; +} + +// Represents the Viewers, Editors, or Owners of a given project. +message ProjectTeam { + // Optional. The project number. + string project_number = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The team. 
+ string team = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// The owner of a specific resource. +message Owner { + // Optional. The entity, in the form `user-`*userId*. + string entity = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The ID for the entity. + string entity_id = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// Specifies a requested range of bytes to download. +message ContentRange { + // The starting offset of the object data. This value is inclusive. + int64 start = 1; + + // The ending offset of the object data. This value is exclusive. + int64 end = 2; + + // The complete length of the object data. + int64 complete_length = 3; +} diff --git a/java-storage/samples/install-without-bom/pom.xml b/java-storage/samples/install-without-bom/pom.xml new file mode 100644 index 000000000000..37be59e0e665 --- /dev/null +++ b/java-storage/samples/install-without-bom/pom.xml @@ -0,0 +1,144 @@ + + + 4.0.0 + com.google.cloud + storage-install-without-bom + jar + Google Google Cloud Storage Install Without Bom + https://github.com/googleapis/java-storage + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + + 1.8 + 1.8 + UTF-8 + + + + + + + com.google.cloud + google-cloud-storage + 2.63.0 + + + com.google.cloud + google-cloud-storage-control + 2.63.0 + + + + + junit + junit + 4.13.2 + test + + + com.google.truth + truth + 1.4.5 + test + + + org.apache.httpcomponents + httpclient + 4.5.14 + test + + + org.apache.httpcomponents + httpmime + 4.5.14 + test + + + com.google.cloud + google-cloud-pubsub + 1.148.0 + test + + + com.google.cloud + google-cloud-kms + 2.89.0 + test + + + com.google.cloud + google-cloud-storage + 2.63.0 + tests + test + + + + org.slf4j + jul-to-slf4j + 2.0.17 + test + + + ch.qos.logback + logback-classic + 1.3.16 + test + + + com.google.cloud.opentelemetry + exporter-trace + 0.36.0 + + + com.google.cloud.opentelemetry + propagators-gcp + 0.33.0-alpha + + + + + + + + org.codehaus.mojo + 
build-helper-maven-plugin + 3.6.1 + + + add-snippets-source + + add-source + + + + ../snippets/src/main/java + + + + + add-snippets-tests + + add-test-source + + + + ../snippets/src/test/java + + + + + + + + diff --git a/java-storage/samples/pom.xml b/java-storage/samples/pom.xml new file mode 100644 index 000000000000..65ef20b76226 --- /dev/null +++ b/java-storage/samples/pom.xml @@ -0,0 +1,57 @@ + + + 4.0.0 + com.google.cloud + google-cloud-storage-samples + 0.0.1-SNAPSHOT + pom + Google Google Cloud Storage Samples Parent + https://github.com/googleapis/java-storage + + Java idiomatic client for Google Cloud Platform services. + + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + + 1.8 + 1.8 + UTF-8 + + + + install-without-bom + snapshot + snippets + + + + + + org.apache.maven.plugins + maven-deploy-plugin + 3.1.4 + + true + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.7.0 + + true + + + + + diff --git a/java-storage/samples/snapshot/pom.xml b/java-storage/samples/snapshot/pom.xml new file mode 100644 index 000000000000..14bce185c260 --- /dev/null +++ b/java-storage/samples/snapshot/pom.xml @@ -0,0 +1,136 @@ + + + 4.0.0 + com.google.cloud + storage-snapshot + jar + Google Google Cloud Storage Snapshot Samples + https://github.com/googleapis/java-storage + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + + 1.8 + 1.8 + UTF-8 + + + + + com.google.cloud + google-cloud-storage + 2.64.1-SNAPSHOT + + + com.google.cloud + google-cloud-storage-control + 2.64.1-SNAPSHOT + compile + + + + junit + junit + 4.13.2 + test + + + com.google.truth + truth + 1.4.5 + test + + + org.apache.httpcomponents + httpmime + 4.5.14 + test + + + com.google.cloud + google-cloud-pubsub + 1.148.0 + test + + + com.google.cloud + google-cloud-kms + 2.89.0 + test + + + com.google.cloud + google-cloud-storage + 2.64.1-SNAPSHOT + tests + test + + + + org.slf4j + jul-to-slf4j + 2.0.17 + test + + + ch.qos.logback + logback-classic + 1.3.16 + test 
+ + + com.google.cloud.opentelemetry + exporter-trace + 0.36.0 + + + com.google.cloud.opentelemetry + propagators-gcp + 0.33.0-alpha + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.6.1 + + + add-snippets-source + + add-source + + + + ../snippets/src/main/java + + + + + add-snippets-tests + + add-test-source + + + + ../snippets/src/test/java + + + + + + + + diff --git a/java-storage/samples/snippets/pom.xml b/java-storage/samples/snippets/pom.xml new file mode 100644 index 000000000000..4995f8a85f59 --- /dev/null +++ b/java-storage/samples/snippets/pom.xml @@ -0,0 +1,122 @@ + + + 4.0.0 + com.google.cloud + storage-snippets + jar + Google Google Cloud Storage Snippets + https://github.com/googleapis/java-storage + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + + 1.8 + 1.8 + UTF-8 + + + + + + + + com.google.cloud + libraries-bom + 26.76.0 + pom + import + + + + + + + com.google.cloud + google-cloud-storage + + + com.google.cloud + google-cloud-storage-control + + + + + junit + junit + 4.13.2 + test + + + com.google.truth + truth + 1.4.5 + test + + + org.apache.httpcomponents + httpclient + 4.5.14 + test + + + org.apache.httpcomponents + httpmime + 4.5.14 + test + + + com.google.cloud + google-cloud-pubsub + 1.148.0 + test + + + com.google.cloud + google-cloud-kms + 2.89.0 + test + + + com.google.cloud.opentelemetry + exporter-trace + 0.36.0 + + + com.google.cloud.opentelemetry + propagators-gcp + 0.33.0-alpha + + + + com.google.cloud + google-cloud-storage + 2.63.0 + tests + test + + + + org.slf4j + jul-to-slf4j + 2.0.17 + test + + + ch.qos.logback + logback-classic + 1.3.16 + test + + + + + diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/ConfigureRetries.java b/java-storage/samples/snippets/src/main/java/com/example/storage/ConfigureRetries.java new file mode 100644 index 000000000000..5cdf01b1b6cc --- /dev/null +++ 
b/java-storage/samples/snippets/src/main/java/com/example/storage/ConfigureRetries.java @@ -0,0 +1,66 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage; + +// [START storage_configure_retries] + +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.StorageRetryStrategy; +import org.threeten.bp.Duration; + +public final class ConfigureRetries { + public static void main(String[] args) { + String bucketName = "my-bucket"; + String blobName = "blob/to/delete"; + deleteBlob(bucketName, blobName); + } + + static void deleteBlob(String bucketName, String blobName) { + // Customize retry behavior + RetrySettings retrySettings = + StorageOptions.getDefaultRetrySettings().toBuilder() + // Set the max number of attempts to 10 (initial attempt plus 9 retries) + .setMaxAttempts(10) + // Set the backoff multiplier to 3.0 + .setRetryDelayMultiplier(3.0) + // Set the max duration of all attempts to 5 minutes + .setTotalTimeout(Duration.ofMinutes(5)) + .build(); + + StorageOptions alwaysRetryStorageOptions = + StorageOptions.newBuilder() + // Customize retry so all requests are retried even if they are non-idempotent. 
+ .setStorageRetryStrategy(StorageRetryStrategy.getUniformStorageRetryStrategy()) + // provide the previously configured retrySettings + .setRetrySettings(retrySettings) + .build(); + + // Instantiate a client + Storage storage = alwaysRetryStorageOptions.getService(); + + // Delete the blob + BlobId blobId = BlobId.of(bucketName, blobName); + boolean success = storage.delete(blobId); + + System.out.printf( + "Deletion of Blob %s completed %s.%n", blobId, success ? "successfully" : "unsuccessfully"); + } +} +// [END storage_configure_retries] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/GenerateSignedPostPolicyV4.java b/java-storage/samples/snippets/src/main/java/com/example/storage/GenerateSignedPostPolicyV4.java new file mode 100644 index 000000000000..a1dd9ebaaa5f --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/GenerateSignedPostPolicyV4.java @@ -0,0 +1,83 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage; + +// [START storage_generate_signed_post_policy_v4] + +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.PostPolicyV4; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +public class GenerateSignedPostPolicyV4 { + /** + * Generating a signed POST policy requires Credentials which implement ServiceAccountSigner. + * These can be set explicitly using the Storage.PostPolicyV4Option.signWith(ServiceAccountSigner) + * option. If you don't, you could also pass a service account signer to StorageOptions, i.e. + * StorageOptions().newBuilder().setCredentials(ServiceAccountSignerCredentials). In this example, + * neither of these options are used, which means the following code only works when the + * credentials are defined via the environment variable GOOGLE_APPLICATION_CREDENTIALS, and those + * credentials are authorized to sign a policy. See the documentation for + * Storage.generateSignedPostPolicyV4 for more details. + */ + public static void generateSignedPostPolicyV4( + String projectId, String bucketName, String blobName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of the GCS bucket to upload to + // String bucketName = "your-bucket-name" + + // The name to give the object uploaded to GCS + // String blobName = "your-object-name" + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + PostPolicyV4.PostFieldsV4 fields = + PostPolicyV4.PostFieldsV4.newBuilder().setCustomMetadataField("test", "data").build(); + + PostPolicyV4 policy = + storage.generateSignedPostPolicyV4( + BlobInfo.newBuilder(bucketName, blobName).build(), 10, TimeUnit.MINUTES, fields); + + StringBuilder htmlForm = + new StringBuilder( + "
\n"); + for (Map.Entry entry : policy.getFields().entrySet()) { + htmlForm.append( + " \n"); + } + htmlForm.append("
\n"); + htmlForm.append("
\n"); + htmlForm.append("\n"); + + System.out.println( + "You can use the following HTML form to upload an object to bucket " + + bucketName + + " for the next ten minutes:"); + System.out.println(htmlForm.toString()); + } +} +// [END storage_generate_signed_post_policy_v4] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/GetServiceAccount.java b/java-storage/samples/snippets/src/main/java/com/example/storage/GetServiceAccount.java new file mode 100644 index 000000000000..e190376a0a56 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/GetServiceAccount.java @@ -0,0 +1,35 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage; + +// [START storage_get_service_account] +import com.google.cloud.storage.ServiceAccount; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class GetServiceAccount { + public static void getServiceAccount(String projectId) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + ServiceAccount serviceAccount = storage.getServiceAccount(projectId); + System.out.println( + "The GCS service account for project " + projectId + " is: " + serviceAccount.getEmail()); + } +} +// [END storage_get_service_account] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartGrpcDpSample.java b/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartGrpcDpSample.java new file mode 100644 index 000000000000..2ae2ffce6b5b --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartGrpcDpSample.java @@ -0,0 +1,45 @@ +/* + * Copyright 2023 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage; + +// [START storage_grpc_quickstart_dp] +// Imports the Google Cloud client library +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class QuickstartGrpcDpSample { + public static void main(String... args) throws Exception { + + // Create an instance of options which will use the Google Cloud Storage gRPC API for all + // operations + StorageOptions options = StorageOptions.grpc().setAttemptDirectPath(true).build(); + + // Instantiates a client in a try-with-resource to automatically cleanup underlying resources + try (Storage storage = options.getService()) { + // The name for the new bucket + String bucketName = args[0]; // "my-new-bucket"; + + // Creates the new bucket using a request to the gRPC API via Direct Google Access + Bucket bucket = storage.create(BucketInfo.of(bucketName)); + + System.out.printf("Bucket %s created.%n", bucket.getName()); + } + } +} +// [END storage_grpc_quickstart_dp] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartGrpcSample.java b/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartGrpcSample.java new file mode 100644 index 000000000000..84c737ba97ee --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartGrpcSample.java @@ -0,0 +1,45 @@ +/* + * Copyright 2023 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage; + +// [START storage_grpc_quickstart] +// Imports the Google Cloud client library +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class QuickstartGrpcSample { + public static void main(String... args) throws Exception { + + // Create an instance of options which will use the Google Cloud Storage gRPC API for all + // operations + StorageOptions options = StorageOptions.grpc().build(); + + // Instantiates a client in a try-with-resource to automatically cleanup underlying resources + try (Storage storage = options.getService()) { + // The name for the new bucket + String bucketName = args[0]; // "my-new-bucket"; + + // Creates the new bucket using a request to the gRPC API + Bucket bucket = storage.create(BucketInfo.of(bucketName)); + + System.out.printf("Bucket %s created.%n", bucket.getName()); + } + } +} +// [END storage_grpc_quickstart] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartOpenTelemetrySample.java b/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartOpenTelemetrySample.java new file mode 100644 index 000000000000..dd062a1ded11 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartOpenTelemetrySample.java @@ -0,0 +1,58 @@ +/* + * Copyright 2024 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage; + +import com.google.cloud.opentelemetry.propagators.XCloudTraceContextPropagator; +import com.google.cloud.opentelemetry.trace.TraceExporter; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.propagation.ContextPropagators; +import io.opentelemetry.context.propagation.TextMapPropagator; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; +import io.opentelemetry.sdk.trace.export.SpanExporter; +import io.opentelemetry.sdk.trace.samplers.Sampler; + +// [START storage_enable_otel_tracing] +public class QuickstartOpenTelemetrySample { + public static void main(String... args) throws Exception { + SpanExporter spanExporter = TraceExporter.createWithDefaultConfiguration(); + TextMapPropagator propagators = + TextMapPropagator.composite( + W3CTraceContextPropagator.getInstance(), + new XCloudTraceContextPropagator(/* oneway= */ true)); + + OpenTelemetrySdk openTelemetry = + OpenTelemetrySdk.builder() + .setPropagators(ContextPropagators.create(propagators)) + .setTracerProvider( + SdkTracerProvider.builder() + // Sample Rate is set to alwaysOn + // It is recommended to sample based on a ratio for standard use ie. 
+ // .setSampler(Sampler.traceIdRatioBased(0.2)) // sample only 20% of trace ids + .setSampler(Sampler.alwaysOn()) + .addSpanProcessor(BatchSpanProcessor.builder(spanExporter).build()) + .build()) + .build(); + StorageOptions options = StorageOptions.newBuilder().setOpenTelemetry(openTelemetry).build(); + Storage storage = options.getService(); + System.out.println("Created an instance of storage with OpenTelemetry configured"); + } +} +// [END storage_enable_otel_tracing] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartSample.java b/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartSample.java new file mode 100644 index 000000000000..a5ef2b16d725 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartSample.java @@ -0,0 +1,40 @@ +/* + * Copyright 2015 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage; + +// [START storage_quickstart] +// Imports the Google Cloud client library +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class QuickstartSample { + public static void main(String... 
args) throws Exception { + // Instantiates a client + Storage storage = StorageOptions.getDefaultInstance().getService(); + + // The name for the new bucket + String bucketName = args[0]; // "my-new-bucket"; + + // Creates the new bucket + Bucket bucket = storage.create(BucketInfo.of(bucketName)); + + System.out.printf("Bucket %s created.%n", bucket.getName()); + } +} +// [END storage_quickstart] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartStorageControlSample.java b/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartStorageControlSample.java new file mode 100644 index 000000000000..51458e8c1b90 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/QuickstartStorageControlSample.java @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage; + +// [START storage_control_quickstart_sample] +import com.google.storage.control.v2.GetStorageLayoutRequest; +import com.google.storage.control.v2.StorageControlClient; +import com.google.storage.control.v2.StorageLayout; +import com.google.storage.control.v2.StorageLayoutName; + +public class QuickstartStorageControlSample { + public static void main(String... 
args) throws Exception { + String bucketName = args[0]; // "your-bucket-name"; + + // Instantiates a client in a try-with-resource to automatically cleanup underlying resources + try (StorageControlClient storageControlClient = StorageControlClient.create()) { + GetStorageLayoutRequest request = + GetStorageLayoutRequest.newBuilder() + // Set project to "_" to signify global bucket + .setName(StorageLayoutName.format("_", bucketName)) + .build(); + StorageLayout response = storageControlClient.getStorageLayout(request); + System.out.printf("Performed getStorageLayout request for %s%n", response.getName()); + } + } +} +// [END storage_control_quickstart_sample] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketDefaultOwner.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketDefaultOwner.java new file mode 100644 index 000000000000..8e2e369fdafb --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketDefaultOwner.java @@ -0,0 +1,46 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_add_bucket_default_owner] + +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class AddBucketDefaultOwner { + + public static void addBucketDefaultOwner(String bucketName, String userEmail) { + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The email of the user you wish to add as a default owner + // String userEmail = "someuser@domain.com" + + Storage storage = StorageOptions.newBuilder().build().getService(); + Bucket bucket = storage.get(bucketName); + Acl newDefaultOwner = Acl.of(new User(userEmail), Role.OWNER); + + bucket.createDefaultAcl(newDefaultOwner); + System.out.println("Added user " + userEmail + " as an owner on " + bucketName); + } +} +// [END storage_add_bucket_default_owner] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketIamConditionalBinding.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketIamConditionalBinding.java new file mode 100644 index 000000000000..e1d08d3872fc --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketIamConditionalBinding.java @@ -0,0 +1,81 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_add_bucket_conditional_iam_binding] + +import com.google.cloud.Binding; +import com.google.cloud.Condition; +import com.google.cloud.Policy; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class AddBucketIamConditionalBinding { + /** Example of adding a conditional binding to the Bucket-level IAM */ + public static void addBucketIamConditionalBinding(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // For more information please read: + // https://cloud.google.com/storage/docs/access-control/iam + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + Policy originalPolicy = + storage.getIamPolicy(bucketName, Storage.BucketSourceOption.requestedPolicyVersion(3)); + + String role = "roles/storage.objectViewer"; + String member = "group:example@google.com"; + + // Create a condition + String conditionTitle = "Title"; + String conditionDescription = "Description"; + String conditionExpression = + "resource.name.startsWith(\"projects/_/buckets/bucket-name/objects/prefix-a-\")"; + Condition.Builder conditionBuilder = Condition.newBuilder(); + conditionBuilder.setTitle(conditionTitle); + conditionBuilder.setDescription(conditionDescription); + conditionBuilder.setExpression(conditionExpression); + + // getBindingsList() returns an ImmutableList, we copy over to an ArrayList so it's mutable + List bindings = new ArrayList(originalPolicy.getBindingsList()); + + // Add condition to a binding + Binding.Builder newBindingBuilder = + Binding.newBuilder() + .setRole(role) + 
.setMembers(Arrays.asList(member)) + .setCondition(conditionBuilder.build()); + bindings.add(newBindingBuilder.build()); + + // Update policy with new conditional binding + Policy.Builder updatedPolicyBuilder = originalPolicy.toBuilder(); + updatedPolicyBuilder.setBindings(bindings).setVersion(3); + + storage.setIamPolicy(bucketName, updatedPolicyBuilder.build()); + + System.out.printf( + "Added %s with role %s to %s with condition %s %s %s\n", + member, role, bucketName, conditionTitle, conditionDescription, conditionExpression); + } +} +// [END storage_add_bucket_conditional_iam_binding] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketIamMember.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketIamMember.java new file mode 100644 index 000000000000..c5c86e62248e --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketIamMember.java @@ -0,0 +1,64 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_add_bucket_iam_member] + +import com.google.cloud.Binding; +import com.google.cloud.Policy; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class AddBucketIamMember { + /** Example of adding a member to the Bucket-level IAM */ + public static void addBucketIamMember(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // For more information please read: + // https://cloud.google.com/storage/docs/access-control/iam + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + Policy originalPolicy = + storage.getIamPolicy(bucketName, Storage.BucketSourceOption.requestedPolicyVersion(3)); + + String role = "roles/storage.objectViewer"; + String member = "group:example@google.com"; + + // getBindingsList() returns an ImmutableList and copying over to an ArrayList so it's mutable. 
+ List bindings = new ArrayList(originalPolicy.getBindingsList()); + + // Create a new binding using role and member + Binding.Builder newMemberBindingBuilder = Binding.newBuilder(); + newMemberBindingBuilder.setRole(role).setMembers(Arrays.asList(member)); + bindings.add(newMemberBindingBuilder.build()); + + // Update policy to add member + Policy.Builder updatedPolicyBuilder = originalPolicy.toBuilder(); + updatedPolicyBuilder.setBindings(bindings).setVersion(3); + Policy updatedPolicy = storage.setIamPolicy(bucketName, updatedPolicyBuilder.build()); + + System.out.printf("Added %s with role %s to %s\n", member, role, bucketName); + } +} +// [END storage_add_bucket_iam_member] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketLabel.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketLabel.java new file mode 100644 index 000000000000..473f27021541 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketLabel.java @@ -0,0 +1,57 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_add_bucket_label] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.util.HashMap; +import java.util.Map; + +public class AddBucketLabel { + public static void addBucketLabel( + String projectId, String bucketName, String labelKey, String labelValue) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The key of the label to add + // String labelKey = "label-key-to-add"; + + // The value of the label to add + // String labelValue = "label-value-to-add"; + + Map newLabels = new HashMap<>(); + newLabels.put(labelKey, labelValue); + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + Map labels = bucket.getLabels(); + if (labels != null) { + newLabels.putAll(labels); + } + bucket.toBuilder().setLabels(newLabels).build().update(); + + System.out.println( + "Added label " + labelKey + " with value " + labelValue + " to bucket " + bucketName + "."); + } +} +// [END storage_add_bucket_label] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketOwner.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketOwner.java new file mode 100644 index 000000000000..ddbd5bfefe28 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/AddBucketOwner.java @@ -0,0 +1,49 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_add_bucket_owner] + +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class AddBucketOwner { + + public static void addBucketOwner(String projectId, String bucketName, String userEmail) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // Email of the user you wish to add as an owner + // String userEmail = "someuser@domain.com" + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + Acl newOwner = Acl.of(new User(userEmail), Role.OWNER); + + bucket.createAcl(newOwner); + System.out.println("Added user " + userEmail + " as an owner on " + bucketName); + } +} + +// [END storage_add_bucket_owner] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ChangeDefaultStorageClass.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ChangeDefaultStorageClass.java new file mode 100644 index 000000000000..03c4358f6edc --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ChangeDefaultStorageClass.java @@ -0,0 +1,48 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_change_default_storage_class] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageClass; +import com.google.cloud.storage.StorageOptions; + +public class ChangeDefaultStorageClass { + public static void changeDefaultStorageClass(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // See the StorageClass documentation for other valid storage classes: + // https://googleapis.dev/java/google-cloud-clients/latest/com/google/cloud/storage/StorageClass.html + StorageClass storageClass = StorageClass.COLDLINE; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + bucket = bucket.toBuilder().setStorageClass(storageClass).build().update(); + + System.out.println( + "Default storage class for bucket " + + bucketName + + " has been set to " + + bucket.getStorageClass()); + } +} +// [END storage_change_default_storage_class] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ConfigureBucketCors.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ConfigureBucketCors.java new file mode 100644 index 000000000000..3097978ad390 --- /dev/null 
+++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ConfigureBucketCors.java @@ -0,0 +1,77 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_cors_configuration] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Cors; +import com.google.cloud.storage.HttpMethod; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.common.collect.ImmutableList; + +public class ConfigureBucketCors { + public static void configureBucketCors( + String projectId, + String bucketName, + String origin, + String responseHeader, + Integer maxAgeSeconds) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The origin for this CORS config to allow requests from + // String origin = "http://example.appspot.com"; + + // The response header to share across origins + // String responseHeader = "Content-Type"; + + // The maximum amount of time the browser can make requests before it must repeat preflighted + // requests + // Integer maxAgeSeconds = 3600; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + + // See the HttpMethod documentation for other HTTP methods available: + // 
https://cloud.google.com/appengine/docs/standard/java/javadoc/com/google/appengine/api/urlfetch/HTTPMethod + HttpMethod method = HttpMethod.GET; + + Cors cors = + Cors.newBuilder() + .setOrigins(ImmutableList.of(Cors.Origin.of(origin))) + .setMethods(ImmutableList.of(method)) + .setResponseHeaders(ImmutableList.of(responseHeader)) + .setMaxAgeSeconds(maxAgeSeconds) + .build(); + + bucket.toBuilder().setCors(ImmutableList.of(cors)).build().update(); + + System.out.println( + "Bucket " + + bucketName + + " was updated with a CORS config to allow GET requests from " + + origin + + " sharing " + + responseHeader + + " responses across origins"); + } +} +// [END storage_cors_configuration] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucket.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucket.java new file mode 100644 index 000000000000..53041f62c523 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucket.java @@ -0,0 +1,40 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_create_bucket] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class CreateBucket { + public static void createBucket(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID to give your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + Bucket bucket = storage.create(BucketInfo.newBuilder(bucketName).build()); + + System.out.println("Created bucket " + bucket.getName()); + } +} +// [END storage_create_bucket] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketDualRegion.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketDualRegion.java new file mode 100644 index 000000000000..6c5be68d45e3 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketDualRegion.java @@ -0,0 +1,80 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_create_bucket_dual_region] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.CustomPlacementConfig; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.util.Arrays; + +public class CreateBucketDualRegion { + + public static void createBucketDualRegion( + String projectId, + String bucketName, + String location, + String firstRegion, + String secondRegion) { + // The ID of your GCP project. + // String projectId = "your-project-id"; + + // The ID to give your GCS bucket. + // String bucketName = "your-unique-bucket-name"; + + // The location your dual regions will be located in. + // String location = "US"; + + // One of the regions the dual region bucket is to be created in. + // String firstRegion = "US-EAST1"; + + // The second region the dual region bucket is to be created in. + // String secondRegion = "US-WEST1"; + + // See this documentation for other valid locations and regions: + // https://cloud.google.com/storage/docs/locations + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + CustomPlacementConfig config = + CustomPlacementConfig.newBuilder() + .setDataLocations(Arrays.asList(firstRegion, secondRegion)) + .build(); + + BucketInfo bucketInfo = + BucketInfo.newBuilder(bucketName) + .setLocation(location) + .setCustomPlacementConfig(config) + .build(); + + Bucket bucket = storage.create(bucketInfo); + + System.out.println( + "Created bucket " + + bucket.getName() + + " in location " + + bucket.getLocation() + + " with location type " + + bucket.getLocationType() + + " with Custom Placement Config " + + bucket.getCustomPlacementConfig().toString()); + } +} +// [END storage_create_bucket_dual_region] diff --git 
a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketPubSubNotification.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketPubSubNotification.java new file mode 100644 index 000000000000..da2b3b3d1382 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketPubSubNotification.java @@ -0,0 +1,65 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_create_bucket_notifications] +import com.google.cloud.storage.Notification; +import com.google.cloud.storage.NotificationInfo; +import com.google.cloud.storage.NotificationInfo.EventType; +import com.google.cloud.storage.NotificationInfo.PayloadFormat; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.util.Map; + +public class CreateBucketPubSubNotification { + + public static void createBucketPubSubNotification( + String bucketName, + String topicName, + Map customAttributes, + EventType[] eventTypes, + String objectNamePrefix, + PayloadFormat payloadFormat) { + // The ID to give your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The name of the topic you would like to create a notification for + // String topicName = "projects/{your-project}/topics/{your-topic}"; + + // Any custom attributes + // Map customAttributes = Map.of("label", "value"); + + // The object name prefix for which this notification configuration applies + // String objectNamePrefix = "blob-"; + + // Desired content of the Payload + // PayloadFormat payloadFormat = PayloadFormat.JSON_API_V1.JSON_API_V1; + + Storage storage = StorageOptions.newBuilder().build().getService(); + NotificationInfo notificationInfo = + NotificationInfo.newBuilder(topicName) + .setCustomAttributes(customAttributes) + .setEventTypes(eventTypes) + .setObjectNamePrefix(objectNamePrefix) + .setPayloadFormat(payloadFormat) + .build(); + Notification notification = storage.createNotification(bucketName, notificationInfo); + String topic = notification.getTopic(); + System.out.println("Successfully created notification for topic " + topic); + } +} +// [END storage_create_bucket_notifications] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithObjectRetention.java 
b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithObjectRetention.java new file mode 100644 index 000000000000..5854a6da2cf8 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithObjectRetention.java @@ -0,0 +1,48 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_create_bucket_with_object_retention] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class CreateBucketWithObjectRetention { + public static void createBucketWithObjectRetention(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID to give your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + Bucket bucket = + storage.create( + BucketInfo.of(bucketName), Storage.BucketTargetOption.enableObjectRetention(true)); + + System.out.println( + "Created bucket " + + bucket.getName() + + " with object retention enabled setting: " + + bucket.getObjectRetention().getMode().toString()); + } +} + +// [END storage_create_bucket_with_object_retention] diff --git 
a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithStorageClassAndLocation.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithStorageClassAndLocation.java new file mode 100644 index 000000000000..c1cb336b8112 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithStorageClassAndLocation.java @@ -0,0 +1,60 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_create_bucket_class_location] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageClass; +import com.google.cloud.storage.StorageOptions; + +public class CreateBucketWithStorageClassAndLocation { + public static void createBucketWithStorageClassAndLocation(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID to give your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + // See the StorageClass documentation for other valid storage classes: + // https://googleapis.dev/java/google-cloud-clients/latest/com/google/cloud/storage/StorageClass.html + StorageClass storageClass = StorageClass.COLDLINE; + + // See this documentation for other valid locations: + // http://g.co/cloud/storage/docs/bucket-locations#location-mr + String location = "ASIA"; + + Bucket bucket = + storage.create( + BucketInfo.newBuilder(bucketName) + .setStorageClass(storageClass) + .setLocation(location) + .build()); + + System.out.println( + "Created bucket " + + bucket.getName() + + " in " + + bucket.getLocation() + + " with storage class " + + bucket.getStorageClass()); + } +} +// [END storage_create_bucket_class_location] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithTurboReplication.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithTurboReplication.java new file mode 100644 index 000000000000..7c67b64e3da0 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/CreateBucketWithTurboReplication.java @@ -0,0 +1,56 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 
(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_create_bucket_turbo_replication] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Rpo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class CreateBucketWithTurboReplication { + public static void createBucketWithTurboReplication( + String projectId, String bucketName, String location) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID to give your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The dual-region location to create your bucket in + // String location = "NAM4" + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + Bucket bucket = + storage.create( + BucketInfo.newBuilder(bucketName) + .setLocation(location) + .setRpo(Rpo.ASYNC_TURBO) + .build()); + + System.out.println( + "Created bucket " + + bucket.getName() + + " in " + + bucket.getLocation() + + " with RPO setting" + + bucket.getRpo()); + } +} +// [END storage_create_bucket_turbo_replication] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DeleteBucket.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DeleteBucket.java new file mode 100644 index 000000000000..925ca0d22964 --- /dev/null +++ 
b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DeleteBucket.java @@ -0,0 +1,39 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_delete_bucket] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class DeleteBucket { + public static void deleteBucket(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of the bucket to delete + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + bucket.delete(); + + System.out.println("Bucket " + bucket.getName() + " was deleted"); + } +} +// [END storage_delete_bucket] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DeleteBucketPubSubNotification.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DeleteBucketPubSubNotification.java new file mode 100644 index 000000000000..dd59e2e18953 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DeleteBucketPubSubNotification.java @@ -0,0 +1,41 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you 
may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_delete_bucket_notification] +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class DeleteBucketPubSubNotification { + + public static void deleteBucketPubSubNotification(String bucketName, String notificationId) { + // The ID to give your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The NotificationId for the notification you would like to delete + // String notificationId = "your-unique-notification-id" + + Storage storage = StorageOptions.newBuilder().build().getService(); + boolean success = storage.deleteNotification(bucketName, notificationId); + if (success) { + System.out.println("Successfully deleted notification"); + } else { + System.out.println("Failed to find notification"); + } + } +} +// [END storage_delete_bucket_notification] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableBucketVersioning.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableBucketVersioning.java new file mode 100644 index 000000000000..49547c96a2fc --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableBucketVersioning.java @@ -0,0 +1,39 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_disable_versioning] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class DisableBucketVersioning { + public static void disableBucketVersioning(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + bucket.toBuilder().setVersioningEnabled(false).build().update(); + + System.out.println("Versioning is now disabled for bucket " + bucketName); + } +} +// [END storage_disable_versioning] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableDefaultEventBasedHold.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableDefaultEventBasedHold.java new file mode 100644 index 000000000000..a5529d60d853 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableDefaultEventBasedHold.java @@ -0,0 +1,46 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_disable_default_event_based_hold] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class DisableDefaultEventBasedHold { + public static void disableDefaultEventBasedHold(String projectId, String bucketName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + // first look up the bucket, so we will have its metageneration + Bucket bucket = storage.get(bucketName); + storage.update( + bucket.toBuilder().setDefaultEventBasedHold(false).build(), + BucketTargetOption.metagenerationMatch()); + + System.out.println("Default event-based hold was disabled for " + bucketName); + } +} +// [END storage_disable_default_event_based_hold] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableLifecycleManagement.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableLifecycleManagement.java new file mode 100644 index 000000000000..6463358ed1d7 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableLifecycleManagement.java @@ -0,0 +1,43 @@ +/* + * Copyright 
2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_disable_bucket_lifecycle_management] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.StorageOptions; + +public class DisableLifecycleManagement { + public static void disableLifecycleManagement(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + // first look up the bucket, so we will have its metageneration + Bucket bucket = storage.get(bucketName); + storage.update( + bucket.toBuilder().deleteLifecycleRules().build(), + BucketTargetOption.metagenerationMatch()); + + System.out.println("Lifecycle management was disabled for bucket " + bucketName); + } +} +// [END storage_disable_bucket_lifecycle_management] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableRequesterPays.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableRequesterPays.java new file mode 100644 index 000000000000..aaa9694c9ad7 --- /dev/null +++ 
b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableRequesterPays.java @@ -0,0 +1,42 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_disable_requester_pays] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class DisableRequesterPays { + public static void disableRequesterPays(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName, Storage.BucketGetOption.userProject(projectId)); + bucket.toBuilder() + .setRequesterPays(false) + .build() + .update(Storage.BucketTargetOption.userProject(projectId)); + + System.out.println("Requester pays disabled for bucket " + bucketName); + } +} +// [END storage_disable_requester_pays] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableSoftDelete.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableSoftDelete.java new file mode 100644 index 000000000000..97d657146939 --- /dev/null +++ 
b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableSoftDelete.java @@ -0,0 +1,48 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_disable_soft_delete] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.time.Duration; + +public class DisableSoftDelete { + public static void disableSoftDelete(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + bucket.toBuilder() + .setSoftDeletePolicy( + // Setting the retention duration to 0 disables Soft Delete. 
+ BucketInfo.SoftDeletePolicy.newBuilder() + .setRetentionDuration(Duration.ofSeconds(0)) + .build()) + .build() + .update(); + + System.out.println("Soft delete for " + bucketName + " was disabled"); + } +} +// [END storage_disable_soft_delete] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableUniformBucketLevelAccess.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableUniformBucketLevelAccess.java new file mode 100644 index 000000000000..dc77bc99f531 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/DisableUniformBucketLevelAccess.java @@ -0,0 +1,54 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_disable_uniform_bucket_level_access] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class DisableUniformBucketLevelAccess { + public static void disableUniformBucketLevelAccess(String projectId, String bucketName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + // first look up the bucket, so we will have its metageneration + Bucket bucket = storage.get(bucketName); + + BucketInfo.IamConfiguration iamConfiguration = + BucketInfo.IamConfiguration.newBuilder() + .setIsUniformBucketLevelAccessEnabled(false) + .build(); + + storage.update( + bucket.toBuilder().setIamConfiguration(iamConfiguration).build(), + BucketTargetOption.metagenerationMatch()); + + System.out.println("Uniform bucket-level access was disabled for " + bucketName); + } +} +// [END storage_disable_uniform_bucket_level_access] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableBucketVersioning.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableBucketVersioning.java new file mode 100644 index 000000000000..8685bf9278e5 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableBucketVersioning.java @@ -0,0 +1,39 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_enable_versioning] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class EnableBucketVersioning { + public static void enableBucketVersioning(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + bucket.toBuilder().setVersioningEnabled(true).build().update(); + + System.out.println("Versioning is now enabled for bucket " + bucketName); + } +} +// [END storage_enable_versioning] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableDefaultEventBasedHold.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableDefaultEventBasedHold.java new file mode 100644 index 000000000000..0ae5ba8a68fe --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableDefaultEventBasedHold.java @@ -0,0 +1,46 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_enable_default_event_based_hold] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class EnableDefaultEventBasedHold { + public static void enableDefaultEventBasedHold(String projectId, String bucketName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + // first look up the bucket, so we will have its metageneration + Bucket bucket = storage.get(bucketName); + storage.update( + bucket.toBuilder().setDefaultEventBasedHold(true).build(), + BucketTargetOption.metagenerationMatch()); + + System.out.println("Default event-based hold was enabled for " + bucketName); + } +} +// [END storage_enable_default_event_based_hold] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableLifecycleManagement.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableLifecycleManagement.java new file mode 100644 index 000000000000..bf2767321e0d --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableLifecycleManagement.java @@ -0,0 +1,55 @@ +/* + * Copyright 2020 Google 
LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_enable_bucket_lifecycle_management] +import static com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleAction; +import static com.google.cloud.storage.BucketInfo.LifecycleRule.LifecycleCondition; + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo.LifecycleRule; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.common.collect.ImmutableList; + +public class EnableLifecycleManagement { + public static void enableLifecycleManagement(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + + // See the LifecycleRule documentation for additional info on what you can do with lifecycle + // management rules. This one deletes objects that are over 100 days old. 
+ // https://googleapis.dev/java/google-cloud-clients/latest/com/google/cloud/storage/BucketInfo.LifecycleRule.html + bucket.toBuilder() + .setLifecycleRules( + ImmutableList.of( + new LifecycleRule( + LifecycleAction.newDeleteAction(), + LifecycleCondition.newBuilder().setAge(100).build()))) + .build() + .update(); + + System.out.println("Lifecycle management was enabled and configured for bucket " + bucketName); + } +} +// [END storage_enable_bucket_lifecycle_management] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableRequesterPays.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableRequesterPays.java new file mode 100644 index 000000000000..089cb2e54930 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableRequesterPays.java @@ -0,0 +1,39 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_enable_requester_pays] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class EnableRequesterPays { + public static void enableRequesterPays(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + bucket.toBuilder().setRequesterPays(true).build().update(); + + System.out.println("Requester pays enabled for bucket " + bucketName); + } +} +// [END storage_enable_requester_pays] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableUniformBucketLevelAccess.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableUniformBucketLevelAccess.java new file mode 100644 index 000000000000..a8ae606bd0b4 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/EnableUniformBucketLevelAccess.java @@ -0,0 +1,56 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_enable_uniform_bucket_level_access] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class EnableUniformBucketLevelAccess { + public static void enableUniformBucketLevelAccess(String projectId, String bucketName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + // first look up the bucket, so we will have its metageneration + Bucket bucket = storage.get(bucketName); + + BucketInfo.IamConfiguration iamConfiguration = + BucketInfo.IamConfiguration.newBuilder().setIsUniformBucketLevelAccessEnabled(true).build(); + + storage.update( + bucket.toBuilder() + .setIamConfiguration(iamConfiguration) + .setAcl(null) + .setDefaultAcl(null) + .build(), + BucketTargetOption.metagenerationMatch()); + + System.out.println("Uniform bucket-level access was enabled for " + bucketName); + } +} +// [END storage_enable_uniform_bucket_level_access] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetBucketAutoclass.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetBucketAutoclass.java new file mode 100644 index 000000000000..77430b03d1f5 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetBucketAutoclass.java @@ -0,0 +1,55 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
package com.example.storage.bucket;

// [START storage_get_autoclass]

import com.google.cloud.storage.BucketInfo.Autoclass;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageClass;
import com.google.cloud.storage.StorageOptions;
import java.time.OffsetDateTime;

/** Sample: read and print the Autoclass configuration of a bucket. */
public class GetBucketAutoclass {
  public static void getBucketAutoclass(String projectId, String bucketName) {
    // The ID of your GCP project
    // String projectId = "your-project-id";

    // The ID of your GCS bucket
    // String bucketName = "your-unique-bucket-name";

    Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();
    // NOTE(review): getAutoclass() presumably returns null for a bucket that has never had
    // Autoclass configured — confirm; if so, this sample NPEs on such buckets.
    Autoclass autoclass = storage.get(bucketName).getAutoclass();
    String status = autoclass.getEnabled() ? "enabled" : "disabled";
    String toggleTime = autoclass.getToggleTime().toString();
    StorageClass terminalStorageClass = autoclass.getTerminalStorageClass();
    OffsetDateTime terminalStorageClassUpdateTime = autoclass.getTerminalStorageClassUpdateTime();

    System.out.println(
        "Autoclass is currently "
            + status
            + " for bucket "
            + bucketName
            + " and was last changed at "
            + toggleTime
            + ". The terminal storage class is set to be "
            + terminalStorageClass.name()
            + " last updated at "
            + terminalStorageClassUpdateTime.toString());
  }
}
// [END storage_get_autoclass]

package com.example.storage.bucket;

// [START storage_get_bucket_metadata]

import com.google.cloud.storage.Bucket;
import com.google.cloud.storage.BucketInfo;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import java.util.Map;

/** Sample: fetch a bucket with all metadata fields selected and print them. */
public class GetBucketMetadata {
  public static void getBucketMetadata(String projectId, String bucketName) {
    // The ID of your GCP project
    // String projectId = "your-project-id";

    // The ID of your GCS bucket
    // String bucketName = "your-unique-bucket-name";

    Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();

    // Select all fields. Fields can be selected individually e.g. Storage.BucketField.NAME
    Bucket bucket =
        storage.get(bucketName, Storage.BucketGetOption.fields(Storage.BucketField.values()));

    // Print bucket metadata
    System.out.println("BucketName: " + bucket.getName());
    System.out.println("DefaultEventBasedHold: " + bucket.getDefaultEventBasedHold());
    System.out.println("DefaultKmsKeyName: " + bucket.getDefaultKmsKeyName());
    System.out.println("Id: " + bucket.getGeneratedId());
    System.out.println("IndexPage: " + bucket.getIndexPage());
    System.out.println("Location: " + bucket.getLocation());
    System.out.println("LocationType: " + bucket.getLocationType());
    System.out.println("Metageneration: " + bucket.getMetageneration());
    System.out.println("NotFoundPage: " + bucket.getNotFoundPage());
    System.out.println("RetentionEffectiveTime: " + bucket.getRetentionEffectiveTime());
    System.out.println("RetentionPeriod: " + bucket.getRetentionPeriod());
    System.out.println("RetentionPolicyIsLocked: " + bucket.retentionPolicyIsLocked());
    System.out.println("RequesterPays: " + bucket.requesterPays());
    System.out.println("SelfLink: " + bucket.getSelfLink());
    System.out.println("StorageClass: " + bucket.getStorageClass().name());
    System.out.println("TimeCreated: " + bucket.getCreateTime());
    System.out.println("VersioningEnabled: " + bucket.versioningEnabled());
    System.out.println("ObjectRetention: " + bucket.getObjectRetention());
    if (bucket.getLabels() != null) {
      System.out.println("\n\n\nLabels:");
      // Fix: use the parameterized Map.Entry<String, String> (getLabels() returns
      // Map<String, String>) instead of the raw Map.Entry type.
      for (Map.Entry<String, String> label : bucket.getLabels().entrySet()) {
        System.out.println(label.getKey() + "=" + label.getValue());
      }
    }
    if (bucket.getLifecycleRules() != null) {
      System.out.println("\n\n\nLifecycle Rules:");
      for (BucketInfo.LifecycleRule rule : bucket.getLifecycleRules()) {
        System.out.println(rule);
      }
    }
  }
}
// [END storage_get_bucket_metadata]
b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetBucketRpo.java new file mode 100644 index 000000000000..a0bc61681df1 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetBucketRpo.java @@ -0,0 +1,39 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_get_rpo] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class GetBucketRpo { + public static void getBucketRpo(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + String rpo = bucket.getRpo().toString(); + + System.out.println("The RPO setting of bucket " + bucketName + " is " + rpo); + } +} +// [END storage_get_rpo] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetDefaultEventBasedHold.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetDefaultEventBasedHold.java new file mode 100644 index 000000000000..7bf993384310 --- /dev/null +++ 
b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetDefaultEventBasedHold.java @@ -0,0 +1,48 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_get_default_event_based_hold] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class GetDefaultEventBasedHold { + public static void getDefaultEventBasedHold(String projectId, String bucketName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = + storage.get( + bucketName, + Storage.BucketGetOption.fields(Storage.BucketField.DEFAULT_EVENT_BASED_HOLD)); + + if (bucket.getDefaultEventBasedHold() != null && bucket.getDefaultEventBasedHold()) { + System.out.println("Default event-based hold is enabled for " + bucketName); + } else { + System.out.println("Default event-based hold is not enabled for " + bucketName); + } + } +} +// [END storage_get_default_event_based_hold] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetPublicAccessPrevention.java 
b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetPublicAccessPrevention.java new file mode 100644 index 000000000000..204faf0f46a3 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetPublicAccessPrevention.java @@ -0,0 +1,48 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_get_public_access_prevention] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class GetPublicAccessPrevention { + public static void getPublicAccessPrevention(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + + // Gets Bucket Metadata and prints publicAccessPrevention value (either 'inherited' or + // 'enforced'). 
+ BucketInfo.PublicAccessPrevention publicAccessPrevention = + bucket.getIamConfiguration().getPublicAccessPrevention(); + + System.out.println( + "Public access prevention is set to " + + publicAccessPrevention.getValue() + + " for " + + bucketName); + } +} +// [END storage_get_public_access_prevention] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetRequesterPaysStatus.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetRequesterPaysStatus.java new file mode 100644 index 000000000000..5665cf91066e --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetRequesterPaysStatus.java @@ -0,0 +1,42 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_get_requester_pays_status] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class GetRequesterPaysStatus { + public static void getRequesterPaysStatus(String projectId, String bucketName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = + storage.get(bucketName, Storage.BucketGetOption.fields(Storage.BucketField.BILLING)); + + System.out.println("Requester pays status : " + bucket.requesterPays()); + } +} +// [END storage_get_requester_pays_status] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetRetentionPolicy.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetRetentionPolicy.java new file mode 100644 index 000000000000..09938efd088e --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetRetentionPolicy.java @@ -0,0 +1,51 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_get_retention_policy] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import java.util.Date; + +public class GetRetentionPolicy { + public static void getRetentionPolicy(String projectId, String bucketName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = + storage.get( + bucketName, Storage.BucketGetOption.fields(Storage.BucketField.RETENTION_POLICY)); + + System.out.println("Retention Policy for " + bucketName); + System.out.println("Retention Period: " + bucket.getRetentionPeriod()); + if (bucket.retentionPolicyIsLocked() != null && bucket.retentionPolicyIsLocked()) { + System.out.println("Retention Policy is locked"); + } + if (bucket.getRetentionEffectiveTime() != null) { + System.out.println("Effective Time: " + new Date(bucket.getRetentionEffectiveTime())); + } + } +} +// [END storage_get_retention_policy] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetSoftDeletePolicy.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetSoftDeletePolicy.java new file mode 100644 index 000000000000..32f4277a050f --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetSoftDeletePolicy.java @@ -0,0 +1,44 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_get_soft_delete_policy] +import com.google.cloud.storage.BucketInfo.SoftDeletePolicy; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.time.Duration; + +public class GetSoftDeletePolicy { + public static void getSoftDeletePolicy(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + SoftDeletePolicy policy = storage.get(bucketName).getSoftDeletePolicy(); + + if (Duration.ofSeconds(0).equals(policy.getRetentionDuration())) { + System.out.println("Soft delete is disabled for " + bucketName); + } else { + System.out.println("The soft delete policy for " + bucketName + " is:"); + System.out.println(policy); + } + } +} +// [END storage_get_soft_delete_policy] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetUniformBucketLevelAccess.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetUniformBucketLevelAccess.java new file mode 100644 index 000000000000..4e596887d93d --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/GetUniformBucketLevelAccess.java @@ -0,0 +1,54 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this 
file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_get_uniform_bucket_level_access] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import java.util.Date; + +public class GetUniformBucketLevelAccess { + public static void getUniformBucketLevelAccess(String projectId, String bucketName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = + storage.get( + bucketName, Storage.BucketGetOption.fields(Storage.BucketField.IAMCONFIGURATION)); + BucketInfo.IamConfiguration iamConfiguration = bucket.getIamConfiguration(); + + Boolean enabled = iamConfiguration.isUniformBucketLevelAccessEnabled(); + Date lockedTime = new Date(iamConfiguration.getUniformBucketLevelAccessLockedTime()); + + if (enabled != null && enabled) { + System.out.println("Uniform bucket-level access is enabled for " + bucketName); + System.out.println("Bucket will be locked on " + lockedTime); + } else { + System.out.println("Uniform bucket-level access is disabled for " + bucketName); + } + } +} +// [END storage_get_uniform_bucket_level_access] diff --git 
a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ListBucketIamMembers.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ListBucketIamMembers.java new file mode 100644 index 000000000000..4bbf3704b25a --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ListBucketIamMembers.java @@ -0,0 +1,54 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_view_bucket_iam_members] +import com.google.cloud.Binding; +import com.google.cloud.Policy; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class ListBucketIamMembers { + public static void listBucketIamMembers(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // For more information please read: + // https://cloud.google.com/storage/docs/access-control/iam + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + Policy policy = + storage.getIamPolicy(bucketName, Storage.BucketSourceOption.requestedPolicyVersion(3)); + + // Print binding information + for (Binding binding : policy.getBindingsList()) { + System.out.printf("Role: %s Members: %s\n", binding.getRole(), binding.getMembers()); + + 
// Print condition if one is set + boolean bindingIsConditional = binding.getCondition() != null; + if (bindingIsConditional) { + System.out.printf("Condition Title: %s\n", binding.getCondition().getTitle()); + System.out.printf("Condition Description: %s\n", binding.getCondition().getDescription()); + System.out.printf("Condition Expression: %s\n", binding.getCondition().getExpression()); + } + } + } +} +// [END storage_view_bucket_iam_members] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ListBuckets.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ListBuckets.java new file mode 100644 index 000000000000..cd6237cb14d9 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ListBuckets.java @@ -0,0 +1,38 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_list_buckets] +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class ListBuckets { + public static void listBuckets(String projectId) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Page buckets = storage.list(); + + for (Bucket bucket : buckets.iterateAll()) { + System.out.println(bucket.getName()); + } + } +} +// [END storage_list_buckets] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ListBucketsWithPartialSuccess.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ListBucketsWithPartialSuccess.java new file mode 100644 index 000000000000..53ed4440e8e4 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ListBucketsWithPartialSuccess.java @@ -0,0 +1,44 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_list_buckets_partial_success] + +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class ListBucketsWithPartialSuccess { + public static void listBucketsWithPartialSuccess(String projectId) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Page buckets = storage.list(Storage.BucketListOption.returnPartialSuccess(true)); + + // Retrieve the list of buckets that are unreachable due to issues like regional outages or + // permission issues + System.out.println("Unreachable buckets: \n"); + for (Bucket bucket : buckets.iterateAll()) { + if (Boolean.TRUE.equals(bucket.isUnreachable())) { + System.out.println(bucket.getName()); + } + } + } +} +// [END storage_list_buckets_partial_success] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ListPubSubNotifications.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ListPubSubNotifications.java new file mode 100644 index 000000000000..b3fb614f8b7e --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/ListPubSubNotifications.java @@ -0,0 +1,39 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_list_bucket_notifications] +import com.google.cloud.storage.Notification; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.util.List; + +public class ListPubSubNotifications { + + public static void listPubSubNotifications(String bucketName) { + // The ID to give your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().build().getService(); + List notificationList = storage.listNotifications(bucketName); + for (Notification notification : notificationList) { + System.out.println( + "Found notification " + notification.getTopic() + " for bucket " + bucketName); + } + } +} +// [END storage_list_bucket_notifications] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/LockRetentionPolicy.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/LockRetentionPolicy.java new file mode 100644 index 000000000000..fd339f4e3ad7 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/LockRetentionPolicy.java @@ -0,0 +1,47 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_lock_retention_policy] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import java.util.Date; + +public class LockRetentionPolicy { + public static void lockRetentionPolicy(String projectId, String bucketName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = + storage.get(bucketName, Storage.BucketGetOption.fields(Storage.BucketField.METAGENERATION)); + Bucket lockedBucket = + bucket.lockRetentionPolicy(Storage.BucketTargetOption.metagenerationMatch()); + + System.out.println("Retention period for " + bucketName + " is now locked"); + System.out.println( + "Retention policy effective as of " + new Date(lockedBucket.getRetentionEffectiveTime())); + } +} +// [END storage_lock_retention_policy] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/MakeBucketPublic.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/MakeBucketPublic.java new file mode 100644 index 000000000000..09e9b3207436 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/MakeBucketPublic.java @@ -0,0 +1,45 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_set_bucket_public_iam] +import com.google.cloud.Identity; +import com.google.cloud.Policy; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.StorageRoles; + +public class MakeBucketPublic { + public static void makeBucketPublic(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Policy originalPolicy = storage.getIamPolicy(bucketName); + storage.setIamPolicy( + bucketName, + originalPolicy.toBuilder() + .addIdentity(StorageRoles.objectViewer(), Identity.allUsers()) // All users can view + .build()); + + System.out.println("Bucket " + bucketName + " is now publicly readable"); + } +} +// [END storage_set_bucket_public_iam] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/PrintBucketAcl.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/PrintBucketAcl.java new file mode 100644 index 000000000000..57974f7a5f04 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/PrintBucketAcl.java @@ -0,0 +1,51 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage.bucket;

// [START storage_print_bucket_acl]

import com.google.cloud.storage.Acl;
import com.google.cloud.storage.Bucket;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import java.util.List;

public class PrintBucketAcl {

  /**
   * Prints each entry of a bucket's access control list as "ROLE: ENTITY_TYPE".
   *
   * @param projectId the ID of your GCP project
   * @param bucketName the name of the bucket whose ACL is printed
   */
  public static void printBucketAcl(String projectId, String bucketName) {
    Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();
    Bucket bucket = storage.get(bucketName);

    // Fix: use the parameterized List<Acl> rather than the raw List type.
    List<Acl> bucketAcls = bucket.getAcl();

    for (Acl acl : bucketAcls) {
      // This will give you the role.
      // See https://cloud.google.com/storage/docs/access-control/lists#permissions
      String role = acl.getRole().name();

      // This will give you the Entity type (i.e. User, Group, Project etc.)
      // See https://cloud.google.com/storage/docs/access-control/lists#scopes
      String entityType = acl.getEntity().getType().name();

      System.out.printf("%s: %s \n", role, entityType);
    }
  }
}
// [END storage_print_bucket_acl]

// ---- File: PrintBucketAclFilterByUser.java ----

/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage.bucket;

// [START storage_print_bucket_acl_for_user]

import com.google.cloud.storage.Acl;
import com.google.cloud.storage.Acl.User;
import com.google.cloud.storage.Bucket;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;

public class PrintBucketAclFilterByUser {

  /**
   * Prints the ACL role a specific user holds on a bucket.
   *
   * @param bucketName the name of the bucket whose ACL is inspected
   * @param userEmail the email of the user whose acl is being retrieved, e.g. "someuser@domain.com"
   */
  public static void printBucketAclFilterByUser(String bucketName, String userEmail) {
    Storage storage = StorageOptions.newBuilder().build().getService();
    Bucket bucket = storage.get(bucketName);

    Acl userAcl = bucket.getAcl(new User(userEmail));

    // Fix: getAcl returns null when there is no entry for the entity; the original code
    // would throw a NullPointerException in that case.
    if (userAcl == null) {
      System.out.println("User " + userEmail + " was not found");
      return;
    }

    String userRole = userAcl.getRole().name();
    System.out.println("User " + userEmail + " has role " + userRole);
  }
}

// [END storage_print_bucket_acl_for_user]
+ */ + +package com.example.storage.bucket; + +// [START storage_print_pubsub_bucket_notification] +import com.google.cloud.storage.Notification; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class PrintPubSubNotification { + + public static void printPubSubNotification(String bucketName, String notificationId) { + // The ID to give your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The Pub/Sub topic you would like to find + // String notificationId = "your-unique-notification-id" + + Storage storage = StorageOptions.newBuilder().build().getService(); + Notification notification = storage.getNotification(bucketName, notificationId); + System.out.println( + "Found notification " + notification.getTopic() + " for bucket " + bucketName); + } +} +// [END storage_print_pubsub_bucket_notification] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketCors.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketCors.java new file mode 100644 index 000000000000..220a24cb4e26 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketCors.java @@ -0,0 +1,51 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_remove_cors_configuration] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Cors; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.util.ArrayList; +import java.util.List; + +public class RemoveBucketCors { + public static void removeBucketCors(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = + storage.get(bucketName, Storage.BucketGetOption.fields(Storage.BucketField.CORS)); + + // getCors() returns the List and copying over to an ArrayList so it's mutable. + List cors = new ArrayList<>(bucket.getCors()); + + // Clear bucket CORS configuration. + cors.clear(); + + // Update bucket to remove CORS. + bucket.toBuilder().setCors(cors).build().update(); + System.out.println("Removed CORS configuration from bucket " + bucketName); + } +} +// [END storage_remove_cors_configuration] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketDefaultKmsKey.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketDefaultKmsKey.java new file mode 100644 index 000000000000..26c074750723 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketDefaultKmsKey.java @@ -0,0 +1,43 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_bucket_delete_default_kms_key] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.StorageOptions; + +public class RemoveBucketDefaultKmsKey { + public static void removeBucketDefaultKmsKey(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + // first look up the bucket, so we will have its metageneration + Bucket bucket = storage.get(bucketName); + storage.update( + bucket.toBuilder().setDefaultKmsKeyName(null).build(), + BucketTargetOption.metagenerationMatch()); + + System.out.println("Default KMS key was removed from " + bucketName); + } +} +// [END storage_bucket_delete_default_kms_key] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketDefaultOwner.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketDefaultOwner.java new file mode 100644 index 000000000000..f3e342b284dd --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketDefaultOwner.java @@ -0,0 +1,48 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this 
file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_remove_bucket_default_owner] + +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class RemoveBucketDefaultOwner { + + public static void removeBucketDefaultOwner(String bucketName, String userEmail) { + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The email of the user you wish to remove as a default owner + // String userEmail = "someuser@domain.com" + + Storage storage = StorageOptions.newBuilder().build().getService(); + Bucket bucket = storage.get(bucketName); + User userToRemove = new User(userEmail); + + boolean success = bucket.deleteDefaultAcl(userToRemove); + if (success) { + System.out.println("Removed user " + userEmail + " as an owner on " + bucketName); + } else { + System.out.println("User " + userEmail + " was not found"); + } + } +} +// [END storage_remove_bucket_default_owner] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketIamConditionalBinding.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketIamConditionalBinding.java new file mode 100644 index 000000000000..71ea948177b7 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketIamConditionalBinding.java @@ -0,0 +1,79 @@ +/* + * Copyright 2022 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_remove_bucket_conditional_iam_binding] + +import com.google.cloud.Binding; +import com.google.cloud.Condition; +import com.google.cloud.Policy; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +public class RemoveBucketIamConditionalBinding { + /** Example of removing a conditional binding to the Bucket-level IAM */ + public static void removeBucketIamConditionalBinding(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // For more information please read: + // https://cloud.google.com/storage/docs/access-control/iam + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + Policy originalPolicy = + storage.getIamPolicy(bucketName, Storage.BucketSourceOption.requestedPolicyVersion(3)); + + String role = "roles/storage.objectViewer"; + + // getBindingsList() returns an ImmutableList and copying over to an ArrayList so it's mutable. 
+ List bindings = new ArrayList(originalPolicy.getBindingsList()); + + // Create a condition to compare against + Condition.Builder conditionBuilder = Condition.newBuilder(); + conditionBuilder.setTitle("Title"); + conditionBuilder.setDescription("Description"); + conditionBuilder.setExpression( + "resource.name.startsWith(\"projects/_/buckets/bucket-name/objects/prefix-a-\")"); + + Iterator iterator = bindings.iterator(); + while (iterator.hasNext()) { + Binding binding = (Binding) iterator.next(); + boolean foundRole = binding.getRole().equals(role); + boolean conditionsEqual = conditionBuilder.build().equals(binding.getCondition()); + + // Remove condition when the role and condition are equal + if (foundRole && conditionsEqual) { + iterator.remove(); + break; + } + } + + // Update policy to remove conditional binding + Policy.Builder updatedPolicyBuilder = originalPolicy.toBuilder(); + updatedPolicyBuilder.setBindings(bindings).setVersion(3); + Policy updatedPolicy = storage.setIamPolicy(bucketName, updatedPolicyBuilder.build()); + + System.out.println("Conditional Binding was removed."); + } +} +// [END storage_remove_bucket_conditional_iam_binding] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketIamMember.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketIamMember.java new file mode 100644 index 000000000000..026b3ab09427 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketIamMember.java @@ -0,0 +1,70 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_remove_bucket_iam_member] + +import com.google.cloud.Binding; +import com.google.cloud.Policy; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.util.ArrayList; +import java.util.List; + +public class RemoveBucketIamMember { + public static void removeBucketIamMember(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // For more information please read: + // https://cloud.google.com/storage/docs/access-control/iam + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + Policy originalPolicy = + storage.getIamPolicy(bucketName, Storage.BucketSourceOption.requestedPolicyVersion(3)); + + String role = "roles/storage.objectViewer"; + String member = "group:example@google.com"; + + // getBindingsList() returns an ImmutableList and copying over to an ArrayList so it's mutable. + List bindings = new ArrayList(originalPolicy.getBindingsList()); + + // Remove role-member binding without a condition. 
+ for (int index = 0; index < bindings.size(); index++) { + Binding binding = bindings.get(index); + boolean foundRole = binding.getRole().equals(role); + boolean foundMember = binding.getMembers().contains(member); + boolean bindingIsNotConditional = binding.getCondition() == null; + + if (foundRole && foundMember && bindingIsNotConditional) { + bindings.set(index, binding.toBuilder().removeMembers(member).build()); + break; + } + } + + // Update policy to remove member + Policy.Builder updatedPolicyBuilder = originalPolicy.toBuilder(); + updatedPolicyBuilder.setBindings(bindings).setVersion(3); + Policy updatedPolicy = storage.setIamPolicy(bucketName, updatedPolicyBuilder.build()); + + System.out.printf("Removed %s with role %s from %s\n", member, role, bucketName); + } +} +// [END storage_remove_bucket_iam_member] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketLabel.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketLabel.java new file mode 100644 index 000000000000..1e63ba76d348 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketLabel.java @@ -0,0 +1,56 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_remove_bucket_label] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.util.HashMap; +import java.util.Map; + +public class RemoveBucketLabel { + public static void removeBucketLabel(String projectId, String bucketName, String labelKey) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The key of the label to remove from the bucket + // String labelKey = "label-key-to-remove"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + Map labelsToRemove = new HashMap<>(); + labelsToRemove.put(labelKey, null); + + Bucket bucket = storage.get(bucketName); + Map labels; + if (bucket.getLabels() == null) { + labels = new HashMap<>(); + } else { + labels = new HashMap(bucket.getLabels()); + } + labels.putAll(labelsToRemove); + bucket.toBuilder().setLabels(labels).build().update(); + + System.out.println("Removed label " + labelKey + " from bucket " + bucketName); + } +} +// [END storage_remove_bucket_label] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketOwner.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketOwner.java new file mode 100644 index 000000000000..07334b78d50d --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveBucketOwner.java @@ -0,0 +1,52 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_remove_bucket_owner] + +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class RemoveBucketOwner { + + public static void removeBucketOwner(String projectId, String bucketName, String userEmail) { + + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // Email of the user you wish to remove as an owner + // String userEmail = "someuser@domain.com" + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + User ownerToRemove = new User(userEmail); + + boolean success = bucket.deleteAcl(ownerToRemove); + if (success) { + System.out.println("Removed user " + userEmail + " as an owner on " + bucketName); + } else { + System.out.println("User " + userEmail + " was not found"); + } + } +} + +// [END storage_remove_bucket_owner] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveRetentionPolicy.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveRetentionPolicy.java new file mode 100644 index 000000000000..8ad4875b29f6 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/RemoveRetentionPolicy.java @@ -0,0 +1,50 @@ +/* + * Copyright 2022 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_remove_retention_policy] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class RemoveRetentionPolicy { + public static void removeRetentionPolicy(String projectId, String bucketName) + throws StorageException, IllegalArgumentException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + Bucket bucket = + storage.get( + bucketName, Storage.BucketGetOption.fields(Storage.BucketField.RETENTION_POLICY)); + if (bucket.retentionPolicyIsLocked() != null && bucket.retentionPolicyIsLocked()) { + throw new IllegalArgumentException( + "Unable to remove retention policy as retention policy is locked."); + } + + bucket.toBuilder().setRetentionPeriod(null).build().update(); + + System.out.println("Retention policy for " + bucketName + " has been removed"); + } +} +// [END storage_remove_retention_policy] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetAsyncTurboRpo.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetAsyncTurboRpo.java new file mode 
100644 index 000000000000..137e940b26f1 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetAsyncTurboRpo.java @@ -0,0 +1,41 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_set_rpo_async_turbo] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Rpo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class SetAsyncTurboRpo { + public static void setAsyncTurboRpo(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + + bucket.toBuilder().setRpo(Rpo.ASYNC_TURBO).build().update(); + + System.out.println("Turbo replication was enabled for " + bucketName); + } +} +// [END storage_set_rpo_async_turbo] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetBucketAutoclass.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetBucketAutoclass.java new file mode 100644 index 000000000000..395acc023f3b --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetBucketAutoclass.java @@ 
-0,0 +1,73 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_set_autoclass] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo.Autoclass; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.StorageClass; +import com.google.cloud.storage.StorageOptions; + +public class SetBucketAutoclass { + public static void setBucketAutoclass( + String projectId, String bucketName, StorageClass storageClass) throws Exception { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The storage class that objects in an Autoclass bucket eventually transition to if not read + // for a certain length of time + // StorageClass storageClass = StorageClass.ARCHIVE; + + // Configure the Autoclass setting for a bucket. + + // Note: terminal_storage_class field is optional and defaults to NEARLINE if not otherwise + // specified. Valid terminal_storage_class values are NEARLINE and ARCHIVE. 
+ boolean enabled = true; + + try (Storage storage = + StorageOptions.newBuilder().setProjectId(projectId).build().getService()) { + Bucket bucket = storage.get(bucketName); + + Bucket toUpdate = + bucket.toBuilder() + .setAutoclass( + Autoclass.newBuilder() + .setEnabled(enabled) + .setTerminalStorageClass(storageClass) + .build()) + .build(); + + Bucket updated = storage.update(toUpdate, BucketTargetOption.metagenerationMatch()); + + System.out.println( + "Autoclass for bucket " + + bucketName + + " was " + + (updated.getAutoclass().getEnabled() ? "enabled." : "disabled.")); + System.out.println( + "Autoclass terminal storage class is " + + updated.getAutoclass().getTerminalStorageClass().toString()); + } + } +} +// [END storage_set_autoclass] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetBucketDefaultKmsKey.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetBucketDefaultKmsKey.java new file mode 100644 index 000000000000..995001dbb111 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetBucketDefaultKmsKey.java @@ -0,0 +1,56 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_set_bucket_default_kms_key] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class SetBucketDefaultKmsKey { + public static void setBucketDefaultKmsKey(String projectId, String bucketName, String kmsKeyName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The name of the KMS key to use as a default + // String kmsKeyName = + // "projects/your-project-id/locations/us/keyRings/my_key_ring/cryptoKeys/my_key" + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + // first look up the bucket, so we will have its metageneration + Bucket bucket = storage.get(bucketName); + + Bucket updated = + storage.update( + bucket.toBuilder().setDefaultKmsKeyName(kmsKeyName).build(), + BucketTargetOption.metagenerationMatch()); + + System.out.println( + "KMS Key " + + updated.getDefaultKmsKeyName() + + "was set to default for bucket " + + bucketName); + } +} +// [END storage_set_bucket_default_kms_key] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetBucketWebsiteInfo.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetBucketWebsiteInfo.java new file mode 100644 index 000000000000..b9e1afd41635 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetBucketWebsiteInfo.java @@ -0,0 +1,53 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_define_bucket_website_configuration] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class SetBucketWebsiteInfo { + public static void setBucketWesbiteInfo( + String projectId, String bucketName, String indexPage, String notFoundPage) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your static website bucket + // String bucketName = "www.example.com"; + + // The index page for a static website bucket + // String indexPage = "index.html"; + + // The 404 page for a static website bucket + // String notFoundPage = "404.html"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + bucket.toBuilder().setIndexPage(indexPage).setNotFoundPage(notFoundPage).build().update(); + + System.out.println( + "Static website bucket " + + bucketName + + " is set up to use " + + indexPage + + " as the index page and " + + notFoundPage + + " as the 404 page"); + } +} +// [END storage_define_bucket_website_configuration] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetClientEndpoint.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetClientEndpoint.java new file mode 100644 index 000000000000..9fdf7b976616 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetClientEndpoint.java 
@@ -0,0 +1,41 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_set_client_endpoint] + +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class SetClientEndpoint { + + public static void setClientEndpoint(String projectId, String endpoint) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The endpoint you wish to target + // String endpoint = "https://storage.googleapis.com" + + Storage storage = + StorageOptions.newBuilder().setProjectId(projectId).setHost(endpoint).build().getService(); + + System.out.println( + "Storage Client initialized with endpoint " + storage.getOptions().getHost()); + } +} + +// [END storage_set_client_endpoint] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetDefaultRpo.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetDefaultRpo.java new file mode 100644 index 000000000000..c135e0436f34 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetDefaultRpo.java @@ -0,0 +1,41 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_set_rpo_default] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Rpo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class SetDefaultRpo { + public static void setDefaultRpo(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + + bucket.toBuilder().setRpo(Rpo.DEFAULT).build().update(); + + System.out.println("Replication was set to default for " + bucketName); + } +} +// [END storage_set_rpo_default] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetPublicAccessPreventionEnforced.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetPublicAccessPreventionEnforced.java new file mode 100644 index 000000000000..c959dce21030 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetPublicAccessPreventionEnforced.java @@ -0,0 +1,48 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_set_public_access_prevention_enforced] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class SetPublicAccessPreventionEnforced { + public static void setPublicAccessPreventionEnforced(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + + // Enforces public access prevention for the bucket + bucket.toBuilder() + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setPublicAccessPrevention(BucketInfo.PublicAccessPrevention.ENFORCED) + .build()) + .build() + .update(); + + System.out.println("Public access prevention is set to enforced for " + bucketName); + } +} +// [END storage_set_public_access_prevention_enforced] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetPublicAccessPreventionInherited.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetPublicAccessPreventionInherited.java new file mode 100644 index 000000000000..0208f7082428 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetPublicAccessPreventionInherited.java @@ -0,0 +1,48 @@ +/* 
+ * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_set_public_access_prevention_inherited] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class SetPublicAccessPreventionInherited { + public static void setPublicAccessPreventionInherited(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + + // Sets public access prevention to 'inherited' for the bucket + bucket.toBuilder() + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setPublicAccessPrevention(BucketInfo.PublicAccessPrevention.INHERITED) + .build()) + .build() + .update(); + + System.out.println("Public access prevention is set to 'inherited' for " + bucketName); + } +} +// [END storage_set_public_access_prevention_inherited] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetRetentionPolicy.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetRetentionPolicy.java new file mode 100644 index 
000000000000..4491ed389781 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetRetentionPolicy.java @@ -0,0 +1,58 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +// [START storage_set_retention_policy] + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import java.time.Duration; + +public class SetRetentionPolicy { + public static void setRetentionPolicy( + String projectId, String bucketName, Long retentionPeriodSeconds) throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The retention period for objects in bucket + // Long retentionPeriodSeconds = 3600L; // 1 hour in seconds + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + // first look up the bucket so we will have its metageneration + Bucket bucket = storage.get(bucketName); + Bucket bucketWithRetentionPolicy = + storage.update( + bucket.toBuilder() + .setRetentionPeriodDuration(Duration.ofSeconds(retentionPeriodSeconds)) + .build(), + BucketTargetOption.metagenerationMatch()); + + 
System.out.println( + "Retention period for " + + bucketName + + " is now " + + bucketWithRetentionPolicy.getRetentionPeriodDuration()); + } +} +// [END storage_set_retention_policy] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetSoftDeletePolicy.java b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetSoftDeletePolicy.java new file mode 100644 index 000000000000..e923cb2faa21 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/bucket/SetSoftDeletePolicy.java @@ -0,0 +1,48 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +// [START storage_set_soft_delete_policy] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.time.Duration; + +public class SetSoftDeletePolicy { + public static void setSoftDeletePolicy(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + bucket.toBuilder() + .setSoftDeletePolicy( + BucketInfo.SoftDeletePolicy.newBuilder() + .setRetentionDuration(Duration.ofDays(10)) + .build()) + .build() + .update(); + + System.out.println( + "Soft delete policy for " + bucketName + " was set to a 10-day retention period"); + } +} +// [END storage_set_soft_delete_policy] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheCreate.java b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheCreate.java new file mode 100644 index 000000000000..496b4e38a677 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheCreate.java @@ -0,0 +1,53 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.control.v2; + +// [START storage_control_create_anywhere_cache] +import com.google.api.gax.longrunning.OperationFuture; +import com.google.storage.control.v2.AnywhereCache; +import com.google.storage.control.v2.BucketName; +import com.google.storage.control.v2.CreateAnywhereCacheMetadata; +import com.google.storage.control.v2.CreateAnywhereCacheRequest; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; +import java.util.concurrent.ExecutionException; + +public final class AnywhereCacheCreate { + + public static void anywhereCacheCreate(String bucketName, String cacheName, String zoneName) + throws InterruptedException, ExecutionException, IOException { + try (StorageControlClient storageControl = StorageControlClient.create()) { + + CreateAnywhereCacheRequest request = + CreateAnywhereCacheRequest.newBuilder() + // Set project to "_" to signify globally scoped bucket + .setParent(BucketName.format("_", bucketName)) + .setAnywhereCache( + AnywhereCache.newBuilder().setName(cacheName).setZone(zoneName).build()) + .build(); + + // Start a long-running operation (LRO). + OperationFuture<AnywhereCache, CreateAnywhereCacheMetadata> operation = + storageControl.createAnywhereCacheAsync(request); + + // Await the LROs completion. 
+ AnywhereCache anywhereCache = operation.get(); + System.out.printf("Created anywhere cache: %s%n", anywhereCache.getName()); + } + } +} +// [END storage_control_create_anywhere_cache] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheDisable.java b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheDisable.java new file mode 100644 index 000000000000..272efc91e50e --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheDisable.java @@ -0,0 +1,40 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.control.v2; + +// [START storage_control_disable_anywhere_cache] + +import com.google.storage.control.v2.AnywhereCache; +import com.google.storage.control.v2.DisableAnywhereCacheRequest; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; + +public final class AnywhereCacheDisable { + + public static void anywhereCacheDisable(String cacheName) throws IOException { + try (StorageControlClient storageControl = StorageControlClient.create()) { + + DisableAnywhereCacheRequest request = + DisableAnywhereCacheRequest.newBuilder().setName(cacheName).build(); + + AnywhereCache anywhereCache = storageControl.disableAnywhereCache(request); + + System.out.printf("Disabled anywhere cache: %s%n", anywhereCache.getName()); + } + } +} +// [END storage_control_disable_anywhere_cache] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheGet.java b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheGet.java new file mode 100644 index 000000000000..917dd3f2c16a --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheGet.java @@ -0,0 +1,40 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.control.v2; + +// [START storage_control_get_anywhere_cache] + +import com.google.storage.control.v2.AnywhereCache; +import com.google.storage.control.v2.GetAnywhereCacheRequest; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; + +public final class AnywhereCacheGet { + + public static void anywhereCacheGet(String cacheName) throws IOException { + try (StorageControlClient storageControl = StorageControlClient.create()) { + + GetAnywhereCacheRequest request = + GetAnywhereCacheRequest.newBuilder().setName(cacheName).build(); + + AnywhereCache anywhereCache = storageControl.getAnywhereCache(request); + + System.out.printf("Got anywhere cache: %s%n", anywhereCache.getName()); + } + } +} +// [END storage_control_get_anywhere_cache] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheList.java b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheList.java new file mode 100644 index 000000000000..70c0f1271df3 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheList.java @@ -0,0 +1,45 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.control.v2; + +// [START storage_control_list_anywhere_caches] + +import com.google.storage.control.v2.AnywhereCache; +import com.google.storage.control.v2.BucketName; +import com.google.storage.control.v2.ListAnywhereCachesRequest; +import com.google.storage.control.v2.StorageControlClient; +import com.google.storage.control.v2.StorageControlClient.ListAnywhereCachesPagedResponse; +import java.io.IOException; + +public final class AnywhereCacheList { + + public static void anywhereCacheList(String bucketName) throws IOException { + try (StorageControlClient storageControl = StorageControlClient.create()) { + + ListAnywhereCachesRequest request = + ListAnywhereCachesRequest.newBuilder() + .setParent(BucketName.format("_", bucketName)) + .build(); + + ListAnywhereCachesPagedResponse page = storageControl.listAnywhereCaches(request); + for (AnywhereCache anywhereCache : page.iterateAll()) { + System.out.println(anywhereCache.getName()); + } + } + } +} +// [END storage_control_list_anywhere_caches] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCachePause.java b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCachePause.java new file mode 100644 index 000000000000..bf4a20d75fa9 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCachePause.java @@ -0,0 +1,40 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.control.v2; + +// [START storage_control_pause_anywhere_cache] + +import com.google.storage.control.v2.AnywhereCache; +import com.google.storage.control.v2.PauseAnywhereCacheRequest; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; + +public final class AnywhereCachePause { + + public static void anywhereCachePause(String cacheName) throws IOException { + try (StorageControlClient storageControl = StorageControlClient.create()) { + + PauseAnywhereCacheRequest request = + PauseAnywhereCacheRequest.newBuilder().setName(cacheName).build(); + + AnywhereCache anywhereCache = storageControl.pauseAnywhereCache(request); + + System.out.printf("Paused anywhere cache: %s%n", anywhereCache.getName()); + } + } +} +// [END storage_control_pause_anywhere_cache] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheResume.java b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheResume.java new file mode 100644 index 000000000000..db02a6f3d9ce --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheResume.java @@ -0,0 +1,40 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.control.v2; + +// [START storage_control_resume_anywhere_cache] + +import com.google.storage.control.v2.AnywhereCache; +import com.google.storage.control.v2.ResumeAnywhereCacheRequest; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; + +public final class AnywhereCacheResume { + + public static void anywhereCacheResume(String cacheName) throws IOException { + try (StorageControlClient storageControl = StorageControlClient.create()) { + + ResumeAnywhereCacheRequest request = + ResumeAnywhereCacheRequest.newBuilder().setName(cacheName).build(); + + AnywhereCache anywhereCache = storageControl.resumeAnywhereCache(request); + + System.out.printf("Resumed anywhere cache: %s%n", anywhereCache.getName()); + } + } +} +// [END storage_control_resume_anywhere_cache] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheUpdate.java b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheUpdate.java new file mode 100644 index 000000000000..558d66db6b44 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/AnywhereCacheUpdate.java @@ -0,0 +1,55 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.control.v2; + +// [START storage_control_update_anywhere_cache] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.protobuf.FieldMask; +import com.google.storage.control.v2.AnywhereCache; +import com.google.storage.control.v2.StorageControlClient; +import com.google.storage.control.v2.UpdateAnywhereCacheMetadata; +import com.google.storage.control.v2.UpdateAnywhereCacheRequest; +import java.io.IOException; +import java.util.concurrent.ExecutionException; + +public final class AnywhereCacheUpdate { + + public static void anywhereCacheUpdate(String cacheName, String admissionPolicy) + throws InterruptedException, ExecutionException, IOException { + try (StorageControlClient storageControl = StorageControlClient.create()) { + + AnywhereCache pendingUpdate = + AnywhereCache.newBuilder().setName(cacheName).setAdmissionPolicy(admissionPolicy).build(); + + UpdateAnywhereCacheRequest request = + UpdateAnywhereCacheRequest.newBuilder() + .setAnywhereCache(pendingUpdate) + .setUpdateMask(FieldMask.newBuilder().addPaths("admission_policy").build()) + .build(); + + // Start a long-running operation (LRO). + OperationFuture<AnywhereCache, UpdateAnywhereCacheMetadata> operation = + storageControl.updateAnywhereCacheAsync(request); + + // Await the LROs completion. 
+ AnywhereCache updatedAnywhereCache = operation.get(); + System.out.printf("Updated anywhere cache: %s%n", updatedAnywhereCache.getName()); + } + } +} +// [END storage_control_update_anywhere_cache] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/CreateFolder.java b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/CreateFolder.java new file mode 100644 index 000000000000..6bfc6609275b --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/CreateFolder.java @@ -0,0 +1,50 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.control.v2; + +// [START storage_control_create_folder] +import com.google.storage.control.v2.BucketName; +import com.google.storage.control.v2.CreateFolderRequest; +import com.google.storage.control.v2.Folder; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; + +public final class CreateFolder { + + public static void createFolder(String bucketName, String folderName) throws IOException { + // The name of the bucket + // String bucketName = "your-unique-bucket-name"; + + // The name of the folder within the bucket + // String folderName = "your-unique-folder-name"; + + try (StorageControlClient storageControl = StorageControlClient.create()) { + + CreateFolderRequest request = + CreateFolderRequest.newBuilder() + // Set project to "_" to signify globally scoped bucket + .setParent(BucketName.format("_", bucketName)) + .setFolderId(folderName) + .build(); + + Folder newFolder = storageControl.createFolder(request); + + System.out.printf("Created folder: %s%n", newFolder.getName()); + } + } +} +// [END storage_control_create_folder] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/CreateHierarchicalNamespaceBucket.java b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/CreateHierarchicalNamespaceBucket.java new file mode 100644 index 000000000000..f6d0868c26bf --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/CreateHierarchicalNamespaceBucket.java @@ -0,0 +1,54 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.control.v2; + +// [START storage_create_bucket_hierarchical_namespace] +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.HierarchicalNamespace; +import com.google.cloud.storage.BucketInfo.IamConfiguration; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public final class CreateHierarchicalNamespaceBucket { + + public static void createHierarchicalNamespaceBucket(String projectId, String bucketName) + throws Exception { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID to give your GCS bucket + // String bucketName = "your-unique-bucket-name"; + StorageOptions storageOptions = StorageOptions.newBuilder().setProjectId(projectId).build(); + try (Storage storage = storageOptions.getService()) { + + BucketInfo bucketInfo = + BucketInfo.newBuilder(bucketName) + .setIamConfiguration( + // Hierarchical namespace buckets must use uniform bucket-level access. 
+ IamConfiguration.newBuilder().setIsUniformBucketLevelAccessEnabled(true).build()) + .setHierarchicalNamespace(HierarchicalNamespace.newBuilder().setEnabled(true).build()) + .build(); + + Bucket bucket = storage.create(bucketInfo); + + System.out.printf( + "Created bucket %s with Hierarchical Namespace enabled.%n", bucket.getName()); + } + } +} +// [END storage_create_bucket_hierarchical_namespace] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/DeleteFolder.java b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/DeleteFolder.java new file mode 100644 index 000000000000..187a682f1a02 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/DeleteFolder.java @@ -0,0 +1,48 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.control.v2; + +// [START storage_control_delete_folder] + +import com.google.storage.control.v2.DeleteFolderRequest; +import com.google.storage.control.v2.FolderName; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; + +public final class DeleteFolder { + + public static void deleteFolder(String bucketName, String folderName) throws IOException { + // The name of the bucket + // String bucketName = "your-unique-bucket-name"; + + // The name of the folder within the bucket + // String folderName = "your-unique-folder-name"; + + try (StorageControlClient storageControl = StorageControlClient.create()) { + + // Set project to "_" to signify globally scoped bucket + String folderResourceName = FolderName.format("_", bucketName, folderName); + DeleteFolderRequest request = + DeleteFolderRequest.newBuilder().setName(folderResourceName).build(); + + storageControl.deleteFolder(request); + + System.out.printf("Deleted folder: %s%n", folderResourceName); + } + } +} +// [END storage_control_delete_folder] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/GetFolder.java b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/GetFolder.java new file mode 100644 index 000000000000..454ed7b3eced --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/GetFolder.java @@ -0,0 +1,50 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.control.v2; + +// [START storage_control_get_folder] + +import com.google.storage.control.v2.Folder; +import com.google.storage.control.v2.FolderName; +import com.google.storage.control.v2.GetFolderRequest; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; + +public final class GetFolder { + + public static void getFolder(String bucketName, String folderName) throws IOException { + // The name of the bucket + // String bucketName = "your-unique-bucket-name"; + + // The name of the folder within the bucket + // String folderName = "your-unique-folder-name"; + + try (StorageControlClient storageControl = StorageControlClient.create()) { + + GetFolderRequest request = + GetFolderRequest.newBuilder() + // Set project to "_" to signify globally scoped bucket + .setName(FolderName.format("_", bucketName, folderName)) + .build(); + + Folder newFolder = storageControl.getFolder(request); + + System.out.printf("Got folder: %s%n", newFolder.getName()); + } + } +} +// [END storage_control_get_folder] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/ListFolders.java b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/ListFolders.java new file mode 100644 index 000000000000..2778213e7cd5 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/ListFolders.java @@ -0,0 +1,48 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.control.v2; + +// [START storage_control_list_folders] + +import com.google.storage.control.v2.BucketName; +import com.google.storage.control.v2.Folder; +import com.google.storage.control.v2.ListFoldersRequest; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; + +public final class ListFolders { + + public static void listFolders(String bucketName) throws IOException { + // The name of the bucket + // String bucketName = "your-unique-bucket-name"; + + try (StorageControlClient storageControl = StorageControlClient.create()) { + + ListFoldersRequest request = + ListFoldersRequest.newBuilder() + // Set project to "_" to signify globally scoped bucket + .setParent(BucketName.format("_", bucketName)) + .build(); + + Iterable<Folder> folders = storageControl.listFolders(request).iterateAll(); + for (Folder folder : folders) { + System.out.printf("Found folder: %s%n", folder.getName()); + } + } + } +} +// [END storage_control_list_folders] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/RenameFolder.java b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/RenameFolder.java new file mode 100644 index 000000000000..e8f95b6f5f72 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/control/v2/RenameFolder.java @@ -0,0 +1,66 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.control.v2; + +// [START storage_control_rename_folder] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.storage.control.v2.Folder; +import com.google.storage.control.v2.FolderName; +import com.google.storage.control.v2.RenameFolderMetadata; +import com.google.storage.control.v2.RenameFolderRequest; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public final class RenameFolder { + + public static void renameFolder( + String bucketName, String sourceFolderName, String destinationFolderName) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // The name of the bucket + // String bucketName = "your-unique-bucket-name"; + + // The name of the folder within the bucket + // String sourceFolderName = "your-unique-source-folder-name"; + + // The new name of the folder within the bucket + // String destinationFolderName = "your-unique-destination-folder-name"; + + try (StorageControlClient storageControl = StorageControlClient.create()) { + + // Set project to "_" to signify globally scoped bucket + String sourceFolderResourceName = FolderName.format("_", bucketName, sourceFolderName); + RenameFolderRequest request = + RenameFolderRequest.newBuilder() + .setName(sourceFolderResourceName) + .setDestinationFolderId(destinationFolderName) + .build(); + + OperationFuture<Folder, RenameFolderMetadata> renameOperation =
+ storageControl.renameFolderAsync(request); + + Folder destinationFolder = renameOperation.get(30, TimeUnit.SECONDS); + + System.out.printf( + "Renamed folder from %s to %s%n", sourceFolderResourceName, destinationFolder.getName()); + } + } +} +// [END storage_control_rename_folder] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/ActivateHmacKey.java b/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/ActivateHmacKey.java new file mode 100644 index 000000000000..9a23e391615e --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/ActivateHmacKey.java @@ -0,0 +1,54 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.hmac; + +// [START storage_activate_hmac_key] + +import com.google.cloud.storage.HmacKey; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import java.util.Date; + +public class ActivateHmacKey { + public static void activateHmacKey(String accessId, String projectId) throws StorageException { + // The access ID of the HMAC key. + // String accessId = "GOOG0234230X00"; + + // The ID of the project to which the service account belongs. 
+ // String projectId = "project-id"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + HmacKey.HmacKeyMetadata metadata = + storage.getHmacKey(accessId, Storage.GetHmacKeyOption.projectId(projectId)); + HmacKey.HmacKeyMetadata newMetadata = + storage.updateHmacKeyState(metadata, HmacKey.HmacKeyState.ACTIVE); + + System.out.println("The HMAC key is now active."); + System.out.println("The HMAC key metadata is:"); + System.out.println("ID: " + newMetadata.getId()); + System.out.println("Access ID: " + newMetadata.getAccessId()); + System.out.println("Project ID: " + newMetadata.getProjectId()); + System.out.println("Service Account Email: " + newMetadata.getServiceAccount().getEmail()); + System.out.println("State: " + newMetadata.getState().toString()); + System.out.println("Time Created: " + new Date(newMetadata.getCreateTime()).toString()); + System.out.println("Time Updated: " + new Date(newMetadata.getUpdateTime()).toString()); + System.out.println("ETag: " + newMetadata.getEtag()); + } +} +// [END storage_activate_hmac_key] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/CreateHmacKey.java b/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/CreateHmacKey.java new file mode 100644 index 000000000000..5c729a8dd178 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/CreateHmacKey.java @@ -0,0 +1,60 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.hmac; + +// [START storage_create_hmac_key] + +import com.google.cloud.storage.HmacKey; +import com.google.cloud.storage.ServiceAccount; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import java.util.Date; + +public class CreateHmacKey { + public static void createHmacKey(String serviceAccountEmail, String projectId) + throws StorageException { + + // The service account email for which the new HMAC key will be created. + // String serviceAccountEmail = "service-account@iam.gserviceaccount.com"; + + // The ID of the project to which the service account belongs. + // String projectId = "project-id"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + ServiceAccount account = ServiceAccount.of(serviceAccountEmail); + HmacKey hmacKey = + storage.createHmacKey(account, Storage.CreateHmacKeyOption.projectId(projectId)); + + String secret = hmacKey.getSecretKey(); + HmacKey.HmacKeyMetadata metadata = hmacKey.getMetadata(); + + System.out.println("The Base64 encoded secret is: " + secret); + System.out.println("Do not lose that secret, there is no API to recover it."); + System.out.println("The HMAC key metadata is:"); + System.out.println("ID: " + metadata.getId()); + System.out.println("Access ID: " + metadata.getAccessId()); + System.out.println("Project ID: " + metadata.getProjectId()); + System.out.println("Service Account Email: " + metadata.getServiceAccount().getEmail()); + System.out.println("State: " + metadata.getState().toString()); + System.out.println("Time Created: " + new Date(metadata.getCreateTime()).toString()); + System.out.println("Time Updated: " + new Date(metadata.getUpdateTime()).toString()); + System.out.println("ETag: " + metadata.getEtag()); + } +} +// [END 
storage_create_hmac_key] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/DeactivateHmacKey.java b/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/DeactivateHmacKey.java new file mode 100644 index 000000000000..129cedf9e4ee --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/DeactivateHmacKey.java @@ -0,0 +1,54 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.hmac; + +// [START storage_deactivate_hmac_key] + +import com.google.cloud.storage.HmacKey; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import java.util.Date; + +public class DeactivateHmacKey { + public static void deactivateHmacKey(String accessId, String projectId) throws StorageException { + // The access ID of the HMAC key. + // String accessId = "GOOG0234230X00"; + + // The ID of the project to which the service account belongs. 
+ // String projectId = "project-id"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + HmacKey.HmacKeyMetadata metadata = + storage.getHmacKey(accessId, Storage.GetHmacKeyOption.projectId(projectId)); + HmacKey.HmacKeyMetadata newMetadata = + storage.updateHmacKeyState(metadata, HmacKey.HmacKeyState.INACTIVE); + + System.out.println("The HMAC key is now inactive."); + System.out.println("The HMAC key metadata is:"); + System.out.println("ID: " + newMetadata.getId()); + System.out.println("Access ID: " + newMetadata.getAccessId()); + System.out.println("Project ID: " + newMetadata.getProjectId()); + System.out.println("Service Account Email: " + newMetadata.getServiceAccount().getEmail()); + System.out.println("State: " + newMetadata.getState().toString()); + System.out.println("Time Created: " + new Date(newMetadata.getCreateTime()).toString()); + System.out.println("Time Updated: " + new Date(newMetadata.getUpdateTime()).toString()); + System.out.println("ETag: " + newMetadata.getEtag()); + } +} +// [END storage_deactivate_hmac_key] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/DeleteHmacKey.java b/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/DeleteHmacKey.java new file mode 100644 index 000000000000..afd599f6e2da --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/DeleteHmacKey.java @@ -0,0 +1,46 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.hmac; + +// [START storage_delete_hmac_key] + +import com.google.cloud.storage.HmacKey; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class DeleteHmacKey { + public static void deleteHmacKey(String accessId, String projectId) throws StorageException { + + // The access ID of the HMAC key. + // String accessId = "GOOG0234230X00"; + + // The ID of the project to which the service account belongs. + // String projectId = "project-id"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + HmacKey.HmacKeyMetadata metadata = + storage.getHmacKey(accessId, Storage.GetHmacKeyOption.projectId(projectId)); + storage.deleteHmacKey(metadata); + + System.out.println( + "The key is deleted, though it will still appear in " + + "getHmacKeys() results if called with showDeletedKey."); + } +} +// [END storage_delete_hmac_key] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/GetHmacKey.java b/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/GetHmacKey.java new file mode 100644 index 000000000000..eea8fe22b5c7 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/GetHmacKey.java @@ -0,0 +1,51 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.hmac; + +// [START storage_get_hmac_key] + +import com.google.cloud.storage.HmacKey; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import java.util.Date; + +public class GetHmacKey { + public static void getHmacKey(String accessId, String projectId) throws StorageException { + // The access ID of the HMAC key. + // String accessId = "GOOG0234230X00"; + + // The ID of the project to which the service account belongs. + // String projectId = "project-id"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + HmacKey.HmacKeyMetadata metadata = + storage.getHmacKey(accessId, Storage.GetHmacKeyOption.projectId(projectId)); + + System.out.println("The HMAC key metadata is:"); + System.out.println("ID: " + metadata.getId()); + System.out.println("Access ID: " + metadata.getAccessId()); + System.out.println("Project ID: " + metadata.getProjectId()); + System.out.println("Service Account Email: " + metadata.getServiceAccount().getEmail()); + System.out.println("State: " + metadata.getState().toString()); + System.out.println("Time Created: " + new Date(metadata.getCreateTime()).toString()); + System.out.println("Time Updated: " + new Date(metadata.getUpdateTime()).toString()); + System.out.println("ETag: " + metadata.getEtag()); + } +} +// [END storage_get_hmac_key] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/ListHmacKeys.java b/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/ListHmacKeys.java new file mode 100644 index 000000000000..538fc9590966 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/hmac/ListHmacKeys.java @@ -0,0 +1,43 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.hmac; + +// [START storage_list_hmac_keys] + +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.HmacKey; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class ListHmacKeys { + public static void listHmacKeys(String projectId) throws StorageException { + // The ID of the project to which the service account belongs. 
+ // String projectId = "project-id"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + Page<HmacKey.HmacKeyMetadata> page = + storage.listHmacKeys(Storage.ListHmacKeysOption.projectId(projectId)); + + for (HmacKey.HmacKeyMetadata metadata : page.iterateAll()) { + System.out.println("Service Account Email: " + metadata.getServiceAccount().getEmail()); + System.out.println("Access ID: " + metadata.getAccessId()); + } + } +} +// [END storage_list_hmac_keys] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/managedfolders/CreateManagedFolder.java b/java-storage/samples/snippets/src/main/java/com/example/storage/managedfolders/CreateManagedFolder.java new file mode 100644 index 000000000000..23744ed6e7f2 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/managedfolders/CreateManagedFolder.java @@ -0,0 +1,44 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.example.storage.managedfolders; + +// [START storage_control_managed_folder_create] + +import com.google.storage.control.v2.BucketName; +import com.google.storage.control.v2.CreateManagedFolderRequest; +import com.google.storage.control.v2.ManagedFolder; +import com.google.storage.control.v2.StorageControlClient; + +public class CreateManagedFolder { + public static void managedFolderCreate(String bucketName, String managedFolderId) + throws Exception { + + // Instantiates a client in a try-with-resource to automatically cleanup underlying resources + try (StorageControlClient storageControlClient = StorageControlClient.create()) { + CreateManagedFolderRequest request = + CreateManagedFolderRequest.newBuilder() + // Set project to "_" to signify global bucket + .setParent(BucketName.format("_", bucketName)) + .setManagedFolder(ManagedFolder.newBuilder().build()) + .setManagedFolderId(managedFolderId) + .build(); + String response = storageControlClient.createManagedFolder(request).getName(); + System.out.printf("Performed createManagedFolder request for %s%n", response); + } + } +} +// [END storage_control_managed_folder_create] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/managedfolders/DeleteManagedFolder.java b/java-storage/samples/snippets/src/main/java/com/example/storage/managedfolders/DeleteManagedFolder.java new file mode 100644 index 000000000000..b72640c193aa --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/managedfolders/DeleteManagedFolder.java @@ -0,0 +1,46 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.managedfolders; + +// [START storage_control_managed_folder_delete] +import com.google.storage.control.v2.BucketName; +import com.google.storage.control.v2.DeleteManagedFolderRequest; +import com.google.storage.control.v2.ManagedFolderName; +import com.google.storage.control.v2.StorageControlClient; + +class DeleteManagedFolder { + public static void managedFolderDelete(String bucketName, String managedFolderId) + throws Exception { + // Instantiates a client in a try-with-resource to automatically cleanup underlying resources + try (StorageControlClient storageControlClient = StorageControlClient.create()) { + // Set project to "_" to signify global bucket + BucketName resourceBucketName = BucketName.of("_", bucketName); + DeleteManagedFolderRequest deleteManagedFolderRequest = + DeleteManagedFolderRequest.newBuilder() + .setName( + ManagedFolderName.format( + resourceBucketName.getProject(), + resourceBucketName.getBucket(), + managedFolderId)) + .build(); + storageControlClient.deleteManagedFolder(deleteManagedFolderRequest); + System.out.printf("Deleted Managed Folder %s%n", managedFolderId); + } + } +} + +// [END storage_control_managed_folder_delete] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/managedfolders/GetManagedFolder.java b/java-storage/samples/snippets/src/main/java/com/example/storage/managedfolders/GetManagedFolder.java new file mode 100644 index 000000000000..683444c1f7f0 --- /dev/null +++ 
b/java-storage/samples/snippets/src/main/java/com/example/storage/managedfolders/GetManagedFolder.java @@ -0,0 +1,48 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.managedfolders; + +// [START storage_control_managed_folder_get] + +import com.google.storage.control.v2.BucketName; +import com.google.storage.control.v2.GetManagedFolderRequest; +import com.google.storage.control.v2.ManagedFolder; +import com.google.storage.control.v2.ManagedFolderName; +import com.google.storage.control.v2.StorageControlClient; + +class GetManagedFolder { + + public static void managedFolderGet(String bucketName, String managedFolderId) throws Exception { + // Instantiates a client in a try-with-resource to automatically cleanup underlying resources + try (StorageControlClient storageControlClient = StorageControlClient.create()) { + // Set project to "_" to signify global bucket + BucketName resourceBucketName = BucketName.of("_", bucketName); + GetManagedFolderRequest getManagedFolderRequest = + GetManagedFolderRequest.newBuilder() + .setName( + ManagedFolderName.format( + resourceBucketName.getProject(), + resourceBucketName.getBucket(), + managedFolderId)) + .build(); + ManagedFolder managedFolder = storageControlClient.getManagedFolder(getManagedFolderRequest); + System.out.printf("Got Managed Folder %s%n", managedFolder.getName()); + } + } +} + +// [END storage_control_managed_folder_get] diff 
--git a/java-storage/samples/snippets/src/main/java/com/example/storage/managedfolders/ListManagedFolders.java b/java-storage/samples/snippets/src/main/java/com/example/storage/managedfolders/ListManagedFolders.java new file mode 100644 index 000000000000..bb7ce977c340 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/managedfolders/ListManagedFolders.java @@ -0,0 +1,45 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.managedfolders; + +// [START storage_control_managed_folder_list] + +import com.google.storage.control.v2.BucketName; +import com.google.storage.control.v2.ListManagedFoldersRequest; +import com.google.storage.control.v2.ManagedFolder; +import com.google.storage.control.v2.StorageControlClient; + +class ListManagedFolders { + + public static void managedFolderList(String bucketName) throws Exception { + // Instantiates a client in a try-with-resource to automatically cleanup underlying resources + try (StorageControlClient storageControlClient = StorageControlClient.create()) { + ListManagedFoldersRequest listManagedFoldersRequest = + ListManagedFoldersRequest.newBuilder() + // Set project to "_" to signify global bucket + .setParent(BucketName.format("_", bucketName)) + .build(); + Iterable<ManagedFolder> managedFolders = + storageControlClient.listManagedFolders(listManagedFoldersRequest).iterateAll(); + for (ManagedFolder folder : managedFolders) { + System.out.printf("%s bucket has managed folder %s%n", bucketName, folder.getName()); + } + } + } +} + +// [END storage_control_managed_folder_list] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/AbortMultipartUpload.java b/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/AbortMultipartUpload.java new file mode 100644 index 000000000000..92a5f7f9db1e --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/AbortMultipartUpload.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.multipartupload; + +// [START storage_abort_multipart_upload] + +import com.google.cloud.storage.HttpStorageOptions; +import com.google.cloud.storage.MultipartUploadClient; +import com.google.cloud.storage.MultipartUploadSettings; +import com.google.cloud.storage.multipartupload.model.AbortMultipartUploadRequest; + +public class AbortMultipartUpload { + public static void abortMultipartUpload( + String projectId, String bucketName, String objectName, String uploadId) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The ID of the multipart upload + // String uploadId = "your-upload-id"; + + HttpStorageOptions storageOptions = + HttpStorageOptions.newBuilder().setProjectId(projectId).build(); + MultipartUploadSettings mpuSettings = MultipartUploadSettings.of(storageOptions); + MultipartUploadClient mpuClient = MultipartUploadClient.create(mpuSettings); + + System.out.println("Aborting multipart upload: " + uploadId); + AbortMultipartUploadRequest abortRequest = + AbortMultipartUploadRequest.builder() + .bucket(bucketName) + .key(objectName) + .uploadId(uploadId) + .build(); + + mpuClient.abortMultipartUpload(abortRequest); + + System.out.println("Multipart upload with ID " + uploadId + " has been successfully aborted."); + } +} +// [END storage_abort_multipart_upload] diff --git 
a/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/CompleteMultipartUpload.java b/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/CompleteMultipartUpload.java new file mode 100644 index 000000000000..5daa9ebe53cb --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/CompleteMultipartUpload.java @@ -0,0 +1,82 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.multipartupload; + +// [START storage_complete_multipart_upload] + +import com.google.cloud.storage.HttpStorageOptions; +import com.google.cloud.storage.MultipartUploadClient; +import com.google.cloud.storage.MultipartUploadSettings; +import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CompleteMultipartUploadResponse; +import com.google.cloud.storage.multipartupload.model.CompletedMultipartUpload; +import com.google.cloud.storage.multipartupload.model.CompletedPart; +import java.util.List; + +public class CompleteMultipartUpload { + public static void completeMultipartUpload( + String projectId, + String bucketName, + String objectName, + String uploadId, + List<CompletedPart> completedParts) { + + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The ID of the multipart upload + // String uploadId = "your-upload-id"; + + // The list of completed parts from the UploadPart responses. 
+ // List<CompletedPart> completedParts = ...; + + HttpStorageOptions storageOptions = + HttpStorageOptions.newBuilder().setProjectId(projectId).build(); + MultipartUploadSettings mpuSettings = MultipartUploadSettings.of(storageOptions); + MultipartUploadClient mpuClient = MultipartUploadClient.create(mpuSettings); + + System.out.println("Completing multipart upload for " + objectName); + + CompletedMultipartUpload completedMultipartUpload = + CompletedMultipartUpload.builder().parts(completedParts).build(); + + CompleteMultipartUploadRequest completeRequest = + CompleteMultipartUploadRequest.builder() + .bucket(bucketName) + .key(objectName) + .uploadId(uploadId) + .multipartUpload(completedMultipartUpload) + .build(); + + CompleteMultipartUploadResponse completeResponse = + mpuClient.completeMultipartUpload(completeRequest); + + System.out.println( + "Upload complete for " + + completeResponse.key() + + " in bucket " + + completeResponse.bucket()); + System.out.println("Final ETag: " + completeResponse.etag()); + } +} +// [END storage_complete_multipart_upload] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/CreateMultipartUpload.java b/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/CreateMultipartUpload.java new file mode 100644 index 000000000000..b00c34868c3e --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/CreateMultipartUpload.java @@ -0,0 +1,63 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.multipartupload; + +// [START storage_create_multipart_upload] + +import com.google.cloud.storage.HttpStorageOptions; +import com.google.cloud.storage.MultipartUploadClient; +import com.google.cloud.storage.MultipartUploadSettings; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadRequest; +import com.google.cloud.storage.multipartupload.model.CreateMultipartUploadResponse; +import java.util.HashMap; +import java.util.Map; + +public class CreateMultipartUpload { + public static void createMultipartUpload(String projectId, String bucketName, String objectName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + HttpStorageOptions storageOptions = + HttpStorageOptions.newBuilder().setProjectId(projectId).build(); + MultipartUploadSettings mpuSettings = MultipartUploadSettings.of(storageOptions); + MultipartUploadClient mpuClient = MultipartUploadClient.create(mpuSettings); + + System.out.println("Initiating multipart upload for " + objectName); + + Map<String, String> metadata = new HashMap<>(); + metadata.put("key1", "value1"); + String contentType = "text/plain"; + CreateMultipartUploadRequest createRequest = + CreateMultipartUploadRequest.builder() + .bucket(bucketName) + .key(objectName) + .metadata(metadata) + .contentType(contentType) + .build(); + + CreateMultipartUploadResponse createResponse = mpuClient.createMultipartUpload(createRequest); + String uploadId = createResponse.uploadId(); + System.out.println("Upload ID: " + uploadId); + } +} +// [END storage_create_multipart_upload] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/ListParts.java 
b/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/ListParts.java new file mode 100644 index 000000000000..fb1acee86ca0 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/ListParts.java @@ -0,0 +1,69 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.multipartupload; + +// [START storage_list_parts] + +import com.google.cloud.storage.HttpStorageOptions; +import com.google.cloud.storage.MultipartUploadClient; +import com.google.cloud.storage.MultipartUploadSettings; +import com.google.cloud.storage.multipartupload.model.ListPartsRequest; +import com.google.cloud.storage.multipartupload.model.ListPartsResponse; +import com.google.cloud.storage.multipartupload.model.Part; + +public class ListParts { + public static void listParts( + String projectId, String bucketName, String objectName, String uploadId) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The ID of the multipart upload + // String uploadId = "your-upload-id"; + + HttpStorageOptions storageOptions = + HttpStorageOptions.newBuilder().setProjectId(projectId).build(); + MultipartUploadSettings mpuSettings = MultipartUploadSettings.of(storageOptions); + 
MultipartUploadClient mpuClient = MultipartUploadClient.create(mpuSettings); + + System.out.println("Listing parts for upload ID: " + uploadId); + + ListPartsRequest listPartsRequest = + ListPartsRequest.builder().bucket(bucketName).key(objectName).uploadId(uploadId).build(); + + ListPartsResponse listPartsResponse = mpuClient.listParts(listPartsRequest); + + if (listPartsResponse.parts() == null || listPartsResponse.parts().isEmpty()) { + System.out.println("No parts have been uploaded yet."); + return; + } + + System.out.println("Uploaded Parts:"); + for (Part part : listPartsResponse.parts()) { + System.out.println(" - Part Number: " + part.partNumber()); + System.out.println(" ETag: " + part.eTag()); + System.out.println(" Size: " + part.size() + " bytes"); + System.out.println(" Last Modified: " + part.lastModified()); + } + } +} +// [END storage_list_parts] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/UploadPart.java b/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/UploadPart.java new file mode 100644 index 000000000000..c1d90055d5b2 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/multipartupload/UploadPart.java @@ -0,0 +1,74 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.multipartupload; + +// [START storage_upload_part] + +import com.google.cloud.storage.HttpStorageOptions; +import com.google.cloud.storage.MultipartUploadClient; +import com.google.cloud.storage.MultipartUploadSettings; +import com.google.cloud.storage.RequestBody; +import com.google.cloud.storage.multipartupload.model.UploadPartRequest; +import com.google.cloud.storage.multipartupload.model.UploadPartResponse; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Random; + +public class UploadPart { + public static void uploadPart( + String projectId, String bucketName, String objectName, String uploadId, int partNumber) + throws IOException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The ID of the multipart upload + // String uploadId = "your-upload-id"; + + // The part number of the part being uploaded + // int partNumber = 1; + + HttpStorageOptions storageOptions = + HttpStorageOptions.newBuilder().setProjectId(projectId).build(); + MultipartUploadSettings mpuSettings = MultipartUploadSettings.of(storageOptions); + MultipartUploadClient mpuClient = MultipartUploadClient.create(mpuSettings); + + // The minimum part size for a multipart upload is 5 MiB, except for the last part. 
+ byte[] bytes = new byte[5 * 1024 * 1024]; + new Random().nextBytes(bytes); + RequestBody requestBody = RequestBody.of(ByteBuffer.wrap(bytes)); + + System.out.println("Uploading part " + partNumber); + UploadPartRequest uploadPartRequest = + UploadPartRequest.builder() + .bucket(bucketName) + .key(objectName) + .partNumber(partNumber) + .uploadId(uploadId) + .build(); + + UploadPartResponse uploadPartResponse = mpuClient.uploadPart(uploadPartRequest, requestBody); + + System.out.println("Part " + partNumber + " uploaded with ETag: " + uploadPartResponse.eTag()); + } +} +// [END storage_upload_part] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/AddBlobOwner.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/AddBlobOwner.java new file mode 100644 index 000000000000..75c4c88d53c7 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/AddBlobOwner.java @@ -0,0 +1,59 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_add_file_owner] + +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class AddBlobOwner { + + public static void addBlobOwner( + String projectId, String bucketName, String userEmail, String blobName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // Email of the user you wish to add as a file owner + // String userEmail = "someuser@domain.com" + + // The name of the blob/file that you wish to modify permissions on + // String blobName = "your-blob-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Blob blob = storage.get(BlobId.of(bucketName, blobName)); + Acl newOwner = Acl.of(new User(userEmail), Role.OWNER); + + blob.createAcl(newOwner); + System.out.println( + "Added user " + + userEmail + + " as an owner on blob " + + blobName + + " in bucket " + + bucketName); + } +} +// [END storage_add_file_owner] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/AtomicMoveObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/AtomicMoveObject.java new file mode 100644 index 000000000000..a4f0d9cdd41b --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/AtomicMoveObject.java @@ -0,0 +1,80 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_move_object] + +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.MoveBlobRequest; +import com.google.cloud.storage.StorageOptions; + +public final class AtomicMoveObject { + + public static void moveObject( + String projectId, String bucketName, String sourceObjectName, String targetObjectName) { + + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String sourceObjectName = "your-object-name"; + + // The ID of your GCS object + // String targetObjectName = "your-new-object-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + BlobId source = BlobId.of(bucketName, sourceObjectName); + BlobId target = BlobId.of(bucketName, targetObjectName); + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request returns a 412 error if the + // preconditions are not met. + Storage.BlobTargetOption precondition; + BlobInfo existingTarget = storage.get(target); + if (existingTarget == null) { + // For a target object that does not yet exist, set the DoesNotExist precondition. + // This will cause the request to fail if the object is created before the request runs. 
+ precondition = Storage.BlobTargetOption.doesNotExist(); + } else { + // If the destination already exists in your bucket, instead set a generation-match + // precondition. This will cause the request to fail if the existing object's generation + // changes before the request runs. + precondition = Storage.BlobTargetOption.generationMatch(existingTarget.getGeneration()); + } + + // Atomically move source object to target object within the bucket + MoveBlobRequest moveBlobRequest = + MoveBlobRequest.newBuilder() + .setSource(source) + .setTarget(target) + .setTargetOptions(precondition) + .build(); + BlobInfo movedBlob = storage.moveBlob(moveBlobRequest); + + System.out.println( + "Moved object " + + source.toGsUtilUri() + + " to " + + movedBlob.getBlobId().toGsUtilUriWithGeneration()); + } +} +// [END storage_move_object] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/BatchSetObjectMetadata.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/BatchSetObjectMetadata.java new file mode 100644 index 000000000000..90c6a136f007 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/BatchSetObjectMetadata.java @@ -0,0 +1,97 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_batch_request] +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageBatch; +import com.google.cloud.storage.StorageBatchResult; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +public class BatchSetObjectMetadata { + public static void batchSetObjectMetadata( + String projectId, String bucketName, String pathPrefix) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The directory prefix. All objects in the bucket with this prefix will have their metadata + // updated + // String pathPrefix = "yourPath/"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Map<String, String> newMetadata = new HashMap<>(); + newMetadata.put("keyToAddOrUpdate", "value"); + Page<Blob> blobs = + storage.list( + bucketName, + Storage.BlobListOption.prefix(pathPrefix), + Storage.BlobListOption.delimiter("/")); + StorageBatch batchRequest = storage.batch(); + + // Add all blobs with the given prefix to the batch request + List<StorageBatchResult<Blob>> batchResults = + blobs + .streamAll() + .map(blob -> batchRequest.update(blob.toBuilder().setMetadata(newMetadata).build())) + .collect(Collectors.toList()); + + // Execute the batch request + batchRequest.submit(); + List<StorageException> failures = + batchResults.stream() + .map( + r -> { + try { + BlobInfo blob = r.get(); + return null; + } catch (StorageException e) { + return e; + } + }) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + + System.out.println( + (batchResults.size() - failures.size()) + + " blobs in bucket " + 
+ bucketName + + " with prefix '" + + pathPrefix + + "' had their metadata updated successfully."); + + if (!failures.isEmpty()) { + System.out.println("While processing, there were " + failures.size() + " failures"); + + for (StorageException failure : failures) { + failure.printStackTrace(System.out); + } + } + } +} +// [END storage_batch_request] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/ChangeObjectCsekToKms.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ChangeObjectCsekToKms.java new file mode 100644 index 000000000000..730d320a811d --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ChangeObjectCsekToKms.java @@ -0,0 +1,81 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_object_csek_to_cmek] +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class ChangeObjectCsekToKms { + public static void changeObjectFromCsekToKms( + String projectId, + String bucketName, + String objectName, + String decryptionKey, + String kmsKeyName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The Base64 encoded decryption key, which should be the same key originally used to encrypt + // the object + // String decryptionKey = "TIbv/fjexq+VmtXzAlc63J4z5kFmWJ6NdAPQulQBT7g="; + + // The name of the KMS key to manage this object with + // String kmsKeyName = + // "projects/your-project-id/locations/global/keyRings/your-key-ring/cryptoKeys/your-key"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + BlobId blobId = BlobId.of(bucketName, objectName); + Blob blob = storage.get(blobId); + if (blob == null) { + System.out.println("The object " + objectName + " wasn't found in " + bucketName); + return; + } + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request to upload returns a 412 error if + // the object's generation number does not match your precondition. 
+ Storage.BlobSourceOption precondition = + Storage.BlobSourceOption.generationMatch(blob.getGeneration()); + + Storage.CopyRequest request = + Storage.CopyRequest.newBuilder() + .setSource(blobId) + .setSourceOptions(Storage.BlobSourceOption.decryptionKey(decryptionKey), precondition) + .setTarget(blobId, Storage.BlobTargetOption.kmsKeyName(kmsKeyName)) + .build(); + storage.copy(request); + + System.out.println( + "Object " + + objectName + + " in bucket " + + bucketName + + " is now managed by the KMS key " + + kmsKeyName + + " instead of a customer-supplied encryption key"); + } +} +// [END storage_object_csek_to_cmek] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/ChangeObjectStorageClass.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ChangeObjectStorageClass.java new file mode 100644 index 000000000000..4e2d78d68e60 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ChangeObjectStorageClass.java @@ -0,0 +1,79 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_change_file_storage_class] +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageClass; +import com.google.cloud.storage.StorageOptions; + +public class ChangeObjectStorageClass { + public static void changeObjectStorageClass( + String projectId, String bucketName, String objectName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + BlobId blobId = BlobId.of(bucketName, objectName); + Blob sourceBlob = storage.get(blobId); + if (sourceBlob == null) { + System.out.println("The object " + objectName + " wasn't found in " + bucketName); + return; + } + + // See the StorageClass documentation for other valid storage classes: + // https://googleapis.dev/java/google-cloud-clients/latest/com/google/cloud/storage/StorageClass.html + StorageClass storageClass = StorageClass.COLDLINE; + + // You can't change an object's storage class directly, the only way is to rewrite the object + // with the desired storage class + + BlobInfo targetBlob = BlobInfo.newBuilder(blobId).setStorageClass(storageClass).build(); + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request to upload returns a 412 error if + // the object's generation number does not match your precondition. 
+ Storage.BlobSourceOption precondition = + Storage.BlobSourceOption.generationMatch(sourceBlob.getGeneration()); + + Storage.CopyRequest request = + Storage.CopyRequest.newBuilder() + .setSource(blobId) + .setSourceOptions(precondition) // delete this line to run without preconditions + .setTarget(targetBlob) + .build(); + Blob updatedBlob = storage.copy(request).getResult(); + + System.out.println( + "Object " + + objectName + + " in bucket " + + bucketName + + " had its storage class set to " + + updatedBlob.getStorageClass().name()); + } +} +// [END storage_change_file_storage_class] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/ComposeObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ComposeObject.java new file mode 100644 index 000000000000..a1707ce26ce8 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ComposeObject.java @@ -0,0 +1,86 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_compose_file] +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class ComposeObject { + public static void composeObject( + String bucketName, + String firstObjectName, + String secondObjectName, + String targetObjectName, + String projectId) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of the first GCS object to compose + // String firstObjectName = "your-first-object-name"; + + // The ID of the second GCS object to compose + // String secondObjectName = "your-second-object-name"; + + // The ID to give the new composite object + // String targetObjectName = "new-composite-object-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request returns a 412 error if the + // preconditions are not met. + Storage.BlobTargetOption precondition; + if (storage.get(bucketName, targetObjectName) == null) { + // For a target object that does not yet exist, set the DoesNotExist precondition. + // This will cause the request to fail if the object is created before the request runs. + precondition = Storage.BlobTargetOption.doesNotExist(); + } else { + // If the destination already exists in your bucket, instead set a generation-match + // precondition. This will cause the request to fail if the existing object's generation + // changes before the request runs. 
+ precondition = + Storage.BlobTargetOption.generationMatch( + storage.get(bucketName, targetObjectName).getGeneration()); + } + + Storage.ComposeRequest composeRequest = + Storage.ComposeRequest.newBuilder() + // addSource takes varargs, so you can put as many objects here as you want, up to the + // max of 32 + .addSource(firstObjectName, secondObjectName) + .setTarget(BlobInfo.newBuilder(bucketName, targetObjectName).build()) + .setTargetOptions(precondition) + .build(); + + Blob compositeObject = storage.compose(composeRequest); + + System.out.println( + "New composite object " + + compositeObject.getName() + + " was created by combining " + + firstObjectName + + " and " + + secondObjectName); + } +} +// [END storage_compose_file] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/CopyDeleteObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/CopyDeleteObject.java new file mode 100644 index 000000000000..5caa756053a2 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/CopyDeleteObject.java @@ -0,0 +1,88 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.example.storage.object;
+
+// [START storage_move_file]
+
+import com.google.cloud.storage.Blob;
+import com.google.cloud.storage.BlobId;
+import com.google.cloud.storage.BlobInfo;
+import com.google.cloud.storage.Storage;
+import com.google.cloud.storage.StorageOptions;
+
+public class CopyDeleteObject {
+  /**
+   * Emulates a "move" by copying sourceObjectName to targetObjectName and then deleting the
+   * original. NOTE(review): copy-then-delete is two requests, not an atomic move; if the process
+   * dies between them, both objects exist.
+   */
+  public static void copyDeleteObject(
+      String projectId,
+      String sourceBucketName,
+      String sourceObjectName,
+      String targetBucketName,
+      String targetObjectName) {
+    // The ID of your GCP project
+    // String projectId = "your-project-id";
+
+    // The ID of your GCS bucket
+    // String sourceBucketName = "your-unique-bucket-name";
+
+    // The ID of your GCS object
+    // String sourceObjectName = "your-object-name";
+
+    // The ID of the bucket to move the object objectName to
+    // String targetBucketName = "target-object-bucket";
+
+    // The ID of your GCS object
+    // String targetObjectName = "your-new-object-name";
+
+    Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();
+    BlobId source = BlobId.of(sourceBucketName, sourceObjectName);
+    BlobId target = BlobId.of(targetBucketName, targetObjectName);
+
+    // Optional: set a generation-match precondition to avoid potential race
+    // conditions and data corruptions. The request returns a 412 error if the
+    // preconditions are not met.
+    // The target metadata is fetched once so the existence check and the generation used for
+    // the precondition come from the same snapshot.
+    Storage.BlobTargetOption precondition;
+    BlobInfo existingTarget = storage.get(targetBucketName, targetObjectName);
+    if (existingTarget == null) {
+      // For a target object that does not yet exist, set the DoesNotExist precondition.
+      // This will cause the request to fail if the object is created before the request runs.
+      precondition = Storage.BlobTargetOption.doesNotExist();
+    } else {
+      // If the destination already exists in your bucket, instead set a generation-match
+      // precondition. This will cause the request to fail if the existing object's generation
+      // changes before the request runs.
+      precondition = Storage.BlobTargetOption.generationMatch(existingTarget.getGeneration());
+    }
+
+    // Copy source object to target object
+    storage.copy(
+        Storage.CopyRequest.newBuilder().setSource(source).setTarget(target, precondition).build());
+    Blob copiedObject = storage.get(target);
+    // Delete the original blob now that we've copied to where we want it, finishing the "move"
+    // operation
+    storage.get(source).delete();
+
+    System.out.println(
+        "Moved object "
+            + sourceObjectName
+            + " from bucket "
+            + sourceBucketName
+            + " to "
+            + targetObjectName
+            + " in bucket "
+            + copiedObject.getBucket());
+  }
+}
+// [END storage_move_file]
diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/CopyObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/CopyObject.java
new file mode 100644
index 000000000000..e704cebb55a4
--- /dev/null
+++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/CopyObject.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.storage.object;
+
+// [START storage_copy_file]
+
+import com.google.cloud.storage.BlobId;
+import com.google.cloud.storage.BlobInfo;
+import com.google.cloud.storage.CopyWriter;
+import com.google.cloud.storage.Storage;
+import com.google.cloud.storage.Storage.CopyRequest;
+import com.google.cloud.storage.StorageOptions;
+
+public class CopyObject {
+  /** Copies objectName from sourceBucketName to targetBucketName under the same name. */
+  public static void copyObject(
+      String projectId, String sourceBucketName, String objectName, String targetBucketName)
+      throws Exception {
+    // The ID of your GCP project
+    // String projectId = "your-project-id";
+
+    // The ID of the bucket the original object is in
+    // String sourceBucketName = "your-source-bucket";
+
+    // The ID of the GCS object to copy
+    // String objectName = "your-object-name";
+
+    // The ID of the bucket to copy the object to
+    // String targetBucketName = "target-object-bucket";
+
+    try (Storage storage =
+        StorageOptions.newBuilder().setProjectId(projectId).build().getService()) {
+      BlobId sourceId = BlobId.of(sourceBucketName, objectName);
+      // you could change "objectName" to rename the object
+      BlobId targetId = BlobId.of(targetBucketName, objectName);
+
+      // Recommended: set a generation-match precondition to avoid potential race
+      // conditions and data corruptions. The request returns a 412 error if the
+      // preconditions are not met.
+      Storage.BlobTargetOption precondition;
+      BlobInfo existingTarget = storage.get(targetBucketName, objectName);
+      if (existingTarget == null) {
+        // For a target object that does not yet exist, set the DoesNotExist precondition.
+        // This will cause the request to fail if the object is created before the request runs.
+        precondition = Storage.BlobTargetOption.doesNotExist();
+      } else {
+        // If the destination already exists in your bucket, instead set a generation-match
+        // precondition. This will cause the request to fail if the existing object's generation
+        // changes before the request runs.
+        precondition = Storage.BlobTargetOption.generationMatch(existingTarget.getGeneration());
+      }
+
+      CopyRequest copyRequest =
+          CopyRequest.newBuilder()
+              .setSource(sourceId)
+              .setTarget(targetId, precondition)
+              // limit the number of bytes Cloud Storage will attempt to copy before responding to
+              // an individual request.
+              // If you see Read Timeout errors, try reducing this value.
+              .setMegabytesCopiedPerChunk(2048L) // 2GiB
+              .build();
+      CopyWriter copyWriter = storage.copy(copyRequest);
+      // NOTE(review): getResult() appears to drive the (possibly multi-request) copy to
+      // completion before returning the final metadata — confirm against CopyWriter docs.
+      BlobInfo successfulCopyResult = copyWriter.getResult();
+
+      System.out.printf(
+          "Copied object gs://%s/%s to %s%n",
+          sourceBucketName,
+          objectName,
+          successfulCopyResult.getBlobId().toGsUtilUriWithGeneration());
+    }
+  }
+}
+// [END storage_copy_file]
diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/CopyOldVersionOfObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/CopyOldVersionOfObject.java
new file mode 100644
index 000000000000..0c227ad1ab62
--- /dev/null
+++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/CopyOldVersionOfObject.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.example.storage.object; + +// [START storage_copy_file_archived_generation] +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class CopyOldVersionOfObject { + public static void copyOldVersionOfObject( + String projectId, + String bucketName, + String objectToCopy, + long generationToCopy, + String newObjectName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of the GCS object to copy an old version of + // String objectToCopy = "your-object-name"; + + // The generation of objectToCopy to copy + // long generationToCopy = 1579287380533984; + + // What to name the new object with the old data from objectToCopy + // String newObjectName = "your-new-object"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request returns a 412 error if the + // preconditions are not met. + Storage.BlobTargetOption precondition; + if (storage.get(bucketName, newObjectName) == null) { + // For a target object that does not yet exist, set the DoesNotExist precondition. + // This will cause the request to fail if the object is created before the request runs. + precondition = Storage.BlobTargetOption.doesNotExist(); + } else { + // If the destination already exists in your bucket, instead set a generation-match + // precondition. This will cause the request to fail if the existing object's generation + // changes before the request runs. 
+ precondition = + Storage.BlobTargetOption.generationMatch( + storage.get(bucketName, newObjectName).getGeneration()); + } + + Storage.CopyRequest copyRequest = + Storage.CopyRequest.newBuilder() + .setSource(BlobId.of(bucketName, objectToCopy, generationToCopy)) + .setTarget(BlobId.of(bucketName, newObjectName), precondition) + .build(); + storage.copy(copyRequest); + + System.out.println( + "Generation " + + generationToCopy + + " of object " + + objectToCopy + + " in bucket " + + bucketName + + " was copied to " + + newObjectName); + } +} +// [END storage_copy_file_archived_generation] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/CreateAndWriteAppendableObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/CreateAndWriteAppendableObject.java new file mode 100644 index 000000000000..29495d0ec6ae --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/CreateAndWriteAppendableObject.java @@ -0,0 +1,77 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.example.storage.object;
+
+// [START storage_create_and_write_appendable_object_upload]
+
+import com.google.cloud.storage.BlobAppendableUpload;
+import com.google.cloud.storage.BlobAppendableUpload.AppendableUploadWriteableByteChannel;
+import com.google.cloud.storage.BlobAppendableUploadConfig;
+import com.google.cloud.storage.BlobAppendableUploadConfig.CloseAction;
+import com.google.cloud.storage.BlobId;
+import com.google.cloud.storage.BlobInfo;
+import com.google.cloud.storage.FlushPolicy;
+import com.google.cloud.storage.Storage;
+import com.google.cloud.storage.StorageOptions;
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.file.Paths;
+import java.util.Locale;
+
+public class CreateAndWriteAppendableObject {
+  /** Creates an appendable object from a local file and finalizes it when the channel closes. */
+  public static void createAndWriteAppendableObject(
+      String bucketName, String objectName, String filePath) throws Exception {
+    // The ID of your GCS bucket
+    // String bucketName = "your-unique-bucket-name";
+
+    // The ID of your GCS object
+    // String objectName = "your-object-name";
+
+    // The path to the file to upload
+    // String filePath = "path/to/your/file";
+
+    // Appendable uploads use the gRPC transport.
+    try (Storage storage = StorageOptions.grpc().build().getService()) {
+      BlobId blobId = BlobId.of(bucketName, objectName);
+      BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build();
+
+      // NOTE(review): presumably at most flushSize bytes are buffered before each flush to the
+      // service — confirm against the FlushPolicy.maxFlushSize docs.
+      int flushSize = 64 * 1000;
+      FlushPolicy.MaxFlushSizeFlushPolicy flushPolicy = FlushPolicy.maxFlushSize(flushSize);
+      BlobAppendableUploadConfig config =
+          BlobAppendableUploadConfig.of()
+              .withCloseAction(CloseAction.FINALIZE_WHEN_CLOSING)
+              .withFlushPolicy(flushPolicy);
+      BlobAppendableUpload uploadSession = storage.blobAppendableUpload(blobInfo, config);
+      // FileChannel.open with no explicit options opens the file read-only.
+      try (AppendableUploadWriteableByteChannel channel = uploadSession.open();
+          ReadableByteChannel readableByteChannel = FileChannel.open(Paths.get(filePath))) {
+        ByteStreams.copy(readableByteChannel,
channel);
+        // Since the channel is in a try-with-resources block, channel.close()
+        // will be implicitly called here, which triggers the finalization.
+      } catch (IOException ex) {
+        throw new IOException("Failed to upload to object " + blobId.toGsUtilUri(), ex);
+      }
+      BlobInfo result = storage.get(blobId);
+      System.out.printf(
+          Locale.US,
+          "Object %s successfully uploaded",
+          result.getBlobId().toGsUtilUriWithGeneration());
+    }
+  }
+}
+
+// [END storage_create_and_write_appendable_object_upload]
diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/DeleteObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DeleteObject.java
new file mode 100644
index 000000000000..3e8d7d331f5b
--- /dev/null
+++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DeleteObject.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.storage.object;
+
+// [START storage_delete_file]
+import com.google.cloud.storage.Blob;
+import com.google.cloud.storage.BlobId;
+import com.google.cloud.storage.Storage;
+import com.google.cloud.storage.StorageOptions;
+
+public class DeleteObject {
+  /** Deletes the current version of objectName from bucketName, pinned to its generation. */
+  public static void deleteObject(String projectId, String bucketName, String objectName) {
+    // The ID of your GCP project
+    // String projectId = "your-project-id";
+
+    // The ID of your GCS bucket
+    // String bucketName = "your-unique-bucket-name";
+
+    // The ID of your GCS object
+    // String objectName = "your-object-name";
+
+    Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();
+    // Fetch the object's metadata first; the returned BlobId carries the current generation.
+    Blob blob = storage.get(bucketName, objectName);
+    if (blob == null) {
+      System.out.println("The object " + objectName + " wasn't found in " + bucketName);
+      return;
+    }
+    BlobId idWithGeneration = blob.getBlobId();
+    // Deletes the blob specified by its id. When the generation is present and non-null it will be
+    // specified in the request.
+    // If versioning is enabled on the bucket and the generation is present in the delete request,
+    // only the version of the object with the matching generation will be deleted.
+    // If instead you want to delete the current version, the generation should be dropped by
+    // performing the following.
+    // BlobId idWithoutGeneration =
+    //    BlobId.of(idWithGeneration.getBucket(), idWithGeneration.getName());
+    // storage.delete(idWithoutGeneration);
+    storage.delete(idWithGeneration);
+
+    System.out.println("Object " + objectName + " was permanently deleted from " + bucketName);
+  }
+}
+// [END storage_delete_file]
diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/DeleteOldVersionOfObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DeleteOldVersionOfObject.java
new file mode 100644
index 000000000000..5c0d6e1f7ba3
--- /dev/null
+++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DeleteOldVersionOfObject.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.storage.object;
+
+// [START storage_delete_file_archived_generation]
+import com.google.cloud.storage.BlobId;
+import com.google.cloud.storage.Storage;
+import com.google.cloud.storage.StorageOptions;
+
+public class DeleteOldVersionOfObject {
+  /** Deletes a single archived generation of objectName, leaving other versions intact. */
+  public static void deleteOldVersionOfObject(
+      String projectId, String bucketName, String objectName, long generationToDelete) {
+    // The ID of your GCP project
+    // String projectId = "your-project-id";
+
+    // The ID of your GCS bucket
+    // String bucketName = "your-unique-bucket-name";
+
+    // The ID of your GCS object
+    // String objectName = "your-object-name";
+
+    // The generation of objectName to delete
+    // long generationToDelete = 1579287380533984;
+
+    Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();
+    // A BlobId with an explicit generation targets only that version of the object
+    // (see the generation semantics described in the DeleteObject sample).
+    storage.delete(BlobId.of(bucketName, objectName, generationToDelete));
+
+    System.out.println(
+        "Generation "
+            + generationToDelete
+            + " of object "
+            + objectName
+            + " was deleted from "
+            + bucketName);
+  }
+}
+// [END storage_delete_file_archived_generation]
diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadByteRange.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadByteRange.java
new file mode 100644
index 000000000000..e64bbb7c80c5
--- /dev/null
+++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadByteRange.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_download_byte_range] + +import com.google.cloud.ReadChannel; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.common.io.ByteStreams; +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; + +public class DownloadByteRange { + + public static void downloadByteRange( + String projectId, + String bucketName, + String blobName, + long startByte, + long endBytes, + String destFileName) + throws IOException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The name of the blob/file that you wish to modify permissions on + // String blobName = "your-blob-name"; + + // The starting byte at which to begin the download + // long startByte = 0; + + // The ending byte at which to end the download + // long endByte = 20; + + // The path to which the file should be downloaded + // String destFileName = '/local/path/to/file.txt'; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + BlobId blobId = BlobId.of(bucketName, blobName); + try (ReadChannel from = storage.reader(blobId); + FileChannel to = FileChannel.open(Paths.get(destFileName), StandardOpenOption.WRITE)) { + from.seek(startByte); + from.limit(endBytes); + + ByteStreams.copy(from, to); + + System.out.printf( + "%s downloaded to %s from byte %d to byte %d%n", + blobId.toGsUtilUri(), destFileName, startByte, endBytes); + } + } +} +// [END storage_download_byte_range] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadEncryptedObject.java 
b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadEncryptedObject.java new file mode 100644 index 000000000000..5fd72e75f147 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadEncryptedObject.java @@ -0,0 +1,67 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_download_encrypted_file] + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.io.IOException; +import java.nio.file.Path; + +public class DownloadEncryptedObject { + public static void downloadEncryptedObject( + String projectId, + String bucketName, + String objectName, + Path destFilePath, + String decryptionKey) + throws IOException { + + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The path to which the file should be downloaded + // Path destFilePath = Paths.get("/local/path/to/file.txt"); + + // The Base64 encoded decryption key, which should be the same key originally used to encrypt + // the object + // String decryptionKey = "TIbv/fjexq+VmtXzAlc63J4z5kFmWJ6NdAPQulQBT7g="; + + Storage storage = 
StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + Blob blob = storage.get(bucketName, objectName); + blob.downloadTo(destFilePath, Blob.BlobSourceOption.decryptionKey(decryptionKey)); + + System.out.println( + "Downloaded object " + + objectName + + " from bucket name " + + bucketName + + " to " + + destFilePath + + " using customer-supplied encryption key"); + } +} +// [END storage_download_encrypted_file] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadObject.java new file mode 100644 index 000000000000..d7982f6c5362 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadObject.java @@ -0,0 +1,57 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.example.storage.object;
+
+// [START storage_download_file]
+
+import com.google.cloud.storage.BlobId;
+import com.google.cloud.storage.Storage;
+import com.google.cloud.storage.StorageOptions;
+import java.nio.file.Paths;
+
+public class DownloadObject {
+  /** Downloads objectName from bucketName to the local path destFilePath. */
+  public static void downloadObject(
+      String projectId, String bucketName, String objectName, String destFilePath)
+      throws Exception {
+    // The ID of your GCP project
+    // String projectId = "your-project-id";
+
+    // The ID of your GCS bucket
+    // String bucketName = "your-unique-bucket-name";
+
+    // The ID of your GCS object
+    // String objectName = "your-object-name";
+
+    // The path to which the file should be downloaded
+    // String destFilePath = "/local/path/to/file.txt";
+
+    StorageOptions storageOptions = StorageOptions.newBuilder().setProjectId(projectId).build();
+    // try-with-resources closes the Storage client (and its background resources) on exit.
+    try (Storage storage = storageOptions.getService()) {
+
+      storage.downloadTo(BlobId.of(bucketName, objectName), Paths.get(destFilePath));
+
+      System.out.println(
+          "Downloaded object "
+              + objectName
+              + " from bucket name "
+              + bucketName
+              + " to "
+              + destFilePath);
+    }
+  }
+}
+// [END storage_download_file]
diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadObjectIntoMemory.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadObjectIntoMemory.java
new file mode 100644
index 000000000000..57d1efc72d98
--- /dev/null
+++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadObjectIntoMemory.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.storage.object;
+
+// [START storage_file_download_into_memory]
+
+import com.google.cloud.storage.Storage;
+import com.google.cloud.storage.StorageOptions;
+import java.nio.charset.StandardCharsets;
+
+public class DownloadObjectIntoMemory {
+  /** Reads objectName fully into memory and prints its contents decoded as UTF-8. */
+  public static void downloadObjectIntoMemory(
+      String projectId, String bucketName, String objectName) {
+    // The ID of your GCP project
+    // String projectId = "your-project-id";
+
+    // The ID of your GCS bucket
+    // String bucketName = "your-unique-bucket-name";
+
+    // The ID of your GCS object
+    // String objectName = "your-object-name";
+
+    Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();
+    // readAllBytes materializes the whole object in a byte[]; suitable only for objects that
+    // fit comfortably in memory.
+    byte[] content = storage.readAllBytes(bucketName, objectName);
+    System.out.println(
+        "The contents of "
+            + objectName
+            + " from bucket name "
+            + bucketName
+            + " are: "
+            + new String(content, StandardCharsets.UTF_8));
+  }
+}
+// [END storage_file_download_into_memory]
diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadPublicObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadPublicObject.java
new file mode 100644
index 000000000000..feb966b1e067
--- /dev/null
+++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadPublicObject.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.storage.object;
+
+// [START storage_download_public_file]
+
+import com.google.cloud.storage.BlobId;
+import com.google.cloud.storage.Storage;
+import com.google.cloud.storage.StorageOptions;
+import java.nio.file.Path;
+
+public class DownloadPublicObject {
+  /** Downloads a publicly readable object without credentials via an anonymous client. */
+  public static void downloadPublicObject(
+      String bucketName, String publicObjectName, Path destFilePath) {
+    // The name of the bucket to access
+    // String bucketName = "my-bucket";
+
+    // The name of the remote public file to download
+    // String publicObjectName = "publicfile.txt";
+
+    // The path to which the file should be downloaded
+    // Path destFilePath = Paths.get("/local/path/to/file.txt");
+
+    // Instantiate an anonymous Google Cloud Storage client, which can only access public files
+    Storage storage = StorageOptions.getUnauthenticatedInstance().getService();
+
+    storage.downloadTo(BlobId.of(bucketName, publicObjectName), destFilePath);
+
+    System.out.println(
+        "Downloaded public object "
+            + publicObjectName
+            + " from bucket name "
+            + bucketName
+            + " to "
+            + destFilePath);
+  }
+}
+// [END storage_download_public_file]
diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadRequesterPaysObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadRequesterPaysObject.java
new file mode 100644
index 000000000000..83d993853d7a
--- /dev/null
+++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/DownloadRequesterPaysObject.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright
2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_download_file_requester_pays] + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.nio.file.Path; + +public class DownloadRequesterPaysObject { + public static void downloadRequesterPaysObject( + String projectId, String bucketName, String objectName, Path destFilePath) { + // The project ID to bill + // String projectId = "my-billable-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The path to which the file should be downloaded + // Path destFilePath = Paths.get("/local/path/to/file.txt"); + + Storage storage = StorageOptions.getDefaultInstance().getService(); + Blob blob = + storage.get( + BlobId.of(bucketName, objectName), Storage.BlobGetOption.userProject(projectId)); + blob.downloadTo(destFilePath, Blob.BlobSourceOption.userProject(projectId)); + + System.out.println( + "Object " + objectName + " downloaded to " + destFilePath + " and billed to " + projectId); + } +} +// [END storage_download_file_requester_pays] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/FinalizeAppendableObjectUpload.java 
b/java-storage/samples/snippets/src/main/java/com/example/storage/object/FinalizeAppendableObjectUpload.java
new file mode 100644
index 000000000000..161a2272cff5
--- /dev/null
+++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/FinalizeAppendableObjectUpload.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.storage.object;
+
+// [START storage_finalize_appendable_object_upload]
+
+import com.google.cloud.storage.Blob;
+import com.google.cloud.storage.BlobAppendableUpload;
+import com.google.cloud.storage.BlobAppendableUploadConfig;
+import com.google.cloud.storage.BlobId;
+import com.google.cloud.storage.BlobInfo;
+import com.google.cloud.storage.Storage;
+import com.google.cloud.storage.StorageOptions;
+
+public class FinalizeAppendableObjectUpload {
+  /** Takes over an existing unfinalized appendable object and finalizes it without new data. */
+  public static void finalizeAppendableObjectUpload(String bucketName, String objectName)
+      throws Exception {
+    // The ID of your GCS bucket
+    // String bucketName = "your-unique-bucket-name";
+
+    // The ID of your GCS unfinalized appendable object
+    // String objectName = "your-object-name";
+
+    // Appendable uploads use the gRPC transport.
+    try (Storage storage = StorageOptions.grpc().build().getService()) {
+      BlobId blobId = BlobId.of(bucketName, objectName);
+      Blob existingBlob = storage.get(blobId);
+
+      if (existingBlob == null) {
+        System.out.println("Object " + objectName + " not found in bucket " + bucketName);
+        return;
+      }
+
+      BlobInfo
blobInfoForTakeover = BlobInfo.newBuilder(existingBlob.getBlobId()).build();
+      BlobAppendableUpload finalizingSession =
+          storage.blobAppendableUpload(
+              blobInfoForTakeover,
+              BlobAppendableUploadConfig.of()
+                  .withCloseAction(BlobAppendableUploadConfig.CloseAction.FINALIZE_WHEN_CLOSING));
+
+      // Opening the takeover session and immediately finalizing closes out the object
+      // without appending any additional bytes.
+      try (BlobAppendableUpload.AppendableUploadWriteableByteChannel channel =
+          finalizingSession.open()) {
+        channel.finalizeAndClose();
+      }
+
+      System.out.println(
+          "Successfully finalized object " + objectName + " in bucket " + bucketName);
+    }
+  }
+}
+// [END storage_finalize_appendable_object_upload]
diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/GenerateEncryptionKey.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/GenerateEncryptionKey.java
new file mode 100644
index 000000000000..cab227480430
--- /dev/null
+++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/GenerateEncryptionKey.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.storage.object;
+
+// [START storage_generate_encryption_key]
+
+import com.google.common.io.BaseEncoding;
+import java.security.SecureRandom;
+
+public class GenerateEncryptionKey {
+  /**
+   * Generates a 256 bit (32 byte) AES encryption key and prints the base64 representation. This is
+   * included for demonstration purposes only.
You should generate your own key, and consult your
+   * security team about best practices. Please remember that encryption keys should be handled with
+   * a comprehensive security policy.
+   */
+  public static void generateEncryptionKey() {
+    // SecureRandom provides cryptographically strong random bytes, as required for key material.
+    byte[] key = new byte[32];
+    new SecureRandom().nextBytes(key);
+    String encryptionKey = BaseEncoding.base64().encode(key);
+
+    System.out.println("Generated Base64-encoded AES-256 encryption key: " + encryptionKey);
+  }
+}
+// [END storage_generate_encryption_key]
diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/GenerateV4GetObjectSignedUrl.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/GenerateV4GetObjectSignedUrl.java
new file mode 100644
index 000000000000..eed58bcccfd4
--- /dev/null
+++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/GenerateV4GetObjectSignedUrl.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
package com.example.storage.object;

// [START storage_generate_signed_url_v4]

import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageException;
import com.google.cloud.storage.StorageOptions;
import java.net.URL;
import java.util.concurrent.TimeUnit;

public class GenerateV4GetObjectSignedUrl {
  /**
   * Signing a URL requires Credentials which implement ServiceAccountSigner. These can be set
   * explicitly using the Storage.SignUrlOption.signWith(ServiceAccountSigner) option. If you don't,
   * you could also pass a service account signer to StorageOptions, i.e.
   * StorageOptions().newBuilder().setCredentials(ServiceAccountSignerCredentials). In this example,
   * neither of these options are used, which means the following code only works when the
   * credentials are defined via the environment variable GOOGLE_APPLICATION_CREDENTIALS, and those
   * credentials are authorized to sign a URL. See the documentation for Storage.signUrl for more
   * details.
   */
  public static void generateV4GetObjectSignedUrl(
      String projectId, String bucketName, String objectName) throws StorageException {
    // String projectId = "my-project-id";
    // String bucketName = "my-bucket";
    // String objectName = "my-object";

    Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();

    // The object the signed URL will grant read access to.
    BlobId blobId = BlobId.of(bucketName, objectName);
    BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build();

    // V4-signed GET URL, valid for 15 minutes.
    URL signedUrl =
        storage.signUrl(blobInfo, 15, TimeUnit.MINUTES, Storage.SignUrlOption.withV4Signature());

    System.out.println("Generated GET signed URL:");
    System.out.println(signedUrl);
    System.out.println("You can use this URL with any user agent, for example:");
    System.out.println("curl '" + signedUrl + "'");
  }
}
// [END storage_generate_signed_url_v4]
package com.example.storage.object;

// [START storage_generate_upload_signed_url_v4]

import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.HttpMethod;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageException;
import com.google.cloud.storage.StorageOptions;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

public class GenerateV4PutObjectSignedUrl {
  /**
   * Signing a URL requires Credentials which implement ServiceAccountSigner. These can be set
   * explicitly using the Storage.SignUrlOption.signWith(ServiceAccountSigner) option. If you don't,
   * you could also pass a service account signer to StorageOptions, i.e.
   * StorageOptions().newBuilder().setCredentials(ServiceAccountSignerCredentials). In this example,
   * neither of these options are used, which means the following code only works when the
   * credentials are defined via the environment variable GOOGLE_APPLICATION_CREDENTIALS, and those
   * credentials are authorized to sign a URL. See the documentation for Storage.signUrl for more
   * details.
   */
  public static void generateV4PutObjectSignedUrl(
      String projectId, String bucketName, String objectName) throws StorageException {
    // String projectId = "my-project-id";
    // String bucketName = "my-bucket";
    // String objectName = "my-object";

    Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();

    // Define Resource
    BlobInfo blobInfo = BlobInfo.newBuilder(BlobId.of(bucketName, objectName)).build();

    // Generate Signed URL. The Content-Type header must also be supplied by the client that
    // performs the PUT, since it is baked into the signature via withExtHeaders.
    Map<String, String> extensionHeaders = new HashMap<>();
    extensionHeaders.put("Content-Type", "application/octet-stream");

    URL url =
        storage.signUrl(
            blobInfo,
            15,
            TimeUnit.MINUTES,
            Storage.SignUrlOption.httpMethod(HttpMethod.PUT),
            Storage.SignUrlOption.withExtHeaders(extensionHeaders),
            Storage.SignUrlOption.withV4Signature());

    System.out.println("Generated PUT signed URL:");
    System.out.println(url);
    System.out.println("You can use this URL with any user agent, for example:");
    System.out.println(
        "curl -X PUT -H 'Content-Type: application/octet-stream' --upload-file my-file '"
            + url
            + "'");
  }
}
// [END storage_generate_upload_signed_url_v4]
package com.example.storage.object;

// [START storage_get_object_contexts]

import com.google.cloud.storage.Blob;
import com.google.cloud.storage.BlobInfo.ObjectContexts;
import com.google.cloud.storage.BlobInfo.ObjectCustomContextPayload;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import java.util.Map;

public class GetObjectContexts {

  /** Fetches an object and prints any custom contexts (key/value metadata) attached to it. */
  public static void getObjectContexts(String projectId, String bucketName, String objectName)
      throws Exception {
    // The ID of your GCP project
    // String projectId = "your-project-id";

    // The ID of your GCS bucket
    // String bucketName = "your-unique-bucket-name";

    // The ID of your GCS object
    // String objectName = "your-object-name";

    try (Storage storage =
        StorageOptions.newBuilder().setProjectId(projectId).build().getService()) {

      Blob blob = storage.get(bucketName, objectName);
      if (blob == null) {
        System.out.println("The object " + objectName + " was not found in " + bucketName);
        return;
      }
      ObjectContexts objectContexts = blob.getContexts();

      if (objectContexts != null) {
        // Custom contexts are keyed by user-chosen names; each value carries the context payload.
        Map<String, ObjectCustomContextPayload> customContexts = objectContexts.getCustom();
        if (customContexts == null) {
          System.out.println("No custom contexts found for object: " + objectName);
          return;
        }
        // Print blob's object contexts
        System.out.println("\nCustom Contexts:");
        for (Map.Entry<String, ObjectCustomContextPayload> custom : customContexts.entrySet()) {
          System.out.println(custom.getKey() + "=" + custom.getValue());
        }
      }
    }
  }
}
// [END storage_get_object_contexts]
/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage.object;

// [START storage_get_metadata]

import com.google.cloud.storage.Blob;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageException;
import com.google.cloud.storage.StorageOptions;
import java.util.Date;
import java.util.Map;

public class GetObjectMetadata {

  /** Fetches an object with all metadata fields selected and prints each one. */
  public static void getObjectMetadata(String projectId, String bucketName, String blobName)
      throws StorageException {
    // The ID of your GCP project
    // String projectId = "your-project-id";

    // The ID of your GCS bucket
    // String bucketName = "your-unique-bucket-name";

    // The ID of your GCS object
    // String objectName = "your-object-name";

    Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();

    // Select all fields
    // Fields can be selected individually e.g. Storage.BlobField.CACHE_CONTROL
    Blob blob =
        storage.get(bucketName, blobName, Storage.BlobGetOption.fields(Storage.BlobField.values()));

    // Guard against a missing object: storage.get returns null rather than throwing, and the
    // printing below would otherwise fail with a NullPointerException.
    if (blob == null) {
      System.out.println("The object " + blobName + " was not found in " + bucketName);
      return;
    }

    // Print blob metadata
    System.out.println("Bucket: " + blob.getBucket());
    System.out.println("CacheControl: " + blob.getCacheControl());
    System.out.println("ComponentCount: " + blob.getComponentCount());
    System.out.println("ContentDisposition: " + blob.getContentDisposition());
    System.out.println("ContentEncoding: " + blob.getContentEncoding());
    System.out.println("ContentLanguage: " + blob.getContentLanguage());
    System.out.println("ContentType: " + blob.getContentType());
    System.out.println("CustomTime: " + blob.getCustomTime());
    System.out.println("Crc32c: " + blob.getCrc32c());
    System.out.println("Crc32cHexString: " + blob.getCrc32cToHexString());
    System.out.println("ETag: " + blob.getEtag());
    System.out.println("Generation: " + blob.getGeneration());
    System.out.println("Id: " + blob.getBlobId());
    System.out.println("KmsKeyName: " + blob.getKmsKeyName());
    System.out.println("Md5Hash: " + blob.getMd5());
    System.out.println("Md5HexString: " + blob.getMd5ToHexString());
    System.out.println("MediaLink: " + blob.getMediaLink());
    System.out.println("Metageneration: " + blob.getMetageneration());
    System.out.println("Name: " + blob.getName());
    System.out.println("Size: " + blob.getSize());
    System.out.println("StorageClass: " + blob.getStorageClass());
    System.out.println("TimeCreated: " + new Date(blob.getCreateTime()));
    System.out.println("Last Metadata Update: " + new Date(blob.getUpdateTime()));
    System.out.println("Object Retention Policy: " + blob.getRetention());
    // Holds are Boolean object fields that may be null when unset; treat null as disabled.
    boolean temporaryHoldIsEnabled = (blob.getTemporaryHold() != null && blob.getTemporaryHold());
    System.out.println("temporaryHold: " + (temporaryHoldIsEnabled ? "enabled" : "disabled"));
    boolean eventBasedHoldIsEnabled =
        (blob.getEventBasedHold() != null && blob.getEventBasedHold());
    System.out.println("eventBasedHold: " + (eventBasedHoldIsEnabled ? "enabled" : "disabled"));
    if (blob.getRetentionExpirationTime() != null) {
      System.out.println("retentionExpirationTime: " + new Date(blob.getRetentionExpirationTime()));
    }
    if (blob.getMetadata() != null) {
      System.out.println("\n\n\nUser metadata:");
      for (Map.Entry<String, String> userMetadata : blob.getMetadata().entrySet()) {
        System.out.println(userMetadata.getKey() + "=" + userMetadata.getValue());
      }
    }
  }
}
// [END storage_get_metadata]
+ */ + +package com.example.storage.object; + +// [START storage_list_object_contexts] + +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class ListObjectContexts { + public static void listObjectContexts(String projectId, String bucketName, String key) + throws Exception { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The context key you want to filter + // String key = "your-context-key"; + + try (Storage storage = + StorageOptions.newBuilder().setProjectId(projectId).build().getService()) { + /* + * List any object that has a context with the specified key attached + * String filter = "contexts.\"KEY\":*"; + * + * List any object that that does not have a context with the specified key attached + * String filter = "NOT contexts.\"KEY\":*"; + * + * List any object that has a context with the specified key and value attached + * String filter = "contexts.\"KEY\"=\"VALUE\""; + * + * List any object that does not have a context with the specified key and value attached + * String filter = "NOT contexts.\"KEY\"=\"VALUE\""; + */ + + String filter = "contexts.\"" + key + "\":*"; + + System.out.println("Listing objects for bucket: " + bucketName + "with context key: " + key); + Page blobs = storage.list(bucketName, Storage.BlobListOption.filter(filter)); + for (Blob blob : blobs.iterateAll()) { + System.out.println(blob.getBlobId().toGsUtilUri()); + } + } + } +} +// [END storage_list_object_contexts] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListObjects.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListObjects.java new file mode 100644 index 000000000000..c9eebfafe287 --- /dev/null +++ 
b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListObjects.java @@ -0,0 +1,41 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_list_files] +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class ListObjects { + public static void listObjects(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Page blobs = storage.list(bucketName); + + for (Blob blob : blobs.iterateAll()) { + System.out.println(blob.getName()); + } + } +} +// [END storage_list_files] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListObjectsWithOldVersions.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListObjectsWithOldVersions.java new file mode 100644 index 000000000000..093942c7fd31 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListObjectsWithOldVersions.java @@ -0,0 +1,42 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * 
you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_list_file_archived_generations] +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class ListObjectsWithOldVersions { + public static void listObjectsWithOldVersions(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Bucket bucket = storage.get(bucketName); + Page blobs = bucket.list(Storage.BlobListOption.versions(true)); + + for (Blob blob : blobs.iterateAll()) { + System.out.println(blob.getName() + "," + blob.getGeneration()); + } + } +} +// [END storage_list_file_archived_generations] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListObjectsWithPrefix.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListObjectsWithPrefix.java new file mode 100644 index 000000000000..3a9f564a9c02 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListObjectsWithPrefix.java @@ -0,0 +1,69 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file 
except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_list_files_with_prefix] +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class ListObjectsWithPrefix { + public static void listObjectsWithPrefix( + String projectId, String bucketName, String directoryPrefix) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The directory prefix to search for + // String directoryPrefix = "myDirectory/" + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + /** + * Using the Storage.BlobListOption.currentDirectory() option here causes the results to display + * in a "directory-like" mode, showing what objects are in the directory you've specified, as + * well as what other directories exist in that directory. For example, given these blobs: + * + *

a/1.txt a/b/2.txt a/b/3.txt + * + *

If you specify prefix = "a/" and don't use Storage.BlobListOption.currentDirectory(), + * you'll get back: + * + *

a/1.txt a/b/2.txt a/b/3.txt + * + *

However, if you specify prefix = "a/" and do use + * Storage.BlobListOption.currentDirectory(), you'll get back: + * + *

a/1.txt a/b/ + * + *

Because a/1.txt is the only file in the a/ directory and a/b/ is a directory inside the + * /a/ directory. + */ + Page blobs = + storage.list( + bucketName, + Storage.BlobListOption.prefix(directoryPrefix), + Storage.BlobListOption.currentDirectory()); + + for (Blob blob : blobs.iterateAll()) { + System.out.println(blob.getName()); + } + } +} +// [END storage_list_files_with_prefix] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListSoftDeletedObjects.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListSoftDeletedObjects.java new file mode 100644 index 000000000000..eb3b5d158bd4 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListSoftDeletedObjects.java @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_list_soft_deleted_objects] +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class ListSoftDeletedObjects { + public static void listSoftDeletedObjects(String projectId, String bucketName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Page blobs = storage.list(bucketName, Storage.BlobListOption.softDeleted(true)); + + for (Blob blob : blobs.iterateAll()) { + System.out.println(blob.getName()); + } + } +} +// [END storage_list_soft_deleted_objects] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListSoftDeletedVersionsOfObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListSoftDeletedVersionsOfObject.java new file mode 100644 index 000000000000..87532cdc8e1e --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ListSoftDeletedVersionsOfObject.java @@ -0,0 +1,51 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_list_soft_deleted_object_versions] +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class ListSoftDeletedVersionsOfObject { + + public static void listSoftDeletedVersionOfObject( + String projectId, String bucketName, String objectName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The name of your GCS object + // String objectName = "your-object-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Page blobs = + storage.list( + bucketName, + Storage.BlobListOption.softDeleted(true), + // See https://cloud.google.com/storage/docs/json_api/v1/objects/list#matchGlob + Storage.BlobListOption.matchGlob(objectName)); + + for (Blob blob : blobs.iterateAll()) { + System.out.println(blob.getName()); + } + } +} +// [END storage_list_soft_deleted_object_versions] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/MakeObjectPublic.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/MakeObjectPublic.java new file mode 100644 index 000000000000..245c030a6b1b --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/MakeObjectPublic.java @@ -0,0 +1,38 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_make_public] +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class MakeObjectPublic { + public static void makeObjectPublic(String projectId, String bucketName, String objectName) { + // String projectId = "your-project-id"; + // String bucketName = "your-bucket-name"; + // String objectName = "your-object-name"; + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + BlobId blobId = BlobId.of(bucketName, objectName); + storage.createAcl(blobId, Acl.of(Acl.User.ofAllUsers(), Acl.Role.READER)); + + System.out.println( + "Object " + objectName + " in bucket " + bucketName + " was made publicly readable"); + } +} +// [END storage_make_public] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/OpenMultipleObjectsRangedRead.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/OpenMultipleObjectsRangedRead.java new file mode 100644 index 000000000000..33dfb916b3df --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/OpenMultipleObjectsRangedRead.java @@ -0,0 +1,87 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
package com.example.storage.object;

// [START storage_open_multiple_objects_ranged_read]

import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.BlobReadSession;
import com.google.cloud.storage.RangeSpec;
import com.google.cloud.storage.ReadAsFutureBytes;
import com.google.cloud.storage.ReadProjectionConfigs;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

public class OpenMultipleObjectsRangedRead {

  /**
   * Concurrently reads the same byte range [startOffset, startOffset + length) from several
   * objects, closing each read session once its read completes.
   */
  public static void multipleObjectsSingleRangedRead(
      String bucketName, List<String> objectNames, long startOffset, int length) throws Exception {
    // The ID of your GCS bucket
    // String bucketName = "your-unique-bucket-name";

    // The IDs of your GCS objects to read
    // List<String> objectNames = Arrays.asList("object-1", "object-2", "object-3");

    RangeSpec singleRange = RangeSpec.of(startOffset, length);
    ReadAsFutureBytes rangeConfig =
        ReadProjectionConfigs.asFutureBytes().withRangeSpec(singleRange);

    // Ranged read sessions are only available over gRPC.
    try (Storage storage = StorageOptions.grpc().build().getService()) {
      List<ApiFuture<byte[]>> futuresToWaitOn = new ArrayList<>();

      System.out.printf(
          "Initiating single ranged read [%d, %d] on %d objects...%n",
          startOffset, startOffset + length - 1, objectNames.size());

      for (String objectName : objectNames) {
        BlobId blobId = BlobId.of(bucketName, objectName);
        ApiFuture<BlobReadSession> futureReadSession = storage.blobReadSession(blobId);

        // Chain the read onto the session future, and close the session as soon as the read
        // finishes so resources are released without blocking the other reads.
        ApiFuture<byte[]> readAndCloseFuture =
            ApiFutures.transformAsync(
                futureReadSession,
                (BlobReadSession session) -> {
                  ApiFuture<byte[]> readFuture = session.readAs(rangeConfig);

                  readFuture.addListener(
                      () -> {
                        try {
                          session.close();
                        } catch (java.io.IOException e) {
                          // Best effort: the read already completed; a close failure only leaks
                          // the session, so log and continue.
                          System.err.println(
                              "WARN: Background error while closing session: " + e.getMessage());
                        }
                      },
                      MoreExecutors.directExecutor());
                  return readFuture;
                },
                MoreExecutors.directExecutor());

        futuresToWaitOn.add(readAndCloseFuture);
      }
      // Bound the overall wait so a stuck read cannot hang the sample indefinitely.
      ApiFutures.allAsList(futuresToWaitOn).get(30, TimeUnit.SECONDS);

      System.out.println("All concurrent single-ranged read operations are complete.");
    }
  }
}
// [END storage_open_multiple_objects_ranged_read]
+ */ + +package com.example.storage.object; + +// [START storage_open_object_multiple_ranged_read] + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobReadSession; +import com.google.cloud.storage.RangeSpec; +import com.google.cloud.storage.ReadProjectionConfigs; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.common.collect.ImmutableList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +public class OpenObjectMultipleRangedRead { + public static void openObjectMultipleRangedRead( + String bucketName, String objectName, long offset1, int length1, long offset2, int length2) + throws Exception { + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The beginning of the range 1 + // long offset = 0 + + // The maximum number of bytes to read in range 1 + // int length = 16; + + // The beginning of the range 2 + // long offset = 16 + + // The maximum number of bytes to read in range 2 + // int length = 32; + + try (Storage storage = StorageOptions.grpc().build().getService()) { + BlobId blobId = BlobId.of(bucketName, objectName); + ApiFuture futureBlobReadSession = storage.blobReadSession(blobId); + RangeSpec rangeSpec1 = RangeSpec.of(offset1, length1); + RangeSpec rangeSpec2 = RangeSpec.of(offset2, length2); + + try (BlobReadSession blobReadSession = futureBlobReadSession.get(10, TimeUnit.SECONDS)) { + ApiFuture future1 = + blobReadSession.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(rangeSpec1)); + ApiFuture future2 = + blobReadSession.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(rangeSpec2)); + + List allBytes = ApiFutures.allAsList(ImmutableList.of(future1, future2)).get(); + + byte[] bytes1 = allBytes.get(0); + byte[] bytes2 = allBytes.get(1); 
+ + System.out.println( + "Successfully read " + + bytes1.length + + " bytes from range 1 and " + + bytes2.length + + " bytes from range 2."); + } + } + } +} + +// [END storage_open_object_multiple_ranged_read] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/OpenObjectReadFullObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/OpenObjectReadFullObject.java new file mode 100644 index 000000000000..a1a22ffd2698 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/OpenObjectReadFullObject.java @@ -0,0 +1,69 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_open_object_read_full_object] + +import com.google.api.core.ApiFuture; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobReadSession; +import com.google.cloud.storage.ReadAsChannel; +import com.google.cloud.storage.ReadProjectionConfigs; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.nio.ByteBuffer; +import java.nio.channels.ScatteringByteChannel; +import java.util.Locale; +import java.util.concurrent.TimeUnit; + +public class OpenObjectReadFullObject { + public static void openObjectReadFullObject(String bucketName, String objectName) + throws Exception { + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object to read + // String objectName = "your-object-name"; + + try (Storage storage = StorageOptions.grpc().build().getService()) { + BlobId blobId = BlobId.of(bucketName, objectName); + ApiFuture futureBlobReadSession = storage.blobReadSession(blobId); + + try (BlobReadSession blobReadSession = futureBlobReadSession.get(10, TimeUnit.SECONDS)) { + + ReadAsChannel readAsChannelConfig = ReadProjectionConfigs.asChannel(); + try (ScatteringByteChannel channel = blobReadSession.readAs(readAsChannelConfig)) { + long totalBytesRead = 0; + ByteBuffer buffer = ByteBuffer.allocate(64 * 1024); + int bytesRead; + + while ((bytesRead = channel.read(buffer)) != -1) { + totalBytesRead += bytesRead; + buffer.clear(); + } + + System.out.printf( + Locale.US, + "Successfully read a total of %d bytes from object %s%n", + totalBytesRead, + blobId.toGsUtilUri()); + } + } + } + } +} +// [END storage_open_object_read_full_object] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/OpenObjectSingleRangedRead.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/OpenObjectSingleRangedRead.java new file mode 100644 index 
000000000000..55446ea266e3 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/OpenObjectSingleRangedRead.java @@ -0,0 +1,69 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_open_object_single_ranged_read] + +import com.google.api.core.ApiFuture; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobReadSession; +import com.google.cloud.storage.RangeSpec; +import com.google.cloud.storage.ReadProjectionConfigs; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.util.concurrent.TimeUnit; + +public class OpenObjectSingleRangedRead { + public static void openObjectSingleRangedRead( + String bucketName, String objectName, long offset, int length) throws Exception { + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The beginning of the range + // long offset = 0 + + // The maximum number of bytes to read from the object. 
+ // int length = 64; + + try (Storage storage = StorageOptions.grpc().build().getService()) { + BlobId blobId = BlobId.of(bucketName, objectName); + ApiFuture futureBlobReadSession = storage.blobReadSession(blobId); + + try (BlobReadSession blobReadSession = futureBlobReadSession.get(10, TimeUnit.SECONDS)) { + // Define the range of bytes to read. + RangeSpec rangeSpec = RangeSpec.of(offset, length); + ApiFuture future = + blobReadSession.readAs(ReadProjectionConfigs.asFutureBytes().withRangeSpec(rangeSpec)); + + // Wait for the read to complete. + byte[] bytes = future.get(); + + System.out.println( + "Successfully read " + + bytes.length + + " bytes from object " + + objectName + + " in bucket " + + bucketName); + } + } + } +} +// [END storage_open_object_single_ranged_read] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/PauseAndResumeAppendableObjectUpload.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/PauseAndResumeAppendableObjectUpload.java new file mode 100644 index 000000000000..c364ee093763 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/PauseAndResumeAppendableObjectUpload.java @@ -0,0 +1,107 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage.object;

// [START storage_pause_and_resume_appendable_object_upload]

import com.google.cloud.storage.Blob;
import com.google.cloud.storage.BlobAppendableUpload;
import com.google.cloud.storage.BlobAppendableUpload.AppendableUploadWriteableByteChannel;
import com.google.cloud.storage.BlobAppendableUploadConfig;
import com.google.cloud.storage.BlobAppendableUploadConfig.CloseAction;
import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageChannelUtils;
import com.google.cloud.storage.StorageOptions;
import com.google.common.io.ByteStreams;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.util.Locale;

public class PauseAndResumeAppendableObjectUpload {

  /**
   * Demonstrates pausing an appendable upload (close without finalizing) and later resuming it
   * with additional content from a file before finalizing.
   *
   * @param bucketName the GCS bucket to upload into
   * @param objectName the object to create and append to
   * @param filePath local path of the file whose content is appended on resume
   * @throws Exception if either upload segment fails
   */
  public static void pauseAndResumeAppendableObjectUpload(
      String bucketName, String objectName, String filePath) throws Exception {
    // The ID of your GCS bucket
    // String bucketName = "your-unique-bucket-name";

    // The ID of your GCS object
    // String objectName = "your-object-name";

    // The path to the file to upload
    // String filePath = "path/to/your/file";

    try (Storage storage = StorageOptions.grpc().build().getService()) {
      BlobId blobId = BlobId.of(bucketName, objectName);
      BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build();

      // --- Step 1: Initial string write (PAUSE) ---
      // Default close action will be CLOSE_WITHOUT_FINALIZING
      BlobAppendableUploadConfig initialConfig = BlobAppendableUploadConfig.of();
      BlobAppendableUpload initialUploadSession =
          storage.blobAppendableUpload(blobInfo, initialConfig);

      try (AppendableUploadWriteableByteChannel channel = initialUploadSession.open()) {
        String initialData = "Initial data segment.\n";
        ByteBuffer buffer = ByteBuffer.wrap(initialData.getBytes(StandardCharsets.UTF_8));
        long totalBytesWritten = StorageChannelUtils.blockingEmptyTo(buffer, channel);
        // Force the buffered bytes to the service before the channel closes without finalizing.
        channel.flush();

        System.out.printf(
            Locale.US, "Wrote %d bytes (initial string) in first segment.\n", totalBytesWritten);
      } catch (IOException ex) {
        throw new IOException("Failed initial upload to object " + blobId.toGsUtilUri(), ex);
      }

      Blob existingBlob = storage.get(blobId);
      long currentObjectSize = existingBlob.getSize();
      System.out.printf(
          Locale.US,
          "Initial upload paused. Currently uploaded size: %d bytes\n",
          currentObjectSize);

      // --- Step 2: Resume upload with file content and finalize ---
      // Use FINALIZE_WHEN_CLOSING to ensure the object is finalized on channel closure.
      BlobAppendableUploadConfig resumeConfig =
          BlobAppendableUploadConfig.of().withCloseAction(CloseAction.FINALIZE_WHEN_CLOSING);
      BlobAppendableUpload resumeUploadSession =
          storage.blobAppendableUpload(existingBlob.toBuilder().build(), resumeConfig);

      try (FileChannel fileChannel = FileChannel.open(Paths.get(filePath));
          AppendableUploadWriteableByteChannel channel = resumeUploadSession.open()) {
        long bytesToAppend = fileChannel.size();
        System.out.printf(
            Locale.US,
            "Appending the entire file (%d bytes) after the initial string.\n",
            bytesToAppend);

        ByteStreams.copy(fileChannel, channel);
      }

      BlobInfo result = storage.get(blobId);
      System.out.printf(
          Locale.US,
          "\nObject %s successfully resumed and finalized. Total size: %d bytes\n",
          result.getBlobId().toGsUtilUriWithGeneration(),
          result.getSize());
    }
  }
}
// [END storage_pause_and_resume_appendable_object_upload]

/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage.object;

// [START storage_print_file_acl]

import com.google.cloud.storage.Acl;
import com.google.cloud.storage.Blob;
import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import java.util.List;

public class PrintBlobAcl {

  /**
   * Prints every ACL entry (role and entity type) attached to an object.
   *
   * @param bucketName the GCS bucket containing the object
   * @param blobName the object whose ACLs are listed
   */
  public static void printBlobAcl(String bucketName, String blobName) {

    // The ID to give your GCS bucket
    // String bucketName = "your-unique-bucket-name";

    // The name of the blob/file that you wish to view Acls of
    // String blobName = "your-blob-name";

    Storage storage = StorageOptions.newBuilder().build().getService();
    Blob blob = storage.get(BlobId.of(bucketName, blobName));
    List<Acl> blobAcls = blob.getAcl();

    for (Acl acl : blobAcls) {

      // This will give you the role.
      // See https://cloud.google.com/storage/docs/access-control/lists#permissions
      String role = acl.getRole().name();

      // This will give you the Entity type (i.e. User, Group, Project etc.)
      // See https://cloud.google.com/storage/docs/access-control/lists#scopes
      String entityType = acl.getEntity().getType().name();

      System.out.printf("%s: %s %n", role, entityType);
    }
  }
}
// [END storage_print_file_acl]
+ */ + +package com.example.storage.object; + +// [START storage_print_file_acl_for_user] + +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class PrintBlobAclForUser { + + public static void printBlobAclForUser(String bucketName, String blobName, String userEmail) + throws Exception { + + // The ID to give your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The name of the blob/file that you wish to view Acls of + // String blobName = "your-blob-name"; + + // The email of the user whose acl is being retrieved. + // String userEmail = "someuser@domain.com" + + try (Storage storage = StorageOptions.newBuilder().build().getService()) { + Blob blob = storage.get(BlobId.of(bucketName, blobName)); + Acl blobAcl = blob.getAcl(new User(userEmail)); + if (blobAcl != null) { + String userRole = blobAcl.getRole().name(); + System.out.println("User " + userEmail + " has role " + userRole); + } else { + System.out.println("User " + userEmail + " not found"); + } + } + } +} +// [END storage_print_file_acl_for_user] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/ReadAppendableObjectTail.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ReadAppendableObjectTail.java new file mode 100644 index 000000000000..98cf31e96739 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ReadAppendableObjectTail.java @@ -0,0 +1,135 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_read_appendable_object_tail] + +import com.google.api.core.ApiFuture; +import com.google.cloud.storage.BlobAppendableUpload; +import com.google.cloud.storage.BlobAppendableUpload.AppendableUploadWriteableByteChannel; +import com.google.cloud.storage.BlobAppendableUploadConfig; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BlobReadSession; +import com.google.cloud.storage.FlushPolicy; +import com.google.cloud.storage.RangeSpec; +import com.google.cloud.storage.ReadProjectionConfigs; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageChannelUtils; +import com.google.cloud.storage.StorageOptions; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.TimeUnit; + +public class ReadAppendableObjectTail { + public static void readAppendableObjectTail(String bucketName, String objectName) + throws Exception { + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + try (Storage storage = StorageOptions.grpc().build().getService()) { + BlobId blobId = BlobId.of(bucketName, objectName); + BlobInfo info = BlobInfo.newBuilder(blobId).build(); + int totalToWrite = 64 * 1000; + // Define our flush policy to flush small increments + // This is useful for demonstration purposes, but you should use more appropriate values for + // your workload. 
+ int flushSize = totalToWrite / 8; + FlushPolicy.MinFlushSizeFlushPolicy flushPolicy = + FlushPolicy.minFlushSize(flushSize).withMaxPendingBytes(flushSize); + BlobAppendableUploadConfig appendableUploadConfig = + BlobAppendableUploadConfig.of().withFlushPolicy(flushPolicy); + BlobAppendableUpload upload = + storage.blobAppendableUpload( + info, appendableUploadConfig, Storage.BlobWriteOption.doesNotExist()); + // Create the object, we'll takeover to write for our example. + upload.open().closeWithoutFinalizing(); + BlobInfo gen1 = upload.getResult().get(); + BlobAppendableUpload takeover = storage.blobAppendableUpload(gen1, appendableUploadConfig); + + try (AppendableUploadWriteableByteChannel channel = takeover.open()) { + // Start a background thread to write some data on a periodic basis + // In reality, you're application would probably be doing thing in another scope + Thread writeThread = startWriteThread(totalToWrite, channel, flushPolicy); + try (BlobReadSession readSession = + storage.blobReadSession(gen1.getBlobId()).get(10, TimeUnit.SECONDS)) { + int zeroCnt = 0; + long read = 0; + while (read < totalToWrite) { + if (zeroCnt >= 30 && !channel.isOpen()) { + System.out.println("breaking"); + break; + } + ApiFuture future = + readSession.readAs( + ReadProjectionConfigs.asFutureBytes() + .withRangeSpec(RangeSpec.of(read, flushPolicy.getMinFlushSize()))); + byte[] bytes = future.get(20, TimeUnit.SECONDS); + + read += bytes.length; + long defaultSleep = 1_500L; + if (bytes.length == 0) { + zeroCnt++; + long millis = defaultSleep * zeroCnt; + System.out.println("millis = " + millis); + Thread.sleep(millis); + } else { + zeroCnt = 0; + System.out.println("bytes.length = " + bytes.length + " read = " + read); + Thread.sleep(defaultSleep); + } + } + assert read == totalToWrite : "not enough bytes"; + } + writeThread.join(); + } + } + } + + private static Thread startWriteThread( + int totalToWrite, + AppendableUploadWriteableByteChannel channel, + 
FlushPolicy.MinFlushSizeFlushPolicy flushPolicy) { + Thread writeThread = + new Thread( + () -> { + try { + for (long written = 0; written < totalToWrite; ) { + byte alphaOffset = (byte) (written % 0x1a); + + ByteBuffer buf = ByteBuffer.wrap(new byte[] {(byte) (0x41 + alphaOffset)}); + int w = StorageChannelUtils.blockingEmptyTo(buf, channel); + written += w; + if (written % flushPolicy.getMinFlushSize() == 0) { + channel.flush(); + Thread.sleep(40); + } + } + channel.closeWithoutFinalizing(); + + } catch (IOException | InterruptedException e) { + throw new RuntimeException(e); + } + }); + writeThread.start(); + return writeThread; + } +} +// [END storage_read_appendable_object_tail] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/ReleaseEventBasedHold.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ReleaseEventBasedHold.java new file mode 100644 index 000000000000..6ce3e1337907 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ReleaseEventBasedHold.java @@ -0,0 +1,57 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_release_event_based_hold] + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class ReleaseEventBasedHold { + public static void releaseEventBasedHold(String projectId, String bucketName, String objectName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + BlobId blobId = BlobId.of(bucketName, objectName); + Blob blob = storage.get(blobId); + if (blob == null) { + System.out.println("The object " + objectName + " was not found in " + bucketName); + return; + } + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request to upload returns a 412 error if + // the object's generation number does not match your precondition. 
+ Storage.BlobTargetOption precondition = Storage.BlobTargetOption.generationMatch(); + + blob.toBuilder().setEventBasedHold(false).build().update(precondition); + + System.out.println("Event-based hold was released for " + objectName); + } +} +// [END storage_release_event_based_hold] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/ReleaseTemporaryHold.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ReleaseTemporaryHold.java new file mode 100644 index 000000000000..eae54db3680c --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/ReleaseTemporaryHold.java @@ -0,0 +1,58 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_release_temporary_hold] + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class ReleaseTemporaryHold { + public static void releaseTemporaryHold(String projectId, String bucketName, String objectName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + + BlobId blobId = BlobId.of(bucketName, objectName); + Blob blob = storage.get(blobId); + if (blob == null) { + System.out.println("The object " + objectName + " was not found in " + bucketName); + return; + } + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request to upload returns a 412 error if + // the object's generation number does not match your precondition. 
+ Storage.BlobTargetOption precondition = Storage.BlobTargetOption.generationMatch(); + + blob.toBuilder().setTemporaryHold(false).build().update(precondition); + + System.out.println("Temporary hold was released for " + objectName); + } +} +// [END storage_release_temporary_hold] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/RemoveBlobOwner.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/RemoveBlobOwner.java new file mode 100644 index 000000000000..ecbb0e41dfb9 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/RemoveBlobOwner.java @@ -0,0 +1,61 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_remove_file_owner] + +import com.google.cloud.storage.Acl.User; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class RemoveBlobOwner { + + public static void removeBlobOwner( + String projectId, String bucketName, String userEmail, String blobName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // Email of the user you wish to remove as a file owner + // String userEmail = "someuser@domain.com" + + // The name of the blob/file that you wish to modify permissions on + // String blobName = "your-blob-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Blob blob = storage.get(BlobId.of(bucketName, blobName)); + User ownerToRemove = new User(userEmail); + + boolean success = blob.deleteAcl(ownerToRemove); + if (success) { + System.out.println( + "Removed user " + + userEmail + + " as an owner on file " + + blobName + + " in bucket " + + bucketName); + } else { + System.out.println("User " + userEmail + " was not found"); + } + } +} +// [END storage_remove_file_owner] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/RestoreSoftDeletedObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/RestoreSoftDeletedObject.java new file mode 100644 index 000000000000..86f09a39ecc0 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/RestoreSoftDeletedObject.java @@ -0,0 +1,43 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_restore_object] +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class RestoreSoftDeletedObject { + public static void restoreSoftDeletedObject( + String projectId, String bucketName, String objectName, long generation) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The name of your GCS object + // String objectName = "your-object-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Blob blob = storage.restore(BlobId.of(bucketName, objectName, generation)); + + System.out.println("Restored previously soft-deleted object " + blob.getName()); + } +} +// [END storage_restore_object] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/RotateObjectEncryptionKey.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/RotateObjectEncryptionKey.java new file mode 100644 index 000000000000..3fbd8c5e56cb --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/RotateObjectEncryptionKey.java @@ -0,0 +1,78 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_rotate_encryption_key] +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class RotateObjectEncryptionKey { + public static void rotateObjectEncryptionKey( + String projectId, + String bucketName, + String objectName, + String oldEncryptionKey, + String newEncryptionKey) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The Base64 encoded AES-256 encryption key originally used to encrypt the object. See the + // documentation + // on Customer-Supplied Encryption keys for more info: + // https://cloud.google.com/storage/docs/encryption/using-customer-supplied-keys + // String oldEncryptionKey = "TIbv/fjexq+VmtXzAlc63J4z5kFmWJ6NdAPQulQBT7g=" + + // The new encryption key to use + // String newEncryptionKey = "0mMWhFvQOdS4AmxRpo8SJxXn5MjFhbz7DkKBUdUIef8=" + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + BlobId blobId = BlobId.of(bucketName, objectName); + Blob blob = storage.get(blobId); + if (blob == null) { + System.out.println("The object " + objectName + " wasn't found in " + bucketName); + return; + } + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. 
The request to upload returns a 412 error if + // the object's generation number does not match your precondition. + Storage.BlobSourceOption precondition = + Storage.BlobSourceOption.generationMatch(blob.getGeneration()); + + // You can't change an object's encryption key directly, the only way is to overwrite the object + Storage.CopyRequest request = + Storage.CopyRequest.newBuilder() + .setSource(blobId) + .setSourceOptions( + Storage.BlobSourceOption.decryptionKey(oldEncryptionKey), precondition) + .setTarget(blobId, Storage.BlobTargetOption.encryptionKey(newEncryptionKey)) + .build(); + storage.copy(request); + + System.out.println( + "Rotated encryption key for object " + objectName + "in bucket " + bucketName); + } +} +// [END storage_rotate_encryption_key] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetEventBasedHold.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetEventBasedHold.java new file mode 100644 index 000000000000..c0f8f088d4eb --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetEventBasedHold.java @@ -0,0 +1,57 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_set_event_based_hold] + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class SetEventBasedHold { + public static void setEventBasedHold(String projectId, String bucketName, String objectName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + BlobId blobId = BlobId.of(bucketName, objectName); + Blob blob = storage.get(blobId); + if (blob == null) { + System.out.println("The object " + objectName + " was not found in " + bucketName); + return; + } + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request to upload returns a 412 error if + // the object's generation number does not match your precondition. 
+ Storage.BlobTargetOption precondition = Storage.BlobTargetOption.generationMatch(); + + blob.toBuilder().setEventBasedHold(true).build().update(precondition); + + System.out.println("Event-based hold was set for " + objectName); + } +} +// [END storage_set_event_based_hold] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetObjectContexts.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetObjectContexts.java new file mode 100644 index 000000000000..169399a710db --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetObjectContexts.java @@ -0,0 +1,91 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_set_object_contexts] + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BlobInfo.ObjectContexts; +import com.google.cloud.storage.BlobInfo.ObjectCustomContextPayload; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.common.collect.Maps; +import java.util.Map; + +public class SetObjectContexts { + public static void setObjectContexts( + String projectId, String bucketName, String objectName, String key, String value) + throws Exception { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The context key-value you want to add + // String key = "your-context-key"; + // String value = "your-context-value"; + + try (Storage storage = + StorageOptions.newBuilder().setProjectId(projectId).build().getService()) { + BlobId blobId = BlobId.of(bucketName, objectName); + Blob blob = storage.get(blobId); + if (blob == null) { + System.out.println("The object " + objectName + " was not found in " + bucketName); + return; + } + + // Recommended: Set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request to update returns a 412 error if + // the object's generation number does not match your precondition. + Storage.BlobTargetOption precondition = Storage.BlobTargetOption.generationMatch(); + + // This section demonstrates how to upsert, delete all, and delete a specific context. 
+ + // To upsert a context (if the key already exists, its value is replaced; + // otherwise, a new key-value pair is added): + ObjectCustomContextPayload payload = + ObjectCustomContextPayload.newBuilder().setValue(value).build(); + Map custom = Maps.newHashMap(); + custom.put(key, payload); + ObjectContexts contexts = ObjectContexts.newBuilder().setCustom(custom).build(); + + /* + * To delete all existing contexts: + * ObjectContexts contexts = ObjectContexts.newBuilder().setCustom(null).build(); + */ + + /* + * To delete a specific key from the context: + * Map custom = Maps.newHashMap(); + * custom.put(key, null); + * ObjectContexts contexts = ObjectContexts.newBuilder().setCustom(custom).build(); + */ + BlobInfo pendingUpdate = blob.toBuilder().setContexts(contexts).build(); + storage.update(pendingUpdate, precondition); + + System.out.println( + "Updated custom contexts for object " + objectName + " in bucket " + bucketName); + } + } +} +// [END storage_set_object_contexts] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetObjectMetadata.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetObjectMetadata.java new file mode 100644 index 000000000000..ae05011e8ea5 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetObjectMetadata.java @@ -0,0 +1,64 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_set_metadata] + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.util.HashMap; +import java.util.Map; + +public class SetObjectMetadata { + public static void setObjectMetadata(String projectId, String bucketName, String objectName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + Map newMetadata = new HashMap<>(); + newMetadata.put("keyToAddOrUpdate", "value"); + BlobId blobId = BlobId.of(bucketName, objectName); + Blob blob = storage.get(blobId); + if (blob == null) { + System.out.println("The object " + objectName + " was not found in " + bucketName); + return; + } + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request to upload returns a 412 error if + // the object's generation number does not match your precondition. + Storage.BlobTargetOption precondition = Storage.BlobTargetOption.generationMatch(); + + // Does an upsert operation, if the key already exists it's replaced by the new value, otherwise + // it's added. 
+ BlobInfo pendingUpdate = blob.toBuilder().setMetadata(newMetadata).build(); + storage.update(pendingUpdate, precondition); + + System.out.println( + "Updated custom metadata for object " + objectName + " in bucket " + bucketName); + } +} +// [END storage_set_metadata] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetObjectRetentionPolicy.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetObjectRetentionPolicy.java new file mode 100644 index 000000000000..6903445af0ec --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetObjectRetentionPolicy.java @@ -0,0 +1,75 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_set_object_retention_policy] + +import static java.time.OffsetDateTime.now; + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo.Retention; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class SetObjectRetentionPolicy { + public static void setObjectRetentionPolicy( + String projectId, String bucketName, String objectName) throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket that has object retention enabled + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + BlobId blobId = BlobId.of(bucketName, objectName); + Blob blob = storage.get(blobId); + if (blob == null) { + System.out.println("The object " + objectName + " was not found in " + bucketName); + return; + } + + Blob updated = + blob.toBuilder() + .setRetention( + Retention.newBuilder() + .setMode(Retention.Mode.UNLOCKED) + .setRetainUntilTime(now().plusDays(10)) + .build()) + .build() + .update(); + + System.out.println("Retention policy for object " + objectName + " was set to:"); + System.out.println(updated.getRetention().toString()); + + // To modify an existing policy on an Unlocked object, pass in the override parameter + blob.toBuilder() + .setRetention( + updated.getRetention().toBuilder().setRetainUntilTime(now().plusDays(9)).build()) + .build() + .update(Storage.BlobTargetOption.overrideUnlockedRetention(true)); + + System.out.println("Retention policy for object " + objectName + " was updated to:"); + System.out.println(storage.get(blobId).getRetention().toString()); + } +} + +// [END 
storage_set_object_retention_policy] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetTemporaryHold.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetTemporaryHold.java new file mode 100644 index 000000000000..c22de2f39091 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/SetTemporaryHold.java @@ -0,0 +1,57 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_set_temporary_hold] + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; + +public class SetTemporaryHold { + public static void setTemporaryHold(String projectId, String bucketName, String objectName) + throws StorageException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + BlobId blobId = BlobId.of(bucketName, objectName); + Blob blob = storage.get(blobId); + if (blob == null) { + System.out.println("The object " + objectName + " was not found in " + bucketName); + return; + } + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request to upload returns a 412 error if + // the object's generation number does not match your precondition. 
+ Storage.BlobTargetOption precondition = Storage.BlobTargetOption.generationMatch(); + + blob.toBuilder().setTemporaryHold(true).build().update(precondition); + + System.out.println("Temporary hold was set for " + objectName); + } +} +// [END storage_set_temporary_hold] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/StreamObjectDownload.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/StreamObjectDownload.java new file mode 100644 index 000000000000..4dd38bafd174 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/StreamObjectDownload.java @@ -0,0 +1,68 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_stream_file_download] + +import com.google.cloud.ReadChannel; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.common.io.ByteStreams; +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; + +public class StreamObjectDownload { + + public static void streamObjectDownload( + String projectId, String bucketName, String objectName, String targetFile) + throws IOException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The path to the file to download the object to + // String targetFile = "path/to/your/file"; + Path targetFilePath = Paths.get(targetFile); + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + try (ReadChannel reader = storage.reader(BlobId.of(bucketName, objectName)); + FileChannel targetFileChannel = + FileChannel.open(targetFilePath, StandardOpenOption.WRITE)) { + + ByteStreams.copy(reader, targetFileChannel); + + System.out.println( + "Downloaded object " + + objectName + + " from bucket " + + bucketName + + " to " + + targetFile + + " using a ReadChannel."); + } + } +} +// [END storage_stream_file_download] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/StreamObjectUpload.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/StreamObjectUpload.java new file mode 100644 index 000000000000..b5b82d82c59c --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/StreamObjectUpload.java @@ -0,0 +1,58 @@ +/* + * Copyright 2022 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_stream_file_upload] + +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; + +public class StreamObjectUpload { + + public static void streamObjectUpload( + String projectId, String bucketName, String objectName, String contents) throws IOException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The string of contents you wish to upload + // String contents = "Hello world!"; + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + BlobId blobId = BlobId.of(bucketName, objectName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build(); + byte[] content = contents.getBytes(StandardCharsets.UTF_8); + try (WriteChannel writer = storage.writer(blobInfo)) { + writer.write(ByteBuffer.wrap(content)); + System.out.println( + "Wrote to " + objectName + " in bucket " + bucketName + " using a WriteChannel."); + } + } +} + +// [END storage_stream_file_upload] diff --git 
a/java-storage/samples/snippets/src/main/java/com/example/storage/object/UploadEncryptedObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/UploadEncryptedObject.java new file mode 100644 index 000000000000..e51067c2643a --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/UploadEncryptedObject.java @@ -0,0 +1,85 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_upload_encrypted_file] + +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; + +public class UploadEncryptedObject { + public static void uploadEncryptedObject( + String projectId, String bucketName, String objectName, String filePath, String encryptionKey) + throws IOException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The path to your file to upload + // String filePath = "path/to/your/file" + + // The key to encrypt the object with + // String encryptionKey = "TIbv/fjexq+VmtXzAlc63J4z5kFmWJ6NdAPQulQBT7g="; + + Storage 
storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + BlobId blobId = BlobId.of(bucketName, objectName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build(); + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request returns a 412 error if the + // preconditions are not met. + Storage.BlobTargetOption precondition; + if (storage.get(bucketName, objectName) == null) { + // For a target object that does not yet exist, set the DoesNotExist precondition. + // This will cause the request to fail if the object is created before the request runs. + precondition = Storage.BlobTargetOption.doesNotExist(); + } else { + // If the destination already exists in your bucket, instead set a generation-match + // precondition. This will cause the request to fail if the existing object's generation + // changes before the request runs. + precondition = + Storage.BlobTargetOption.generationMatch( + storage.get(bucketName, objectName).getGeneration()); + } + + storage.create( + blobInfo, + Files.readAllBytes(Paths.get(filePath)), + Storage.BlobTargetOption.encryptionKey(encryptionKey), + precondition); + + System.out.println( + "File " + + filePath + + " uploaded to bucket " + + bucketName + + " as " + + objectName + + " with supplied encryption key"); + } +} +// [END storage_upload_encrypted_file] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/UploadKmsEncryptedObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/UploadKmsEncryptedObject.java new file mode 100644 index 000000000000..3876971ca808 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/UploadKmsEncryptedObject.java @@ -0,0 +1,77 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +// [START storage_upload_with_kms_key] + +import static java.nio.charset.StandardCharsets.UTF_8; + +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + +public class UploadKmsEncryptedObject { + public static void uploadKmsEncryptedObject( + String projectId, String bucketName, String objectName, String kmsKeyName) { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The name of the KMS key to encrypt with + // String kmsKeyName = "projects/my-project/locations/us/keyRings/my_key_ring/cryptoKeys/my_key" + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + byte[] data = "Hello, World!".getBytes(UTF_8); + + BlobId blobId = BlobId.of(bucketName, objectName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build(); + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request returns a 412 error if the + // preconditions are not met. + Storage.BlobTargetOption precondition; + if (storage.get(bucketName, objectName) == null) { + // For a target object that does not yet exist, set the DoesNotExist precondition. 
+ // This will cause the request to fail if the object is created before the request runs. + precondition = Storage.BlobTargetOption.doesNotExist(); + } else { + // If the destination already exists in your bucket, instead set a generation-match + // precondition. This will cause the request to fail if the existing object's generation + // changes before the request runs. + precondition = + Storage.BlobTargetOption.generationMatch( + storage.get(bucketName, objectName).getGeneration()); + } + + storage.create(blobInfo, data, Storage.BlobTargetOption.kmsKeyName(kmsKeyName), precondition); + + System.out.println( + "Uploaded object " + + objectName + + " in bucket " + + bucketName + + " encrypted with " + + kmsKeyName); + } +} +// [END storage_upload_with_kms_key] diff --git a/java-storage/samples/snippets/src/main/java/com/example/storage/object/UploadObject.java b/java-storage/samples/snippets/src/main/java/com/example/storage/object/UploadObject.java new file mode 100644 index 000000000000..6fa796d62f36 --- /dev/null +++ b/java-storage/samples/snippets/src/main/java/com/example/storage/object/UploadObject.java @@ -0,0 +1,69 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +// [START storage_upload_file] + +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import java.io.IOException; +import java.nio.file.Paths; + +public class UploadObject { + public static void uploadObject( + String projectId, String bucketName, String objectName, String filePath) throws IOException { + // The ID of your GCP project + // String projectId = "your-project-id"; + + // The ID of your GCS bucket + // String bucketName = "your-unique-bucket-name"; + + // The ID of your GCS object + // String objectName = "your-object-name"; + + // The path to your file to upload + // String filePath = "path/to/your/file" + + Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService(); + BlobId blobId = BlobId.of(bucketName, objectName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build(); + + // Optional: set a generation-match precondition to avoid potential race + // conditions and data corruptions. The request returns a 412 error if the + // preconditions are not met. + Storage.BlobWriteOption precondition; + if (storage.get(bucketName, objectName) == null) { + // For a target object that does not yet exist, set the DoesNotExist precondition. + // This will cause the request to fail if the object is created before the request runs. + precondition = Storage.BlobWriteOption.doesNotExist(); + } else { + // If the destination already exists in your bucket, instead set a generation-match + // precondition. This will cause the request to fail if the existing object's generation + // changes before the request runs. 
/*
 * Copyright 2021 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage.object;

// [START storage_file_upload_from_memory]

import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class UploadObjectFromMemory {

  /**
   * Uploads an in-memory string as a Cloud Storage object, using a request precondition to avoid
   * clobbering concurrent writes.
   *
   * @param projectId the ID of your GCP project, e.g. "your-project-id"
   * @param bucketName the ID of your GCS bucket, e.g. "your-unique-bucket-name"
   * @param objectName the ID of your GCS object, e.g. "your-object-name"
   * @param contents the string contents to upload, e.g. "Hello world!"
   * @throws IOException if the upload fails
   */
  public static void uploadObjectFromMemory(
      String projectId, String bucketName, String objectName, String contents) throws IOException {
    Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();
    BlobId blobId = BlobId.of(bucketName, objectName);
    BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build();
    // Encode explicitly as UTF-8 so the uploaded bytes do not depend on the platform charset.
    byte[] content = contents.getBytes(StandardCharsets.UTF_8);

    // Optional: set a generation-match precondition to enable automatic retries and avoid
    // potential race conditions and data corruptions. The request returns a 412 error if the
    // preconditions are not met.
    //
    // Fetch the current object metadata exactly once and reuse it for both the
    // existence check and the generation read. The original code issued a second
    // storage.get() inside the else branch, which cost an extra RPC and could
    // throw an NPE if the object was deleted between the two calls.
    BlobInfo existing = storage.get(bucketName, objectName);

    Storage.BlobTargetOption precondition;
    if (existing == null) {
      // For a target object that does not yet exist, set the DoesNotExist precondition.
      // This will cause the request to fail if the object is created before the request runs.
      precondition = Storage.BlobTargetOption.doesNotExist();
    } else {
      // If the destination already exists in your bucket, instead set a generation-match
      // precondition. This will cause the request to fail if the existing object's generation
      // changes before the request runs.
      precondition = Storage.BlobTargetOption.generationMatch(existing.getGeneration());
    }
    storage.create(blobInfo, content, precondition);

    System.out.println(
        "Object "
            + objectName
            + " uploaded to bucket "
            + bucketName
            + " with contents "
            + contents);
  }
}
// [END storage_file_upload_from_memory]
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.example.storage.transfermanager;

// [START storage_transfer_manager_download_chunks_concurrently]
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.transfermanager.DownloadResult;
import com.google.cloud.storage.transfermanager.ParallelDownloadConfig;
import com.google.cloud.storage.transfermanager.TransferManager;
import com.google.cloud.storage.transfermanager.TransferManagerConfig;
import java.nio.file.Path;
import java.util.List;

class AllowDivideAndConquerDownload {

  /**
   * Downloads the given blobs with Transfer Manager, allowing large objects to be split into
   * chunks and downloaded concurrently ("divide and conquer").
   *
   * @param blobs the objects to download
   * @param bucketName the bucket containing the objects
   * @param destinationDirectory local directory the objects are written into
   */
  public static void divideAndConquerDownloadAllowed(
      List<BlobInfo> blobs, String bucketName, Path destinationDirectory) {
    // Opt in to chunked (divide-and-conquer) downloads for large objects.
    TransferManager transferManager =
        TransferManagerConfig.newBuilder()
            .setAllowDivideAndConquerDownload(true)
            .build()
            .getService();
    ParallelDownloadConfig parallelDownloadConfig =
        ParallelDownloadConfig.newBuilder()
            .setBucketName(bucketName)
            .setDownloadDirectory(destinationDirectory)
            .build();
    // Raw `List` types replaced with the proper generic types; behavior is unchanged.
    List<DownloadResult> results =
        transferManager.downloadBlobs(blobs, parallelDownloadConfig).getDownloadResults();

    for (DownloadResult result : results) {
      System.out.println(
          "Download of "
              + result.getInput().getName()
              + " completed with status "
              + result.getStatus());
    }
  }
}
// [END storage_transfer_manager_download_chunks_concurrently]
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.example.storage.transfermanager;

// [START storage_transfer_manager_upload_chunks_concurrently]
import com.google.cloud.storage.transfermanager.ParallelUploadConfig;
import com.google.cloud.storage.transfermanager.TransferManager;
import com.google.cloud.storage.transfermanager.TransferManagerConfig;
import com.google.cloud.storage.transfermanager.UploadResult;
import java.io.IOException;
import java.nio.file.Path;
import java.util.List;

class AllowParallelCompositeUpload {

  /**
   * Uploads the given files with Transfer Manager, allowing large files to be uploaded in chunks
   * and stitched together via parallel composite uploads.
   *
   * @param bucketName destination bucket
   * @param files local files to upload
   * @throws IOException if a file cannot be read or an upload fails
   */
  public static void parallelCompositeUploadAllowed(String bucketName, List<Path> files)
      throws IOException {
    // Opt in to parallel composite uploads for large files.
    TransferManager transferManager =
        TransferManagerConfig.newBuilder()
            .setAllowParallelCompositeUpload(true)
            .build()
            .getService();
    ParallelUploadConfig parallelUploadConfig =
        ParallelUploadConfig.newBuilder().setBucketName(bucketName).build();
    // Raw `List` types replaced with the proper generic types; behavior is unchanged.
    List<UploadResult> results =
        transferManager.uploadFiles(files, parallelUploadConfig).getUploadResults();
    for (UploadResult result : results) {
      System.out.println(
          "Upload for "
              + result.getInput().getName()
              + " completed with status "
              + result.getStatus());
    }
  }
}
// [END storage_transfer_manager_upload_chunks_concurrently]
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage.transfermanager;

// [START storage_transfer_manager_download_bucket]
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import com.google.cloud.storage.transfermanager.DownloadResult;
import com.google.cloud.storage.transfermanager.ParallelDownloadConfig;
import com.google.cloud.storage.transfermanager.TransferManager;
import com.google.cloud.storage.transfermanager.TransferManagerConfig;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Collectors;

class DownloadBucket {

  /**
   * Lists every object in the bucket and downloads them all with Transfer Manager.
   *
   * @param projectId the ID of your GCP project
   * @param bucketName the bucket whose contents are downloaded
   * @param destinationDirectory local directory the objects are written into
   */
  public static void downloadBucketContents(
      String projectId, String bucketName, Path destinationDirectory) {
    Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();
    // Enumerate all objects in the bucket up front; streamAll() pages through the listing.
    // Raw `List` types replaced with the proper generic types; behavior is unchanged.
    List<BlobInfo> blobs =
        storage
            .list(bucketName)
            .streamAll()
            .map(blob -> blob.asBlobInfo())
            .collect(Collectors.toList());
    TransferManager transferManager = TransferManagerConfig.newBuilder().build().getService();
    ParallelDownloadConfig parallelDownloadConfig =
        ParallelDownloadConfig.newBuilder()
            .setBucketName(bucketName)
            .setDownloadDirectory(destinationDirectory)
            .build();

    List<DownloadResult> results =
        transferManager.downloadBlobs(blobs, parallelDownloadConfig).getDownloadResults();

    for (DownloadResult result : results) {
      System.out.println(
          "Download of "
              + result.getInput().getName()
              + " completed with status "
              + result.getStatus());
    }
  }
}
// [END storage_transfer_manager_download_bucket]
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage.transfermanager;

// [START storage_transfer_manager_download_many]
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.transfermanager.DownloadResult;
import com.google.cloud.storage.transfermanager.ParallelDownloadConfig;
import com.google.cloud.storage.transfermanager.TransferManager;
import com.google.cloud.storage.transfermanager.TransferManagerConfig;
import java.nio.file.Path;
import java.util.List;

class DownloadMany {

  /**
   * Downloads a caller-supplied set of blobs with Transfer Manager.
   *
   * @param bucketName the bucket containing the objects
   * @param blobs the objects to download
   * @param destinationDirectory local directory the objects are written into
   * @throws Exception if a download fails or closing the TransferManager fails
   */
  public static void downloadManyBlobs(
      String bucketName, List<BlobInfo> blobs, Path destinationDirectory) throws Exception {

    // try-with-resources ensures the TransferManager's worker pool is shut down.
    try (TransferManager transferManager =
        TransferManagerConfig.newBuilder().build().getService()) {
      ParallelDownloadConfig parallelDownloadConfig =
          ParallelDownloadConfig.newBuilder()
              .setBucketName(bucketName)
              .setDownloadDirectory(destinationDirectory)
              .build();

      // Raw `List` types replaced with the proper generic types; behavior is unchanged.
      List<DownloadResult> results =
          transferManager.downloadBlobs(blobs, parallelDownloadConfig).getDownloadResults();

      for (DownloadResult result : results) {
        System.out.println(
            "Download of "
                + result.getInput().getName()
                + " completed with status "
                + result.getStatus());
      }
    }
  }
}
// [END storage_transfer_manager_download_many]
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage.transfermanager;

// [START storage_transfer_manager_upload_directory]
import com.google.cloud.storage.transfermanager.ParallelUploadConfig;
import com.google.cloud.storage.transfermanager.TransferManager;
import com.google.cloud.storage.transfermanager.TransferManagerConfig;
import com.google.cloud.storage.transfermanager.UploadResult;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Stream;

class UploadDirectory {

  /**
   * Recursively uploads every regular file under {@code sourceDirectory} with Transfer Manager.
   *
   * @param bucketName destination bucket
   * @param sourceDirectory local directory whose files are uploaded
   * @throws IOException if the directory cannot be walked or an upload fails
   */
  public static void uploadDirectoryContents(String bucketName, Path sourceDirectory)
      throws IOException {
    TransferManager transferManager = TransferManagerConfig.newBuilder().build().getService();
    ParallelUploadConfig parallelUploadConfig =
        ParallelUploadConfig.newBuilder().setBucketName(bucketName).build();

    // Create a list to store the file paths.
    // Raw `List`/`Stream` types replaced with the proper generic types; behavior is unchanged.
    List<Path> filePaths = new ArrayList<>();
    // Get all files in the directory.
    // try-with-resources to ensure pathStream is closed (Files.walk holds open directory handles).
    try (Stream<Path> pathStream = Files.walk(sourceDirectory)) {
      pathStream.filter(Files::isRegularFile).forEach(filePaths::add);
    }
    List<UploadResult> results =
        transferManager.uploadFiles(filePaths, parallelUploadConfig).getUploadResults();
    for (UploadResult result : results) {
      System.out.println(
          "Upload for "
              + result.getInput().getName()
              + " completed with status "
              + result.getStatus());
    }
  }
}
// [END storage_transfer_manager_upload_directory]
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage.transfermanager;

// [START storage_transfer_manager_upload_many]
import com.google.cloud.storage.transfermanager.ParallelUploadConfig;
import com.google.cloud.storage.transfermanager.TransferManager;
import com.google.cloud.storage.transfermanager.TransferManagerConfig;
import com.google.cloud.storage.transfermanager.UploadResult;
import java.io.IOException;
import java.nio.file.Path;
import java.util.List;

class UploadMany {

  /**
   * Uploads a caller-supplied set of local files with Transfer Manager.
   *
   * @param bucketName destination bucket
   * @param files local files to upload
   * @throws IOException if a file cannot be read or an upload fails
   */
  public static void uploadManyFiles(String bucketName, List<Path> files) throws IOException {
    TransferManager transferManager = TransferManagerConfig.newBuilder().build().getService();
    ParallelUploadConfig parallelUploadConfig =
        ParallelUploadConfig.newBuilder().setBucketName(bucketName).build();
    // Raw `List` types replaced with the proper generic types; behavior is unchanged.
    List<UploadResult> results =
        transferManager.uploadFiles(files, parallelUploadConfig).getUploadResults();
    for (UploadResult result : results) {
      System.out.println(
          "Upload for "
              + result.getInput().getName()
              + " completed with status "
              + result.getStatus());
    }
  }
}
// [END storage_transfer_manager_upload_many]
/*
 * Copyright 2021 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage;

import static com.google.common.truth.Truth.assertThat;

import com.google.cloud.storage.Blob;
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.BucketInfo;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import com.google.cloud.storage.it.BucketCleaner;
import com.google.cloud.storage.testing.RemoteStorageHelper;
import com.google.cloud.testing.junit4.StdOutCaptureRule;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;

/**
 * Integration test for the ConfigureRetries sample: creates a real bucket and blob, runs the
 * sample's delete, and checks its console output. Requires live GCS credentials.
 */
public final class ConfigureRetriesTest {
  // Captures everything the sample prints to stdout so the test can assert on it.
  @Rule public StdOutCaptureRule stdOut = new StdOutCaptureRule();

  private String bucketName;
  private Storage storage;
  private String blobName;

  private Blob blob;

  @Before
  public void setUp() {
    blobName = "blob";
    // Randomized bucket name avoids collisions between concurrent test runs.
    bucketName = RemoteStorageHelper.generateBucketName();
    storage = StorageOptions.getDefaultInstance().getService();
    storage.create(BucketInfo.of(bucketName));
    // Pre-create the blob the sample is expected to delete.
    blob = storage.create(BlobInfo.newBuilder(bucketName, blobName).build());
  }

  @After
  public void tearDown() throws Exception {
    // try-with-resources closes the Storage client after the bucket cleanup finishes.
    try (Storage ignore = storage) {
      BucketCleaner.doCleanup(bucketName, storage);
    }
  }

  @Test
  public void testConfigureRetries() {
    ConfigureRetries.deleteBlob(bucketName, blobName);
    assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("Deletion");
    // "successfully" would also match inside "unsuccessfully", so the third
    // assertion explicitly rules the failure message out.
    assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("successfully");
    assertThat(stdOut.getCapturedOutputAsUtf8String()).doesNotContain("unsuccessfully");
  }
}
/*
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage;

import com.google.cloud.storage.Acl.User;

/**
 * Test-environment configuration read once from environment variables at class load. Any variable
 * that is not set in the environment will be null here; tests that depend on one must guard or
 * assume the CI job exports it.
 */
public final class Env {

  // Service-account email used by integration tests (e.g. for ACL grants).
  public static final String IT_SERVICE_ACCOUNT_EMAIL = System.getenv("IT_SERVICE_ACCOUNT_EMAIL");
  // ACL User entity wrapping the service-account email, for convenience in ACL tests.
  // NOTE(review): constructed even when the email env var is unset (null) — callers
  // should not rely on it in that case.
  public static final User IT_SERVICE_ACCOUNT_USER = new User(IT_SERVICE_ACCOUNT_EMAIL);
  // Project ID the tests run against.
  public static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT");
  // Numeric project number; distinct from the project ID string.
  public static final String GOOGLE_CLOUD_PROJECT_NUMBER =
      System.getenv("GOOGLE_CLOUD_PROJECT_NUMBER");
  // CI job type (presumably set by the Kokoro job config — verify against the CI scripts).
  public static final String JOB_TYPE = System.getenv("JOB_TYPE");

  // Utility holder: not instantiable.
  private Env() {}
}
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.example.storage.bucket.AddBucketIamConditionalBinding; +import com.example.storage.bucket.AddBucketIamMember; +import com.example.storage.bucket.AddBucketLabel; +import com.example.storage.bucket.ChangeDefaultStorageClass; +import com.example.storage.bucket.ConfigureBucketCors; +import com.example.storage.bucket.CreateBucket; +import com.example.storage.bucket.CreateBucketWithObjectRetention; +import com.example.storage.bucket.CreateBucketWithStorageClassAndLocation; +import com.example.storage.bucket.CreateBucketWithTurboReplication; +import com.example.storage.bucket.DeleteBucket; +import com.example.storage.bucket.DisableBucketVersioning; +import com.example.storage.bucket.DisableDefaultEventBasedHold; +import com.example.storage.bucket.DisableLifecycleManagement; +import com.example.storage.bucket.DisableRequesterPays; +import com.example.storage.bucket.DisableSoftDelete; +import com.example.storage.bucket.DisableUniformBucketLevelAccess; +import com.example.storage.bucket.EnableBucketVersioning; +import com.example.storage.bucket.EnableDefaultEventBasedHold; +import com.example.storage.bucket.EnableLifecycleManagement; +import com.example.storage.bucket.EnableRequesterPays; +import com.example.storage.bucket.EnableUniformBucketLevelAccess; +import com.example.storage.bucket.GetBucketMetadata; +import com.example.storage.bucket.GetBucketRpo; +import com.example.storage.bucket.GetDefaultEventBasedHold; +import 
com.example.storage.bucket.GetPublicAccessPrevention; +import com.example.storage.bucket.GetRetentionPolicy; +import com.example.storage.bucket.GetUniformBucketLevelAccess; +import com.example.storage.bucket.ListBucketIamMembers; +import com.example.storage.bucket.ListBuckets; +import com.example.storage.bucket.LockRetentionPolicy; +import com.example.storage.bucket.MakeBucketPublic; +import com.example.storage.bucket.RemoveBucketCors; +import com.example.storage.bucket.RemoveBucketDefaultKmsKey; +import com.example.storage.bucket.RemoveBucketIamConditionalBinding; +import com.example.storage.bucket.RemoveBucketIamMember; +import com.example.storage.bucket.RemoveBucketLabel; +import com.example.storage.bucket.RemoveRetentionPolicy; +import com.example.storage.bucket.SetAsyncTurboRpo; +import com.example.storage.bucket.SetBucketDefaultKmsKey; +import com.example.storage.bucket.SetBucketWebsiteInfo; +import com.example.storage.bucket.SetClientEndpoint; +import com.example.storage.bucket.SetDefaultRpo; +import com.example.storage.bucket.SetPublicAccessPreventionEnforced; +import com.example.storage.bucket.SetPublicAccessPreventionInherited; +import com.example.storage.bucket.SetRetentionPolicy; +import com.example.storage.bucket.SetSoftDeletePolicy; +import com.example.storage.object.DownloadRequesterPaysObject; +import com.example.storage.object.ReleaseEventBasedHold; +import com.example.storage.object.ReleaseTemporaryHold; +import com.example.storage.object.SetEventBasedHold; +import com.example.storage.object.SetTemporaryHold; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.Identity; +import com.google.cloud.ServiceOptions; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.PublicAccessPrevention; +import com.google.cloud.storage.Cors; +import com.google.cloud.storage.HttpMethod; 
+import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.StorageRoles; +import com.google.cloud.storage.it.BucketCleaner; +import com.google.cloud.storage.testing.RemoteStorageHelper; +import com.google.cloud.testing.junit4.StdOutCaptureRule; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.Map; +import java.util.logging.Logger; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.Timeout; +import org.threeten.bp.Duration; + +public class ITBucketSnippets { + + private static final Logger log = Logger.getLogger(ITBucketSnippets.class.getName()); + private static final String BUCKET = RemoteStorageHelper.generateBucketName(); + private static final String PROJECT_ID = Env.GOOGLE_CLOUD_PROJECT; + private static final String KMS_KEY_NAME = + "projects/cloud-java-ci-sample/locations/us/keyRings/" + + "gcs_test_kms_key_ring/cryptoKeys/gcs_kms_key_one"; + private static final RetrySettings RETRY_SETTINGS = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofSeconds(2)) + .setRetryDelayMultiplier(1.75) + .setTotalTimeout(Duration.ofSeconds(90)) + .setMaxRetryDelay(Duration.ofSeconds(10)) + .build(); + + private static Storage storage; + + @Rule public final StdOutCaptureRule stdOutCaptureRule = new StdOutCaptureRule(); + + @Rule public ExpectedException thrown = ExpectedException.none(); + + @Rule public Timeout globalTimeout = Timeout.seconds(300); + + @BeforeClass + public static void beforeClass() { + RemoteStorageHelper helper = RemoteStorageHelper.create(); + storage = + helper.getOptions().toBuilder() + .setRetrySettings( + helper.getOptions().getRetrySettings().toBuilder() + 
.setRetryDelayMultiplier(3.0) + .build()) + .build() + .getService(); + storage.create(BucketInfo.of(BUCKET)); + } + + @AfterClass + public static void afterClass() throws Exception { + try (Storage ignore = storage) { + BucketCleaner.doCleanup(BUCKET, storage); + } + } + + @After + public void after() throws Exception { + // This avoids 429 errors + Thread.sleep(3000); + } + + @Test + public void testAddBucketLabel() { + int oldSize = storage.get(BUCKET).getLabels().size(); + AddBucketLabel.addBucketLabel(PROJECT_ID, BUCKET, "key", "value"); + assertEquals(oldSize + 1, storage.get(BUCKET).getLabels().size()); + } + + @Test + public void testChangeDefaultStorageClass() throws Throwable { + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> assertEquals("STANDARD", storage.get(BUCKET).getStorageClass().name())); + ChangeDefaultStorageClass.changeDefaultStorageClass(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> assertEquals("COLDLINE", storage.get(BUCKET).getStorageClass().name())); + } + + @Test + public void testCreateBucket() { + String newBucket = RemoteStorageHelper.generateBucketName(); + CreateBucket.createBucket(PROJECT_ID, newBucket); + try { + Bucket remoteBucket = storage.get(newBucket); + assertNotNull(remoteBucket); + } finally { + storage.delete(newBucket); + } + } + + @Test + public void testCreateBucketWithStorageClassAndLocation() { + String newBucket = RemoteStorageHelper.generateBucketName(); + CreateBucketWithStorageClassAndLocation.createBucketWithStorageClassAndLocation( + PROJECT_ID, newBucket); + try { + Bucket remoteBucket = storage.get(newBucket); + assertNotNull(remoteBucket); + assertEquals("COLDLINE", remoteBucket.getStorageClass().name()); + assertEquals("ASIA", remoteBucket.getLocation()); + } finally { + storage.delete(newBucket); + } + } + + @Test + public void testDeleteBucket() { + String newBucket = RemoteStorageHelper.generateBucketName(); + storage.create(BucketInfo.newBuilder(newBucket).build()); + 
assertNotNull(storage.get(newBucket)); + try { + DeleteBucket.deleteBucket(PROJECT_ID, newBucket); + assertNull(storage.get(newBucket)); + } finally { + storage.delete(newBucket); + } + } + + @Test + public void testGetBucketMetadata() { + Bucket bucket = + storage.get(BUCKET, Storage.BucketGetOption.fields(Storage.BucketField.values())); + bucket = + bucket.toBuilder() + .setLabels(ImmutableMap.of("k", "v")) + .setLifecycleRules( + ImmutableList.of( + new BucketInfo.LifecycleRule( + BucketInfo.LifecycleRule.LifecycleAction.newDeleteAction(), + BucketInfo.LifecycleRule.LifecycleCondition.newBuilder() + .setAge(5) + .build()))) + .build() + .update(); + + GetBucketMetadata.getBucketMetadata(PROJECT_ID, BUCKET); + String snippetOutput = stdOutCaptureRule.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains(("BucketName: " + bucket.getName()))); + assertTrue( + snippetOutput.contains(("DefaultEventBasedHold: " + bucket.getDefaultEventBasedHold()))); + assertTrue(snippetOutput.contains(("DefaultKmsKeyName: " + bucket.getDefaultKmsKeyName()))); + assertTrue(snippetOutput.contains(("Id: " + bucket.getGeneratedId()))); + assertTrue(snippetOutput.contains(("IndexPage: " + bucket.getIndexPage()))); + assertTrue(snippetOutput.contains(("Location: " + bucket.getLocation()))); + assertTrue(snippetOutput.contains(("LocationType: " + bucket.getLocationType()))); + assertTrue(snippetOutput.contains(("Metageneration: " + bucket.getMetageneration()))); + assertTrue(snippetOutput.contains(("NotFoundPage: " + bucket.getNotFoundPage()))); + assertTrue( + snippetOutput.contains(("RetentionEffectiveTime: " + bucket.getRetentionEffectiveTime()))); + assertTrue(snippetOutput.contains(("RetentionPeriod: " + bucket.getRetentionPeriod()))); + assertTrue( + snippetOutput.contains(("RetentionPolicyIsLocked: " + bucket.retentionPolicyIsLocked()))); + assertTrue(snippetOutput.contains(("RequesterPays: " + bucket.requesterPays()))); + assertTrue(snippetOutput.contains(("SelfLink: 
" + bucket.getSelfLink()))); + assertTrue(snippetOutput.contains(("StorageClass: " + bucket.getStorageClass().name()))); + assertTrue(snippetOutput.contains(("TimeCreated: " + bucket.getCreateTime()))); + assertTrue(snippetOutput.contains(("VersioningEnabled: " + bucket.versioningEnabled()))); + assertTrue(snippetOutput.contains("Labels:")); + assertTrue(snippetOutput.contains("k=v")); + assertTrue(snippetOutput.contains("Lifecycle Rules:")); + assertTrue(snippetOutput.contains("ObjectRetention: " + bucket.getObjectRetention())); + } + + @Test + public void testListBuckets() { + ListBuckets.listBuckets(PROJECT_ID); + String snippetOutput = stdOutCaptureRule.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains(BUCKET)); + } + + @Test + public void testRemoveBucketLabel() { + storage.get(BUCKET).toBuilder().setLabels(ImmutableMap.of("k", "v")).build().update(); + int oldSize = storage.get(BUCKET).getLabels().size(); + RemoveBucketLabel.removeBucketLabel(PROJECT_ID, BUCKET, "k"); + Map labels = storage.get(BUCKET).getLabels(); + if (labels != null) { + assertEquals(oldSize - 1, labels.size()); + } + } + + @Test + public void testEnableLifecycleManagement() throws Throwable { + EnableLifecycleManagement.enableLifecycleManagement(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> assertEquals(1, storage.get(BUCKET).getLifecycleRules().size())); + } + + @Test + public void testDisableLifecycleManagement() throws Throwable { + storage.get(BUCKET).toBuilder() + .setLifecycleRules( + ImmutableList.of( + new BucketInfo.LifecycleRule( + BucketInfo.LifecycleRule.LifecycleAction.newDeleteAction(), + BucketInfo.LifecycleRule.LifecycleCondition.newBuilder().setAge(5).build()))) + .build() + .update(); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> assertEquals(1, storage.get(BUCKET).getLifecycleRules().size())); + DisableLifecycleManagement.disableLifecycleManagement(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> 
assertEquals(0, storage.get(BUCKET).getLifecycleRules().size())); + } + + @Test + public void testGetPublicAccessPrevention() throws Throwable { + try { + // By default a bucket PAP state is INHERITED and we are changing the state to validate + // non-default state. + storage.get(BUCKET).toBuilder() + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setPublicAccessPrevention(BucketInfo.PublicAccessPrevention.ENFORCED) + .build()) + .build() + .update(); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> + assertThat(storage.get(BUCKET).getIamConfiguration().getPublicAccessPrevention()) + .isEqualTo(PublicAccessPrevention.ENFORCED)); + GetPublicAccessPrevention.getPublicAccessPrevention(PROJECT_ID, BUCKET); + String snippetOutput = stdOutCaptureRule.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains("enforced")); + } finally { + // No matter what happens make sure test set bucket back to INHERITED + storage.get(BUCKET).toBuilder() + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setPublicAccessPrevention(BucketInfo.PublicAccessPrevention.INHERITED) + .build()) + .build() + .update(); + } + } + + @Test + public void testSetPublicAccessPreventionEnforced() throws Throwable { + try { + SetPublicAccessPreventionEnforced.setPublicAccessPreventionEnforced(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> + assertEquals( + storage.get(BUCKET).getIamConfiguration().getPublicAccessPrevention(), + BucketInfo.PublicAccessPrevention.ENFORCED)); + } finally { + // No matter what happens make sure test set bucket back to INHERITED + storage.get(BUCKET).toBuilder() + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setPublicAccessPrevention(BucketInfo.PublicAccessPrevention.INHERITED) + .build()) + .build() + .update(); + } + } + + @Test + public void testSetPublicAccessPreventionInherited() throws Throwable { + try { + storage.get(BUCKET).toBuilder() + .setIamConfiguration( + 
BucketInfo.IamConfiguration.newBuilder() + .setPublicAccessPrevention(BucketInfo.PublicAccessPrevention.ENFORCED) + .build()) + .build() + .update(); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> + assertThat(storage.get(BUCKET).getIamConfiguration().getPublicAccessPrevention()) + .isEqualTo(PublicAccessPrevention.ENFORCED)); + + SetPublicAccessPreventionInherited.setPublicAccessPreventionInherited(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> + assertEquals( + storage.get(BUCKET).getIamConfiguration().getPublicAccessPrevention(), + BucketInfo.PublicAccessPrevention.INHERITED)); + } finally { + // No matter what happens make sure test set bucket back to INHERITED + storage.get(BUCKET).toBuilder() + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setPublicAccessPrevention(BucketInfo.PublicAccessPrevention.INHERITED) + .build()) + .build() + .update(); + } + } + + @Test + public void testAddListRemoveBucketIamMembers() throws Throwable { + storage.update( + BucketInfo.newBuilder(BUCKET) + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setIsUniformBucketLevelAccessEnabled(true) + .build()) + .build()); + // todo: + int originalSize = storage.getIamPolicy(BUCKET).getBindingsList().size(); + AddBucketIamMember.addBucketIamMember(PROJECT_ID, BUCKET); + assertEquals(originalSize + 1, storage.getIamPolicy(BUCKET).getBindingsList().size()); + ListBucketIamMembers.listBucketIamMembers(PROJECT_ID, BUCKET); + String snippetOutput = stdOutCaptureRule.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains("example@google.com")); + RemoveBucketIamMember.removeBucketIamMember(PROJECT_ID, BUCKET); + assertEquals(originalSize, storage.getIamPolicy(BUCKET).getBindingsList().size()); + AddBucketIamConditionalBinding.addBucketIamConditionalBinding(PROJECT_ID, BUCKET); + assertEquals(originalSize + 1, storage.getIamPolicy(BUCKET).getBindingsList().size()); + 
RemoveBucketIamConditionalBinding.removeBucketIamConditionalBinding(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> assertEquals(originalSize, storage.getIamPolicy(BUCKET).getBindingsList().size())); + storage.update( + BucketInfo.newBuilder(BUCKET) + .setIamConfiguration( + BucketInfo.IamConfiguration.newBuilder() + .setIsUniformBucketLevelAccessEnabled(false) + .build()) + .build()); + } + + @Ignore("TODO(b/456381873): Test fails in CI due to project's public access prevention policy.") + @Test + public void testMakeBucketPublic() throws Throwable { + MakeBucketPublic.makeBucketPublic(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> + assertTrue( + storage + .getIamPolicy(BUCKET) + .getBindings() + .get(StorageRoles.objectViewer()) + .contains(Identity.allUsers()))); + } + + @Test + public void deleteBucketDefaultKmsKey() throws Throwable { + storage.get(BUCKET).toBuilder() + .setDefaultKmsKeyName( + "projects/cloud-java-ci-sample/locations/us/keyRings/" + + "gcs_test_kms_key_ring/cryptoKeys/gcs_kms_key_one") + .build() + .update(); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> assertNotNull(storage.get(BUCKET).getDefaultKmsKeyName())); + RemoveBucketDefaultKmsKey.removeBucketDefaultKmsKey(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> assertNull(storage.get(BUCKET).getDefaultKmsKeyName())); + } + + @Test + public void testEnableDisableVersioning() throws Throwable { + EnableBucketVersioning.enableBucketVersioning(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> assertTrue(storage.get(BUCKET).versioningEnabled())); + DisableBucketVersioning.disableBucketVersioning(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> assertFalse(storage.get(BUCKET).versioningEnabled())); + } + + @Test + public void testSetBucketWebsiteInfo() throws Throwable { + SetBucketWebsiteInfo.setBucketWesbiteInfo(PROJECT_ID, BUCKET, "index.html", "404.html"); + 
TestUtils.retryAssert( + RETRY_SETTINGS, + () -> { + Bucket bucket = storage.get(BUCKET); + assertEquals("index.html", bucket.getIndexPage()); + assertEquals("404.html", bucket.getNotFoundPage()); + }); + } + + @Test + public void testSetClientEndpoint() { + SetClientEndpoint.setClientEndpoint(PROJECT_ID, "https://storage.googleapis.com"); + String snippetOutput = stdOutCaptureRule.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains("https://storage.googleapis.com")); + } + + @Test + public void testConfigureBucketCors() throws Throwable { + ConfigureBucketCors.configureBucketCors( + PROJECT_ID, BUCKET, "http://example.appspot.com", "Content-Type", 3600); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> { + Cors cors = storage.get(BUCKET).getCors().get(0); + assertTrue(cors.getOrigins().get(0).toString().contains("example.appspot.com")); + assertTrue(cors.getResponseHeaders().contains("Content-Type")); + assertEquals(3600, cors.getMaxAgeSeconds().intValue()); + assertTrue(cors.getMethods().get(0).toString().equalsIgnoreCase("GET")); + }); + } + + @Test + public void testRemoveBucketCors() throws Throwable { + storage.get(BUCKET).toBuilder() + .setCors( + ImmutableList.of( + Cors.newBuilder() + .setOrigins(ImmutableList.of(Cors.Origin.of("http://example.appspot.com"))) + .setMethods(ImmutableList.of(HttpMethod.GET)) + .setResponseHeaders(ImmutableList.of("Content-Type")) + .setMaxAgeSeconds(3600) + .build())) + .build() + .update(); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> { + Cors cors = storage.get(BUCKET).getCors().get(0); + assertNotNull(cors); + assertTrue(cors.getOrigins().get(0).toString().contains("example.appspot.com")); + assertTrue(cors.getResponseHeaders().contains("Content-Type")); + assertEquals(3600, cors.getMaxAgeSeconds().intValue()); + assertTrue(cors.getMethods().get(0).toString().equalsIgnoreCase("GET")); + }); + RemoveBucketCors.removeBucketCors(PROJECT_ID, BUCKET); + TestUtils.retryAssert(RETRY_SETTINGS, () -> 
assertNull(storage.get(BUCKET).getCors())); + } + + @Test + public void testRequesterPays() throws Throwable { + EnableRequesterPays.enableRequesterPays(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> + assertTrue( + storage + .get(BUCKET, Storage.BucketGetOption.userProject(PROJECT_ID)) + .requesterPays())); + String projectId = ServiceOptions.getDefaultProjectId(); + String blobName = "test-create-empty-blob-requester-pays"; + byte[] content = {0xD, 0xE, 0xA, 0xD}; + Blob remoteBlob = + storage.create( + BlobInfo.newBuilder(BUCKET, blobName).build(), + content, + BlobTargetOption.userProject(projectId)); + assertNotNull(remoteBlob); + DownloadRequesterPaysObject.downloadRequesterPaysObject( + projectId, BUCKET, blobName, Paths.get(blobName)); + byte[] readBytes = Files.readAllBytes(Paths.get(blobName)); + assertArrayEquals(content, readBytes); + DisableRequesterPays.disableRequesterPays(PROJECT_ID, BUCKET); + TestUtils.retryAssert(RETRY_SETTINGS, () -> assertFalse(storage.get(BUCKET).requesterPays())); + } + + @Test + public void testRpo() throws Throwable { + String rpoBucket = RemoteStorageHelper.generateBucketName(); + try { + CreateBucketWithTurboReplication.createBucketWithTurboReplication( + PROJECT_ID, rpoBucket, "NAM4"); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> assertEquals("ASYNC_TURBO", storage.get(rpoBucket).getRpo().toString())); + + SetDefaultRpo.setDefaultRpo(PROJECT_ID, rpoBucket); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> assertEquals("DEFAULT", storage.get(rpoBucket).getRpo().toString())); + + SetAsyncTurboRpo.setAsyncTurboRpo(PROJECT_ID, rpoBucket); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> assertEquals("ASYNC_TURBO", storage.get(rpoBucket).getRpo().toString())); + + GetBucketRpo.getBucketRpo(PROJECT_ID, rpoBucket); + String snippetOutput = stdOutCaptureRule.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains("ASYNC_TURBO")); + } finally { + storage.delete(rpoBucket); + } + } + 
+ @Test + public void testDefaultKMSKey() throws Throwable { + SetBucketDefaultKmsKey.setBucketDefaultKmsKey(PROJECT_ID, BUCKET, KMS_KEY_NAME); + TestUtils.retryAssert( + RETRY_SETTINGS, + () -> assertEquals(KMS_KEY_NAME, storage.get(BUCKET).getDefaultKmsKeyName())); + + RemoveBucketDefaultKmsKey.removeBucketDefaultKmsKey(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> assertNull(storage.get(BUCKET).getDefaultKmsKeyName())); + } + + @Test + public void testBucketRetention() throws Throwable { + Long retention = 5L; + SetRetentionPolicy.setRetentionPolicy(PROJECT_ID, BUCKET, retention); + Bucket bucket = storage.get(BUCKET); + assertEquals(retention, bucket.getRetentionPeriod()); + assertNotNull(bucket.getRetentionEffectiveTime()); + + GetRetentionPolicy.getRetentionPolicy(PROJECT_ID, BUCKET); + String snippetOutput = stdOutCaptureRule.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains("5")); + + EnableDefaultEventBasedHold.enableDefaultEventBasedHold(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> assertTrue(storage.get(BUCKET).getDefaultEventBasedHold())); + + GetDefaultEventBasedHold.getDefaultEventBasedHold(PROJECT_ID, BUCKET); + snippetOutput = stdOutCaptureRule.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains("enabled")); + + byte[] content = {0xD, 0xE, 0xA, 0xD}; + String blobName = "test-create-empty-blob-retention-policy"; + bucket.create(blobName, content); + SetEventBasedHold.setEventBasedHold(PROJECT_ID, BUCKET, blobName); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> assertTrue(storage.get(BUCKET, blobName).getEventBasedHold())); + ReleaseEventBasedHold.releaseEventBasedHold(PROJECT_ID, BUCKET, blobName); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> assertFalse(storage.get(BUCKET, blobName).getEventBasedHold())); + RemoveRetentionPolicy.removeRetentionPolicy(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> 
assertNull(storage.get(BUCKET).getRetentionPeriod())); + DisableDefaultEventBasedHold.disableDefaultEventBasedHold(PROJECT_ID, BUCKET); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> assertFalse(storage.get(BUCKET).getDefaultEventBasedHold())); + SetTemporaryHold.setTemporaryHold(PROJECT_ID, BUCKET, blobName); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> assertTrue(storage.get(BUCKET, blobName).getTemporaryHold())); + ReleaseTemporaryHold.releaseTemporaryHold(PROJECT_ID, BUCKET, blobName); + TestUtils.retryAssert( + RETRY_SETTINGS, () -> assertFalse(storage.get(BUCKET, blobName).getTemporaryHold())); + } + + @Test + public void testLockRetentionPolicy() { + String tempBucket = RemoteStorageHelper.generateBucketName(); + Bucket bucket = storage.create(BucketInfo.of(tempBucket)); + assertNotNull(bucket); + try { + SetRetentionPolicy.setRetentionPolicy(PROJECT_ID, tempBucket, 5L); + assertEquals(5L, (long) storage.get(tempBucket).getRetentionPeriod()); + LockRetentionPolicy.lockRetentionPolicy(PROJECT_ID, tempBucket); + assertTrue(storage.get(tempBucket).retentionPolicyIsLocked()); + } finally { + storage.delete(tempBucket); + } + } + + @Test + public void testUniformBucketLevelAccess() { + EnableUniformBucketLevelAccess.enableUniformBucketLevelAccess(PROJECT_ID, BUCKET); + Bucket bucket = storage.get(BUCKET); + assertTrue(bucket.getIamConfiguration().isUniformBucketLevelAccessEnabled()); + assertNotNull(bucket.getIamConfiguration().getUniformBucketLevelAccessLockedTime()); + + GetUniformBucketLevelAccess.getUniformBucketLevelAccess(PROJECT_ID, BUCKET); + String snippetOutput = stdOutCaptureRule.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains("enabled")); + + DisableUniformBucketLevelAccess.disableUniformBucketLevelAccess(PROJECT_ID, BUCKET); + assertFalse(storage.get(BUCKET).getIamConfiguration().isUniformBucketLevelAccessEnabled()); + } + + @Test + public void testCreateBucketWithObjectRetention() { + String tempBucket = 
RemoteStorageHelper.generateBucketName(); + + try { + CreateBucketWithObjectRetention.createBucketWithObjectRetention(PROJECT_ID, tempBucket); + assertNotNull(storage.get(tempBucket).getObjectRetention()); + String snippetOutput = stdOutCaptureRule.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains("Enabled")); + } finally { + storage.delete(tempBucket); + } + } + + @Test + public void testSetSoftDeletePolicy() { + String tempBucket = RemoteStorageHelper.generateBucketName(); + Bucket bucket = storage.create(BucketInfo.of(tempBucket)); + try { + assertNotEquals( + java.time.Duration.ofDays(10), bucket.getSoftDeletePolicy().getRetentionDuration()); + SetSoftDeletePolicy.setSoftDeletePolicy(PROJECT_ID, tempBucket); + assertEquals( + java.time.Duration.ofDays(10), + storage.get(tempBucket).getSoftDeletePolicy().getRetentionDuration()); + } finally { + storage.delete(tempBucket); + } + } + + @Test + public void testDisableSoftDelete() { + String tempBucket = RemoteStorageHelper.generateBucketName(); + Bucket bucket = storage.create(BucketInfo.of(tempBucket)); + try { + assertNotEquals( + java.time.Duration.ofDays(0), bucket.getSoftDeletePolicy().getRetentionDuration()); + DisableSoftDelete.disableSoftDelete(PROJECT_ID, tempBucket); + assertEquals( + java.time.Duration.ofSeconds(0), + storage.get(tempBucket).getSoftDeletePolicy().getRetentionDuration()); + } finally { + storage.delete(tempBucket); + } + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/ITCleanupOldBucketsTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/ITCleanupOldBucketsTest.java new file mode 100644 index 000000000000..ba21e58ea81f --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/ITCleanupOldBucketsTest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage; + +import com.google.api.gax.paging.Page; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketField; +import com.google.cloud.storage.Storage.BucketListOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.BucketCleaner; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.common.collect.ImmutableList; +import com.google.storage.control.v2.StorageControlClient; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +public final class ITCleanupOldBucketsTest { + + @Inject + @StorageFixture(Transport.HTTP) + public Storage storage; + + @Inject public StorageControlClient ctrl; + @Inject public BucketInfo bucket; + + @Test + public void cleanupOldBuckets() { + Page page = + storage.list(BucketListOption.fields(BucketField.NAME, BucketField.TIME_CREATED)); + + String bucketNamePrefix = bucket.getName().substring(0, UUID.randomUUID().toString().length()); + + 
OffsetDateTime now = Instant.now().atOffset(ZoneOffset.UTC); + OffsetDateTime twentyFourHoursAgo = now.minusHours(24); + + ImmutableList bucketsToClean = + page.streamAll() + .map(Bucket::asBucketInfo) + .filter( + bucket -> { + OffsetDateTime ctime = bucket.getCreateTimeOffsetDateTime(); + String name = bucket.getName(); + return ctime.isBefore(twentyFourHoursAgo) + && (name.startsWith("gcloud") || name.startsWith(bucketNamePrefix)); + }) + .map(BucketInfo::getName) + .collect(ImmutableList.toImmutableList()); + + for (String bucketName : bucketsToClean) { + BucketCleaner.doCleanup(bucketName, storage, ctrl); + } + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/ITHmacSnippets.java b/java-storage/samples/snippets/src/test/java/com/example/storage/ITHmacSnippets.java new file mode 100644 index 000000000000..632142a618a4 --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/ITHmacSnippets.java @@ -0,0 +1,140 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +import com.example.storage.hmac.ActivateHmacKey; +import com.example.storage.hmac.CreateHmacKey; +import com.example.storage.hmac.DeactivateHmacKey; +import com.example.storage.hmac.DeleteHmacKey; +import com.example.storage.hmac.GetHmacKey; +import com.example.storage.hmac.ListHmacKeys; +import com.google.api.gax.paging.Page; +import com.google.cloud.ServiceOptions; +import com.google.cloud.storage.HmacKey; +import com.google.cloud.storage.HmacKey.HmacKeyMetadata; +import com.google.cloud.storage.HmacKey.HmacKeyState; +import com.google.cloud.storage.ServiceAccount; +import com.google.cloud.storage.Storage; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class ITHmacSnippets extends TestBase { + private static final String PROJECT_ID = ServiceOptions.getDefaultProjectId(); + private static final String HMAC_KEY_TEST_SERVICE_ACCOUNT = + PROJECT_ID + "@" + PROJECT_ID + ".iam.gserviceaccount.com"; + + @Before + public void before() { + // Skip running Hmac snippet tests in CI + // All of our samples CI uses a single service account. Each service account can only have 5 + // HMAC keys. + // We have 6 test scenarios defined. + // If more than one build is running at the same time they will fight with each other's limit + // These samples have not materially changed since 2022-03 (as of 2025-05). + // Additionally, we have more robust integration tests for HMAC operations in the library + // itself. 
+ assumeFalse( + "skipping hmac snippet tests in CI due to racy interactions", + "samples".equals(Env.JOB_TYPE)); + cleanUpHmacKeys(ServiceAccount.of(HMAC_KEY_TEST_SERVICE_ACCOUNT)); + } + + private void cleanUpHmacKeys(ServiceAccount serviceAccount) { + Page metadatas = + storage.listHmacKeys(Storage.ListHmacKeysOption.serviceAccount(serviceAccount)); + for (HmacKey.HmacKeyMetadata hmacKeyMetadata : metadatas.iterateAll()) { + if (hmacKeyMetadata.getState() == HmacKeyState.ACTIVE) { + hmacKeyMetadata = storage.updateHmacKeyState(hmacKeyMetadata, HmacKeyState.INACTIVE); + } + if (hmacKeyMetadata.getState() == HmacKeyState.INACTIVE) { + storage.deleteHmacKey(hmacKeyMetadata); + } + } + } + + @Test + public void testCreateHmacKey() throws Exception { + CreateHmacKey.createHmacKey(HMAC_KEY_TEST_SERVICE_ACCOUNT, PROJECT_ID); + String snippetOutput = stdOut.getCapturedOutputAsUtf8String(); + String accessId = snippetOutput.split("Access ID: ")[1].split("\n")[0]; + Thread.sleep(5000); + assertNotNull(storage.getHmacKey(accessId)); + } + + @Test + public void testGetHmacKey() throws Exception { + HmacKey hmacKey = storage.createHmacKey(ServiceAccount.of(HMAC_KEY_TEST_SERVICE_ACCOUNT)); + Thread.sleep(5000); + GetHmacKey.getHmacKey(hmacKey.getMetadata().getAccessId(), PROJECT_ID); + String snippetOutput = stdOut.getCapturedOutputAsUtf8String(); + Assert.assertTrue(snippetOutput.contains(HMAC_KEY_TEST_SERVICE_ACCOUNT)); + } + + @Test + public void testActivateHmacKey() throws Exception { + HmacKey hmacKey = storage.createHmacKey(ServiceAccount.of(HMAC_KEY_TEST_SERVICE_ACCOUNT)); + HmacKeyMetadata metadata = + storage.updateHmacKeyState(hmacKey.getMetadata(), HmacKeyState.INACTIVE); + + ActivateHmacKey.activateHmacKey(metadata.getAccessId(), PROJECT_ID); + Thread.sleep(5000); + assertEquals(HmacKeyState.ACTIVE, storage.getHmacKey(metadata.getAccessId()).getState()); + } + + @Test + public void testDeactivateHmacKey() throws Exception { + HmacKey hmacKey = 
storage.createHmacKey(ServiceAccount.of(HMAC_KEY_TEST_SERVICE_ACCOUNT)); + Thread.sleep(5000); + DeactivateHmacKey.deactivateHmacKey(hmacKey.getMetadata().getAccessId(), PROJECT_ID); + assertEquals( + HmacKeyState.INACTIVE, storage.getHmacKey(hmacKey.getMetadata().getAccessId()).getState()); + } + + @Test + public void testDeleteHmacKey() { + HmacKey hmacKey = storage.createHmacKey(ServiceAccount.of(HMAC_KEY_TEST_SERVICE_ACCOUNT)); + HmacKeyMetadata metadata = + storage.updateHmacKeyState(hmacKey.getMetadata(), HmacKeyState.INACTIVE); + + DeleteHmacKey.deleteHmacKey(metadata.getAccessId(), PROJECT_ID); + assertEquals(HmacKeyState.DELETED, storage.getHmacKey(metadata.getAccessId()).getState()); + } + + @Test + public void testListHmacKeys() { + // Create 2 HMAC keys + final HmacKey one = + storage.createHmacKey( + ServiceAccount.of(HMAC_KEY_TEST_SERVICE_ACCOUNT), + Storage.CreateHmacKeyOption.projectId(PROJECT_ID)); + final HmacKey two = + storage.createHmacKey( + ServiceAccount.of(HMAC_KEY_TEST_SERVICE_ACCOUNT), + Storage.CreateHmacKeyOption.projectId(PROJECT_ID)); + + ListHmacKeys.listHmacKeys(PROJECT_ID); + String snippetOutput = stdOut.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains(one.getMetadata().getAccessId())); + assertTrue(snippetOutput.contains(two.getMetadata().getAccessId())); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/ITObjectSnippets.java b/java-storage/samples/snippets/src/test/java/com/example/storage/ITObjectSnippets.java new file mode 100644 index 000000000000..4789c02524cf --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/ITObjectSnippets.java @@ -0,0 +1,712 @@ +/* + * Copyright 2016 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage; + +import static com.example.storage.Env.GOOGLE_CLOUD_PROJECT; +import static com.google.common.truth.Truth.assertThat; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.example.storage.object.AtomicMoveObject; +import com.example.storage.object.BatchSetObjectMetadata; +import com.example.storage.object.ChangeObjectCsekToKms; +import com.example.storage.object.ChangeObjectStorageClass; +import com.example.storage.object.ComposeObject; +import com.example.storage.object.CopyDeleteObject; +import com.example.storage.object.CopyObject; +import com.example.storage.object.CopyOldVersionOfObject; +import com.example.storage.object.DeleteObject; +import com.example.storage.object.DeleteOldVersionOfObject; +import com.example.storage.object.DownloadEncryptedObject; +import com.example.storage.object.DownloadObject; +import com.example.storage.object.DownloadObjectIntoMemory; +import com.example.storage.object.DownloadPublicObject; +import com.example.storage.object.GenerateEncryptionKey; +import com.example.storage.object.GenerateV4GetObjectSignedUrl; +import com.example.storage.object.GenerateV4PutObjectSignedUrl; +import com.example.storage.object.GetObjectContexts; +import com.example.storage.object.GetObjectMetadata; +import 
com.example.storage.object.ListObjectContexts; +import com.example.storage.object.ListObjects; +import com.example.storage.object.ListObjectsWithOldVersions; +import com.example.storage.object.ListObjectsWithPrefix; +import com.example.storage.object.ListSoftDeletedObjects; +import com.example.storage.object.ListSoftDeletedVersionsOfObject; +import com.example.storage.object.MakeObjectPublic; +import com.example.storage.object.RestoreSoftDeletedObject; +import com.example.storage.object.RotateObjectEncryptionKey; +import com.example.storage.object.SetObjectContexts; +import com.example.storage.object.SetObjectMetadata; +import com.example.storage.object.SetObjectRetentionPolicy; +import com.example.storage.object.StreamObjectDownload; +import com.example.storage.object.StreamObjectUpload; +import com.example.storage.object.UploadEncryptedObject; +import com.example.storage.object.UploadKmsEncryptedObject; +import com.example.storage.object.UploadObject; +import com.example.storage.object.UploadObjectFromMemory; +import com.google.cloud.kms.v1.CryptoKey; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BlobInfo.ObjectContexts; +import com.google.cloud.storage.BlobInfo.ObjectCustomContextPayload; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.IamConfiguration; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.Storage.BlobWriteOption; +import com.google.cloud.storage.Storage.BucketTargetOption; +import com.google.cloud.storage.StorageClass; +import com.google.cloud.storage.TmpFile; +import com.google.cloud.storage.it.BucketCleaner; +import com.google.cloud.storage.it.TemporaryBucket; +import com.google.cloud.storage.it.runner.annotations.Inject; +import 
com.google.cloud.storage.it.runner.registry.KmsFixture; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; +import com.google.common.io.BaseEncoding; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.Duration; +import java.util.Date; +import java.util.Map; +import java.util.Random; +import javax.net.ssl.HttpsURLConnection; +import org.junit.Assert; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +public class ITObjectSnippets extends TestBase { + + private static final String STRING_CONTENT = "Hello, World!"; + private static final byte[] CONTENT = STRING_CONTENT.getBytes(UTF_8); + + @Rule public final TemporaryFolder tmpDir = new TemporaryFolder(); + + @Inject public KmsFixture kmsFixture; + + @Test + public void testChangeObjectStorageClass() { + String objectName = generator.randomObjectName(); + BlobInfo gen1 = storage.create(info(objectName), CONTENT, BlobTargetOption.doesNotExist()); + Assert.assertNotEquals(StorageClass.COLDLINE, gen1.getStorageClass()); + ChangeObjectStorageClass.changeObjectStorageClass( + GOOGLE_CLOUD_PROJECT, bucket.getName(), objectName); + Blob gen2 = storage.get(bucket.getName(), objectName); + assertEquals(StorageClass.COLDLINE, gen2.getStorageClass()); + assertArrayEquals(CONTENT, gen2.getContent()); + } + + @Test + public void testCopyObject() throws Exception { + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo(BucketInfo.newBuilder(generator.randomBucketName()).build()) + .setStorage(storage) + .build()) { + + String newBucket = tmpBucket.getBucket().getName(); + + String objectName = generator.randomObjectName(); + storage.create(info(objectName)); + CopyObject.copyObject(GOOGLE_CLOUD_PROJECT, bucket.getName(), objectName, newBucket); + 
assertNotNull(storage.get(newBucket, objectName)); + } + } + + @Test + public void testDeleteObject() { + String blob = generator.randomObjectName(); + storage.create(BlobInfo.newBuilder(BlobId.of(bucket.getName(), blob)).build()); + assertNotNull(storage.get(bucket.getName(), blob)); + DeleteObject.deleteObject(GOOGLE_CLOUD_PROJECT, bucket.getName(), blob); + assertNull(storage.get(bucket.getName(), blob)); + } + + @Test + public void testDownloadObject() throws Exception { + Path baseDir = tmpDir.getRoot().toPath(); + try (TmpFile file1 = DataGenerator.base64Characters().tempFile(baseDir, 13)) { + String objectName = generator.randomObjectName(); + storage.createFrom(info(objectName), file1.getPath(), BlobWriteOption.doesNotExist()); + DownloadObject.downloadObject( + GOOGLE_CLOUD_PROJECT, bucket.getName(), objectName, file1.getPath().toString()); + byte[] expected = Files.readAllBytes(file1.getPath()); + byte[] actual = storage.readAllBytes(bucket.getName(), objectName); + assertArrayEquals(expected, actual); + } + } + + @Test + public void testDownloadObjectIntoMemory() throws IOException { + String objectName = generator.randomObjectName(); + storage.create(info(objectName), CONTENT, BlobTargetOption.doesNotExist()); + DownloadObjectIntoMemory.downloadObjectIntoMemory( + GOOGLE_CLOUD_PROJECT, bucket.getName(), objectName); + String snippetOutput = stdOut.getCapturedOutputAsUtf8String(); + assertThat(snippetOutput).contains("The contents of " + objectName); + } + + @Ignore("TODO(b/456381873): Test fails in CI due to project's public access prevention policy.") + @Test + public void testDownloadPublicObject() throws Exception { + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo( + BucketInfo.newBuilder(generator.randomBucketName()) + .setIamConfiguration( + IamConfiguration.newBuilder() + .setIsUniformBucketLevelAccessEnabled(false) + .build()) + .build()) + .setStorage(storage) + .build()) { + String bucketName = 
tmpBucket.getBucket().getName(); + + String publicBlob = generator.randomObjectName(); + BlobId publicBlobId = BlobId.of(bucketName, publicBlob); + Blob gen1 = + storage.create( + BlobInfo.newBuilder(publicBlobId).build(), CONTENT, BlobTargetOption.doesNotExist()); + storage.createAcl(gen1.getBlobId(), Acl.of(Acl.User.ofAllUsers(), Acl.Role.READER)); + File tempFile = tmpDir.newFile("file.txt"); + DownloadPublicObject.downloadPublicObject(bucketName, publicBlob, tempFile.toPath()); + assertEquals("Hello, World!", new String(Files.readAllBytes(tempFile.toPath()))); + } + } + + @Test + public void testGetObjectMetadata() { + String blobName = generator.randomObjectName(); + BlobId blobId = BlobId.of(bucket.getName(), blobName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setMetadata(ImmutableMap.of("k", "v")).build(); + Blob remoteBlob = storage.create(blobInfo, CONTENT); + assertNotNull(remoteBlob); + GetObjectMetadata.getObjectMetadata(GOOGLE_CLOUD_PROJECT, bucket.getName(), blobName); + String snippetOutput = stdOut.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains("Bucket: " + remoteBlob.getBucket())); + assertTrue(snippetOutput.contains("Bucket: " + remoteBlob.getBucket())); + assertTrue(snippetOutput.contains("CacheControl: " + remoteBlob.getCacheControl())); + assertTrue(snippetOutput.contains("ComponentCount: " + remoteBlob.getComponentCount())); + assertTrue(snippetOutput.contains("ContentDisposition: " + remoteBlob.getContentDisposition())); + assertTrue(snippetOutput.contains("ContentEncoding: " + remoteBlob.getContentEncoding())); + assertTrue(snippetOutput.contains("ContentLanguage: " + remoteBlob.getContentLanguage())); + assertTrue(snippetOutput.contains("ContentType: " + remoteBlob.getContentType())); + assertTrue(snippetOutput.contains("CustomTime: " + remoteBlob.getCustomTime())); + assertTrue(snippetOutput.contains("Crc32c: " + remoteBlob.getCrc32c())); + assertTrue(snippetOutput.contains("Crc32cHexString: " + 
remoteBlob.getCrc32cToHexString())); + assertTrue(snippetOutput.contains("ETag: " + remoteBlob.getEtag())); + assertTrue(snippetOutput.contains("Generation: " + remoteBlob.getGeneration())); + assertTrue(snippetOutput.contains("Id: " + remoteBlob.getBlobId())); + assertTrue(snippetOutput.contains("KmsKeyName: " + remoteBlob.getKmsKeyName())); + assertTrue(snippetOutput.contains("Md5Hash: " + remoteBlob.getMd5())); + assertTrue(snippetOutput.contains("Md5HexString: " + remoteBlob.getMd5ToHexString())); + assertTrue(snippetOutput.contains("MediaLink: " + remoteBlob.getMediaLink())); + assertTrue(snippetOutput.contains("Metageneration: " + remoteBlob.getMetageneration())); + assertTrue(snippetOutput.contains("Name: " + remoteBlob.getName())); + assertTrue(snippetOutput.contains("Size: " + remoteBlob.getSize())); + assertTrue(snippetOutput.contains("StorageClass: " + remoteBlob.getStorageClass())); + assertTrue(snippetOutput.contains("TimeCreated: " + new Date(remoteBlob.getCreateTime()))); + assertTrue( + snippetOutput.contains("Last Metadata Update: " + new Date(remoteBlob.getUpdateTime()))); + assertTrue(snippetOutput.contains("temporaryHold: disabled")); + assertTrue(snippetOutput.contains("eventBasedHold: disabled")); + assertTrue(snippetOutput.contains("User metadata:")); + assertTrue(snippetOutput.contains("k=v")); + assertTrue(snippetOutput.contains("Object Retention Policy: " + remoteBlob.getRetention())); + } + + @Test + public void testListObjects() { + String name1 = generator.randomObjectName(); + storage.create(info(name1), BlobTargetOption.doesNotExist()); + ListObjects.listObjects(GOOGLE_CLOUD_PROJECT, bucket.getName()); + String snippetOutput = stdOut.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains(name1)); + } + + @Test + public void testListObjectsWithPrefix() { + String prefix = generator.randomObjectName(); + storage.create(BlobInfo.newBuilder(bucket.getName(), prefix + "a/1.txt").build()); + 
storage.create(BlobInfo.newBuilder(bucket.getName(), prefix + "a/b/2.txt").build()); + storage.create(BlobInfo.newBuilder(bucket.getName(), prefix + "a/b/3.txt").build()); + ListObjectsWithPrefix.listObjectsWithPrefix( + GOOGLE_CLOUD_PROJECT, bucket.getName(), prefix + "a/"); + String snippetOutput = stdOut.getCapturedOutputAsUtf8String(); + assertTrue(snippetOutput.contains("a/1.txt")); + assertTrue(snippetOutput.contains("a/b/")); + assertFalse(snippetOutput.contains("a/b/2.txt")); + } + + @Test + public void testCopyDeleteObject() throws Exception { + String blob = generator.randomObjectName(); + String newBlob = generator.randomObjectName(); + + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo(BucketInfo.newBuilder(generator.randomBucketName()).build()) + .setStorage(storage) + .build()) { + + String newBucket = tmpBucket.getBucket().getName(); + BlobInfo gen1 = storage.create(BlobInfo.newBuilder(BlobId.of(newBucket, blob)).build()); + CopyDeleteObject.copyDeleteObject(GOOGLE_CLOUD_PROJECT, newBucket, blob, newBucket, newBlob); + assertNotNull(storage.get(newBucket, newBlob)); + assertNull(storage.get(bucket.getName(), blob)); + } + } + + @Test + public void testAtomicMoveObject() { + String blob1 = generator.randomObjectName(); + String blob2 = generator.randomObjectName(); + + String bucketName = bucket.getName(); + BlobInfo gen1 = storage.create(BlobInfo.newBuilder(BlobId.of(bucketName, blob1)).build()); + AtomicMoveObject.moveObject(GOOGLE_CLOUD_PROJECT, bucketName, blob1, blob2); + assertThat(storage.get(bucketName, blob1)).isNull(); + assertThat(storage.get(bucketName, blob2)).isNotNull(); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("Moved object"); + } + + @Test + public void testSetObjectMetadata() { + String bucketName = bucket.getName(); + String name1 = generator.randomObjectName(); + BlobInfo b1Gen1 = storage.create(BlobInfo.newBuilder(bucketName, name1).build()); + + 
SetObjectMetadata.setObjectMetadata(GOOGLE_CLOUD_PROJECT, bucket.getName(), name1); + BlobInfo b1Gen2 = storage.get(bucketName, name1); + assertThat(b1Gen2).isNotNull(); + assertThat(b1Gen2.getMetadata()).containsAtLeast("keyToAddOrUpdate", "value"); + } + + @Test + public void testUploadObject() throws IOException { + Path baseDir = tmpDir.getRoot().toPath(); + try (TmpFile file1 = DataGenerator.base64Characters().tempFile(baseDir, 13)) { + String objectName = generator.randomObjectName(); + UploadObject.uploadObject( + GOOGLE_CLOUD_PROJECT, bucket.getName(), objectName, file1.getPath().toString()); + byte[] expected = Files.readAllBytes(file1.getPath()); + byte[] actual = storage.readAllBytes(bucket.getName(), objectName); + assertArrayEquals(expected, actual); + } + } + + @Test + public void testUploadObjectFromMemory() throws IOException { + String objectName = "uploadobjectfrommemorytest"; + UploadObjectFromMemory.uploadObjectFromMemory( + GOOGLE_CLOUD_PROJECT, bucket.getName(), objectName, STRING_CONTENT); + final byte[] output = storage.get(bucket.getName(), objectName).getContent(); + assertEquals(STRING_CONTENT, new String(output, UTF_8)); + } + + @Test + public void testObjectCSEKOperations() throws IOException { + GenerateEncryptionKey.generateEncryptionKey(); + String snippetOutput = stdOut.getCapturedOutputAsUtf8String(); + String encryptionKey = snippetOutput.split(": ")[1].trim(); + + File tempFile = tmpDir.newFile("file.txt"); + File downloadFile = tmpDir.newFile("dlfile.txt"); + String encryptedBlob = "uploadencryptedobjecttest"; + Files.write(tempFile.toPath(), CONTENT); + + UploadEncryptedObject.uploadEncryptedObject( + GOOGLE_CLOUD_PROJECT, bucket.getName(), encryptedBlob, tempFile.getPath(), encryptionKey); + DownloadEncryptedObject.downloadEncryptedObject( + GOOGLE_CLOUD_PROJECT, + bucket.getName(), + encryptedBlob, + downloadFile.toPath(), + encryptionKey); + assertArrayEquals(CONTENT, Files.readAllBytes(downloadFile.toPath())); + + byte[] 
key = new byte[32]; + new Random().nextBytes(key); + String newEncryptionKey = BaseEncoding.base64().encode(key); + RotateObjectEncryptionKey.rotateObjectEncryptionKey( + GOOGLE_CLOUD_PROJECT, bucket.getName(), encryptedBlob, encryptionKey, newEncryptionKey); + File newDownloadFile = tmpDir.newFile("newdownloadfile.txt"); + DownloadEncryptedObject.downloadEncryptedObject( + GOOGLE_CLOUD_PROJECT, + bucket.getName(), + encryptedBlob, + newDownloadFile.toPath(), + newEncryptionKey); + assertArrayEquals(CONTENT, Files.readAllBytes(newDownloadFile.toPath())); + + assertNull(storage.get(bucket.getName(), encryptedBlob).getKmsKeyName()); + CryptoKey key1 = kmsFixture.getKey1(); + ChangeObjectCsekToKms.changeObjectFromCsekToKms( + GOOGLE_CLOUD_PROJECT, bucket.getName(), encryptedBlob, newEncryptionKey, key1.getName()); + assertTrue( + storage.get(bucket.getName(), encryptedBlob).getKmsKeyName().contains(key1.getName())); + } + + @Test + public void testObjectVersioningOperations() throws Exception { + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo( + BucketInfo.newBuilder(generator.randomBucketName()) + .setVersioningEnabled(true) + .build()) + .setStorage(storage) + .build()) { + String bucketName = tmpBucket.getBucket().getName(); + + String versionedBlob = generator.randomObjectName(); + final Blob originalBlob = + storage.create(BlobInfo.newBuilder(bucketName, versionedBlob).build(), CONTENT); + byte[] content2 = "Hello, World 2".getBytes(UTF_8); + storage.create(BlobInfo.newBuilder(bucketName, versionedBlob).build(), content2); + + ListObjectsWithOldVersions.listObjectsWithOldVersions(GOOGLE_CLOUD_PROJECT, bucketName); + String snippetOutput = stdOut.getCapturedOutputAsUtf8String(); + + snippetOutput = snippetOutput.replaceFirst(versionedBlob, ""); + assertTrue(snippetOutput.contains(versionedBlob)); + + String copiedblob = generator.randomObjectName(); + CopyOldVersionOfObject.copyOldVersionOfObject( + GOOGLE_CLOUD_PROJECT, + 
bucketName, + versionedBlob, + originalBlob.getGeneration(), + copiedblob); + assertArrayEquals(CONTENT, storage.get(bucketName, copiedblob).getContent()); + + DeleteOldVersionOfObject.deleteOldVersionOfObject( + GOOGLE_CLOUD_PROJECT, bucketName, versionedBlob, originalBlob.getGeneration()); + assertNull(storage.get(BlobId.of(bucketName, versionedBlob, originalBlob.getGeneration()))); + assertNotNull(storage.get(bucketName, versionedBlob)); + } + } + + @Test + public void testV4SignedURLs() throws IOException { + String tempObject = "test-upload-signed-url-object"; + GenerateV4PutObjectSignedUrl.generateV4PutObjectSignedUrl( + GOOGLE_CLOUD_PROJECT, bucket.getName(), tempObject); + String snippetOutput = stdOut.getCapturedOutputAsUtf8String(); + String url = snippetOutput.split("\n")[1]; + URL uploadUrl = new URL(url); + HttpsURLConnection connection = (HttpsURLConnection) uploadUrl.openConnection(); + connection.setRequestMethod("PUT"); + connection.setDoOutput(true); + connection.setRequestProperty("Content-Type", "application/octet-stream"); + try (OutputStream out = connection.getOutputStream()) { + out.write(CONTENT); + assertEquals(connection.getResponseCode(), 200); + } + GenerateV4GetObjectSignedUrl.generateV4GetObjectSignedUrl( + GOOGLE_CLOUD_PROJECT, bucket.getName(), tempObject); + snippetOutput = stdOut.getCapturedOutputAsUtf8String(); + url = snippetOutput.split("\n")[5]; + URL downloadUrl = new URL(url); + connection = (HttpsURLConnection) downloadUrl.openConnection(); + byte[] readBytes = new byte[CONTENT.length]; + try (InputStream responseStream = connection.getInputStream()) { + assertEquals(CONTENT.length, responseStream.read(readBytes)); + assertArrayEquals(CONTENT, readBytes); + } + } + + @Ignore("TODO(b/456381873): Test fails in CI due to project's public access prevention policy.") + @Test + public void testMakeObjectPublic() { + String aclBlob = generator.randomObjectName(); + assertNull( + storage + 
.create(BlobInfo.newBuilder(bucket.getName(), aclBlob).build()) + .getAcl(Acl.User.ofAllUsers())); + MakeObjectPublic.makeObjectPublic(GOOGLE_CLOUD_PROJECT, bucket.getName(), aclBlob); + assertNotNull(storage.get(bucket.getName(), aclBlob).getAcl(Acl.User.ofAllUsers())); + } + + @Test + public void testComposeObject() { + String firstObject = generator.randomObjectName(); + String secondObject = generator.randomObjectName(); + String targetObject = generator.randomObjectName(); + storage.create( + BlobInfo.newBuilder(bucket.getName(), firstObject).build(), firstObject.getBytes(UTF_8)); + storage.create( + BlobInfo.newBuilder(bucket.getName(), secondObject).build(), secondObject.getBytes(UTF_8)); + + ComposeObject.composeObject( + bucket.getName(), firstObject, secondObject, targetObject, GOOGLE_CLOUD_PROJECT); + + String got = stdOut.getCapturedOutputAsUtf8String(); + assertThat(got).contains(firstObject); + assertThat(got).contains(secondObject); + assertThat(got).contains(targetObject); + } + + @Test + public void testStreamUploadDownload() throws Exception { + String blobName = generator.randomObjectName(); + StreamObjectUpload.streamObjectUpload( + GOOGLE_CLOUD_PROJECT, bucket.getName(), blobName, "hello world"); + String got1 = stdOut.getCapturedOutputAsUtf8String(); + assertThat(got1).contains(blobName); + assertThat(got1).contains("WriteChannel"); + + File file = tmpDir.newFile(); + StreamObjectDownload.streamObjectDownload( + GOOGLE_CLOUD_PROJECT, bucket.getName(), blobName, file.getAbsolutePath()); + String got2 = stdOut.getCapturedOutputAsUtf8String(); + assertThat(got2).contains(blobName); + assertThat(got2).contains("ReadChannel"); + } + + @Test + public void testUploadKMSEncryptedObject() { + String blobName = generator.randomObjectName(); + UploadKmsEncryptedObject.uploadKmsEncryptedObject( + GOOGLE_CLOUD_PROJECT, bucket.getName(), blobName, kmsFixture.getKey1().getName()); + assertNotNull(storage.get(bucket.getName(), blobName)); + } + + @Test + 
public void testBatchSetObjectMetadata() { + String prefix = generator.randomObjectName(); + String name1 = prefix + "/1.txt"; + String name2 = prefix + "/2.txt"; + String bucketName = bucket.getName(); + BlobInfo b1Gen1 = storage.create(BlobInfo.newBuilder(bucketName, name1).build()); + BlobInfo b2Gen1 = storage.create(BlobInfo.newBuilder(bucketName, name2).build()); + + BatchSetObjectMetadata.batchSetObjectMetadata(GOOGLE_CLOUD_PROJECT, bucketName, prefix + "/"); + + BlobInfo b1Gen2 = storage.get(bucketName, name1); + BlobInfo b2Gen2 = storage.get(bucketName, name2); + assertThat(b1Gen2).isNotNull(); + assertThat(b2Gen2).isNotNull(); + assertThat(b1Gen2.getMetadata()).containsAtLeast("keyToAddOrUpdate", "value"); + assertThat(b2Gen2.getMetadata()).containsAtLeast("keyToAddOrUpdate", "value"); + } + + @Test + public void testSetObjectRetentionPolicy() { + BucketInfo bucketInfo = BucketInfo.newBuilder(generator.randomBucketName()).build(); + Bucket tmpBucket = storage.create(bucketInfo, BucketTargetOption.enableObjectRetention(true)); + String tempBucket = tmpBucket.getName(); + try { + + String retentionBlob = generator.randomObjectName(); + BlobInfo gen1 = storage.create(BlobInfo.newBuilder(tempBucket, retentionBlob).build()); + assertNull(storage.get(tempBucket, retentionBlob).getRetention()); + try { + SetObjectRetentionPolicy.setObjectRetentionPolicy( + GOOGLE_CLOUD_PROJECT, tempBucket, retentionBlob); + assertNotNull(storage.get(tempBucket, retentionBlob).getRetention()); + } finally { + storage.update( + gen1.toBuilder().setRetention(null).build(), + BlobTargetOption.overrideUnlockedRetention(true)); + } + } finally { + BucketCleaner.doCleanup(tempBucket, storage); + } + } + + @Test + public void testListSoftDeletedObjects() throws Exception { + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo( + BucketInfo.newBuilder(generator.randomBucketName()) + // This is already the default, but we set it here in case the default ever + 
// changes + .setSoftDeletePolicy( + BucketInfo.SoftDeletePolicy.newBuilder() + .setRetentionDuration(Duration.ofDays(7)) + .build()) + .build()) + .setStorage(storage) + .build()) { + String bucketName = tmpBucket.getBucket().getName(); + + String blob = generator.randomObjectName(); + storage.create(BlobInfo.newBuilder(BlobId.of(bucketName, blob)).build()); + storage.delete(BlobId.of(bucketName, blob)); + + ListSoftDeletedObjects.listSoftDeletedObjects(GOOGLE_CLOUD_PROJECT, bucketName); + + String snippetOutput = stdOut.getCapturedOutputAsUtf8String(); + + assertTrue(snippetOutput.contains(blob)); + } + } + + @Test + public void testListSoftDeletedVersionsOfObject() throws Exception { + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo( + BucketInfo.newBuilder(generator.randomBucketName()) + // This is already the default, but we set it here in case the default ever + // changes + .setSoftDeletePolicy( + BucketInfo.SoftDeletePolicy.newBuilder() + .setRetentionDuration(Duration.ofDays(7)) + .build()) + .build()) + .setStorage(storage) + .build()) { + String bucketName = tmpBucket.getBucket().getName(); + + System.out.println(storage.get(bucketName).getSoftDeletePolicy().toString()); + + String blob = generator.randomObjectName(); + storage.create(BlobInfo.newBuilder(BlobId.of(bucketName, blob)).build()); + storage.delete(BlobId.of(bucketName, blob)); + + String blob2 = generator.randomObjectName(); + storage.create(BlobInfo.newBuilder(BlobId.of(bucketName, blob2)).build()); + storage.delete(BlobId.of(bucketName, blob2)); + + ListSoftDeletedVersionsOfObject.listSoftDeletedVersionOfObject( + GOOGLE_CLOUD_PROJECT, bucketName, blob); + + String snippetOutput = stdOut.getCapturedOutputAsUtf8String(); + + assertTrue(snippetOutput.contains(blob)); + assertFalse(snippetOutput.contains(blob2)); + } + } + + @Test + public void testRestoreSoftDeletedObject() throws Exception { + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + 
.setBucketInfo( + BucketInfo.newBuilder(generator.randomBucketName()) + // This is already the default, but we set it here in case the default ever + // changes + .setSoftDeletePolicy( + BucketInfo.SoftDeletePolicy.newBuilder() + .setRetentionDuration(Duration.ofDays(7)) + .build()) + .build()) + .setStorage(storage) + .build()) { + String bucketName = tmpBucket.getBucket().getName(); + + String blob = generator.randomObjectName(); + + BlobInfo gen1 = storage.create(BlobInfo.newBuilder(BlobId.of(bucketName, blob)).build()); + storage.delete(BlobId.of(bucketName, blob)); + + assertNull(storage.get(BlobId.of(bucketName, blob))); + + RestoreSoftDeletedObject.restoreSoftDeletedObject( + GOOGLE_CLOUD_PROJECT, bucketName, blob, gen1.getGeneration()); + + assertNotNull(storage.get(BlobId.of(bucketName, blob))); + } + } + + @Test + public void testSetObjectContexts() throws Exception { + String blobName = generator.randomObjectName(); + String key = "test-key-get"; + String value = "test-value-get"; + + Blob initialBlob = storage.create(info(blobName), CONTENT, BlobTargetOption.doesNotExist()); + + SetObjectContexts.setObjectContexts( + GOOGLE_CLOUD_PROJECT, bucket.getName(), blobName, key, value); + String setOutput = stdOut.getCapturedOutputAsUtf8String(); + assertThat(setOutput).contains("Updated custom contexts for object " + blobName); + + Blob updatedBlob = storage.get(bucket.getName(), blobName); + assertThat(updatedBlob.getContexts().getCustom().get(key).getValue()).isEqualTo(value); + } + + @Test + public void testGetObjectContexts() throws Exception { + String blobName = generator.randomObjectName(); + String key = "test-key-get"; + String value = "test-value-get"; + + storage.create(info(blobName), CONTENT, BlobTargetOption.doesNotExist()); + + ObjectCustomContextPayload payload = + ObjectCustomContextPayload.newBuilder().setValue(value).build(); + Map custom = Maps.newHashMap(); + custom.put(key, payload); + ObjectContexts contexts = 
ObjectContexts.newBuilder().setCustom(custom).build(); + BlobInfo pendingUpdate = + storage.get(bucket.getName(), blobName).toBuilder().setContexts(contexts).build(); + storage.update(pendingUpdate); + + GetObjectContexts.getObjectContexts(GOOGLE_CLOUD_PROJECT, bucket.getName(), blobName); + + String getOutput = stdOut.getCapturedOutputAsUtf8String(); + + assertThat(getOutput).contains("Custom Contexts:"); + assertThat(getOutput).contains(key + "=ObjectCustomContextPayload{"); + assertThat(getOutput).contains("value=" + value); + } + + @Test + public void testListObjectContexts() throws Exception { + String blobName = generator.randomObjectName(); + String key = "test-key-list"; + String value = "test-value-list"; + + storage.create(info(blobName), CONTENT, BlobTargetOption.doesNotExist()); + + ObjectCustomContextPayload payload = + ObjectCustomContextPayload.newBuilder().setValue(value).build(); + Map custom = Maps.newHashMap(); + custom.put(key, payload); + ObjectContexts contexts = ObjectContexts.newBuilder().setCustom(custom).build(); + BlobInfo pendingUpdate = + storage.get(bucket.getName(), blobName).toBuilder().setContexts(contexts).build(); + storage.update(pendingUpdate); + + ListObjectContexts.listObjectContexts(GOOGLE_CLOUD_PROJECT, bucket.getName(), key); + String listOutput = stdOut.getCapturedOutputAsUtf8String(); + + assertThat(listOutput).contains("gs://" + bucket.getName() + "/" + blobName); + + assertThat(listOutput) + .contains("Listing objects for bucket: " + bucket.getName() + "with context key: " + key); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/ITStorageSnippets.java b/java-storage/samples/snippets/src/test/java/com/example/storage/ITStorageSnippets.java new file mode 100644 index 000000000000..805eef2f2c45 --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/ITStorageSnippets.java @@ -0,0 +1,130 @@ +/* + * Copyright 2016 Google LLC + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.it.BucketCleaner; +import com.google.cloud.storage.testing.RemoteStorageHelper; +import com.google.cloud.testing.junit4.StdOutCaptureRule; +import java.io.File; +import java.io.FileInputStream; +import java.nio.file.Files; +import java.util.HashMap; +import java.util.Map; +import java.util.logging.Logger; +import org.apache.http.client.HttpClient; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.mime.MultipartEntityBuilder; +import org.apache.http.impl.client.HttpClientBuilder; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.Timeout; + +public class ITStorageSnippets { + + private static final Logger log = Logger.getLogger(ITStorageSnippets.class.getName()); + private static final String BUCKET = RemoteStorageHelper.generateBucketName(); + private static Storage storage; + private static final String PROJECT_ID = Env.GOOGLE_CLOUD_PROJECT; + + @Rule public final StdOutCaptureRule stdOutCaptureRule = new StdOutCaptureRule(); + + @Rule public ExpectedException thrown = ExpectedException.none(); + + 
@Rule public Timeout globalTimeout = Timeout.seconds(300); + + @BeforeClass + public static void beforeClass() { + RemoteStorageHelper helper = RemoteStorageHelper.create(); + storage = helper.getOptions().getService(); + storage.create(BucketInfo.of(BUCKET)); + } + + @AfterClass + public static void afterClass() throws Exception { + try (Storage ignore = storage) { + BucketCleaner.doCleanup(BUCKET, storage); + } + } + + @Test + public void testGetServiceAccount() { + GetServiceAccount.getServiceAccount(PROJECT_ID); + String snippetOutput = stdOutCaptureRule.getCapturedOutputAsUtf8String(); + + assertTrue(snippetOutput.contains("service")); + assertTrue(snippetOutput.contains("@gs-project-accounts.iam.gserviceaccount.com")); + } + + @Test + public void testGenerateSignedPostPolicyV4() throws Exception { + GenerateSignedPostPolicyV4.generateSignedPostPolicyV4(PROJECT_ID, BUCKET, "my-object"); + String snippetOutput = stdOutCaptureRule.getCapturedOutputAsUtf8String(); + assertTrue( + snippetOutput.contains("

")); + + String[] output = snippetOutput.split("'"); + final HttpClient client = HttpClientBuilder.create().build(); + final HttpPost request = new HttpPost(output[1]); + MultipartEntityBuilder builder = MultipartEntityBuilder.create(); + + Map policy = new HashMap<>(); + /** + * When splitting by "'", any element in the form has its value two array elements ahead of it, + * for example ["x-goog-algorithm", "value=", "GOOG4-RSA-SHA256"] We take advantage of this to + * make a map which has any policy element easily accessible. The map also has a lot of noise, + * but we just use the parts we need + */ + for (int i = 3; i < output.length - 3; i += 2) { + policy.put(output[i], output[i + 2]); + } + + builder.addTextBody("x-goog-date", policy.get("x-goog-date")); + builder.addTextBody("x-goog-meta-test", "data"); + builder.addTextBody("x-goog-algorithm", "GOOG4-RSA-SHA256"); + builder.addTextBody("x-goog-credential", policy.get("x-goog-credential")); + builder.addTextBody("key", "my-object"); + builder.addTextBody("x-goog-signature", policy.get("x-goog-signature")); + builder.addTextBody("policy", policy.get("policy")); + + File file = File.createTempFile("temp", "file"); + Files.write(file.toPath(), "hello world".getBytes()); + builder.addBinaryBody( + "file", new FileInputStream(file), ContentType.APPLICATION_OCTET_STREAM, file.getName()); + request.setEntity(builder.build()); + + client.execute(request); + + assertEquals("hello world", new String(storage.get(BUCKET, "my-object").getContent())); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/QuickstartSampleIT.java b/java-storage/samples/snippets/src/test/java/com/example/storage/QuickstartSampleIT.java new file mode 100644 index 000000000000..4450ab8f273a --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/QuickstartSampleIT.java @@ -0,0 +1,92 @@ +/* + * Copyright 2015 Google Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BucketField; +import com.google.cloud.storage.Storage.BucketGetOption; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.it.BucketCleaner; +import com.google.storage.control.v2.StorageLayoutName; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** Tests for quickstart sample. 
*/ +@SuppressWarnings("checkstyle:abbreviationaswordinname") +public class QuickstartSampleIT extends TestBase { + + private String bucketName; + + @Before + public void setUp() { + bucketName = generator.randomBucketName(); + } + + @After + public void tearDown() { + Bucket bucket = storage.get(bucketName, BucketGetOption.fields(BucketField.NAME)); + if (bucket != null) { + BucketCleaner.doCleanup(bucketName, storage); + } + } + + @Test + public void testQuickstart() throws Exception { + QuickstartSample.main(bucketName); + String got = stdOut.getCapturedOutputAsUtf8String(); + assertThat(got).contains(String.format("Bucket %s created.", bucketName)); + } + + @Test + public void testQuickstartGrpc() throws Exception { + QuickstartGrpcSample.main(bucketName); + String got = stdOut.getCapturedOutputAsUtf8String(); + assertThat(got).contains(String.format("Bucket %s created.", bucketName)); + } + + @Test + public void testQuickstartGrpcDp() throws Exception { + QuickstartGrpcDpSample.main(bucketName); + String got = stdOut.getCapturedOutputAsUtf8String(); + assertThat(got).contains(String.format("Bucket %s created.", bucketName)); + } + + @Test + public void testQuickstartStorageControl() throws Exception { + Storage storageClient = StorageOptions.getDefaultInstance().getService(); + storageClient.create(BucketInfo.of(bucketName)); + QuickstartStorageControlSample.main(bucketName); + String got = stdOut.getCapturedOutputAsUtf8String(); + assertThat(got) + .contains( + String.format( + "Performed getStorageLayout request for %s", + StorageLayoutName.format("_", bucketName))); + } + + @Test + public void testQuickstartOpenTelemetry() throws Exception { + QuickstartOpenTelemetrySample.main(); + String got = stdOut.getCapturedOutputAsUtf8String(); + assertThat(got).contains("Created an instance of storage with OpenTelemetry configured"); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/TestBase.java 
b/java-storage/samples/snippets/src/test/java/com/example/storage/TestBase.java new file mode 100644 index 000000000000..2ca7c3702014 --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/TestBase.java @@ -0,0 +1,55 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage; + +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.testing.junit4.StdOutCaptureRule; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.junit.Rule; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.PROD) +public abstract class TestBase { + + @Rule public StdOutCaptureRule stdOut = new StdOutCaptureRule(); + + @Inject + @StorageFixture(Transport.HTTP) + public Storage storage; + + @Inject public BucketInfo bucket; + @Inject 
public Generator generator; + + protected BlobInfo info(@NonNull String name) { + return BlobInfo.newBuilder(bucket, name).build(); + } + + protected BlobInfo createEmptyObject() { + return storage.create(info(generator.randomObjectName()), BlobTargetOption.doesNotExist()); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/TestUtils.java b/java-storage/samples/snippets/src/test/java/com/example/storage/TestUtils.java new file mode 100644 index 000000000000..6530c1519cc3 --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/TestUtils.java @@ -0,0 +1,78 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage;

import com.google.api.core.CurrentMillisClock;
import com.google.api.gax.retrying.BasicResultRetryAlgorithm;
import com.google.api.gax.retrying.RetrySettings;
import com.google.cloud.RetryHelper;
import com.google.cloud.RetryHelper.RetryHelperException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Logger;

/** Shared helpers for sample integration tests. */
public final class TestUtils {

  private static final Logger log = Logger.getLogger(TestUtils.class.getName());

  // Marker value returned by a successful attempt; lets the retry algorithm
  // distinguish "assertion passed" from any other outcome.
  private static final Object SENTINEL = new Object();

  private TestUtils() {}

  /**
   * Runs {@code f} and, if it throws, re-runs it according to {@code rs} until it succeeds or the
   * retry settings are exhausted.
   *
   * @param rs retry policy (attempt count, backoff) applied between failed runs
   * @param f the assertion body to (re-)execute
   * @throws Throwable the original throwable from the final failed attempt
   */
  public static void retryAssert(RetrySettings rs, RetryRunnable f) throws Throwable {
    AtomicInteger counter = new AtomicInteger(1);
    try {
      RetryHelper.runWithRetries(
          () -> {
            try {
              int c = counter.getAndIncrement();
              if (c > 1) {
                log.warning(String.format("Retrying assertion for the %d time", c));
              }
              f.run();
              return SENTINEL;
            } catch (Throwable e) {
              // RetryHelper's callable can only throw Exception; tunnel arbitrary
              // Throwables (e.g. AssertionError) through a checked wrapper.
              throw new TunnelThrowable(e);
            }
          },
          rs,
          // Parameterized (previously raw) algorithm: retry whenever the attempt threw.
          new BasicResultRetryAlgorithm<Object>() {
            @Override
            public boolean shouldRetry(Throwable previousThrowable, Object previousResponse) {
              return previousResponse != SENTINEL && previousThrowable != null;
            }
          },
          CurrentMillisClock.getDefaultClock());
    } catch (RetryHelperException e) {
      // Unwrap so callers see the original failure, not the retry plumbing.
      if (e.getCause() instanceof TunnelThrowable) {
        TunnelThrowable cause = (TunnelThrowable) e.getCause();
        throw cause.getCause();
      }
      throw e.getCause();
    }
  }

  /** Like {@link Runnable}, but permitted to throw any {@link Throwable}. */
  @FunctionalInterface
  public interface RetryRunnable {

    void run() throws Throwable;
  }

  /** Checked wrapper used to carry a {@link Throwable} out through {@link RetryHelper}. */
  private static final class TunnelThrowable extends Exception {
    private TunnelThrowable(Throwable cause) {
      super(cause);
    }
  }
}
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage;

import static com.google.common.truth.Truth.assertThat;
import static org.junit.Assert.assertThrows;

import com.google.api.gax.retrying.RetrySettings;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Test;

/** Unit tests for {@link TestUtils#retryAssert}. */
public class TestUtilsTest {

  // Allow up to three attempts before the retry loop gives up.
  private static final RetrySettings RETRY_SETTINGS =
      RetrySettings.newBuilder().setMaxAttempts(3).build();

  @Test
  public void noException() throws Throwable {
    // A passing assertion succeeds on the first attempt.
    TestUtils.retryAssert(RETRY_SETTINGS, () -> assertThat(true).isTrue());
  }

  @Test
  public void assertionFailureOnce() throws Throwable {
    // Fails on the first attempt (counter == 1), then passes on the retry.
    AtomicInteger c = new AtomicInteger(1);
    TestUtils.retryAssert(RETRY_SETTINGS, () -> assertThat(c.getAndIncrement()).isGreaterThan(1));
  }

  @Test
  public void assertionError_exhausted() {
    // The assertion can never pass within three attempts, so the original
    // AssertionError must surface once retries are exhausted.
    AtomicInteger c = new AtomicInteger(1);
    assertThrows(
        AssertionError.class,
        () ->
            TestUtils.retryAssert(
                RETRY_SETTINGS, () -> assertThat(c.getAndIncrement()).isGreaterThan(10)));
  }

  @Test
  public void runtimeException_exhausted() {
    assertThrows(
        RuntimeException.class,
        () ->
            TestUtils.retryAssert(
                RETRY_SETTINGS,
                () -> {
                  throw new RuntimeException("kaboom");
                }));
  }
}
+ assertWithMessage("Unable to determine user email").that(IT_SERVICE_ACCOUNT_EMAIL).isNotEmpty(); + + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo(BucketInfo.newBuilder(generator.randomBucketName()).build()) + .setStorage(storage) + .build()) { + + String bucketName = tmpBucket.getBucket().getName(); + + AddBucketDefaultOwner.addBucketDefaultOwner(bucketName, IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(IT_SERVICE_ACCOUNT_EMAIL); + } + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/AddBucketOwnerTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/AddBucketOwnerTest.java new file mode 100644 index 000000000000..f95bacfbb812 --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/AddBucketOwnerTest.java @@ -0,0 +1,48 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.bucket; + +import static com.example.storage.Env.GOOGLE_CLOUD_PROJECT; +import static com.example.storage.Env.IT_SERVICE_ACCOUNT_EMAIL; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.example.storage.TestBase; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.it.TemporaryBucket; +import org.junit.Test; + +public class AddBucketOwnerTest extends TestBase { + + @Test + public void testAddBucketOwner() throws Exception { + // Check for user email before the actual test. + assertWithMessage("Unable to determine user email").that(IT_SERVICE_ACCOUNT_EMAIL).isNotEmpty(); + + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo(BucketInfo.newBuilder(generator.randomBucketName()).build()) + .setStorage(storage) + .build()) { + + String bucketName = tmpBucket.getBucket().getName(); + + AddBucketOwner.addBucketOwner(GOOGLE_CLOUD_PROJECT, bucketName, IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(IT_SERVICE_ACCOUNT_EMAIL); + } + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/AutoclassTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/AutoclassTest.java new file mode 100644 index 000000000000..aa39aae8836f --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/AutoclassTest.java @@ -0,0 +1,51 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +import static com.example.storage.Env.GOOGLE_CLOUD_PROJECT; +import static com.google.common.truth.Truth.assertThat; + +import com.example.storage.TestBase; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.Autoclass; +import com.google.cloud.storage.StorageClass; +import com.google.cloud.storage.testing.RemoteStorageHelper; +import org.junit.Test; + +public class AutoclassTest extends TestBase { + + @Test + public void testSetGetBucketAutoclass() throws Exception { + String autoclassBucket = RemoteStorageHelper.generateBucketName(); + storage.create( + BucketInfo.newBuilder(autoclassBucket) + .setAutoclass(Autoclass.newBuilder().setEnabled(true).build()) + .build()); + try { + SetBucketAutoclass.setBucketAutoclass( + GOOGLE_CLOUD_PROJECT, autoclassBucket, StorageClass.NEARLINE); + Autoclass autoclass = storage.get(autoclassBucket).getAutoclass(); + assertThat(autoclass.getEnabled()).isTrue(); + + GetBucketAutoclass.getBucketAutoclass(GOOGLE_CLOUD_PROJECT, autoclassBucket); + assertThat(stdOut.getCapturedOutputAsUtf8String()) + .contains(autoclass.getToggleTime().toString()); + } finally { + RemoteStorageHelper.forceDelete(storage, autoclassBucket); + } + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/CreateBucketDualRegionTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/CreateBucketDualRegionTest.java new file mode 100644 index 000000000000..9b1ce9c74ef0 --- /dev/null +++ 
b/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/CreateBucketDualRegionTest.java @@ -0,0 +1,39 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +import static com.example.storage.Env.GOOGLE_CLOUD_PROJECT; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertNotNull; + +import com.example.storage.TestBase; +import com.google.cloud.storage.testing.RemoteStorageHelper; +import org.junit.Test; + +public class CreateBucketDualRegionTest extends TestBase { + + @Test + public void testCreateBucketDualRegion() { + assertNotNull("Unable to determine Project ID", GOOGLE_CLOUD_PROJECT); + String newBucket = RemoteStorageHelper.generateBucketName(); + CreateBucketDualRegion.createBucketDualRegion( + GOOGLE_CLOUD_PROJECT, newBucket, "US", "US-EAST1", "US-WEST1"); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("US-WEST1"); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("US-EAST1"); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("Created bucket"); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/PrintBucketAclTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/PrintBucketAclTest.java new file mode 100644 index 000000000000..58872f403b17 --- /dev/null +++ 
b/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/PrintBucketAclTest.java @@ -0,0 +1,70 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +import static com.example.storage.Env.GOOGLE_CLOUD_PROJECT; +import static com.example.storage.Env.IT_SERVICE_ACCOUNT_EMAIL; +import static com.example.storage.Env.IT_SERVICE_ACCOUNT_USER; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.example.storage.TestBase; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.it.TemporaryBucket; +import org.junit.Test; + +public class PrintBucketAclTest extends TestBase { + + @Test + public void testPrintBucketAcls() throws Exception { + // Check for user email before the actual test. 
+ assertWithMessage("Unable to determine user email").that(IT_SERVICE_ACCOUNT_EMAIL).isNotEmpty(); + + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo(BucketInfo.newBuilder(generator.randomBucketName()).build()) + .setStorage(storage) + .build()) { + + String bucketName = tmpBucket.getBucket().getName(); + storage.createAcl(bucketName, Acl.of(IT_SERVICE_ACCOUNT_USER, Role.READER)); + PrintBucketAcl.printBucketAcl(GOOGLE_CLOUD_PROJECT, bucketName); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("READER: USER"); + } + } + + @Test + public void testPrintBucketAclByUser() throws Exception { + // Check for user email before the actual test. + assertWithMessage("Unable to determine user email").that(IT_SERVICE_ACCOUNT_EMAIL).isNotEmpty(); + + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo(BucketInfo.newBuilder(generator.randomBucketName()).build()) + .setStorage(storage) + .build()) { + + String bucketName = tmpBucket.getBucket().getName(); + storage.createAcl(bucketName, Acl.of(IT_SERVICE_ACCOUNT_USER, Role.READER)); + PrintBucketAclFilterByUser.printBucketAclFilterByUser(bucketName, IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(Role.READER.name()); + } + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/PubSubNotificationTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/PubSubNotificationTest.java new file mode 100644 index 000000000000..3cccbaa10ad2 --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/PubSubNotificationTest.java @@ -0,0 +1,157 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.storage.bucket;

import static com.example.storage.Env.GOOGLE_CLOUD_PROJECT;
import static com.example.storage.Env.GOOGLE_CLOUD_PROJECT_NUMBER;
import static com.google.common.truth.Truth.assertThat;

import com.example.storage.TestBase;
import com.google.cloud.pubsub.v1.TopicAdminClient;
import com.google.cloud.storage.Notification;
import com.google.cloud.storage.NotificationInfo;
import com.google.cloud.storage.NotificationInfo.EventType;
import com.google.common.collect.ImmutableMap;
import com.google.iam.v1.Binding;
import com.google.iam.v1.GetIamPolicyRequest;
import com.google.iam.v1.SetIamPolicyRequest;
import java.io.IOException;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

/**
 * Integration tests for the Pub/Sub bucket-notification samples.
 *
 * <p>Each test gets its own Pub/Sub topic (created in {@link #setUp()}, deleted in
 * {@link #tearDown()}); a shared {@link TopicAdminClient} lives for the whole class.
 */
public class PubSubNotificationTest extends TestBase {

  // Parameterized (previously raw) map of attributes attached to every notification.
  private static final Map<String, String> CUSTOM_ATTRIBUTES = ImmutableMap.of("label1", "value1");

  private static final String OBJECT_NAME_PREFIX = "index.html";
  private static final EventType[] EVENT_TYPES = {
    EventType.OBJECT_FINALIZE, EventType.OBJECT_METADATA_UPDATE
  };
  private static TopicAdminClient topicAdminClient;

  private String bucketName;
  private String topic;
  private NotificationInfo notificationInfo;

  @BeforeClass
  public static void configureTopicAdminClient() throws IOException {
    topicAdminClient = TopicAdminClient.create();
  }

  @AfterClass
  public static void deleteTopicAndClient() throws InterruptedException {
    // Shut down the shared admin client; per-test topics are deleted in tearDown().
    if (topicAdminClient != null) {
      topicAdminClient.shutdownNow();
      topicAdminClient.awaitTermination(5, TimeUnit.SECONDS);
    }
  }

  @Before
  public void setUp() {
    assertThat(GOOGLE_CLOUD_PROJECT).isNotNull();

    bucketName = bucket.getName();

    // Fresh topic per test so tests can't observe each other's notifications.
    String id = UUID.randomUUID().toString().substring(0, 8);
    topic = String.format("projects/%s/topics/new-topic-%s", GOOGLE_CLOUD_PROJECT, id);
    topicAdminClient.createTopic(topic);
    GetIamPolicyRequest getIamPolicyRequest =
        GetIamPolicyRequest.newBuilder().setResource(topic).build();
    com.google.iam.v1.Policy policy = topicAdminClient.getIamPolicy(getIamPolicyRequest);
    // Grant the GCS service agent publish rights on the topic. For available
    // bindings identities, see
    // https://cloud.google.com/iam/docs/overview#concepts_related_identity
    String member =
        GOOGLE_CLOUD_PROJECT_NUMBER != null
            ? String.format(
                "serviceAccount:service-%s@gs-project-accounts.iam.gserviceaccount.com",
                GOOGLE_CLOUD_PROJECT_NUMBER)
            : "allAuthenticatedUsers";
    Binding binding = Binding.newBuilder().setRole("roles/owner").addMembers(member).build();
    SetIamPolicyRequest setIamPolicyRequest =
        SetIamPolicyRequest.newBuilder()
            .setResource(topic)
            .setPolicy(policy.toBuilder().addBindings(binding).build())
            .build();
    topicAdminClient.setIamPolicy(setIamPolicyRequest);

    notificationInfo =
        NotificationInfo.newBuilder(topic)
            .setCustomAttributes(CUSTOM_ATTRIBUTES)
            .setPayloadFormat(Notification.PayloadFormat.JSON_API_V1)
            .build();
  }

  @After
  public void tearDown() {
    if (topic != null) {
      topicAdminClient.deleteTopic(topic);
    }
  }

  @Test
  public void testCreateBucketPubSubNotification() {
    CreateBucketPubSubNotification.createBucketPubSubNotification(
        bucketName,
        topic,
        CUSTOM_ATTRIBUTES,
        EVENT_TYPES,
        OBJECT_NAME_PREFIX,
        Notification.PayloadFormat.JSON_API_V1);
    assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(topic);
  }

  @Test
  public void testDeleteBucketPubSubNotification() {
    Notification notification = storage.createNotification(bucketName, notificationInfo);
    DeleteBucketPubSubNotification.deleteBucketPubSubNotification(
        bucketName, notification.getNotificationId());
    assertThat(stdOut.getCapturedOutputAsUtf8String())
        .contains("Successfully deleted notification");
  }

  @Test
  public void testNotificationNotFound() {
    Notification notification = storage.createNotification(bucketName, notificationInfo);
    // Delete once directly, then verify the sample reports the missing notification.
    storage.deleteNotification(bucketName, notification.getNotificationId());
    DeleteBucketPubSubNotification.deleteBucketPubSubNotification(
        bucketName, notification.getNotificationId());
    assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("Failed to find notification");
  }

  @Test
  public void testListBucketPubSubNotification() {
    storage.createNotification(bucketName, notificationInfo);
    ListPubSubNotifications.listPubSubNotifications(bucketName);
    assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(topic);
  }

  @Test
  public void testPrintBucketPubSubNotification() {
    Notification notification = storage.createNotification(bucketName, notificationInfo);
    PrintPubSubNotification.printPubSubNotification(bucketName, notification.getNotificationId());
    assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(topic);
  }
}
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +import static com.example.storage.Env.IT_SERVICE_ACCOUNT_EMAIL; +import static com.example.storage.Env.IT_SERVICE_ACCOUNT_USER; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.example.storage.TestBase; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.it.TemporaryBucket; +import org.junit.Test; + +public class RemoveBucketDefaultOwnerTest extends TestBase { + + @Test + public void testRemoveBucketDefaultOwner() throws Exception { + // Check for user email before the actual test. 
+ assertWithMessage("Unable to determine user email").that(IT_SERVICE_ACCOUNT_EMAIL).isNotEmpty(); + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo(BucketInfo.newBuilder(generator.randomBucketName()).build()) + .setStorage(storage) + .build()) { + + String bucketName = tmpBucket.getBucket().getName(); + + // Add User as Default Owner + Acl newDefaultOwner = Acl.of(IT_SERVICE_ACCOUNT_USER, Role.OWNER); + storage.createDefaultAcl(bucketName, newDefaultOwner); + + // Remove User as Default owner + RemoveBucketDefaultOwner.removeBucketDefaultOwner(bucketName, IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("Removed user"); + } + } + + @Test + public void testUserNotFound() throws Exception { + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo(BucketInfo.newBuilder(generator.randomBucketName()).build()) + .setStorage(storage) + .build()) { + + String bucketName = tmpBucket.getBucket().getName(); + // Remove User without Default Owner Permissions + RemoveBucketDefaultOwner.removeBucketDefaultOwner(bucketName, IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("was not found"); + } + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/RemoveBucketOwnerTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/RemoveBucketOwnerTest.java new file mode 100644 index 000000000000..6008cab6a757 --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/bucket/RemoveBucketOwnerTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.bucket; + +import static com.example.storage.Env.GOOGLE_CLOUD_PROJECT; +import static com.example.storage.Env.IT_SERVICE_ACCOUNT_EMAIL; +import static com.example.storage.Env.IT_SERVICE_ACCOUNT_USER; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.example.storage.TestBase; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.it.TemporaryBucket; +import org.junit.Test; + +public class RemoveBucketOwnerTest extends TestBase { + + @Test + public void testRemoveBucketOwner() throws Exception { + // Check for user email before the actual test. 
+ assertWithMessage("Unable to determine user email").that(IT_SERVICE_ACCOUNT_EMAIL).isNotEmpty(); + + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo(BucketInfo.newBuilder(generator.randomBucketName()).build()) + .setStorage(storage) + .build()) { + + String bucketName = tmpBucket.getBucket().getName(); + // Add User as Owner + Acl newOwner = Acl.of(IT_SERVICE_ACCOUNT_USER, Role.OWNER); + storage.createAcl(bucketName, newOwner); + + // Remove User as owner + RemoveBucketOwner.removeBucketOwner( + GOOGLE_CLOUD_PROJECT, bucketName, IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("Removed user"); + } + } + + @Test + public void testUserNotFound() throws Exception { + try (TemporaryBucket tmpBucket = + TemporaryBucket.newBuilder() + .setBucketInfo(BucketInfo.newBuilder(generator.randomBucketName()).build()) + .setStorage(storage) + .build()) { + + String bucketName = tmpBucket.getBucket().getName(); + + // Remove User without Owner Permissions + RemoveBucketOwner.removeBucketOwner( + GOOGLE_CLOUD_PROJECT, bucketName, IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("was not found"); + } + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/control/v2/FoldersTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/control/v2/FoldersTest.java new file mode 100644 index 000000000000..a677614a9a7c --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/control/v2/FoldersTest.java @@ -0,0 +1,158 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.control.v2; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.gax.rpc.NotFoundException; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.HierarchicalNamespace; +import com.google.cloud.storage.BucketInfo.IamConfiguration; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.it.BucketCleaner; +import com.google.cloud.storage.testing.RemoteStorageHelper; +import com.google.cloud.testing.junit4.StdOutCaptureRule; +import com.google.storage.control.v2.BucketName; +import com.google.storage.control.v2.Folder; +import com.google.storage.control.v2.FolderName; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +public final class FoldersTest { + + @Rule public StdOutCaptureRule stdOut = new StdOutCaptureRule(); + + private Storage storage; + private BucketInfo bucket; + private StorageControlClient storageControl; + + @Before + public void setUp() throws Exception { + storage = StorageOptions.http().build().getService(); + String bucketName = RemoteStorageHelper.generateBucketName(); + BucketInfo bucketInfo = + BucketInfo.newBuilder(bucketName) + .setIamConfiguration( + // 
Hierarchical namespace buckets must use uniform bucket-level access. + IamConfiguration.newBuilder().setIsUniformBucketLevelAccessEnabled(true).build()) + .setHierarchicalNamespace(HierarchicalNamespace.newBuilder().setEnabled(true).build()) + .build(); + + bucket = storage.create(bucketInfo).asBucketInfo(); + storageControl = StorageControlClient.create(); + } + + @After + public void tearDown() throws Exception { + // Use try-with-resource to handle the dance closing multiple things + try (AutoCloseable ignore1 = storage; + AutoCloseable ignore2 = storageControl) { + BucketCleaner.doCleanup(bucket.getName(), storage, storageControl); + } + } + + @Test + public void createFolder() throws IOException { + String folderName = UUID.randomUUID().toString(); + CreateFolder.createFolder(bucket.getName(), folderName); + try { + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(folderName); + } finally { + storageControl.deleteFolder(FolderName.of("_", bucket.getName(), folderName)); + } + } + + @Test + public void getFolder() throws IOException { + FolderName folderName = FolderName.of("_", bucket.getName(), UUID.randomUUID().toString()); + Folder gen1 = + storageControl.createFolder( + BucketName.of("_", bucket.getName()), + Folder.getDefaultInstance(), + folderName.getFolder()); + + GetFolder.getFolder(bucket.getName(), folderName.getFolder()); + try { + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(folderName.toString()); + } finally { + storageControl.deleteFolder(folderName); + } + } + + @Test + public void renameFolder() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + FolderName srcFolderName = FolderName.of("_", bucket.getName(), UUID.randomUUID().toString()); + Folder gen1 = + storageControl.createFolder( + BucketName.of("_", bucket.getName()), + Folder.getDefaultInstance(), + srcFolderName.getFolder()); + + FolderName dstFolderName = FolderName.of("_", bucket.getName(), UUID.randomUUID().toString()); + 
+ RenameFolder.renameFolder( + bucket.getName(), srcFolderName.getFolder(), dstFolderName.getFolder()); + try { + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(srcFolderName.toString()); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(dstFolderName.toString()); + assertThrows(NotFoundException.class, () -> storageControl.getFolder(srcFolderName)); + } finally { + storageControl.deleteFolder(dstFolderName); + } + } + + @Test + public void deleteFolder() throws IOException { + FolderName folderName = FolderName.of("_", bucket.getName(), UUID.randomUUID().toString()); + Folder gen1 = + storageControl.createFolder( + BucketName.of("_", bucket.getName()), + Folder.getDefaultInstance(), + folderName.getFolder()); + + DeleteFolder.deleteFolder(bucket.getName(), folderName.getFolder()); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(folderName.toString()); + assertThrows(NotFoundException.class, () -> storageControl.getFolder(folderName)); + } + + @Test + public void listFolder() throws IOException { + FolderName folderName = FolderName.of("_", bucket.getName(), UUID.randomUUID().toString()); + Folder gen1 = + storageControl.createFolder( + BucketName.of("_", bucket.getName()), + Folder.getDefaultInstance(), + folderName.getFolder()); + + ListFolders.listFolders(bucket.getName()); + try { + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(folderName.toString()); + } finally { + storageControl.deleteFolder(folderName); + } + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/control/v2/HNSTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/control/v2/HNSTest.java new file mode 100644 index 000000000000..5d2ec83c1a33 --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/control/v2/HNSTest.java @@ -0,0 +1,42 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this 
file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.control.v2; + +import static com.google.common.truth.Truth.assertThat; + +import com.example.storage.TestBase; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.testing.RemoteStorageHelper; +import org.junit.Test; + +public final class HNSTest extends TestBase { + + @Test + public void createHierarchicalNamespaceBucket() throws Exception { + String newBucketName = RemoteStorageHelper.generateBucketName(); + String projectId = storage.getOptions().getProjectId(); + CreateHierarchicalNamespaceBucket.createHierarchicalNamespaceBucket(projectId, newBucketName); + try { + Bucket remoteBucket = storage.get(newBucketName); + assertThat(remoteBucket).isNotNull(); + assertThat(remoteBucket.getHierarchicalNamespace().getEnabled()).isTrue(); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(newBucketName); + } finally { + storage.delete(newBucketName); + } + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/control/v2/ITAnywhereCacheTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/control/v2/ITAnywhereCacheTest.java new file mode 100644 index 000000000000..8bd7b0ba4dda --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/control/v2/ITAnywhereCacheTest.java @@ -0,0 +1,84 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.control.v2; + +import static org.junit.Assume.assumeTrue; + +import com.example.storage.TestBase; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import com.google.cloud.storage.it.runner.registry.Zone; +import com.google.common.base.Strings; +import com.google.storage.control.v2.AnywhereCacheName; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import org.junit.Before; +import org.junit.Test; + +public final class ITAnywhereCacheTest extends TestBase { + + @Inject public Zone zone; + @Inject public Generator generator; + + private String cacheName; + + @Before + public void setUp() { + assumeTrue( + "AnywhereCache sample tests skipped in CI due to very long operation times.", + Strings.isNullOrEmpty(System.getenv("JOB_TYPE"))); + cacheName = generator.randomObjectName(); + } + + @Test + public void create() throws IOException, ExecutionException, InterruptedException { + AnywhereCacheCreate.anywhereCacheCreate(bucket.getName(), cacheName, zone.getZone()); + } + + @Test + public void get() throws IOException { + AnywhereCacheGet.anywhereCacheGet(AnywhereCacheName.format("_", bucket.getName(), cacheName)); + } + + @Test + public void list() throws IOException { + AnywhereCacheList.anywhereCacheList(bucket.getName()); + } + + @Test + public void update() throws IOException, ExecutionException, InterruptedException { + AnywhereCacheUpdate.anywhereCacheUpdate(cacheName, "admit-on-second-miss"); + } + + @Test 
+ public void pause() throws IOException { + AnywhereCachePause.anywhereCachePause( + AnywhereCacheName.format("_", bucket.getName(), cacheName)); + } + + @Test + public void resume() throws IOException { + AnywhereCacheResume.anywhereCacheResume( + AnywhereCacheName.format("_", bucket.getName(), cacheName)); + } + + @Test + public void disable() throws IOException { + AnywhereCacheDisable.anywhereCacheDisable( + AnywhereCacheName.format("_", bucket.getName(), cacheName)); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/managedfolders/CreateManagedFolderTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/managedfolders/CreateManagedFolderTest.java new file mode 100644 index 000000000000..ea266a84fb69 --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/managedfolders/CreateManagedFolderTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.managedfolders; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.IamConfiguration; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.it.BucketCleaner; +import com.google.cloud.storage.testing.RemoteStorageHelper; +import com.google.cloud.testing.junit4.StdOutCaptureRule; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; +import java.util.UUID; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +public class CreateManagedFolderTest { + + @Rule public StdOutCaptureRule stdOut = new StdOutCaptureRule(); + + protected String bucketName; + protected Storage storage; + protected Bucket bucket; + protected String managedFolderId; + protected StorageControlClient storageControl; + + @Before + public void setUp() throws IOException { + bucketName = RemoteStorageHelper.generateBucketName(); + storageControl = StorageControlClient.create(); + storage = StorageOptions.getDefaultInstance().getService(); + managedFolderId = "new-managed-folder-" + UUID.randomUUID(); + BucketInfo bucketInfo = + BucketInfo.newBuilder(bucketName) + .setIamConfiguration( + IamConfiguration.newBuilder().setIsUniformBucketLevelAccessEnabled(true).build()) + .build(); + bucket = storage.create(bucketInfo); + } + + @After + public void tearDown() throws Exception { + try (Storage ignore1 = storage; + StorageControlClient ignore2 = storageControl) { + BucketCleaner.doCleanup(bucketName, storage, storageControl); + } + } + + @Test + public void testCreateManagedFolder() throws Exception { + CreateManagedFolder.managedFolderCreate(bucketName, managedFolderId); + String got = stdOut.getCapturedOutputAsUtf8String(); + 
assertThat(got).contains(String.format(managedFolderId)); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/managedfolders/DeleteManagedFolderTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/managedfolders/DeleteManagedFolderTest.java new file mode 100644 index 000000000000..d635388de587 --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/managedfolders/DeleteManagedFolderTest.java @@ -0,0 +1,85 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.managedfolders; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.IamConfiguration; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.it.BucketCleaner; +import com.google.cloud.storage.testing.RemoteStorageHelper; +import com.google.cloud.testing.junit4.StdOutCaptureRule; +import com.google.storage.control.v2.BucketName; +import com.google.storage.control.v2.CreateManagedFolderRequest; +import com.google.storage.control.v2.ManagedFolder; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; +import java.util.UUID; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +public class DeleteManagedFolderTest { + + @Rule public StdOutCaptureRule stdOut = new StdOutCaptureRule(); + + protected String bucketName; + protected Storage storage; + protected Bucket bucket; + protected String managedFolderId; + protected StorageControlClient storageControl; + + @Before + public void setUp() throws IOException { + bucketName = RemoteStorageHelper.generateBucketName(); + storageControl = StorageControlClient.create(); + storage = StorageOptions.getDefaultInstance().getService(); + managedFolderId = "new-managed-folder-" + UUID.randomUUID(); + BucketInfo bucketInfo = + BucketInfo.newBuilder(bucketName) + .setIamConfiguration( + IamConfiguration.newBuilder().setIsUniformBucketLevelAccessEnabled(true).build()) + .build(); + bucket = storage.create(bucketInfo); + storageControl.createManagedFolder( + CreateManagedFolderRequest.newBuilder() + // Set project to "_" to signify global bucket + .setParent(BucketName.format("_", bucketName)) + .setManagedFolder(ManagedFolder.newBuilder().build()) + .setManagedFolderId(managedFolderId) + .build()); + } + + @After 
+ public void tearDown() throws Exception { + try (Storage ignore1 = storage; + StorageControlClient ignore2 = storageControl) { + BucketCleaner.doCleanup(bucketName, storage, storageControl); + } + } + + @Test + public void testDeleteManagedFolder() throws Exception { + DeleteManagedFolder.managedFolderDelete(bucketName, managedFolderId); + String got = stdOut.getCapturedOutputAsUtf8String(); + assertThat(got).contains(String.format(managedFolderId)); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/managedfolders/GetManagedFolderTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/managedfolders/GetManagedFolderTest.java new file mode 100644 index 000000000000..2dc9f640ebfd --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/managedfolders/GetManagedFolderTest.java @@ -0,0 +1,85 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.managedfolders; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.BucketInfo.IamConfiguration; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.it.BucketCleaner; +import com.google.cloud.storage.testing.RemoteStorageHelper; +import com.google.cloud.testing.junit4.StdOutCaptureRule; +import com.google.storage.control.v2.BucketName; +import com.google.storage.control.v2.CreateManagedFolderRequest; +import com.google.storage.control.v2.ManagedFolder; +import com.google.storage.control.v2.StorageControlClient; +import java.io.IOException; +import java.util.UUID; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +public class GetManagedFolderTest { + + @Rule public StdOutCaptureRule stdOut = new StdOutCaptureRule(); + + protected String bucketName; + protected Storage storage; + protected Bucket bucket; + protected String managedFolderId; + protected StorageControlClient storageControl; + + @Before + public void setUp() throws IOException { + bucketName = RemoteStorageHelper.generateBucketName(); + storageControl = StorageControlClient.create(); + storage = StorageOptions.getDefaultInstance().getService(); + managedFolderId = "new-managed-folder-" + UUID.randomUUID(); + BucketInfo bucketInfo = + BucketInfo.newBuilder(bucketName) + .setIamConfiguration( + IamConfiguration.newBuilder().setIsUniformBucketLevelAccessEnabled(true).build()) + .build(); + bucket = storage.create(bucketInfo); + storageControl.createManagedFolder( + CreateManagedFolderRequest.newBuilder() + // Set project to "_" to signify global bucket + .setParent(BucketName.format("_", bucketName)) + .setManagedFolder(ManagedFolder.newBuilder().build()) + .setManagedFolderId(managedFolderId) + .build()); + } + + @After + 
public void tearDown() throws Exception { + try (Storage ignore1 = storage; + StorageControlClient ignore2 = storageControl) { + BucketCleaner.doCleanup(bucketName, storage, storageControl); + } + } + + @Test + public void testGetManagedFolder() throws Exception { + GetManagedFolder.managedFolderGet(bucketName, managedFolderId); + String got = stdOut.getCapturedOutputAsUtf8String(); + assertThat(got).contains(String.format(managedFolderId)); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/managedfolders/ListManagedFolderTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/managedfolders/ListManagedFolderTest.java new file mode 100644 index 000000000000..b7b3a4b694ec --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/managedfolders/ListManagedFolderTest.java @@ -0,0 +1,85 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.example.storage.managedfolders;

import static com.google.common.truth.Truth.assertThat;

import com.google.cloud.storage.Bucket;
import com.google.cloud.storage.BucketInfo;
import com.google.cloud.storage.BucketInfo.IamConfiguration;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import com.google.cloud.storage.it.BucketCleaner;
import com.google.cloud.storage.testing.RemoteStorageHelper;
import com.google.cloud.testing.junit4.StdOutCaptureRule;
import com.google.storage.control.v2.BucketName;
import com.google.storage.control.v2.CreateManagedFolderRequest;
import com.google.storage.control.v2.ManagedFolder;
import com.google.storage.control.v2.StorageControlClient;
import java.io.IOException;
import java.util.UUID;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;

/** Integration test for the {@code ListManagedFolders} sample. */
public class ListManagedFolderTest {

  @Rule public StdOutCaptureRule stdOut = new StdOutCaptureRule();

  protected String bucketName;
  protected Storage storage;
  protected Bucket bucket;
  protected String managedFolderId;
  protected StorageControlClient storageControl;

  /**
   * Creates a bucket with uniform bucket-level access enabled (required for managed folders) and
   * one managed folder inside it.
   */
  @Before
  public void setUp() throws IOException {
    bucketName = RemoteStorageHelper.generateBucketName();
    storageControl = StorageControlClient.create();
    storage = StorageOptions.getDefaultInstance().getService();
    managedFolderId = "new-managed-folder-" + UUID.randomUUID();
    BucketInfo bucketInfo =
        BucketInfo.newBuilder(bucketName)
            .setIamConfiguration(
                IamConfiguration.newBuilder().setIsUniformBucketLevelAccessEnabled(true).build())
            .build();
    bucket = storage.create(bucketInfo);
    storageControl.createManagedFolder(
        CreateManagedFolderRequest.newBuilder()
            // Set project to "_" to signify global bucket
            .setParent(BucketName.format("_", bucketName))
            .setManagedFolder(ManagedFolder.newBuilder().build())
            .setManagedFolderId(managedFolderId)
            .build());
  }

  /** Cleans up the bucket's contents, then closes both clients via try-with-resources. */
  @After
  public void tearDown() throws Exception {
    try (Storage ignore1 = storage;
        StorageControlClient ignore2 = storageControl) {
      BucketCleaner.doCleanup(bucketName, storage, storageControl);
    }
  }

  @Test
  public void testListManagedFolder() throws Exception {
    ListManagedFolders.managedFolderList(bucketName);
    String got = stdOut.getCapturedOutputAsUtf8String();
    // Fix: assert on the id directly. The previous String.format(managedFolderId) added nothing
    // and would throw IllegalFormatException if the id ever contained a '%' character.
    assertThat(got).contains(managedFolderId);
  }
}
+ */ + +package com.example.storage.object; + +import static com.example.storage.Env.GOOGLE_CLOUD_PROJECT; +import static com.example.storage.Env.IT_SERVICE_ACCOUNT_EMAIL; +import static com.example.storage.Env.IT_SERVICE_ACCOUNT_USER; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.example.storage.TestBase; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import org.junit.Test; + +public class AddBlobOwnerTest extends TestBase { + + @Test + public void testAddBlobOwner() { + // Check for user email before the actual test. + assertWithMessage("Unable to determine user email").that(IT_SERVICE_ACCOUNT_EMAIL).isNotEmpty(); + + BlobInfo gen1 = createEmptyObject(); + BlobId id = gen1.getBlobId(); + // Add Ownership to the file. + AddBlobOwner.addBlobOwner( + GOOGLE_CLOUD_PROJECT, id.getBucket(), IT_SERVICE_ACCOUNT_EMAIL, id.getName()); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(IT_SERVICE_ACCOUNT_EMAIL); + assertThat(storage.getAcl(id, IT_SERVICE_ACCOUNT_USER)).isNotNull(); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/object/DownloadBytesRangeTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/object/DownloadBytesRangeTest.java new file mode 100644 index 000000000000..5ad25abe01b5 --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/object/DownloadBytesRangeTest.java @@ -0,0 +1,82 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
package com.example.storage.object;

import static com.google.common.truth.Truth.assertThat;

import com.example.storage.Env;
import com.example.storage.TestBase;
import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.Storage.BlobTargetOption;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

/** Integration test for the {@code DownloadByteRange} sample. */
public class DownloadBytesRangeTest extends TestBase {

  // Three repetitions of the lowercase alphabet followed by the ten digits: 108 bytes total.
  private static final byte[] CONTENT = { // 18 elements per row
    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
    's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
    's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
    's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'
  };

  @Rule public final TemporaryFolder tmp = new TemporaryFolder();

  @Test
  public void testDownloadByteRange() throws IOException {
    int startByte = 14;
    int endByte = 37;
    // copyOfRange is exclusive of endByte, matching the sample's range semantics.
    byte[] expected = Arrays.copyOfRange(CONTENT, startByte, endByte);

    BlobInfo uploaded =
        storage.create(
            BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(),
            CONTENT,
            BlobTargetOption.doesNotExist());
    BlobId blobId = uploaded.getBlobId();

    File destination = tmp.newFile();
    try {
      String destFileName = destination.getAbsolutePath();
      DownloadByteRange.downloadByteRange(
          Env.GOOGLE_CLOUD_PROJECT,
          blobId.getBucket(),
          blobId.getName(),
          startByte,
          endByte,
          destFileName);

      String output = stdOut.getCapturedOutputAsUtf8String();
      assertThat(output).contains("downloaded to");
      assertThat(output).contains("from byte 14 to byte 37");
      assertThat(Files.readAllBytes(Paths.get(destFileName))).isEqualTo(expected);
    } finally {
      destination.delete();
    }
  }
}
+ */ + +package com.example.storage.object; + +import static com.example.storage.Env.IT_SERVICE_ACCOUNT_USER; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.example.storage.Env; +import com.example.storage.TestBase; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import org.junit.Test; + +public class PrintBlobAclForUserTest extends TestBase { + + public static final String IT_SERVICE_ACCOUNT_EMAIL = Env.IT_SERVICE_ACCOUNT_EMAIL; + + @Test + public void testPrintBucketAclByUser() throws Exception { + // Check for user email before the actual test. + assertWithMessage("Unable to determine user email").that(IT_SERVICE_ACCOUNT_EMAIL).isNotEmpty(); + + BlobInfo gen1 = createEmptyObject(); + BlobId id = gen1.getBlobId(); + storage.createAcl(id, Acl.of(IT_SERVICE_ACCOUNT_USER, Role.READER)); + PrintBlobAclForUser.printBlobAclForUser(id.getBucket(), id.getName(), IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(Role.READER.name()); + } + + @Test + public void testUserNotFound() throws Exception { + // Check for user email before the actual test. 
+ assertWithMessage("Unable to determine user email").that(IT_SERVICE_ACCOUNT_EMAIL).isNotEmpty(); + + BlobInfo gen1 = createEmptyObject(); + BlobId id = gen1.getBlobId(); + // Delete Acl just in case to make sure the User ACL is not present + storage.deleteAcl(id, IT_SERVICE_ACCOUNT_USER); + PrintBlobAclForUser.printBlobAclForUser(id.getBucket(), id.getName(), IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("not found"); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/object/PrintBlobAclTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/object/PrintBlobAclTest.java new file mode 100644 index 000000000000..04eb9e65bd67 --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/object/PrintBlobAclTest.java @@ -0,0 +1,46 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.storage.object; + +import static com.example.storage.Env.IT_SERVICE_ACCOUNT_USER; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.example.storage.Env; +import com.example.storage.TestBase; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import org.junit.Test; + +public class PrintBlobAclTest extends TestBase { + + public static final String IT_SERVICE_ACCOUNT_EMAIL = Env.IT_SERVICE_ACCOUNT_EMAIL; + + @Test + public void testPrintBlobAcls() { + // Check for user email before the actual test. + assertWithMessage("Unable to determine user email").that(IT_SERVICE_ACCOUNT_EMAIL).isNotEmpty(); + + BlobInfo gen1 = createEmptyObject(); + BlobId id = gen1.getBlobId(); + storage.createAcl(id, Acl.of(IT_SERVICE_ACCOUNT_USER, Role.READER)); + PrintBlobAcl.printBlobAcl(id.getBucket(), id.getName()); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("READER: USER"); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/object/RemoveBlobOwnerTest.java b/java-storage/samples/snippets/src/test/java/com/example/storage/object/RemoveBlobOwnerTest.java new file mode 100644 index 000000000000..325054041a2b --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/object/RemoveBlobOwnerTest.java @@ -0,0 +1,65 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.storage.object; + +import static com.example.storage.Env.GOOGLE_CLOUD_PROJECT; +import static com.example.storage.Env.IT_SERVICE_ACCOUNT_EMAIL; +import static com.example.storage.Env.IT_SERVICE_ACCOUNT_USER; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.example.storage.TestBase; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Acl.Role; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import org.junit.Test; + +public class RemoveBlobOwnerTest extends TestBase { + + @Test + public void testRemoveBlobOwner() { + // Check for user email before the actual test. + assertWithMessage("Unable to determine user email").that(IT_SERVICE_ACCOUNT_EMAIL).isNotEmpty(); + + BlobInfo gen1 = createEmptyObject(); + BlobId id = gen1.getBlobId(); + // Add User as Owner + Acl newFileOwner = Acl.of(IT_SERVICE_ACCOUNT_USER, Role.OWNER); + storage.createAcl(id, newFileOwner); + + // Remove User as owner + RemoveBlobOwner.removeBlobOwner( + GOOGLE_CLOUD_PROJECT, id.getBucket(), IT_SERVICE_ACCOUNT_EMAIL, id.getName()); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("Removed user"); + } + + @Test + public void testUserNotFound() { + // Check for user email before the actual test. 
+ assertWithMessage("Unable to determine user email").that(IT_SERVICE_ACCOUNT_EMAIL).isNotEmpty(); + + BlobInfo gen1 = createEmptyObject(); + BlobId id = gen1.getBlobId(); + // Remove User without Owner Permissions + RemoveBlobOwner.removeBlobOwner( + GOOGLE_CLOUD_PROJECT, id.getBucket(), IT_SERVICE_ACCOUNT_EMAIL, id.getName()); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains(IT_SERVICE_ACCOUNT_EMAIL); + assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("was not found"); + } +} diff --git a/java-storage/samples/snippets/src/test/java/com/example/storage/transfermanager/ITTransferManagerSamples.java b/java-storage/samples/snippets/src/test/java/com/example/storage/transfermanager/ITTransferManagerSamples.java new file mode 100644 index 000000000000..97fb2cea9c35 --- /dev/null +++ b/java-storage/samples/snippets/src/test/java/com/example/storage/transfermanager/ITTransferManagerSamples.java @@ -0,0 +1,150 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.example.storage.transfermanager;

import static com.example.storage.Env.GOOGLE_CLOUD_PROJECT;
import static com.google.cloud.storage.TestUtils.assertAll;
import static com.google.common.truth.Truth.assertThat;

import com.example.storage.TestBase;
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.BucketInfo;
import com.google.cloud.storage.DataGenerator;
import com.google.cloud.storage.Storage.BlobTargetOption;
import com.google.cloud.storage.TmpFile;
import com.google.cloud.storage.it.TemporaryBucket;
import com.google.common.collect.ImmutableList;
import java.io.IOException;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Stream;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

/**
 * Integration tests for the Transfer Manager samples (upload/download of many files, whole
 * directories, and whole buckets).
 *
 * <p>Fix: the {@code List} declarations were raw types; they are now parameterized
 * ({@code List<Path>}, {@code List<BlobInfo>}) so the stream pipelines type-check without
 * unchecked warnings.
 */
public class ITTransferManagerSamples extends TestBase {

  @Rule public final TemporaryFolder downloadDirectory = new TemporaryFolder();
  @Rule public final TemporaryFolder uploadDirectory = new TemporaryFolder();

  /** Uploads three temp files and verifies each file name appears in the sample's output. */
  @Test
  public void uploadFiles() throws Exception {
    Path baseDir = uploadDirectory.getRoot().toPath();
    try (TmpFile file1 = DataGenerator.base64Characters().tempFile(baseDir, 13);
        TmpFile file2 = DataGenerator.base64Characters().tempFile(baseDir, 17);
        TmpFile file3 = DataGenerator.base64Characters().tempFile(baseDir, 19)) {
      List<Path> files =
          Stream.of(file1, file2, file3)
              .map(TmpFile::getPath)
              .collect(ImmutableList.toImmutableList());
      UploadMany.uploadManyFiles(bucket.getName(), files);
      String snippetOutput = stdOut.getCapturedOutputAsUtf8String();
      assertAll(
          () -> assertThat(snippetOutput).contains(file1.getPath().getFileName().toString()),
          () -> assertThat(snippetOutput).contains(file2.getPath().getFileName().toString()),
          () -> assertThat(snippetOutput).contains(file3.getPath().getFileName().toString()));
    }
  }

  /** Uploads a directory of three temp files and verifies each name appears in the output. */
  @Test
  public void uploadDirectory() throws Exception {
    Path baseDir = uploadDirectory.getRoot().toPath();
    try (TmpFile file1 = DataGenerator.base64Characters().tempFile(baseDir, 13);
        TmpFile file2 = DataGenerator.base64Characters().tempFile(baseDir, 17);
        TmpFile file3 = DataGenerator.base64Characters().tempFile(baseDir, 19)) {
      UploadDirectory.uploadDirectoryContents(bucket.getName(), baseDir);
      String snippetOutput = stdOut.getCapturedOutputAsUtf8String();
      assertAll(
          () -> assertThat(snippetOutput).contains(file1.getPath().getFileName().toString()),
          () -> assertThat(snippetOutput).contains(file2.getPath().getFileName().toString()),
          () -> assertThat(snippetOutput).contains(file3.getPath().getFileName().toString()));
    }
  }

  /** Downloads a whole (temporary) bucket and verifies each object name appears in the output. */
  @Test
  public void downloadBucket() throws Exception {
    // A dedicated bucket keeps this test isolated from objects created by sibling tests.
    try (TemporaryBucket tmpBucket =
        TemporaryBucket.newBuilder()
            .setBucketInfo(BucketInfo.newBuilder(generator.randomBucketName()).build())
            .setStorage(storage)
            .build()) {
      BucketInfo bucket = tmpBucket.getBucket();
      String name1 = generator.randomObjectName();
      String name2 = generator.randomObjectName();
      String name3 = generator.randomObjectName();
      Stream.of(name1, name2, name3)
          .map(name -> BlobInfo.newBuilder(bucket, name).build())
          .forEach(info -> storage.create(info, BlobTargetOption.doesNotExist()));
      DownloadBucket.downloadBucketContents(
          GOOGLE_CLOUD_PROJECT, bucket.getName(), downloadDirectory.getRoot().toPath());
      String snippetOutput = stdOut.getCapturedOutputAsUtf8String();
      assertAll(
          () -> assertThat(snippetOutput).contains(name1),
          () -> assertThat(snippetOutput).contains(name2),
          () -> assertThat(snippetOutput).contains(name3));
    }
  }

  /** Downloads three named blobs and verifies each object name appears in the output. */
  @Test
  public void downloadBlobs() throws Exception {
    String name1 = generator.randomObjectName();
    String name2 = generator.randomObjectName();
    String name3 = generator.randomObjectName();
    List<BlobInfo> blobs =
        Stream.of(name1, name2, name3)
            .map(this::info)
            .map(info -> storage.create(info, BlobTargetOption.doesNotExist()))
            .collect(ImmutableList.toImmutableList());
    DownloadMany.downloadManyBlobs(bucket.getName(), blobs, downloadDirectory.getRoot().toPath());
    String snippetOutput = stdOut.getCapturedOutputAsUtf8String();
    assertAll(
        () -> assertThat(snippetOutput).contains(name1),
        () -> assertThat(snippetOutput).contains(name2),
        () -> assertThat(snippetOutput).contains(name3));
  }

  /**
   * Uploads one file large enough (313 MiB) to trigger parallel composite upload and verifies the
   * file name appears in the output.
   */
  @Test
  public void uploadAllowPCU() throws IOException {
    Path baseDir = uploadDirectory.getRoot().toPath();
    try (TmpFile file1 = DataGenerator.base64Characters().tempFile(baseDir, 313 * 1024 * 1024)) {
      AllowParallelCompositeUpload.parallelCompositeUploadAllowed(
          bucket.getName(), ImmutableList.of(file1.getPath()));
      String snippetOutput = stdOut.getCapturedOutputAsUtf8String();
      assertThat(snippetOutput).contains(file1.getPath().getFileName().toString());
    }
  }

  /** Downloads blobs with divide-and-conquer enabled and verifies each name in the output. */
  @Test
  public void downloadAllowDivideAndConquer() throws Exception {
    String name1 = generator.randomObjectName();
    String name2 = generator.randomObjectName();
    String name3 = generator.randomObjectName();
    List<BlobInfo> blobs =
        Stream.of(name1, name2, name3)
            .map(this::info)
            .map(info -> storage.create(info, BlobTargetOption.doesNotExist()))
            .collect(ImmutableList.toImmutableList());
    AllowDivideAndConquerDownload.divideAndConquerDownloadAllowed(
        blobs, bucket.getName(), downloadDirectory.getRoot().toPath());
    String snippetOutput = stdOut.getCapturedOutputAsUtf8String();
    assertAll(
        () -> assertThat(snippetOutput).contains(name1),
        () -> assertThat(snippetOutput).contains(name2),
        () -> assertThat(snippetOutput).contains(name3));
  }
}
google-cloud-storage-parent + 2.64.1-SNAPSHOT + + + + 1.8 + 1.8 + UTF-8 + + + + info.picocli + picocli + 4.7.6 + + + com.google.cloud + google-cloud-storage + + + com.google.cloud + google-cloud-storage + 2.64.1-SNAPSHOT + tests + + + com.google.api + gax + + + com.google.api + api-common + + + com.google.guava + guava + + + com.google.cloud + google-cloud-core + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + + + package + + shade + + + ${uberjar.name} + + + com.google.cloud.storage.benchmarking.StorageSharedBenchmarkingCli + + + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + org.apache.maven.plugins + maven-deploy-plugin + + true + + + + org.sonatype.plugins + nexus-staging-maven-plugin + + true + + + + + + \ No newline at end of file diff --git a/java-storage/storage-shared-benchmarking/src/main/java/com/google/cloud/storage/benchmarking/Bidi.java b/java-storage/storage-shared-benchmarking/src/main/java/com/google/cloud/storage/benchmarking/Bidi.java new file mode 100644 index 000000000000..550a74678d05 --- /dev/null +++ b/java-storage/storage-shared-benchmarking/src/main/java/com/google/cloud/storage/benchmarking/Bidi.java @@ -0,0 +1,82 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.storage.benchmarking;

import static com.google.cloud.storage.benchmarking.StorageSharedBenchmarkingUtils.generateCloudMonitoringResult;

import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.BlobWriteSession;
import com.google.cloud.storage.DataGenerator;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.Storage.BlobWriteOption;
import java.io.PrintWriter;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.StandardCharsets;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.concurrent.Callable;

/**
 * Single benchmark task: writes one object of {@code objectSize} random base64 bytes through a
 * {@link BlobWriteSession} (bidi write path), emits the elapsed wall-clock time as a Cloud
 * Monitoring custom metric on {@code pw}, then deletes the object.
 */
class Bidi implements Callable<String> {
  private final Storage storageClient;
  private final String bucketName;
  private final int objectSize;
  private final PrintWriter pw;
  private final String api;
  private final int workers;

  Bidi(
      Storage storageClient,
      String bucketName,
      int objectSize,
      PrintWriter pw,
      String api,
      int workers) {
    this.storageClient = storageClient;
    this.bucketName = bucketName;
    this.objectSize = objectSize;
    this.pw = pw;
    this.api = api;
    this.workers = workers;
  }

  /**
   * Runs one upload, reports its latency, and cleans up.
   *
   * @return "OK" on success; any failure propagates as an exception
   */
  @Override
  public String call() throws Exception {
    // Fix: byte[].toString() returns "[B@<identityHash>", not the generated characters. Decode
    // the 20 base64 bytes so the object name is the intended random string.
    String blobName =
        new String(DataGenerator.base64Characters().genBytes(20), StandardCharsets.UTF_8);
    BlobWriteSession sess =
        storageClient.blobWriteSession(
            BlobInfo.newBuilder(bucketName, blobName).build(), BlobWriteOption.doesNotExist());
    byte[] bytes = DataGenerator.base64Characters().genBytes(objectSize);
    Clock clock = Clock.systemDefaultZone();
    Instant startTime = clock.instant();
    try (WritableByteChannel w = sess.open()) {
      w.write(ByteBuffer.wrap(bytes));
    }
    // getResult() resolves only after the session is closed; timing includes finalization.
    BlobInfo created = sess.getResult().get();
    Instant endTime = clock.instant();
    Duration elapsedTimeWrite = Duration.between(startTime, endTime);
    printResult("BIDI", created, elapsedTimeWrite);
    StorageSharedBenchmarkingUtils.cleanupObject(storageClient, created);
    return "OK";
  }

  /** Formats one latency sample as a custom-metric line and writes it to the shared writer. */
  private void printResult(String op, BlobInfo created, Duration duration) {
    pw.println(
        generateCloudMonitoringResult(op, duration.toMillis(), created, api, workers)
            .formatAsCustomMetric());
  }
}
package com.google.cloud.storage.benchmarking;

import static com.google.common.base.Preconditions.checkNotNull;

import com.google.common.base.MoreObjects;
import java.util.Objects;
import org.checkerframework.checker.nullness.qual.NonNull;

/**
 * Immutable value object holding one benchmark measurement, serializable as a Cloud Monitoring
 * custom-metric line via {@link #formatAsCustomMetric()}.
 *
 * <p>Instances are built with {@link Builder}; string fields default to {@code ""} (never null).
 */
final class CloudMonitoringResult {
  @NonNull private final String library;
  @NonNull private final String api;
  @NonNull private final String op;

  private final int workers;
  private final int objectSize;
  private final int appBufferSize;
  private final int chunksize;
  // NOTE(review): field is crc32CEnabled but the builder/param use crc32cEnabled — consider
  // unifying the casing.
  private final boolean crc32CEnabled;
  private final boolean md5Enabled;
  private final int cpuTimeUs;
  @NonNull private final String bucketName;
  @NonNull private final String status;
  @NonNull private final String transferSize;
  @NonNull private final String transferOffset;
  @NonNull private final String failureMsg;
  private final double latency;

  /** Package-private: construct via {@link #newBuilder()}. */
  CloudMonitoringResult(
      String library,
      String api,
      String op,
      int workers,
      int objectSize,
      int appBufferSize,
      int chunksize,
      boolean crc32cEnabled,
      boolean md5Enabled,
      int cpuTimeUs,
      String bucketName,
      String status,
      String transferSize,
      String transferOffset,
      String failureMsg,
      double latency) {
    this.library = library;
    this.api = api;
    this.op = op;
    this.workers = workers;
    this.objectSize = objectSize;
    this.appBufferSize = appBufferSize;
    this.chunksize = chunksize;
    this.crc32CEnabled = crc32cEnabled;
    this.md5Enabled = md5Enabled;
    this.cpuTimeUs = cpuTimeUs;
    this.bucketName = bucketName;
    this.status = status;
    this.transferSize = transferSize;
    this.transferOffset = transferOffset;
    this.failureMsg = failureMsg;
    this.latency = latency;
  }

  /** Returns a builder with all string fields initialized to empty strings. */
  public static Builder newBuilder() {
    return new Builder();
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this)
        .add("library", library)
        .add("api", api)
        .add("op", op)
        .add("workers", workers)
        .add("objectSize", objectSize)
        .add("appBufferSize", appBufferSize)
        .add("chunksize", chunksize)
        .add("crc32CEnabled", crc32CEnabled)
        .add("md5Enabled", md5Enabled)
        .add("cpuTimeUs", cpuTimeUs)
        .add("bucketName", bucketName)
        .add("status", status)
        .add("transferSize", transferSize)
        .add("transferOffset", transferOffset)
        .add("failureMsg", failureMsg)
        .add("latency", latency)
        .toString();
  }

  /** Value equality over all sixteen fields. */
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof CloudMonitoringResult)) {
      return false;
    }
    CloudMonitoringResult result = (CloudMonitoringResult) o;
    return workers == result.workers
        && objectSize == result.objectSize
        && appBufferSize == result.appBufferSize
        && chunksize == result.chunksize
        && crc32CEnabled == result.crc32CEnabled
        && md5Enabled == result.md5Enabled
        && cpuTimeUs == result.cpuTimeUs
        // Double.compare handles NaN consistently with Double.equals semantics.
        && Double.compare(result.latency, latency) == 0
        && Objects.equals(library, result.library)
        && Objects.equals(api, result.api)
        && Objects.equals(op, result.op)
        && Objects.equals(bucketName, result.bucketName)
        && Objects.equals(status, result.status)
        && Objects.equals(transferSize, result.transferSize)
        && Objects.equals(transferOffset, result.transferOffset)
        && Objects.equals(failureMsg, result.failureMsg);
  }

  @Override
  public int hashCode() {
    return Objects.hash(
        library,
        api,
        op,
        workers,
        objectSize,
        appBufferSize,
        chunksize,
        crc32CEnabled,
        md5Enabled,
        cpuTimeUs,
        bucketName,
        status,
        transferSize,
        transferOffset,
        failureMsg,
        latency);
  }

  /**
   * Formats this result as an {@code api_latency{...}value} custom-metric line. Note that
   * cpuTimeUs, transferSize, transferOffset, and failureMsg are intentionally not part of this
   * wire format.
   */
  public String formatAsCustomMetric() {
    return String.format(
        "api_latency{library=%s,api=%s,op=%s,object_size=%d,chunksize=%d,workers=%d,crc32c_enabled=%b,md5_enabled=%b,bucket_name=%s,status=%s,app_buffer_size=%d}%.1f",
        library,
        api,
        op,
        objectSize,
        chunksize,
        workers,
        crc32CEnabled,
        md5Enabled,
        bucketName,
        status,
        appBufferSize,
        latency);
  }

  /** Mutable builder; string fields start as {@code ""} so {@link #build()} never sees null. */
  public static class Builder {

    @NonNull private String library;
    @NonNull private String api;
    @NonNull private String op;
    private int workers;
    private int objectSize;
    private int appBufferSize;
    private int chunksize;
    private boolean crc32cEnabled;
    private boolean md5Enabled;
    private int cpuTimeUs;
    @NonNull private String bucketName;
    @NonNull private String status;
    @NonNull private String transferSize;
    @NonNull private String transferOffset;
    @NonNull private String failureMsg;
    private double latency;

    Builder() {
      library = "";
      api = "";
      op = "";
      bucketName = "";
      status = "";
      transferSize = "";
      transferOffset = "";
      failureMsg = "";
    }

    public Builder setLibrary(String library) {
      this.library = library;
      return this;
    }

    public Builder setApi(String api) {
      this.api = api;
      return this;
    }

    public Builder setOp(String op) {
      this.op = op;
      return this;
    }

    public Builder setWorkers(int workers) {
      this.workers = workers;
      return this;
    }

    public Builder setObjectSize(int objectSize) {
      this.objectSize = objectSize;
      return this;
    }

    public Builder setAppBufferSize(int appBufferSize) {
      this.appBufferSize = appBufferSize;
      return this;
    }

    public Builder setChunksize(int chunksize) {
      this.chunksize = chunksize;
      return this;
    }

    public Builder setCrc32cEnabled(boolean crc32cEnabled) {
      this.crc32cEnabled = crc32cEnabled;
      return this;
    }

    public Builder setMd5Enabled(boolean md5Enabled) {
      this.md5Enabled = md5Enabled;
      return this;
    }

    public Builder setCpuTimeUs(int cpuTimeUs) {
      this.cpuTimeUs = cpuTimeUs;
      return this;
    }

    public Builder setBucketName(String bucketName) {
      this.bucketName = bucketName;
      return this;
    }

    public Builder setStatus(String status) {
      this.status = status;
      return this;
    }

    public Builder setTransferSize(String transferSize) {
      this.transferSize = transferSize;
      return this;
    }

    public Builder setTransferOffset(String transferOffset) {
      this.transferOffset = transferOffset;
      return this;
    }

    public Builder setFailureMsg(String failureMsg) {
      this.failureMsg = failureMsg;
      return this;
    }

    public Builder setLatency(double latency) {
      this.latency = latency;
      return this;
    }

    /**
     * Validates that no setter nulled out a string field, then builds the immutable result.
     *
     * @throws NullPointerException if any string field was explicitly set to null
     */
    public CloudMonitoringResult build() {
      checkNotNull(library);
      checkNotNull(api);
      checkNotNull(op);
      checkNotNull(bucketName);
      checkNotNull(status);
      checkNotNull(transferSize);
      checkNotNull(transferOffset);
      checkNotNull(failureMsg);
      return new CloudMonitoringResult(
          library,
          api,
          op,
          workers,
          objectSize,
          appBufferSize,
          chunksize,
          crc32cEnabled,
          md5Enabled,
          cpuTimeUs,
          bucketName,
          status,
          transferSize,
          transferOffset,
          failureMsg,
          latency);
    }
  }
}
+ */ + +package com.google.cloud.storage.benchmarking; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ListenableFutureToApiFuture; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.storage.BlobWriteSessionConfigs; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import java.io.PrintWriter; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Random; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.regex.Pattern; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +@Command(name = "ssb") +public final class StorageSharedBenchmarkingCli implements Runnable { + // TODO: check what input validation is needed for option values. 
+ @Option(names = "-project", description = "GCP Project Identifier", required = true) + String project; + + @Option(names = "-bucket", description = "Name of the bucket to use", required = true) + String bucket; + + @Option(names = "-samples", defaultValue = "8000", description = "Number of samples to report") + int samples; + + @Option( + names = "-workers", + defaultValue = "16", + description = "Number of workers to run in parallel for the workload") + int workers; + + @Option(names = "-api", description = "API to use", required = true) + String api; + + @Option( + names = "-object_size", + defaultValue = "1048576..1048576", + description = + "any positive integer, or an inclusive range such as min..max where min and max are" + + " positive integers") + String objectSize; + + @Option( + names = "-output_type", + defaultValue = "cloud-monitoring", + description = "Output results format") + String outputType; + + @Option( + names = "-test_type", + description = "Specify which workload the cli should run", + required = true) + String testType; + + @Option( + names = "-temp_dir_location", + description = "Specify the path where the temporary directory should be located") + String tempDirLocation; + + @Option( + names = "-warmup", + description = "Number of seconds a W1R3 warmup will run on all available processors", + defaultValue = "0") + int warmup; + + @Option(names = "-bidi_enabled", description = "If bidi should be enabled") + boolean bidiEnabled; + + Path tempDir; + + PrintWriter printWriter; + + public static void main(String[] args) { + CommandLine cmd = new CommandLine(StorageSharedBenchmarkingCli.class); + System.exit(cmd.execute(args)); + } + + @Override + public void run() { + tempDir = + tempDirLocation != null + ? 
Paths.get(tempDirLocation) + : Paths.get(System.getProperty("java.io.tmpdir")); + printWriter = new PrintWriter(System.out, true); + switch (testType) { + case "w1r3": + runWorkload1(); + break; + case "write-only": + runWorkloadWriteOnly(); + break; + default: + throw new IllegalStateException("Specify a workload to run"); + } + } + + private void runWorkload1() { + switch (api) { + case "JSON": + runWorkload1Json(); + break; + case "DirectPath": + runWorkload1DirectPath(); + break; + default: + throw new IllegalStateException("Specify an API to use"); + } + } + + private void runWorkload1Json() { + RetrySettings retrySettings = StorageOptions.getNoRetrySettings().toBuilder().build(); + + StorageOptions retryStorageOptions = + StorageOptions.newBuilder().setProjectId(project).setRetrySettings(retrySettings).build(); + Storage storageClient = retryStorageOptions.getService(); + try { + runW1R3(storageClient); + } catch (Exception e) { + System.err.println("Failed to run workload 1: " + e.getMessage()); + System.exit(1); + } + } + + private void runWorkload1DirectPath() { + RetrySettings retrySettings = StorageOptions.getNoRetrySettings().toBuilder().build(); + StorageOptions retryStorageOptions = + StorageOptions.grpc().setRetrySettings(retrySettings).setAttemptDirectPath(true).build(); + Storage storageClient = retryStorageOptions.getService(); + try { + runW1R3(storageClient); + } catch (Exception e) { + System.err.println("Failed to run workload 4: " + e.getMessage()); + System.exit(1); + } + } + + private void runWorkloadWriteOnly() { + try { + if (bidiEnabled) { + StorageOptions options = + StorageOptions.grpc() + .setProjectId(project) + .setBlobWriteSessionConfig(BlobWriteSessionConfigs.bidiWrite()) + .build(); + Storage storageClient = options.getService(); + runBidi(storageClient); + } else { + StorageOptions options = + StorageOptions.grpc() + .setProjectId(project) + .setBlobWriteSessionConfig(BlobWriteSessionConfigs.getDefault()) + .build(); + Storage 
storageClient = options.getService(); + runBidi(storageClient); + } + } catch (Exception e) { + System.err.println("Failed to run workload bidi" + e.getMessage()); + System.exit(1); + } + } + + private void runW1R3(Storage storageClient) throws ExecutionException, InterruptedException { + ListeningExecutorService executorService = + MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(workers)); + runWarmup(storageClient); + for (int i = 0; i < samples; i++) { + Range objectSizeRange = Range.of(objectSize); + int objectSize = getRandomInt(objectSizeRange.min, objectSizeRange.max); + convert( + executorService.submit( + new W1R3( + storageClient, + workers, + api, + printWriter, + objectSize, + tempDir, + bucket, + false))) + .get(); + } + } + + private void runBidi(Storage storageClient) throws ExecutionException, InterruptedException { + ListeningExecutorService executorService = + MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(workers)); + for (int i = 0; i < samples; i++) { + Range objectSizeRange = Range.of(objectSize); + int objectSize = getRandomInt(objectSizeRange.min, objectSizeRange.max); + convert( + executorService.submit( + new Bidi(storageClient, bucket, objectSize, printWriter, api, workers))) + .get(); + } + } + + private void runWarmup(Storage storageClient) throws ExecutionException, InterruptedException { + if (warmup <= 0) { + return; + } + int numberProcessors = Runtime.getRuntime().availableProcessors(); + ListeningExecutorService executorService = + MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numberProcessors)); + long startTime = System.currentTimeMillis(); + long endTime = startTime + (warmup * 1000); + // Run Warmup + while (System.currentTimeMillis() < endTime) { + Range objectSizeRange = Range.of(objectSize); + int objectSize = getRandomInt(objectSizeRange.min, objectSizeRange.max); + convert( + executorService.submit( + new W1R3( + storageClient, workers, api, printWriter, objectSize, tempDir, 
bucket, true))) + .get(); + } + } + + public static int getRandomInt(int min, int max) { + if (min == max) return min; + Random random = new Random(); + return random.nextInt((max - min) + 1) + min; + } + + private static ApiFuture convert(ListenableFuture lf) { + return new ListenableFutureToApiFuture<>(lf); + } + + private static final class Range { + private final int min; + private final int max; + + private Range(int min, int max) { + this.min = min; + this.max = max; + } + + public static Range of(int min, int max) { + return new Range(min, max); + } + + // Takes an object size range of format min..max and creates a range object + public static Range of(String range) { + Pattern p = Pattern.compile("\\.\\."); + String[] splitRangeVals = p.split(range); + if (splitRangeVals.length == 2) { + String min = splitRangeVals[0]; + String max = splitRangeVals[1]; + return of(Integer.parseInt(min), Integer.parseInt(max)); + } + throw new IllegalStateException("Expected a size range of format min..max, but got " + range); + } + } +} diff --git a/java-storage/storage-shared-benchmarking/src/main/java/com/google/cloud/storage/benchmarking/StorageSharedBenchmarkingUtils.java b/java-storage/storage-shared-benchmarking/src/main/java/com/google/cloud/storage/benchmarking/StorageSharedBenchmarkingUtils.java new file mode 100644 index 000000000000..8aa8128e0137 --- /dev/null +++ b/java-storage/storage-shared-benchmarking/src/main/java/com/google/cloud/storage/benchmarking/StorageSharedBenchmarkingUtils.java @@ -0,0 +1,50 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.storage.benchmarking; + +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Storage; + +class StorageSharedBenchmarkingUtils { + public static long SSB_SIZE_THRESHOLD_BYTES = 1048576; + public static int DEFAULT_NUMBER_OF_READS = 3; + + public static void cleanupObject(Storage storage, BlobInfo created) { + storage.delete( + created.getBlobId(), Storage.BlobSourceOption.generationMatch(created.getGeneration())); + } + + public static CloudMonitoringResult generateCloudMonitoringResult( + String op, double latency, BlobInfo created, String api, int workers) { + CloudMonitoringResult result = + CloudMonitoringResult.newBuilder() + .setLibrary("java") + .setApi(api) + .setOp(op) + .setWorkers(workers) + .setObjectSize(created.getSize().intValue()) + .setChunksize(created.getSize().intValue()) + .setCrc32cEnabled(false) + .setMd5Enabled(false) + .setCpuTimeUs(-1) + .setBucketName(created.getBucket()) + .setStatus("OK") + .setTransferSize(created.getSize().toString()) + .setLatency(latency) + .build(); + return result; + } +} diff --git a/java-storage/storage-shared-benchmarking/src/main/java/com/google/cloud/storage/benchmarking/W1R3.java b/java-storage/storage-shared-benchmarking/src/main/java/com/google/cloud/storage/benchmarking/W1R3.java new file mode 100644 index 000000000000..4d23470078c7 --- /dev/null +++ b/java-storage/storage-shared-benchmarking/src/main/java/com/google/cloud/storage/benchmarking/W1R3.java @@ -0,0 +1,115 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the 
Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage.benchmarking; + +import static com.google.cloud.storage.benchmarking.StorageSharedBenchmarkingUtils.generateCloudMonitoringResult; + +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.TmpFile; +import java.io.PrintWriter; +import java.nio.file.Path; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.Callable; + +final class W1R3 implements Callable { + + private final Storage storage; + private final int workers; + private final String api; + private final PrintWriter printWriter; + private final int objectSize; + private final Path tempDirectory; + private final String bucketName; + private final boolean isWarmup; + + W1R3( + Storage storage, + int workers, + String api, + PrintWriter printWriter, + int objectSize, + Path tempDirectory, + String bucketName, + boolean isWarmup) { + this.storage = storage; + this.workers = workers; + this.api = api; + this.printWriter = printWriter; + this.objectSize = objectSize; + this.tempDirectory = tempDirectory; + this.bucketName = bucketName; + this.isWarmup = isWarmup; + } + + @Override + public String call() { + // Create the file to be uploaded and fill it with data + + try (TmpFile file = DataGenerator.base64Characters().tempFile(tempDirectory, 
objectSize)) { + BlobInfo blob = BlobInfo.newBuilder(bucketName, file.toString()).build(); + // Get the start time + Clock clock = Clock.systemDefaultZone(); + Instant startTime = clock.instant(); + Blob created = storage.createFrom(blob, file.getPath()); + Instant endTime = clock.instant(); + Duration elapsedTimeUpload = Duration.between(startTime, endTime); + printResult("WRITE", created, elapsedTimeUpload); + for (int i = 0; i <= StorageSharedBenchmarkingUtils.DEFAULT_NUMBER_OF_READS; i++) { + try (TmpFile dest = TmpFile.of(tempDirectory, "prefix", "bin")) { + startTime = clock.instant(); + storage.downloadTo(created.getBlobId(), dest.getPath()); + endTime = clock.instant(); + Duration elapsedTimeDownload = Duration.between(startTime, endTime); + printResult("READ[" + i + "]", created, elapsedTimeDownload); + } + } + StorageSharedBenchmarkingUtils.cleanupObject(storage, created.asBlobInfo()); + } catch (Exception e) { + CloudMonitoringResult result = + CloudMonitoringResult.newBuilder() + .setLibrary("java") + .setApi(api) + .setOp("W1R3") + .setWorkers(workers) + .setObjectSize(-1) + .setChunksize(-1) + .setCrc32cEnabled(false) + .setMd5Enabled(false) + .setCpuTimeUs(-1) + .setBucketName("") + .setStatus("FAIL") + .setTransferSize("") + .setLatency(0) + .build(); + printWriter.println(result.formatAsCustomMetric()); + } + return "OK"; + } + + private void printResult(String op, Blob created, Duration duration) { + if (!isWarmup) { + printWriter.println( + generateCloudMonitoringResult(op, duration.toMillis(), created.asBlobInfo(), api, workers) + .formatAsCustomMetric()); + } + } +} diff --git a/pom.xml b/pom.xml index 76fbb03a2f1f..72449dca7bdf 100644 --- a/pom.xml +++ b/pom.xml @@ -223,6 +223,7 @@ java-shopping-merchant-reviews java-spanneradapter java-speech + java-storage java-storage-transfer java-storagebatchoperations java-storageinsights diff --git a/versions.txt b/versions.txt index 3edfa9233968..d2eed36ee06e 100644 --- a/versions.txt +++ 
b/versions.txt @@ -976,3 +976,10 @@ proto-google-cloud-vectorsearch-v1:0.8.0:0.9.0-SNAPSHOT grpc-google-cloud-vectorsearch-v1:0.8.0:0.9.0-SNAPSHOT google-cloud-bigquery:2.61.0:2.62.0-SNAPSHOT google-cloud-bigquery-jdbc:0.5.0:0.6.0-SNAPSHOT +google-cloud-storage:2.64.0:2.64.1-SNAPSHOT +gapic-google-cloud-storage-v2:2.64.0:2.64.1-SNAPSHOT +grpc-google-cloud-storage-v2:2.64.0:2.64.1-SNAPSHOT +proto-google-cloud-storage-v2:2.64.0:2.64.1-SNAPSHOT +google-cloud-storage-control:2.64.0:2.64.1-SNAPSHOT +proto-google-cloud-storage-control-v2:2.64.0:2.64.1-SNAPSHOT +grpc-google-cloud-storage-control-v2:2.64.0:2.64.1-SNAPSHOT